-rw-r--r--  CREDITS | 2
-rw-r--r--  Documentation/connector/ucon.c | 206
-rw-r--r--  Documentation/cpusets.txt | 6
-rw-r--r--  Documentation/filesystems/00-INDEX | 4
-rw-r--r--  Documentation/filesystems/relay.txt | 479
-rw-r--r--  Documentation/filesystems/relayfs.txt | 442
-rw-r--r--  Documentation/input/joystick.txt | 1
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid | 123
-rw-r--r--  Documentation/sysctl/fs.txt | 20
-rw-r--r--  Documentation/sysctl/kernel.txt | 20
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/common/dmabounce.c | 8
-rw-r--r--  arch/arm/kernel/entry-armv.S | 21
-rw-r--r--  arch/arm/kernel/head.S | 2
-rw-r--r--  arch/arm/mach-s3c2410/Makefile | 36
-rw-r--r--  arch/arm/mach-s3c2410/dma.c | 163
-rw-r--r--  arch/arm/mach-versatile/core.c | 2
-rw-r--r--  arch/i386/Kconfig | 4
-rw-r--r--  arch/i386/kernel/acpi/boot.c | 2
-rw-r--r--  arch/i386/kernel/acpi/wakeup.S | 5
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | 9
-rw-r--r--  arch/i386/pci/init.c | 8
-rw-r--r--  arch/i386/pci/mmconfig.c | 2
-rw-r--r--  arch/ia64/hp/sim/simscsi.c | 3
-rw-r--r--  arch/ia64/kernel/acpi.c | 2
-rw-r--r--  arch/powerpc/boot/dts/mpc8540ads.dts | 257
-rw-r--r--  arch/powerpc/boot/dts/mpc8541cds.dts | 244
-rw-r--r--  arch/powerpc/boot/dts/mpc8548cds.dts | 287
-rw-r--r--  arch/powerpc/boot/dts/mpc8555cds.dts | 244
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c | 8
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 13
-rw-r--r--  arch/powerpc/kernel/time.c | 25
-rw-r--r--  arch/powerpc/kernel/traps.c | 8
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c | 162
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c | 210
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8641_hpcn.h | 32
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | 324
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 73
-rw-r--r--  arch/powerpc/platforms/powermac/bootx_init.c | 15
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c | 30
-rw-r--r--  arch/powerpc/sysdev/tsi108_dev.c | 10
-rw-r--r--  arch/powerpc/sysdev/tsi108_pci.c | 21
-rw-r--r--  arch/sparc/kernel/setup.c | 4
-rw-r--r--  arch/sparc/kernel/smp.c | 1
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 2
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 2
-rw-r--r--  block/cfq-iosched.c | 2
-rw-r--r--  block/elevator.c | 3
-rw-r--r--  block/ll_rw_blk.c | 2
-rw-r--r--  drivers/acpi/ac.c | 2
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 8
-rw-r--r--  drivers/acpi/battery.c | 3
-rw-r--r--  drivers/acpi/bus.c | 11
-rw-r--r--  drivers/acpi/hotkey.c | 281
-rw-r--r--  drivers/acpi/i2c_ec.c | 2
-rw-r--r--  drivers/acpi/osl.c | 10
-rw-r--r--  drivers/acpi/sbs.c | 3
-rw-r--r--  drivers/acpi/scan.c | 12
-rw-r--r--  drivers/acpi/utils.c | 2
-rw-r--r--  drivers/base/node.c | 2
-rw-r--r--  drivers/cdrom/gscd.c | 2
-rw-r--r--  drivers/char/moxa.c | 8
-rw-r--r--  drivers/char/tty_io.c | 808
-rw-r--r--  drivers/char/tty_ioctl.c | 59
-rw-r--r--  drivers/char/vt_ioctl.c | 2
-rw-r--r--  drivers/hwmon/abituguru.c | 99
-rw-r--r--  drivers/i2c/chips/tps65010.c | 12
-rw-r--r--  drivers/ieee1394/ohci1394.c | 4
-rw-r--r--  drivers/infiniband/core/cache.c | 3
-rw-r--r--  drivers/infiniband/core/sa_query.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 11
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 54
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 22
-rw-r--r--  drivers/input/keyboard/atkbd.c | 2
-rw-r--r--  drivers/input/misc/wistron_btns.c | 16
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 7
-rw-r--r--  drivers/md/dm-raid1.c | 4
-rw-r--r--  drivers/md/md.c | 13
-rw-r--r--  drivers/md/raid1.c | 7
-rw-r--r--  drivers/message/fusion/mptbase.h | 1
-rw-r--r--  drivers/message/fusion/mptfc.c | 92
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 10
-rw-r--r--  drivers/mtd/nand/nand_base.c | 6
-rw-r--r--  drivers/pci/hotplug/Kconfig | 2
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c | 54
-rw-r--r--  drivers/pci/pci-driver.c | 3
-rw-r--r--  drivers/pci/quirks.c | 59
-rw-r--r--  drivers/rtc/rtc-s3c.c | 124
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 8
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 14
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 120
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 15
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 212
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 9
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 122
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 79
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 73
-rw-r--r--  drivers/scsi/ata_piix.c | 84
-rw-r--r--  drivers/scsi/esp.c | 3
-rw-r--r--  drivers/scsi/hptiop.c | 568
-rw-r--r--  drivers/scsi/ide-scsi.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 209
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 2
-rw-r--r--  drivers/scsi/libata-core.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 214
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 101
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 24
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 57
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_ioctl.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 42
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.h | 4
-rw-r--r--  drivers/scsi/pdc_adma.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/sata_via.c | 117
-rw-r--r--  drivers/scsi/scsi_error.c | 18
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 15
-rw-r--r--  drivers/scsi/sg.c | 8
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 2
-rw-r--r--  drivers/serial/sunsab.c | 9
-rw-r--r--  drivers/serial/sunzilog.c | 3
-rw-r--r--  drivers/usb/misc/cypress_cy7c63.c | 2
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 4
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 2
-rw-r--r--  drivers/video/imacfb.c | 4
-rw-r--r--  drivers/video/matrox/g450_pll.c | 8
-rw-r--r--  fs/block_dev.c | 114
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/exec.c | 10
-rw-r--r--  fs/ext2/super.c | 2
-rw-r--r--  fs/ext3/balloc.c | 6
-rw-r--r--  fs/ioprio.c | 30
-rw-r--r--  fs/jbd/commit.c | 6
-rw-r--r--  fs/jbd/journal.c | 92
-rw-r--r--  fs/jbd/transaction.c | 9
-rw-r--r--  fs/lockd/svcsubs.c | 15
-rw-r--r--  fs/minix/inode.c | 13
-rw-r--r--  fs/namei.c | 11
-rw-r--r--  fs/nfs/file.c | 8
-rw-r--r--  fs/nfs/idmap.c | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 29
-rw-r--r--  fs/nfs/nfs4xdr.c | 21
-rw-r--r--  fs/nfs/read.c | 23
-rw-r--r--  fs/partitions/sun.c | 2
-rw-r--r--  fs/proc/proc_misc.c | 2
-rw-r--r--  fs/reiserfs/xattr.c | 2
-rw-r--r--  fs/udf/super.c | 2
-rw-r--r--  fs/udf/truncate.c | 64
-rw-r--r--  fs/ufs/inode.c | 35
-rw-r--r--  fs/ufs/truncate.c | 77
-rw-r--r--  include/asm-arm/arch-s3c2410/dma.h | 1
-rw-r--r--  include/asm-arm/arch-s3c2410/regs-rtc.h | 2
-rw-r--r--  include/asm-arm/procinfo.h | 1
-rw-r--r--  include/asm-i386/mmzone.h | 2
-rw-r--r--  include/asm-powerpc/pgalloc.h | 2
-rw-r--r--  include/asm-powerpc/system.h | 9
-rw-r--r--  include/asm-powerpc/tsi108.h | 14
-rw-r--r--  include/asm-powerpc/tsi108_irq.h | 124
-rw-r--r--  include/asm-sparc64/pgtable.h | 2
-rw-r--r--  include/linux/compat_ioctl.h | 1
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/ioprio.h | 23
-rw-r--r--  include/linux/jbd.h | 3
-rw-r--r--  include/linux/netfilter_bridge.h | 14
-rw-r--r--  include/linux/nfs_xdr.h | 2
-rw-r--r--  include/linux/node.h | 10
-rw-r--r--  include/linux/sunrpc/rpc_pipe_fs.h | 4
-rw-r--r--  include/linux/sunrpc/xprt.h | 2
-rw-r--r--  include/linux/tty.h | 1
-rw-r--r--  include/linux/vt.h | 1
-rw-r--r--  include/net/sctp/sctp.h | 13
-rw-r--r--  include/net/sctp/sm.h | 3
-rw-r--r--  include/scsi/libiscsi.h | 19
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 4
-rw-r--r--  kernel/cpuset.c | 35
-rw-r--r--  kernel/futex.c | 2
-rw-r--r--  kernel/sched.c | 4
-rw-r--r--  kernel/stop_machine.c | 1
-rw-r--r--  lib/ts_bm.c | 11
-rw-r--r--  mm/swapfile.c | 3
-rw-r--r--  net/bridge/br_forward.c | 10
-rw-r--r--  net/dccp/ccids/ccid3.c | 153
-rw-r--r--  net/dccp/ccids/ccid3.h | 9
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c | 36
-rw-r--r--  net/dccp/ccids/lib/loss_interval.h | 9
-rw-r--r--  net/dccp/ccids/lib/packet_history.c | 168
-rw-r--r--  net/dccp/ccids/lib/packet_history.h | 17
-rw-r--r--  net/dccp/ccids/lib/tfrc.h | 2
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c | 2
-rw-r--r--  net/dccp/dccp.h | 10
-rw-r--r--  net/dccp/options.c | 2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 3
-rw-r--r--  net/ipv4/tcp_output.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/sctp/sm_make_chunk.c | 30
-rw-r--r--  net/sctp/sm_statefuns.c | 20
-rw-r--r--  net/sctp/socket.c | 10
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 3
-rw-r--r--  net/sunrpc/clnt.c | 30
-rw-r--r--  net/sunrpc/rpc_pipe.c | 55
223 files changed, 5830 insertions, 3372 deletions
diff --git a/CREDITS b/CREDITS
index 29be6d1fdf49..0fe904ebb7c7 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2209,7 +2209,7 @@ S: (address available on request)
 S: USA
 
 N: Ian McDonald
-E: iam4@cs.waikato.ac.nz
+E: ian.mcdonald@jandi.co.nz
 E: imcdnzl@gmail.com
 W: http://wand.net.nz/~iam4
 W: http://imcdnzl.blogspot.com
diff --git a/Documentation/connector/ucon.c b/Documentation/connector/ucon.c
new file mode 100644
index 000000000000..d738cde2a8d5
--- /dev/null
+++ b/Documentation/connector/ucon.c
@@ -0,0 +1,206 @@
1/*
2 * ucon.c
3 *
4 * Copyright (c) 2004+ Evgeniy Polyakov <johnpol@2ka.mipt.ru>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <asm/types.h>
23
24#include <sys/types.h>
25#include <sys/socket.h>
26#include <sys/poll.h>
27
28#include <linux/netlink.h>
29#include <linux/rtnetlink.h>
30
31#include <arpa/inet.h>
32
33#include <stdio.h>
34#include <stdlib.h>
35#include <unistd.h>
36#include <string.h>
37#include <errno.h>
38#include <time.h>
39
40#include <linux/connector.h>
41
42#define DEBUG
43#define NETLINK_CONNECTOR 11
44
45#ifdef DEBUG
46#define ulog(f, a...) fprintf(stdout, f, ##a)
47#else
48#define ulog(f, a...) do {} while (0)
49#endif
50
51static int need_exit;
52static __u32 seq;
53
54static int netlink_send(int s, struct cn_msg *msg)
55{
56 struct nlmsghdr *nlh;
57 unsigned int size;
58 int err;
59 char buf[128];
60 struct cn_msg *m;
61
62 size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);
63
64 nlh = (struct nlmsghdr *)buf;
65 nlh->nlmsg_seq = seq++;
66 nlh->nlmsg_pid = getpid();
67 nlh->nlmsg_type = NLMSG_DONE;
68 nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
69 nlh->nlmsg_flags = 0;
70
71 m = NLMSG_DATA(nlh);
72#if 0
73 ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n",
74 __func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack);
75#endif
76 memcpy(m, msg, sizeof(*m) + msg->len);
77
78 err = send(s, nlh, size, 0);
79 if (err == -1)
80 ulog("Failed to send: %s [%d].\n",
81 strerror(errno), errno);
82
83 return err;
84}
85
86int main(int argc, char *argv[])
87{
88 int s;
89 char buf[1024];
90 int len;
91 struct nlmsghdr *reply;
92 struct sockaddr_nl l_local;
93 struct cn_msg *data;
94 FILE *out;
95 time_t tm;
96 struct pollfd pfd;
97
98 if (argc < 2)
99 out = stdout;
100 else {
101 out = fopen(argv[1], "a+");
102 if (!out) {
103 ulog("Unable to open %s for writing: %s\n",
104 argv[1], strerror(errno));
105 out = stdout;
106 }
107 }
108
109 memset(buf, 0, sizeof(buf));
110
111 s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
112 if (s == -1) {
113 perror("socket");
114 return -1;
115 }
116
117 l_local.nl_family = AF_NETLINK;
118 l_local.nl_groups = 0x123; /* bitmask of requested groups */
119 l_local.nl_pid = 0;
120
121 if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) {
122 perror("bind");
123 close(s);
124 return -1;
125 }
126
127#if 0
128 {
129 int on = 0x57; /* Additional group number */
130 setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on));
131 }
132#endif
133 if (0) {
134 int i, j;
135
136 memset(buf, 0, sizeof(buf));
137
138 data = (struct cn_msg *)buf;
139
140 data->id.idx = 0x123;
141 data->id.val = 0x456;
142 data->seq = seq++;
143 data->ack = 0;
144 data->len = 0;
145
146 for (j=0; j<10; ++j) {
147 for (i=0; i<1000; ++i) {
148 len = netlink_send(s, data);
149 }
150
151 ulog("%d messages have been sent to %08x.%08x.\n", i, data->id.idx, data->id.val);
152 }
153
154 return 0;
155 }
156
157
158 pfd.fd = s;
159
160 while (!need_exit) {
161 pfd.events = POLLIN;
162 pfd.revents = 0;
163 switch (poll(&pfd, 1, -1)) {
164 case 0:
165 need_exit = 1;
166 break;
167 case -1:
168 if (errno != EINTR) {
169 need_exit = 1;
170 break;
171 }
172 continue;
173 }
174 if (need_exit)
175 break;
176
177 memset(buf, 0, sizeof(buf));
178 len = recv(s, buf, sizeof(buf), 0);
179 if (len == -1) {
180 perror("recv buf");
181 close(s);
182 return -1;
183 }
184 reply = (struct nlmsghdr *)buf;
185
186 switch (reply->nlmsg_type) {
187 case NLMSG_ERROR:
188 fprintf(out, "Error message received.\n");
189 fflush(out);
190 break;
191 case NLMSG_DONE:
192 data = (struct cn_msg *)NLMSG_DATA(reply);
193
194 time(&tm);
195 fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n",
196 ctime(&tm), data->id.idx, data->id.val, data->seq, data->ack);
197 fflush(out);
198 break;
199 default:
200 break;
201 }
202 }
203
204 close(s);
205 return 0;
206}
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index 159e2a0c3e80..76b44290c154 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -217,6 +217,12 @@ exclusive cpuset. Also, the use of a Linux virtual file system (vfs)
 to represent the cpuset hierarchy provides for a familiar permission
 and name space for cpusets, with a minimum of additional kernel code.
 
+The cpus file in the root (top_cpuset) cpuset is read-only.
+It automatically tracks the value of cpu_online_map, using a CPU
+hotplug notifier.  If and when memory nodes can be hotplugged,
+we expect to make the mems file in the root cpuset read-only
+as well, and have it track the value of node_online_map.
+
 
 1.4 What are exclusive cpusets ?
 --------------------------------
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 66fdc0744fe0..16dec61d7671 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -62,8 +62,8 @@ ramfs-rootfs-initramfs.txt
 	- info on the 'in memory' filesystems ramfs, rootfs and initramfs.
 reiser4.txt
 	- info on the Reiser4 filesystem based on dancing tree algorithms.
-relayfs.txt
-	- info on relayfs, for efficient streaming from kernel to user space.
+relay.txt
+	- info on relay, for efficient streaming from kernel to user space.
 romfs.txt
 	- description of the ROMFS filesystem.
 smbfs.txt
diff --git a/Documentation/filesystems/relay.txt b/Documentation/filesystems/relay.txt
new file mode 100644
index 000000000000..d6788dae0349
--- /dev/null
+++ b/Documentation/filesystems/relay.txt
@@ -0,0 +1,479 @@
1relay interface (formerly relayfs)
2==================================
3
4The relay interface provides a means for kernel applications to
5efficiently log and transfer large quantities of data from the kernel
6to userspace via user-defined 'relay channels'.
7
8A 'relay channel' is a kernel->user data relay mechanism implemented
9as a set of per-cpu kernel buffers ('channel buffers'), each
10represented as a regular file ('relay file') in user space. Kernel
11clients write into the channel buffers using efficient write
12functions; these automatically log into the current cpu's channel
13buffer. User space applications mmap() or read() from the relay files
14and retrieve the data as it becomes available. The relay files
15themselves are files created in a host filesystem, e.g. debugfs, and
16are associated with the channel buffers using the API described below.
17
18The format of the data logged into the channel buffers is completely
19up to the kernel client; the relay interface does however provide
20hooks which allow kernel clients to impose some structure on the
21buffer data. The relay interface doesn't implement any form of data
22filtering - this also is left to the kernel client. The purpose is to
23keep things as simple as possible.
24
25This document provides an overview of the relay interface API. The
26details of the function parameters are documented along with the
27functions in the relay interface code - please see that for details.
28
29Semantics
30=========
31
32Each relay channel has one buffer per CPU, each buffer has one or more
33sub-buffers. Messages are written to the first sub-buffer until it is
 34too full to contain a new message, in which case it is written to
35the next (if available). Messages are never split across sub-buffers.
36At this point, userspace can be notified so it empties the first
37sub-buffer, while the kernel continues writing to the next.
38
39When notified that a sub-buffer is full, the kernel knows how many
40bytes of it are padding i.e. unused space occurring because a complete
41message couldn't fit into a sub-buffer. Userspace can use this
42knowledge to copy only valid data.
43
44After copying it, userspace can notify the kernel that a sub-buffer
45has been consumed.
46
47A relay channel can operate in a mode where it will overwrite data not
48yet collected by userspace, and not wait for it to be consumed.
49
50The relay channel itself does not provide for communication of such
51data between userspace and kernel, allowing the kernel side to remain
52simple and not impose a single interface on userspace. It does
53provide a set of examples and a separate helper though, described
54below.
55
56The read() interface both removes padding and internally consumes the
57read sub-buffers; thus in cases where read(2) is being used to drain
58the channel buffers, special-purpose communication between kernel and
59user isn't necessary for basic operation.
60
61One of the major goals of the relay interface is to provide a low
62overhead mechanism for conveying kernel data to userspace. While the
63read() interface is easy to use, it's not as efficient as the mmap()
64approach; the example code attempts to make the tradeoff between the
65two approaches as small as possible.
66
67klog and relay-apps example code
68================================
69
70The relay interface itself is ready to use, but to make things easier,
71a couple simple utility functions and a set of examples are provided.
72
73The relay-apps example tarball, available on the relay sourceforge
74site, contains a set of self-contained examples, each consisting of a
75pair of .c files containing boilerplate code for each of the user and
76kernel sides of a relay application. When combined these two sets of
77boilerplate code provide glue to easily stream data to disk, without
78having to bother with mundane housekeeping chores.
79
80The 'klog debugging functions' patch (klog.patch in the relay-apps
81tarball) provides a couple of high-level logging functions to the
82kernel which allow writing formatted text or raw data to a channel,
83regardless of whether a channel to write into exists or not, or even
84whether the relay interface is compiled into the kernel or not. These
85functions allow you to put unconditional 'trace' statements anywhere
86in the kernel or kernel modules; only when there is a 'klog handler'
87registered will data actually be logged (see the klog and kleak
88examples for details).
89
90It is of course possible to use the relay interface from scratch,
91i.e. without using any of the relay-apps example code or klog, but
92you'll have to implement communication between userspace and kernel,
93allowing both to convey the state of buffers (full, empty, amount of
94padding). The read() interface both removes padding and internally
95consumes the read sub-buffers; thus in cases where read(2) is being
96used to drain the channel buffers, special-purpose communication
97between kernel and user isn't necessary for basic operation. Things
98such as buffer-full conditions would still need to be communicated via
99some channel though.
100
101klog and the relay-apps examples can be found in the relay-apps
102tarball on http://relayfs.sourceforge.net
103
104The relay interface user space API
105==================================
106
107The relay interface implements basic file operations for user space
108access to relay channel buffer data. Here are the file operations
109that are available and some comments regarding their behavior:
110
111open() enables user to open an _existing_ channel buffer.
112
113mmap() results in channel buffer being mapped into the caller's
114 memory space. Note that you can't do a partial mmap - you
115 must map the entire file, which is NRBUF * SUBBUFSIZE.
116
117read() read the contents of a channel buffer. The bytes read are
118 'consumed' by the reader, i.e. they won't be available
119 again to subsequent reads. If the channel is being used
120 in no-overwrite mode (the default), it can be read at any
121 time even if there's an active kernel writer. If the
122 channel is being used in overwrite mode and there are
123 active channel writers, results may be unpredictable -
124 users should make sure that all logging to the channel has
125 ended before using read() with overwrite mode. Sub-buffer
126 padding is automatically removed and will not be seen by
127 the reader.
128
129sendfile() transfer data from a channel buffer to an output file
130 descriptor. Sub-buffer padding is automatically removed
131 and will not be seen by the reader.
132
133poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
134 notified when sub-buffer boundaries are crossed.
135
136close() decrements the channel buffer's refcount. When the refcount
137 reaches 0, i.e. when no process or kernel client has the
138 buffer open, the channel buffer is freed.
139
140In order for a user application to make use of relay files, the
141host filesystem must be mounted. For example,
142
143 mount -t debugfs debugfs /debug
144
145NOTE: the host filesystem doesn't need to be mounted for kernel
146 clients to create or use channels - it only needs to be
147 mounted when user space applications need access to the buffer
148 data.
149
150
151The relay interface kernel API
152==============================
153
154Here's a summary of the API the relay interface provides to in-kernel clients:
155
156
157 channel management functions:
158
159 relay_open(base_filename, parent, subbuf_size, n_subbufs,
160 callbacks)
161 relay_close(chan)
162 relay_flush(chan)
163 relay_reset(chan)
164
165 channel management typically called on instigation of userspace:
166
167 relay_subbufs_consumed(chan, cpu, subbufs_consumed)
168
169 write functions:
170
171 relay_write(chan, data, length)
172 __relay_write(chan, data, length)
173 relay_reserve(chan, length)
174
175 callbacks:
176
177 subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
178 buf_mapped(buf, filp)
179 buf_unmapped(buf, filp)
180 create_buf_file(filename, parent, mode, buf, is_global)
181 remove_buf_file(dentry)
182
183 helper functions:
184
185 relay_buf_full(buf)
186 subbuf_start_reserve(buf, length)
187
188
189Creating a channel
190------------------
191
192relay_open() is used to create a channel, along with its per-cpu
193channel buffers. Each channel buffer will have an associated file
194created for it in the host filesystem, which can be mmapped or
195read from in user space. The files are named basename0...basenameN-1
196where N is the number of online cpus, and by default will be created
197in the root of the filesystem (if the parent param is NULL). If you
198want a directory structure to contain your relay files, you should
199create it using the host filesystem's directory creation function,
200e.g. debugfs_create_dir(), and pass the parent directory to
201relay_open(). Users are responsible for cleaning up any directory
202structure they create, when the channel is closed - again the host
203filesystem's directory removal functions should be used for that,
204e.g. debugfs_remove().
205
206In order for a channel to be created and the host filesystem's files
207associated with its channel buffers, the user must provide definitions
208for two callback functions, create_buf_file() and remove_buf_file().
209create_buf_file() is called once for each per-cpu buffer from
210relay_open() and allows the user to create the file which will be used
211to represent the corresponding channel buffer. The callback should
212return the dentry of the file created to represent the channel buffer.
213remove_buf_file() must also be defined; it's responsible for deleting
214the file(s) created in create_buf_file() and is called during
215relay_close().
216
217Here are some typical definitions for these callbacks, in this case
218using debugfs:
219
220/*
221 * create_buf_file() callback. Creates relay file in debugfs.
222 */
223static struct dentry *create_buf_file_handler(const char *filename,
224 struct dentry *parent,
225 int mode,
226 struct rchan_buf *buf,
227 int *is_global)
228{
229 return debugfs_create_file(filename, mode, parent, buf,
230 &relay_file_operations);
231}
232
233/*
234 * remove_buf_file() callback. Removes relay file from debugfs.
235 */
236static int remove_buf_file_handler(struct dentry *dentry)
237{
238 debugfs_remove(dentry);
239
240 return 0;
241}
242
243/*
244 * relay interface callbacks
245 */
246static struct rchan_callbacks relay_callbacks =
247{
248 .create_buf_file = create_buf_file_handler,
249 .remove_buf_file = remove_buf_file_handler,
250};
251
252And an example relay_open() invocation using them:
253
254 chan = relay_open("cpu", NULL, SUBBUF_SIZE, N_SUBBUFS, &relay_callbacks);
255
256If the create_buf_file() callback fails, or isn't defined, channel
257creation and thus relay_open() will fail.
258
259The total size of each per-cpu buffer is calculated by multiplying the
260number of sub-buffers by the sub-buffer size passed into relay_open().
261The idea behind sub-buffers is that they're basically an extension of
262double-buffering to N buffers, and they also allow applications to
263easily implement random-access-on-buffer-boundary schemes, which can
264be important for some high-volume applications. The number and size
265of sub-buffers is completely dependent on the application and even for
266the same application, different conditions will warrant different
267values for these parameters at different times. Typically, the right
268values to use are best decided after some experimentation; in general,
269though, it's safe to assume that having only 1 sub-buffer is a bad
270idea - you're guaranteed to either overwrite data or lose events
271depending on the channel mode being used.
272
273The create_buf_file() implementation can also be defined in such a way
274as to allow the creation of a single 'global' buffer instead of the
275default per-cpu set. This can be useful for applications interested
276mainly in seeing the relative ordering of system-wide events without
277the need to bother with saving explicit timestamps for the purpose of
278merging/sorting per-cpu files in a postprocessing step.
279
280To have relay_open() create a global buffer, the create_buf_file()
281implementation should set the value of the is_global outparam to a
282non-zero value in addition to creating the file that will be used to
283represent the single buffer. In the case of a global buffer,
284create_buf_file() and remove_buf_file() will be called only once. The
285normal channel-writing functions, e.g. relay_write(), can still be
286used - writes from any cpu will transparently end up in the global
287buffer - but since it is a global buffer, callers should make sure
288they use the proper locking for such a buffer, either by wrapping
289writes in a spinlock, or by copying a write function from relay.h and
290creating a local version that internally does the proper locking.
291
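To make that concrete, a create_buf_file() implementation requesting a
single global buffer could look something like the following sketch,
which simply extends the debugfs example shown earlier:

static struct dentry *create_buf_file_handler(const char *filename,
					      struct dentry *parent,
					      int mode,
					      struct rchan_buf *buf,
					      int *is_global)
{
	/* request one system-wide buffer instead of the per-cpu set */
	*is_global = 1;

	/* this single file represents the whole channel */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}
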
292Channel 'modes'
293---------------
294
295relay channels can be used in either of two modes - 'overwrite' or
296'no-overwrite'. The mode is entirely determined by the implementation
297of the subbuf_start() callback, as described below. The default if no
298subbuf_start() callback is defined is 'no-overwrite' mode. If the
299default mode suits your needs, and you plan to use the read()
300interface to retrieve channel data, you can ignore the details of this
301section, as it pertains mainly to mmap() implementations.
302
303In 'overwrite' mode, also known as 'flight recorder' mode, writes
304continuously cycle around the buffer and will never fail, but will
305unconditionally overwrite old data regardless of whether it's actually
306been consumed. In no-overwrite mode, writes will fail, i.e. data will
307be lost, if the number of unconsumed sub-buffers equals the total
308number of sub-buffers in the channel. It should be clear that if
309there is no consumer or if the consumer can't consume sub-buffers fast
310enough, data will be lost in either case; the only difference is
311whether data is lost from the beginning or the end of a buffer.
312
313As explained above, a relay channel is made up of one or more
314per-cpu channel buffers, each implemented as a circular buffer
315subdivided into one or more sub-buffers. Messages are written into
316the current sub-buffer of the channel's current per-cpu buffer via the
317write functions described below. Whenever a message can't fit into
318the current sub-buffer, because there's no room left for it, the
319client is notified via the subbuf_start() callback that a switch to a
320new sub-buffer is about to occur. The client uses this callback to 1)
321initialize the next sub-buffer if appropriate 2) finalize the previous
322sub-buffer if appropriate and 3) return a boolean value indicating
323whether or not to actually move on to the next sub-buffer.
324
325To implement 'no-overwrite' mode, the userspace client would provide
326an implementation of the subbuf_start() callback something like the
327following:
328
329static int subbuf_start(struct rchan_buf *buf,
330 void *subbuf,
331 void *prev_subbuf,
332 unsigned int prev_padding)
333{
334 if (prev_subbuf)
335 *((unsigned *)prev_subbuf) = prev_padding;
336
337 if (relay_buf_full(buf))
338 return 0;
339
340 subbuf_start_reserve(buf, sizeof(unsigned int));
341
342 return 1;
343}
344
345If the current buffer is full, i.e. all sub-buffers remain unconsumed,
346the callback returns 0 to indicate that the buffer switch should not
347occur yet, i.e. until the consumer has had a chance to read the
348current set of ready sub-buffers. For the relay_buf_full() function
349to make sense, the consumer is responsible for notifying the relay
350interface when sub-buffers have been consumed via
351relay_subbufs_consumed(). Any subsequent attempts to write into the
352buffer will again invoke the subbuf_start() callback with the same
353parameters; only when the consumer has consumed one or more of the
354ready sub-buffers will relay_buf_full() return 0, in which case the
355buffer switch can continue.
356
357The implementation of the subbuf_start() callback for 'overwrite' mode
358would be very similar:
359
360static int subbuf_start(struct rchan_buf *buf,
361 void *subbuf,
362 void *prev_subbuf,
363 unsigned int prev_padding)
364{
365 if (prev_subbuf)
366 *((unsigned *)prev_subbuf) = prev_padding;
367
368 subbuf_start_reserve(buf, sizeof(unsigned int));
369
370 return 1;
371}
372
373In this case, the relay_buf_full() check is meaningless and the
374callback always returns 1, causing the buffer switch to occur
375unconditionally. It's also meaningless for the client to use the
376relay_subbufs_consumed() function in this mode, as it's never
377consulted.
378
379The default subbuf_start() implementation, used if the client doesn't
380define any callbacks, or doesn't define the subbuf_start() callback,
381implements the simplest possible 'no-overwrite' mode, i.e. it does
382nothing but return 0.
383
384Header information can be reserved at the beginning of each sub-buffer
385by calling the subbuf_start_reserve() helper function from within the
386subbuf_start() callback. This reserved area can be used to store
387whatever information the client wants. In the example above, room is
388reserved in each sub-buffer to store the padding count for that
389sub-buffer. This is filled in for the previous sub-buffer in the
390subbuf_start() implementation; the padding value for the previous
391sub-buffer is passed into the subbuf_start() callback along with a
392pointer to the previous sub-buffer, since the padding value isn't
393known until a sub-buffer is filled. The subbuf_start() callback is
394also called for the first sub-buffer when the channel is opened, to
395give the client a chance to reserve space in it. In this case the
396previous sub-buffer pointer passed into the callback will be NULL, so
397the client should check the value of the prev_subbuf pointer before
398writing into the previous sub-buffer.
399
400Writing to a channel
401--------------------
402
403Kernel clients write data into the current cpu's channel buffer using
404relay_write() or __relay_write(). relay_write() is the main logging
405function - it uses local_irqsave() to protect the buffer and should be
406used if you might be logging from interrupt context. If you know
407you'll never be logging from interrupt context, you can use
408__relay_write(), which only disables preemption. These functions
409don't return a value, so you can't determine whether or not they
410failed - the assumption is that you wouldn't want to check a return
411value in the fast logging path anyway, and that they'll always succeed
412unless the buffer is full and no-overwrite mode is being used, in
413which case you can detect a failed write in the subbuf_start()
414callback by calling the relay_buf_full() helper function.
415
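As a short sketch of what such logging might look like (the record
layout and the 'chan' pointer are invented for illustration and would
come from the client's own relay_open() setup):

#include <linux/relay.h>
#include <linux/jiffies.h>

struct my_event {			/* hypothetical record format */
	unsigned long	timestamp;
	unsigned int	value;
};

static struct rchan *chan;		/* created earlier via relay_open() */

static void log_event(unsigned int value)
{
	struct my_event ev = {
		.timestamp	= jiffies,
		.value		= value,
	};

	/* relay_write() protects the buffer with local_irq_save(), so
	   this is safe even when called from interrupt context */
	relay_write(chan, &ev, sizeof(ev));
}
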
416relay_reserve() is used to reserve a slot in a channel buffer which
417can be written to later. This would typically be used in applications
418that need to write directly into a channel buffer without having to
419stage data in a temporary buffer beforehand. Because the actual write
420may not happen immediately after the slot is reserved, applications
421using relay_reserve() can keep a count of the number of bytes actually
422written, either in space reserved in the sub-buffers themselves or as
423a separate array. See the 'reserve' example in the relay-apps tarball
424at http://relayfs.sourceforge.net for an example of how this can be
425done. Because the write is under control of the client and is
426separated from the reserve, relay_reserve() doesn't protect the buffer
427at all - it's up to the client to provide the appropriate
428synchronization when using relay_reserve().
429
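Continuing the hypothetical my_event/chan example from above, a
reserve-then-fill write might look roughly like this; note that any
locking around the fill-in is the client's responsibility:

static void log_event_reserved(unsigned int value)
{
	struct my_event *ev;

	/* reserve room for one record in the current sub-buffer */
	ev = relay_reserve(chan, sizeof(*ev));
	if (!ev)
		return;		/* e.g. buffer full in no-overwrite mode */

	/* the record is filled in after the reserve; relay_reserve()
	   itself does no locking on the client's behalf */
	ev->timestamp	= jiffies;
	ev->value	= value;
}
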
430Closing a channel
431-----------------
432
433The client calls relay_close() when it's finished using the channel.
434The channel and its associated buffers are destroyed when there are no
435longer any references to any of the channel buffers. relay_flush()
436forces a sub-buffer switch on all the channel buffers, and can be used
437to finalize and process the last sub-buffers before the channel is
438closed.
439
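For instance, a typical teardown path (again using the hypothetical
'chan' from the sketches above) might be:

static void my_channel_teardown(void)
{
	/* force out any partially-filled sub-buffers so the consumer
	   can collect the final data, then release our reference */
	relay_flush(chan);
	relay_close(chan);
}
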
440Misc
441----
442
443Some applications may want to keep a channel around and re-use it
444rather than open and close a new channel for each use. relay_reset()
445can be used for this purpose - it resets a channel to its initial
446state without reallocating channel buffer memory or destroying
447existing mappings. It should however only be called when it's safe to
448do so, i.e. when the channel isn't currently being written to.
449
450Finally, there are a couple of utility callbacks that can be used for
451different purposes. buf_mapped() is called whenever a channel buffer
452is mmapped from user space and buf_unmapped() is called when it's
453unmapped. The client can use this notification to trigger actions
454within the kernel application, such as enabling/disabling logging to
455the channel.
456
457
458Resources
459=========
460
461For news, example code, mailing list, etc. see the relay interface homepage:
462
463 http://relayfs.sourceforge.net
464
465
466Credits
467=======
468
469The ideas and specs for the relay interface came about as a result of
470discussions on tracing involving the following:
471
472Michel Dagenais <michel.dagenais@polymtl.ca>
473Richard Moore <richardj_moore@uk.ibm.com>
474Bob Wisniewski <bob@watson.ibm.com>
475Karim Yaghmour <karim@opersys.com>
476Tom Zanussi <zanussi@us.ibm.com>
477
478Also thanks to Hubertus Franke for a lot of useful suggestions and bug
479reports.
diff --git a/Documentation/filesystems/relayfs.txt b/Documentation/filesystems/relayfs.txt
deleted file mode 100644
index 5832377b7340..000000000000
--- a/Documentation/filesystems/relayfs.txt
+++ /dev/null
@@ -1,442 +0,0 @@
1
2relayfs - a high-speed data relay filesystem
3============================================
4
5relayfs is a filesystem designed to provide an efficient mechanism for
6tools and facilities to relay large and potentially sustained streams
7of data from kernel space to user space.
8
9The main abstraction of relayfs is the 'channel'. A channel consists
10of a set of per-cpu kernel buffers each represented by a file in the
11relayfs filesystem. Kernel clients write into a channel using
12efficient write functions which automatically log to the current cpu's
13channel buffer. User space applications mmap() the per-cpu files and
14retrieve the data as it becomes available.
15
16The format of the data logged into the channel buffers is completely
17up to the relayfs client; relayfs does however provide hooks which
18allow clients to impose some structure on the buffer data. Nor does
19relayfs implement any form of data filtering - this also is left to
20the client. The purpose is to keep relayfs as simple as possible.
21
22This document provides an overview of the relayfs API. The details of
23the function parameters are documented along with the functions in the
24filesystem code - please see that for details.
25
26Semantics
27=========
28
29Each relayfs channel has one buffer per CPU, each buffer has one or
30more sub-buffers. Messages are written to the first sub-buffer until
31it is too full to contain a new message, in which case it it is
32written to the next (if available). Messages are never split across
33sub-buffers. At this point, userspace can be notified so it empties
34the first sub-buffer, while the kernel continues writing to the next.
35
36When notified that a sub-buffer is full, the kernel knows how many
37bytes of it are padding i.e. unused. Userspace can use this knowledge
38to copy only valid data.
39
40After copying it, userspace can notify the kernel that a sub-buffer
41has been consumed.
42
43relayfs can operate in a mode where it will overwrite data not yet
44collected by userspace, and not wait for it to consume it.
45
46relayfs itself does not provide for communication of such data between
47userspace and kernel, allowing the kernel side to remain simple and
48not impose a single interface on userspace. It does provide a set of
49examples and a separate helper though, described below.
50
51klog and relay-apps example code
52================================
53
54relayfs itself is ready to use, but to make things easier, a couple
55simple utility functions and a set of examples are provided.
56
57The relay-apps example tarball, available on the relayfs sourceforge
58site, contains a set of self-contained examples, each consisting of a
59pair of .c files containing boilerplate code for each of the user and
60kernel sides of a relayfs application; combined these two sets of
61boilerplate code provide glue to easily stream data to disk, without
62having to bother with mundane housekeeping chores.
63
64The 'klog debugging functions' patch (klog.patch in the relay-apps
65tarball) provides a couple of high-level logging functions to the
66kernel which allow writing formatted text or raw data to a channel,
67regardless of whether a channel to write into exists or not, or
68whether relayfs is compiled into the kernel or is configured as a
69module. These functions allow you to put unconditional 'trace'
70statements anywhere in the kernel or kernel modules; only when there
71is a 'klog handler' registered will data actually be logged (see the
72klog and kleak examples for details).
73
74It is of course possible to use relayfs from scratch i.e. without
75using any of the relay-apps example code or klog, but you'll have to
76implement communication between userspace and kernel, allowing both to
77convey the state of buffers (full, empty, amount of padding).
78
79klog and the relay-apps examples can be found in the relay-apps
80tarball on http://relayfs.sourceforge.net
81
82
83The relayfs user space API
84==========================
85
86relayfs implements basic file operations for user space access to
87relayfs channel buffer data. Here are the file operations that are
88available and some comments regarding their behavior:
89
90open() enables user to open an _existing_ buffer.
91
92mmap() results in channel buffer being mapped into the caller's
93 memory space. Note that you can't do a partial mmap - you must
94 map the entire file, which is NRBUF * SUBBUFSIZE.
95
96read() read the contents of a channel buffer. The bytes read are
97 'consumed' by the reader i.e. they won't be available again
98 to subsequent reads. If the channel is being used in
99 no-overwrite mode (the default), it can be read at any time
100 even if there's an active kernel writer. If the channel is
101 being used in overwrite mode and there are active channel
102 writers, results may be unpredictable - users should make
103 sure that all logging to the channel has ended before using
104 read() with overwrite mode.
105
106poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
107 notified when sub-buffer boundaries are crossed.
108
109close() decrements the channel buffer's refcount. When the refcount
110 reaches 0 i.e. when no process or kernel client has the buffer
111 open, the channel buffer is freed.
112
113
114In order for a user application to make use of relayfs files, the
115relayfs filesystem must be mounted. For example,
116
117 mount -t relayfs relayfs /mnt/relay
118
119NOTE: relayfs doesn't need to be mounted for kernel clients to create
120 or use channels - it only needs to be mounted when user space
121 applications need access to the buffer data.
122
123
124The relayfs kernel API
125======================
126
127Here's a summary of the API relayfs provides to in-kernel clients:
128
129
130 channel management functions:
131
132 relay_open(base_filename, parent, subbuf_size, n_subbufs,
133 callbacks)
134 relay_close(chan)
135 relay_flush(chan)
136 relay_reset(chan)
137 relayfs_create_dir(name, parent)
138 relayfs_remove_dir(dentry)
139 relayfs_create_file(name, parent, mode, fops, data)
140 relayfs_remove_file(dentry)
141
142 channel management typically called on instigation of userspace:
143
144 relay_subbufs_consumed(chan, cpu, subbufs_consumed)
145
146 write functions:
147
148 relay_write(chan, data, length)
149 __relay_write(chan, data, length)
150 relay_reserve(chan, length)
151
152 callbacks:
153
154 subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
155 buf_mapped(buf, filp)
156 buf_unmapped(buf, filp)
157 create_buf_file(filename, parent, mode, buf, is_global)
158 remove_buf_file(dentry)
159
160 helper functions:
161
162 relay_buf_full(buf)
163 subbuf_start_reserve(buf, length)
164
165
166Creating a channel
167------------------
168
169relay_open() is used to create a channel, along with its per-cpu
170channel buffers. Each channel buffer will have an associated file
171created for it in the relayfs filesystem, which can be opened and
172mmapped from user space if desired. The files are named
173basename0...basenameN-1 where N is the number of online cpus, and by
174default will be created in the root of the filesystem. If you want a
175directory structure to contain your relayfs files, you can create it
176with relayfs_create_dir() and pass the parent directory to
177relay_open(). Clients are responsible for cleaning up any directory
178structure they create when the channel is closed - use
179relayfs_remove_dir() for that.
180
181The total size of each per-cpu buffer is calculated by multiplying the
182number of sub-buffers by the sub-buffer size passed into relay_open().
183The idea behind sub-buffers is that they're basically an extension of
184double-buffering to N buffers, and they also allow applications to
185easily implement random-access-on-buffer-boundary schemes, which can
186be important for some high-volume applications. The number and size
187of sub-buffers is completely dependent on the application and even for
188the same application, different conditions will warrant different
189values for these parameters at different times. Typically, the right
190values to use are best decided after some experimentation; in general,
191though, it's safe to assume that having only 1 sub-buffer is a bad
192idea - you're guaranteed to either overwrite data or lose events
193depending on the channel mode being used.
194
195Channel 'modes'
196---------------
197
198relayfs channels can be used in either of two modes - 'overwrite' or
199'no-overwrite'. The mode is entirely determined by the implementation
200of the subbuf_start() callback, as described below. In 'overwrite'
201mode, also known as 'flight recorder' mode, writes continuously cycle
202around the buffer and will never fail, but will unconditionally
203overwrite old data regardless of whether it's actually been consumed.
204In no-overwrite mode, writes will fail i.e. data will be lost, if the
205number of unconsumed sub-buffers equals the total number of
206sub-buffers in the channel. It should be clear that if there is no
207consumer or if the consumer can't consume sub-buffers fast enought,
208data will be lost in either case; the only difference is whether data
209is lost from the beginning or the end of a buffer.
210
211As explained above, a relayfs channel is made of up one or more
212per-cpu channel buffers, each implemented as a circular buffer
213subdivided into one or more sub-buffers. Messages are written into
214the current sub-buffer of the channel's current per-cpu buffer via the
215write functions described below. Whenever a message can't fit into
216the current sub-buffer, because there's no room left for it, the
217client is notified via the subbuf_start() callback that a switch to a
218new sub-buffer is about to occur. The client uses this callback to 1)
219initialize the next sub-buffer if appropriate 2) finalize the previous
220sub-buffer if appropriate and 3) return a boolean value indicating
221whether or not to actually go ahead with the sub-buffer switch.
222
223To implement 'no-overwrite' mode, the userspace client would provide
224an implementation of the subbuf_start() callback something like the
225following:
226
227static int subbuf_start(struct rchan_buf *buf,
228 void *subbuf,
229 void *prev_subbuf,
230 unsigned int prev_padding)
231{
232 if (prev_subbuf)
233 *((unsigned *)prev_subbuf) = prev_padding;
234
235 if (relay_buf_full(buf))
236 return 0;
237
238 subbuf_start_reserve(buf, sizeof(unsigned int));
239
240 return 1;
241}
242
243If the current buffer is full i.e. all sub-buffers remain unconsumed,
244the callback returns 0 to indicate that the buffer switch should not
245occur yet i.e. until the consumer has had a chance to read the current
246set of ready sub-buffers. For the relay_buf_full() function to make
247sense, the consumer is reponsible for notifying relayfs when
248sub-buffers have been consumed via relay_subbufs_consumed(). Any
249subsequent attempts to write into the buffer will again invoke the
250subbuf_start() callback with the same parameters; only when the
251consumer has consumed one or more of the ready sub-buffers will
252relay_buf_full() return 0, in which case the buffer switch can
253continue.
254
255The implementation of the subbuf_start() callback for 'overwrite' mode
256would be very similar:
257
258static int subbuf_start(struct rchan_buf *buf,
259 void *subbuf,
260 void *prev_subbuf,
261 unsigned int prev_padding)
262{
263 if (prev_subbuf)
264 *((unsigned *)prev_subbuf) = prev_padding;
265
266 subbuf_start_reserve(buf, sizeof(unsigned int));
267
268 return 1;
269}
270
271In this case, the relay_buf_full() check is meaningless and the
272callback always returns 1, causing the buffer switch to occur
273unconditionally. It's also meaningless for the client to use the
274relay_subbufs_consumed() function in this mode, as it's never
275consulted.
276
277The default subbuf_start() implementation, used if the client doesn't
278define any callbacks, or doesn't define the subbuf_start() callback,
279implements the simplest possible 'no-overwrite' mode i.e. it does
280nothing but return 0.
281
282Header information can be reserved at the beginning of each sub-buffer
283by calling the subbuf_start_reserve() helper function from within the
284subbuf_start() callback. This reserved area can be used to store
285whatever information the client wants. In the example above, room is
286reserved in each sub-buffer to store the padding count for that
287sub-buffer. This is filled in for the previous sub-buffer in the
288subbuf_start() implementation; the padding value for the previous
289sub-buffer is passed into the subbuf_start() callback along with a
290pointer to the previous sub-buffer, since the padding value isn't
291known until a sub-buffer is filled. The subbuf_start() callback is
292also called for the first sub-buffer when the channel is opened, to
293give the client a chance to reserve space in it. In this case the
294previous sub-buffer pointer passed into the callback will be NULL, so
295the client should check the value of the prev_subbuf pointer before
296writing into the previous sub-buffer.
297
298Writing to a channel
299--------------------
300
301kernel clients write data into the current cpu's channel buffer using
302relay_write() or __relay_write(). relay_write() is the main logging
303function - it uses local_irqsave() to protect the buffer and should be
304used if you might be logging from interrupt context. If you know
305you'll never be logging from interrupt context, you can use
306__relay_write(), which only disables preemption. These functions
307don't return a value, so you can't determine whether or not they
308failed - the assumption is that you wouldn't want to check a return
309value in the fast logging path anyway, and that they'll always succeed
310unless the buffer is full and no-overwrite mode is being used, in
311which case you can detect a failed write in the subbuf_start()
312callback by calling the relay_buf_full() helper function.
313
314relay_reserve() is used to reserve a slot in a channel buffer which
315can be written to later. This would typically be used in applications
316that need to write directly into a channel buffer without having to
317stage data in a temporary buffer beforehand. Because the actual write
318may not happen immediately after the slot is reserved, applications
319using relay_reserve() can keep a count of the number of bytes actually
320written, either in space reserved in the sub-buffers themselves or as
321a separate array. See the 'reserve' example in the relay-apps tarball
322at http://relayfs.sourceforge.net for an example of how this can be
323done. Because the write is under control of the client and is
324separated from the reserve, relay_reserve() doesn't protect the buffer
325at all - it's up to the client to provide the appropriate
326synchronization when using relay_reserve().
327
328Closing a channel
329-----------------
330
331The client calls relay_close() when it's finished using the channel.
332The channel and its associated buffers are destroyed when there are no
333longer any references to any of the channel buffers. relay_flush()
334forces a sub-buffer switch on all the channel buffers, and can be used
335to finalize and process the last sub-buffers before the channel is
336closed.
337
338Creating non-relay files
339------------------------
340
341relay_open() automatically creates files in the relayfs filesystem to
342represent the per-cpu kernel buffers; it's often useful for
343applications to be able to create their own files alongside the relay
344files in the relayfs filesystem as well e.g. 'control' files much like
345those created in /proc or debugfs for similar purposes, used to
346communicate control information between the kernel and user sides of a
347relayfs application. For this purpose the relayfs_create_file() and
348relayfs_remove_file() API functions exist. For relayfs_create_file(),
349the caller passes in a set of user-defined file operations to be used
350for the file and an optional void * to a user-specified data item,
351which will be accessible via inode->u.generic_ip (see the relay-apps
352tarball for examples). The file_operations are a required parameter
353to relayfs_create_file() and thus the semantics of these files are
354completely defined by the caller.
355
356See the relay-apps tarball at http://relayfs.sourceforge.net for
357examples of how these non-relay files are meant to be used.
358
359Creating relay files in other filesystems
360-----------------------------------------
361
362By default of course, relay_open() creates relay files in the relayfs
363filesystem. Because relay_file_operations is exported, however, it's
364also possible to create and use relay files in other pseudo-filesytems
365such as debugfs.
366
367For this purpose, two callback functions are provided,
368create_buf_file() and remove_buf_file(). create_buf_file() is called
369once for each per-cpu buffer from relay_open() to allow the client to
370create a file to be used to represent the corresponding buffer; if
371this callback is not defined, the default implementation will create
372and return a file in the relayfs filesystem to represent the buffer.
373The callback should return the dentry of the file created to represent
374the relay buffer. Note that the parent directory passed to
375relay_open() (and passed along to the callback), if specified, must
376exist in the same filesystem the new relay file is created in. If
377create_buf_file() is defined, remove_buf_file() must also be defined;
378it's responsible for deleting the file(s) created in create_buf_file()
379and is called during relay_close().
380
381The create_buf_file() implementation can also be defined in such a way
382as to allow the creation of a single 'global' buffer instead of the
383default per-cpu set. This can be useful for applications interested
384mainly in seeing the relative ordering of system-wide events without
385the need to bother with saving explicit timestamps for the purpose of
386merging/sorting per-cpu files in a postprocessing step.
387
388To have relay_open() create a global buffer, the create_buf_file()
389implementation should set the value of the is_global outparam to a
390non-zero value in addition to creating the file that will be used to
391represent the single buffer. In the case of a global buffer,
392create_buf_file() and remove_buf_file() will be called only once. The
393normal channel-writing functions e.g. relay_write() can still be used
394- writes from any cpu will transparently end up in the global buffer -
395but since it is a global buffer, callers should make sure they use the
396proper locking for such a buffer, either by wrapping writes in a
397spinlock, or by copying a write function from relayfs_fs.h and
398creating a local version that internally does the proper locking.
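
Sketching the global case on top of the previous example (again with
invented names; the spinlock is just one way of serializing writers and
is not required by the relay API itself):

  /* single 'global' buffer shared by all cpus */
  static struct dentry *create_global_buf_file(const char *filename,
                                               struct dentry *parent,
                                               int mode,
                                               struct rchan_buf *buf,
                                               int *is_global)
  {
          *is_global = 1;         /* ask for one buffer instead of per-cpu */
          return debugfs_create_file(filename, mode, parent, buf,
                                     &relay_file_operations);
  }

  static DEFINE_SPINLOCK(global_buf_lock);

  /* writers on different cpus must now serialize themselves */
  static void app_write(struct rchan *chan, const void *data, size_t len)
  {
          unsigned long flags;

          spin_lock_irqsave(&global_buf_lock, flags);
          relay_write(chan, data, len);
          spin_unlock_irqrestore(&global_buf_lock, flags);
  }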
399
400See the 'exported-relayfile' examples in the relay-apps tarball for
401examples of creating and using relay files in debugfs.
402
403Misc
404----
405
406Some applications may want to keep a channel around and re-use it
407rather than open and close a new channel for each use. relay_reset()
408can be used for this purpose - it resets a channel to its initial
409state without reallocating channel buffer memory or destroying
410existing mappings. It should, however, only be called when it's safe to
411do so, i.e. when the channel isn't currently being written to.
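
For example, a client might reuse a channel between runs roughly as
follows (a sketch only - app_logging_enabled is an invented flag, and
making sure the writers have actually drained is entirely the caller's
problem):

  /* stop producers, then recycle the channel for the next run */
  app_logging_enabled = 0;        /* hypothetical flag checked by writers */
  /* ... wait for any in-flight writers to finish ... */
  relay_reset(chan);              /* initial state, buffers/mappings kept */
  app_logging_enabled = 1;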
412
413Finally, there are a couple of utility callbacks that can be used for
414different purposes. buf_mapped() is called whenever a channel buffer
415is mmapped from user space and buf_unmapped() is called when it's
416unmapped. The client can use this notification to trigger actions
417within the kernel application, such as enabling/disabling logging to
418the channel.
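
A sketch of such callbacks, assuming they receive the
(struct rchan_buf *, struct file *) pair and reusing the invented
app_logging_enabled flag from above - here logging is only enabled while
at least one reader has a buffer mapped:

  static atomic_t mapped_count = ATOMIC_INIT(0);

  static void buf_mapped_handler(struct rchan_buf *buf, struct file *filp)
  {
          if (atomic_inc_return(&mapped_count) == 1)
                  app_logging_enabled = 1;
  }

  static void buf_unmapped_handler(struct rchan_buf *buf, struct file *filp)
  {
          if (atomic_dec_return(&mapped_count) == 0)
                  app_logging_enabled = 0;
  }

These would be hooked up via the buf_mapped/buf_unmapped members of the
same callback struct passed to relay_open().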
419
420
421Resources
422=========
423
424For news, example code, mailing list, etc. see the relayfs homepage:
425
426 http://relayfs.sourceforge.net
427
428
429Credits
430=======
431
432The ideas and specs for relayfs came about as a result of discussions
433on tracing involving the following:
434
435Michel Dagenais <michel.dagenais@polymtl.ca>
436Richard Moore <richardj_moore@uk.ibm.com>
437Bob Wisniewski <bob@watson.ibm.com>
438Karim Yaghmour <karim@opersys.com>
439Tom Zanussi <zanussi@us.ibm.com>
440
441Also thanks to Hubertus Franke for a lot of useful suggestions and bug
442reports.
diff --git a/Documentation/input/joystick.txt b/Documentation/input/joystick.txt
index d53b857a3710..841c353297e6 100644
--- a/Documentation/input/joystick.txt
+++ b/Documentation/input/joystick.txt
@@ -39,7 +39,6 @@ them. Bug reports and success stories are also welcome.
39 39
40 The input project website is at: 40 The input project website is at:
41 41
42 http://www.suse.cz/development/input/
43 http://atrey.karlin.mff.cuni.cz/~vojtech/input/ 42 http://atrey.karlin.mff.cuni.cz/~vojtech/input/
44 43
45 There is also a mailing list for the driver at: 44 There is also a mailing list for the driver at:
diff --git a/Documentation/scsi/ChangeLog.megaraid b/Documentation/scsi/ChangeLog.megaraid
index c173806c91fa..a056bbe67c7e 100644
--- a/Documentation/scsi/ChangeLog.megaraid
+++ b/Documentation/scsi/ChangeLog.megaraid
@@ -1,3 +1,126 @@
1Release Date : Fri May 19 09:31:45 EST 2006 - Seokmann Ju <sju@lsil.com>
2Current Version : 2.20.4.9 (scsi module), 2.20.2.6 (cmm module)
3Older Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
4
51. Fixed a bug in megaraid_init_mbox().
6 Customer reported "garbage in file on x86_64 platform".
7 Root Cause: the driver registered controllers as 64-bit DMA capable
8 for those which do not support it.
9 Fix: Made a change in the init function, inserting an identification
10 mechanism to identify 64-bit DMA capable controllers.
11
12 > -----Original Message-----
13 > From: Vasily Averin [mailto:vvs@sw.ru]
14 > Sent: Thursday, May 04, 2006 2:49 PM
15 > To: linux-scsi@vger.kernel.org; Kolli, Neela; Mukker, Atul;
16 > Ju, Seokmann; Bagalkote, Sreenivas;
17 > James.Bottomley@SteelEye.com; devel@openvz.org
18 > Subject: megaraid_mbox: garbage in file
19 >
20 > Hello all,
21 >
22 > I've investigated customers claim on the unstable work of
23 > their node and found a
24 > strange effect: reading from some files leads to the
25 > "attempt to access beyond end of device" messages.
26 >
27 > I've checked filesystem, memory on the node, motherboard BIOS
28 > version, but it
29 > does not help and issue still has been reproduced by simple
30 > file reading.
31 >
32 > Reproducer is simple:
33 >
34 > echo 0xffffffff >/proc/sys/dev/scsi/logging_level ;
35 > cat /vz/private/101/root/etc/ld.so.cache >/tmp/ttt ;
36 > echo 0 >/proc/sys/dev/scsi/logging
37 >
38 > It leads to the following messages in dmesg
39 >
40 > sd_init_command: disk=sda, block=871769260, count=26
41 > sda : block=871769260
42 > sda : reading 26/26 512 byte blocks.
43 > scsi_add_timer: scmd: f79ed980, time: 7500, (c02b1420)
44 > sd 0:1:0:0: send 0xf79ed980 sd 0:1:0:0:
45 > command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00
46 > buffer = 0xf7cfb540, bufflen = 13312, done = 0xc0366b40,
47 > queuecommand 0xc0344010
48 > leaving scsi_dispatch_cmnd()
49 > scsi_delete_timer: scmd: f79ed980, rtn: 1
50 > sd 0:1:0:0: done 0xf79ed980 SUCCESS 0 sd 0:1:0:0:
51 > command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00
52 > scsi host busy 1 failed 0
53 > sd 0:1:0:0: Notifying upper driver of completion (result 0)
54 > sd_rw_intr: sda: res=0x0
55 > 26 sectors total, 13312 bytes done.
56 > use_sg is 4
57 > attempt to access beyond end of device
58 > sda6: rw=0, want=1044134458, limit=951401367
59 > Buffer I/O error on device sda6, logical block 522067228
60 > attempt to access beyond end of device
61
622. When an INQUIRY with the EVPD bit set is issued to the MegaRAID
63 controller, system memory gets corrupted.
64 Root Cause: MegaRAID F/W handles the INQUIRY with the EVPD bit set
65 incorrectly.
66 Fix: MegaRAID F/W has fixed the problem and the fix is being released.
67 Meanwhile, the driver will filter out such requests.
68
693. One member of the driver's data structure causes an unaligned
70 access issue on 64-bit platforms.
71 Customer reported a "kernel unaligned access address" issue when an
72 application communicates with the MegaRAID HBA driver.
73 Root Cause: in the uioc_t structure, one member was misaligned and
74 this led the system to display the error message.
75 Fix: A patch was submitted to the community by the following contributor.
76
77 > -----Original Message-----
78 > From: linux-scsi-owner@vger.kernel.org
79 > [mailto:linux-scsi-owner@vger.kernel.org] On Behalf Of Sakurai Hiroomi
80 > Sent: Wednesday, July 12, 2006 4:20 AM
81 > To: linux-scsi@vger.kernel.org; linux-kernel@vger.kernel.org
82 > Subject: Re: Help: strange messages from kernel on IA64 platform
83 >
84 > Hi,
85 >
86 > I saw same message.
87 >
88 > When GAM(Global Array Manager) is started, The following
89 > message output.
90 > kernel: kernel unaligned access to 0xe0000001fe1080d4,
91 > ip=0xa000000200053371
92 >
93 > The uioc structure used by ioctl is defined by packed,
94 > the allignment of each member are disturbed.
95 > In a 64 bit structure, the allignment of member doesn't fit 64 bit
96 > boundary. this causes this messages.
97 > In a 32 bit structure, we don't see the message because the allinment
98 > of member fit 32 bit boundary even if packed is specified.
99 >
100 > patch
101 > I Add 32 bit dummy member to fit 64 bit boundary. I tested.
102 > We confirmed this patch fix the problem by IA64 server.
103 >
104 > **************************************************************
105 > ****************
106 > --- linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h.orig
107 > 2006-04-03 17:13:03.000000000 +0900
108 > +++ linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h
109 > 2006-04-03 17:14:09.000000000 +0900
110 > @@ -132,6 +132,10 @@
111 > /* Driver Data: */
112 > void __user * user_data;
113 > uint32_t user_data_len;
114 > +
115 > + /* 64bit alignment */
116 > + uint32_t pad_0xBC;
117 > +
118 > mraid_passthru_t __user *user_pthru;
119 >
120 > mraid_passthru_t *pthru32;
121 > **************************************************************
122 > ****************
123
1Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com> 124Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com>
2Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module) 125Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
3Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module) 126Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 0b62c62142cf..5c3a51905969 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -25,6 +25,7 @@ Currently, these files are in /proc/sys/fs:
25- inode-state 25- inode-state
26- overflowuid 26- overflowuid
27- overflowgid 27- overflowgid
28- suid_dumpable
28- super-max 29- super-max
29- super-nr 30- super-nr
30 31
@@ -131,6 +132,25 @@ The default is 65534.
131 132
132============================================================== 133==============================================================
133 134
135suid_dumpable:
136
137This value can be used to query and set the core dump mode for setuid
138or otherwise protected/tainted binaries. The modes are
139
1400 - (default) - traditional behaviour. Any process which has changed
141 privilege levels or is execute only will not be dumped
1421 - (debug) - all processes dump core when possible. The core dump is
143 owned by the current user and no security is applied. This is
144 intended for system debugging situations only. Ptrace is unchecked.
1452 - (suidsafe) - any binary which normally would not be dumped is dumped
146 readable by root only. This allows the end user to remove
147 such a dump but not access it directly. For security reasons
148 core dumps in this mode will not overwrite one another or
149 other files. This mode is appropriate when administrators are
150 attempting to debug problems in a normal environment.
151
152==============================================================
153
134super-max & super-nr: 154super-max & super-nr:
135 155
136These numbers control the maximum number of superblocks, and 156These numbers control the maximum number of superblocks, and
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 7345c338080a..89bf8c20a586 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -50,7 +50,6 @@ show up in /proc/sys/kernel:
50- shmmax [ sysv ipc ] 50- shmmax [ sysv ipc ]
51- shmmni 51- shmmni
52- stop-a [ SPARC only ] 52- stop-a [ SPARC only ]
53- suid_dumpable
54- sysrq ==> Documentation/sysrq.txt 53- sysrq ==> Documentation/sysrq.txt
55- tainted 54- tainted
56- threads-max 55- threads-max
@@ -310,25 +309,6 @@ kernel. This value defaults to SHMMAX.
310 309
311============================================================== 310==============================================================
312 311
313suid_dumpable:
314
315This value can be used to query and set the core dump mode for setuid
316or otherwise protected/tainted binaries. The modes are
317
3180 - (default) - traditional behaviour. Any process which has changed
319 privilege levels or is execute only will not be dumped
3201 - (debug) - all processes dump core when possible. The core dump is
321 owned by the current user and no security is applied. This is
322 intended for system debugging situations only. Ptrace is unchecked.
3232 - (suidsafe) - any binary which normally would not be dumped is dumped
324 readable by root only. This allows the end user to remove
325 such a dump but not access it directly. For security reasons
326 core dumps in this mode will not overwrite one another or
327 other files. This mode is appropriate when adminstrators are
328 attempting to debug problems in a normal environment.
329
330==============================================================
331
332tainted: 312tainted:
333 313
334Non-zero if the kernel has been tainted. Numeric values, which 314Non-zero if the kernel has been tainted. Numeric values, which
diff --git a/MAINTAINERS b/MAINTAINERS
index d3315a552be1..3bbd7064f947 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -889,6 +889,12 @@ M: rdunlap@xenotime.net
889T: git http://tali.admingilde.org/git/linux-docbook.git 889T: git http://tali.admingilde.org/git/linux-docbook.git
890S: Maintained 890S: Maintained
891 891
892DOCKING STATION DRIVER
893P: Kristen Carlson Accardi
894M: kristen.c.accardi@intel.com
895L: linux-acpi@vger.kernel.org
896S: Maintained
897
892DOUBLETALK DRIVER 898DOUBLETALK DRIVER
893P: James R. Van Zandt 899P: James R. Van Zandt
894M: jrv@vanzandt.mv.com 900M: jrv@vanzandt.mv.com
diff --git a/Makefile b/Makefile
index 8406d02c6385..33559b566449 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 18 3SUBLEVEL = 18
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc5
5NAME=Crazed Snow-Weasel 5NAME=Crazed Snow-Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 5b7c26395b44..028bdc9228fb 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -179,17 +179,19 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
179static inline struct safe_buffer * 179static inline struct safe_buffer *
180find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr) 180find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
181{ 181{
182 struct safe_buffer *b = NULL; 182 struct safe_buffer *b, *rb = NULL;
183 unsigned long flags; 183 unsigned long flags;
184 184
185 read_lock_irqsave(&device_info->lock, flags); 185 read_lock_irqsave(&device_info->lock, flags);
186 186
187 list_for_each_entry(b, &device_info->safe_buffers, node) 187 list_for_each_entry(b, &device_info->safe_buffers, node)
188 if (b->safe_dma_addr == safe_dma_addr) 188 if (b->safe_dma_addr == safe_dma_addr) {
189 rb = b;
189 break; 190 break;
191 }
190 192
191 read_unlock_irqrestore(&device_info->lock, flags); 193 read_unlock_irqrestore(&device_info->lock, flags);
192 return b; 194 return rb;
193} 195}
194 196
195static inline void 197static inline void
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7ea5f01dfc7b..de4e33137901 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -634,6 +634,14 @@ ENTRY(__switch_to)
634 * purpose. 634 * purpose.
635 */ 635 */
636 636
637 .macro usr_ret, reg
638#ifdef CONFIG_ARM_THUMB
639 bx \reg
640#else
641 mov pc, \reg
642#endif
643 .endm
644
637 .align 5 645 .align 5
638 .globl __kuser_helper_start 646 .globl __kuser_helper_start
639__kuser_helper_start: 647__kuser_helper_start:
@@ -675,7 +683,7 @@ __kuser_memory_barrier: @ 0xffff0fa0
675#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP) 683#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
676 mcr p15, 0, r0, c7, c10, 5 @ dmb 684 mcr p15, 0, r0, c7, c10, 5 @ dmb
677#endif 685#endif
678 mov pc, lr 686 usr_ret lr
679 687
680 .align 5 688 .align 5
681 689
@@ -778,7 +786,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
778 mov r0, #-1 786 mov r0, #-1
779 adds r0, r0, #0 787 adds r0, r0, #0
780#endif 788#endif
781 mov pc, lr 789 usr_ret lr
782 790
783#else 791#else
784 792
@@ -792,7 +800,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
792#ifdef CONFIG_SMP 800#ifdef CONFIG_SMP
793 mcr p15, 0, r0, c7, c10, 5 @ dmb 801 mcr p15, 0, r0, c7, c10, 5 @ dmb
794#endif 802#endif
795 mov pc, lr 803 usr_ret lr
796 804
797#endif 805#endif
798 806
@@ -834,16 +842,11 @@ __kuser_cmpxchg: @ 0xffff0fc0
834__kuser_get_tls: @ 0xffff0fe0 842__kuser_get_tls: @ 0xffff0fe0
835 843
836#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL) 844#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
837
838 ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 845 ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
839 mov pc, lr
840
841#else 846#else
842
843 mrc p15, 0, r0, c13, c0, 3 @ read TLS register 847 mrc p15, 0, r0, c13, c0, 3 @ read TLS register
844 mov pc, lr
845
846#endif 848#endif
849 usr_ret lr
847 850
848 .rep 5 851 .rep 5
849 .word 0 @ pad up to __kuser_helper_version 852 .word 0 @ pad up to __kuser_helper_version
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4fe386eea4b4..5365d4e5949e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -118,7 +118,7 @@ ENTRY(secondary_startup)
118 sub r4, r4, r5 @ mmu has been enabled 118 sub r4, r4, r5 @ mmu has been enabled
119 ldr r4, [r7, r4] @ get secondary_data.pgdir 119 ldr r4, [r7, r4] @ get secondary_data.pgdir
120 adr lr, __enable_mmu @ return address 120 adr lr, __enable_mmu @ return address
121 add pc, r10, #12 @ initialise processor 121 add pc, r10, #PROCINFO_INITFUNC @ initialise processor
122 @ (return control reg) 122 @ (return control reg)
123 123
124 /* 124 /*
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index 0c7938645df6..273e05f2b8de 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -10,45 +10,47 @@ obj-m :=
10obj-n := 10obj-n :=
11obj- := 11obj- :=
12 12
13# DMA
14obj-$(CONFIG_S3C2410_DMA) += dma.o
15
13# S3C2400 support files 16# S3C2400 support files
14obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o 17obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o
15 18
16# S3C2410 support files 19# S3C2410 support files
17 20
18obj-$(CONFIG_CPU_S3C2410) += s3c2410.o 21obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
19obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o 22obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o
20obj-$(CONFIG_S3C2410_DMA) += dma.o
21 23
22# Power Management support 24# Power Management support
23 25
24obj-$(CONFIG_PM) += pm.o sleep.o 26obj-$(CONFIG_PM) += pm.o sleep.o
25obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o 27obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
26 28
27# S3C2412 support 29# S3C2412 support
28obj-$(CONFIG_CPU_S3C2412) += s3c2412.o 30obj-$(CONFIG_CPU_S3C2412) += s3c2412.o
29obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o 31obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o
30 32
31# 33#
32# S3C244X support 34# S3C244X support
33 35
34obj-$(CONFIG_CPU_S3C244X) += s3c244x.o 36obj-$(CONFIG_CPU_S3C244X) += s3c244x.o
35obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o 37obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o
36 38
37# Clock control 39# Clock control
38 40
39obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o 41obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
40 42
41# S3C2440 support 43# S3C2440 support
42 44
43obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o 45obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
44obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o 46obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o
45obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o 47obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o
46obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o 48obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o
47 49
48# S3C2442 support 50# S3C2442 support
49 51
50obj-$(CONFIG_CPU_S3C2442) += s3c2442.o 52obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
51obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o 53obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o
52 54
53# bast extras 55# bast extras
54 56
diff --git a/arch/arm/mach-s3c2410/dma.c b/arch/arm/mach-s3c2410/dma.c
index 094cc52745c5..25855452fe8c 100644
--- a/arch/arm/mach-s3c2410/dma.c
+++ b/arch/arm/mach-s3c2410/dma.c
@@ -112,7 +112,7 @@ dmadbg_capture(s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs)
112} 112}
113 113
114static void 114static void
115dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan, 115dmadbg_dumpregs(const char *fname, int line, s3c2410_dma_chan_t *chan,
116 struct s3c2410_dma_regstate *regs) 116 struct s3c2410_dma_regstate *regs)
117{ 117{
118 printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n", 118 printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
@@ -132,7 +132,16 @@ dmadbg_showchan(const char *fname, int line, s3c2410_dma_chan_t *chan)
132 chan->number, fname, line, chan->load_state, 132 chan->number, fname, line, chan->load_state,
133 chan->curr, chan->next, chan->end); 133 chan->curr, chan->next, chan->end);
134 134
135 dmadbg_showregs(fname, line, chan, &state); 135 dmadbg_dumpregs(fname, line, chan, &state);
136}
137
138static void
139dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan)
140{
141 struct s3c2410_dma_regstate state;
142
143 dmadbg_capture(chan, &state);
144 dmadbg_dumpregs(fname, line, chan, &state);
136} 145}
137 146
138#define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan)) 147#define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan))
@@ -253,10 +262,14 @@ s3c2410_dma_loadbuffer(s3c2410_dma_chan_t *chan,
253 buf->next); 262 buf->next);
254 reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0; 263 reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
255 } else { 264 } else {
256 pr_debug("load_state is %d => autoreload\n", chan->load_state); 265 //pr_debug("load_state is %d => autoreload\n", chan->load_state);
257 reload = S3C2410_DCON_AUTORELOAD; 266 reload = S3C2410_DCON_AUTORELOAD;
258 } 267 }
259 268
269 if ((buf->data & 0xf0000000) != 0x30000000) {
270 dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
271 }
272
260 writel(buf->data, chan->addr_reg); 273 writel(buf->data, chan->addr_reg);
261 274
262 dma_wrreg(chan, S3C2410_DMA_DCON, 275 dma_wrreg(chan, S3C2410_DMA_DCON,
@@ -370,7 +383,7 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
370 tmp |= S3C2410_DMASKTRIG_ON; 383 tmp |= S3C2410_DMASKTRIG_ON;
371 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); 384 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
372 385
373 pr_debug("wrote %08lx to DMASKTRIG\n", tmp); 386 pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);
374 387
375#if 0 388#if 0
376 /* the dma buffer loads should take care of clearing the AUTO 389 /* the dma buffer loads should take care of clearing the AUTO
@@ -384,7 +397,30 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
384 397
385 dbg_showchan(chan); 398 dbg_showchan(chan);
386 399
400 /* if we've only loaded one buffer onto the channel, then check
401 * to see if we have another, and if so, try and load it so when
402 * the first buffer is finished, the new one will be loaded onto
403 * the channel */
404
405 if (chan->next != NULL) {
406 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
407
408 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
409 pr_debug("%s: buff not yet loaded, no more todo\n",
410 __FUNCTION__);
411 } else {
412 chan->load_state = S3C2410_DMALOAD_1RUNNING;
413 s3c2410_dma_loadbuffer(chan, chan->next);
414 }
415
416 } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
417 s3c2410_dma_loadbuffer(chan, chan->next);
418 }
419 }
420
421
387 local_irq_restore(flags); 422 local_irq_restore(flags);
423
388 return 0; 424 return 0;
389} 425}
390 426
@@ -436,12 +472,11 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id,
436 buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC); 472 buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
437 if (buf == NULL) { 473 if (buf == NULL) {
438 pr_debug("%s: out of memory (%ld alloc)\n", 474 pr_debug("%s: out of memory (%ld alloc)\n",
439 __FUNCTION__, sizeof(*buf)); 475 __FUNCTION__, (long)sizeof(*buf));
440 return -ENOMEM; 476 return -ENOMEM;
441 } 477 }
442 478
443 pr_debug("%s: new buffer %p\n", __FUNCTION__, buf); 479 //pr_debug("%s: new buffer %p\n", __FUNCTION__, buf);
444
445 //dbg_showchan(chan); 480 //dbg_showchan(chan);
446 481
447 buf->next = NULL; 482 buf->next = NULL;
@@ -537,14 +572,20 @@ s3c2410_dma_lastxfer(s3c2410_dma_chan_t *chan)
537 case S3C2410_DMALOAD_1LOADED: 572 case S3C2410_DMALOAD_1LOADED:
538 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { 573 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
539 /* flag error? */ 574 /* flag error? */
540 printk(KERN_ERR "dma%d: timeout waiting for load\n", 575 printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
541 chan->number); 576 chan->number, __FUNCTION__);
542 return; 577 return;
543 } 578 }
544 break; 579 break;
545 580
581 case S3C2410_DMALOAD_1LOADED_1RUNNING:
582 /* I believe in this case we do not have anything to do
583 * until the next buffer comes along, and we turn off the
584 * reload */
585 return;
586
546 default: 587 default:
547 pr_debug("dma%d: lastxfer: unhandled load_state %d with no next", 588 pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
548 chan->number, chan->load_state); 589 chan->number, chan->load_state);
549 return; 590 return;
550 591
@@ -629,7 +670,14 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
629 } else { 670 } else {
630 } 671 }
631 672
632 if (chan->next != NULL) { 673 /* only reload if the channel is still running... our buffer done
674 * routine may have altered the state by requesting the dma channel
675 * to stop or shutdown... */
676
677 /* todo: check that when the channel is shut-down from inside this
678 * function, we cope with unsetting reload, etc */
679
680 if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
633 unsigned long flags; 681 unsigned long flags;
634 682
635 switch (chan->load_state) { 683 switch (chan->load_state) {
@@ -644,8 +692,8 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
644 case S3C2410_DMALOAD_1LOADED: 692 case S3C2410_DMALOAD_1LOADED:
645 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { 693 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
646 /* flag error? */ 694 /* flag error? */
647 printk(KERN_ERR "dma%d: timeout waiting for load\n", 695 printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
648 chan->number); 696 chan->number, __FUNCTION__);
649 return IRQ_HANDLED; 697 return IRQ_HANDLED;
650 } 698 }
651 699
@@ -678,8 +726,6 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
678 return IRQ_HANDLED; 726 return IRQ_HANDLED;
679} 727}
680 728
681
682
683/* s3c2410_request_dma 729/* s3c2410_request_dma
684 * 730 *
685 * get control of an dma channel 731 * get control of an dma channel
@@ -718,11 +764,17 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
718 pr_debug("dma%d: %s : requesting irq %d\n", 764 pr_debug("dma%d: %s : requesting irq %d\n",
719 channel, __FUNCTION__, chan->irq); 765 channel, __FUNCTION__, chan->irq);
720 766
767 chan->irq_claimed = 1;
768 local_irq_restore(flags);
769
721 err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED, 770 err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED,
722 client->name, (void *)chan); 771 client->name, (void *)chan);
723 772
773 local_irq_save(flags);
774
724 if (err) { 775 if (err) {
725 chan->in_use = 0; 776 chan->in_use = 0;
777 chan->irq_claimed = 0;
726 local_irq_restore(flags); 778 local_irq_restore(flags);
727 779
728 printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n", 780 printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
@@ -730,7 +782,6 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
730 return err; 782 return err;
731 } 783 }
732 784
733 chan->irq_claimed = 1;
734 chan->irq_enabled = 1; 785 chan->irq_enabled = 1;
735 } 786 }
736 787
@@ -810,6 +861,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
810 861
811 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); 862 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
812 tmp |= S3C2410_DMASKTRIG_STOP; 863 tmp |= S3C2410_DMASKTRIG_STOP;
864 //tmp &= ~S3C2410_DMASKTRIG_ON;
813 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); 865 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
814 866
815#if 0 867#if 0
@@ -819,6 +871,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
819 dma_wrreg(chan, S3C2410_DMA_DCON, tmp); 871 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
820#endif 872#endif
821 873
874 /* should stop do this, or should we wait for flush? */
822 chan->state = S3C2410_DMA_IDLE; 875 chan->state = S3C2410_DMA_IDLE;
823 chan->load_state = S3C2410_DMALOAD_NONE; 876 chan->load_state = S3C2410_DMALOAD_NONE;
824 877
@@ -827,6 +880,22 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
827 return 0; 880 return 0;
828} 881}
829 882
883void s3c2410_dma_waitforstop(s3c2410_dma_chan_t *chan)
884{
885 unsigned long tmp;
886 unsigned int timeout = 0x10000;
887
888 while (timeout-- > 0) {
889 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
890
891 if (!(tmp & S3C2410_DMASKTRIG_ON))
892 return;
893 }
894
895 pr_debug("dma%d: failed to stop?\n", chan->number);
896}
897
898
830/* s3c2410_dma_flush 899/* s3c2410_dma_flush
831 * 900 *
832 * stop the channel, and remove all current and pending transfers 901 * stop the channel, and remove all current and pending transfers
@@ -837,7 +906,9 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
837 s3c2410_dma_buf_t *buf, *next; 906 s3c2410_dma_buf_t *buf, *next;
838 unsigned long flags; 907 unsigned long flags;
839 908
840 pr_debug("%s:\n", __FUNCTION__); 909 pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number);
910
911 dbg_showchan(chan);
841 912
842 local_irq_save(flags); 913 local_irq_save(flags);
843 914
@@ -864,11 +935,64 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
864 } 935 }
865 } 936 }
866 937
938 dbg_showregs(chan);
939
940 s3c2410_dma_waitforstop(chan);
941
942#if 0
943 /* should also clear interrupts, according to WinCE BSP */
944 {
945 unsigned long tmp;
946
947 tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
948 tmp |= S3C2410_DCON_NORELOAD;
949 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
950 }
951#endif
952
953 dbg_showregs(chan);
954
867 local_irq_restore(flags); 955 local_irq_restore(flags);
868 956
869 return 0; 957 return 0;
870} 958}
871 959
960int
961s3c2410_dma_started(s3c2410_dma_chan_t *chan)
962{
963 unsigned long flags;
964
965 local_irq_save(flags);
966
967 dbg_showchan(chan);
968
969 /* if we've only loaded one buffer onto the channel, then check
970 * to see if we have another, and if so, try and load it so when
971 * the first buffer is finished, the new one will be loaded onto
972 * the channel */
973
974 if (chan->next != NULL) {
975 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
976
977 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
978 pr_debug("%s: buff not yet loaded, no more todo\n",
979 __FUNCTION__);
980 } else {
981 chan->load_state = S3C2410_DMALOAD_1RUNNING;
982 s3c2410_dma_loadbuffer(chan, chan->next);
983 }
984
985 } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
986 s3c2410_dma_loadbuffer(chan, chan->next);
987 }
988 }
989
990
991 local_irq_restore(flags);
992
993 return 0;
994
995}
872 996
873int 997int
874s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op) 998s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
@@ -885,14 +1009,15 @@ s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
885 return s3c2410_dma_dostop(chan); 1009 return s3c2410_dma_dostop(chan);
886 1010
887 case S3C2410_DMAOP_PAUSE: 1011 case S3C2410_DMAOP_PAUSE:
888 return -ENOENT;
889
890 case S3C2410_DMAOP_RESUME: 1012 case S3C2410_DMAOP_RESUME:
891 return -ENOENT; 1013 return -ENOENT;
892 1014
893 case S3C2410_DMAOP_FLUSH: 1015 case S3C2410_DMAOP_FLUSH:
894 return s3c2410_dma_flush(chan); 1016 return s3c2410_dma_flush(chan);
895 1017
1018 case S3C2410_DMAOP_STARTED:
1019 return s3c2410_dma_started(chan);
1020
896 case S3C2410_DMAOP_TIMEOUT: 1021 case S3C2410_DMAOP_TIMEOUT:
897 return 0; 1022 return 0;
898 1023
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index c4e3f8c68479..f2bbef07b1e4 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -285,7 +285,7 @@ static struct flash_platform_data versatile_flash_data = {
285 285
286static struct resource versatile_flash_resource = { 286static struct resource versatile_flash_resource = {
287 .start = VERSATILE_FLASH_BASE, 287 .start = VERSATILE_FLASH_BASE,
288 .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE, 288 .end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE - 1,
289 .flags = IORESOURCE_MEM, 289 .flags = IORESOURCE_MEM,
290}; 290};
291 291
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index f71fb4a029cb..b2751eadbc56 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -142,6 +142,7 @@ config X86_SUMMIT
142 In particular, it is needed for the x440. 142 In particular, it is needed for the x440.
143 143
144 If you don't have one of these computers, you should say N here. 144 If you don't have one of these computers, you should say N here.
145 If you want to build a NUMA kernel, you must select ACPI.
145 146
146config X86_BIGSMP 147config X86_BIGSMP
147 bool "Support for other sub-arch SMP systems with more than 8 CPUs" 148 bool "Support for other sub-arch SMP systems with more than 8 CPUs"
@@ -169,6 +170,7 @@ config X86_GENERICARCH
169 help 170 help
170 This option compiles in the Summit, bigsmp, ES7000, default subarchitectures. 171 This option compiles in the Summit, bigsmp, ES7000, default subarchitectures.
171 It is intended for a generic binary kernel. 172 It is intended for a generic binary kernel.
173 If you want a NUMA kernel, select ACPI. We need SRAT for NUMA.
172 174
173config X86_ES7000 175config X86_ES7000
174 bool "Support for Unisys ES7000 IA32 series" 176 bool "Support for Unisys ES7000 IA32 series"
@@ -542,7 +544,7 @@ config X86_PAE
542# Common NUMA Features 544# Common NUMA Features
543config NUMA 545config NUMA
544 bool "Numa Memory Allocation and Scheduler Support" 546 bool "Numa Memory Allocation and Scheduler Support"
545 depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI)) 547 depends on SMP && HIGHMEM64G && (X86_NUMAQ || (X86_SUMMIT || X86_GENERICARCH) && ACPI)
546 default n if X86_PC 548 default n if X86_PC
547 default y if (X86_NUMAQ || X86_SUMMIT) 549 default y if (X86_NUMAQ || X86_SUMMIT)
548 550
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 0db6387025ca..ee003bc0e8b1 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -59,7 +59,7 @@ static inline int gsi_irq_sharing(int gsi) { return gsi; }
59 59
60#define BAD_MADT_ENTRY(entry, end) ( \ 60#define BAD_MADT_ENTRY(entry, end) ( \
61 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 61 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
62 ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) 62 ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
63 63
64#define PREFIX "ACPI: " 64#define PREFIX "ACPI: "
65 65
diff --git a/arch/i386/kernel/acpi/wakeup.S b/arch/i386/kernel/acpi/wakeup.S
index 9f408eee4e6f..b781b38131c0 100644
--- a/arch/i386/kernel/acpi/wakeup.S
+++ b/arch/i386/kernel/acpi/wakeup.S
@@ -292,7 +292,10 @@ ENTRY(do_suspend_lowlevel)
292 pushl $3 292 pushl $3
293 call acpi_enter_sleep_state 293 call acpi_enter_sleep_state
294 addl $4, %esp 294 addl $4, %esp
295 ret 295
296# In case of S3 failure, we'll emerge here. Jump
297# to ret_point to recover
298 jmp ret_point
296 .p2align 4,,7 299 .p2align 4,,7
297ret_point: 300ret_point:
298 call restore_registers 301 call restore_registers
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index efb41e81351c..e6ea00edcb54 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -567,16 +567,11 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
567static int __init 567static int __init
568acpi_cpufreq_init (void) 568acpi_cpufreq_init (void)
569{ 569{
570 int result = 0;
571
572 dprintk("acpi_cpufreq_init\n"); 570 dprintk("acpi_cpufreq_init\n");
573 571
574 result = acpi_cpufreq_early_init_acpi(); 572 acpi_cpufreq_early_init_acpi();
575 573
576 if (!result) 574 return cpufreq_register_driver(&acpi_cpufreq_driver);
577 result = cpufreq_register_driver(&acpi_cpufreq_driver);
578
579 return (result);
580} 575}
581 576
582 577
diff --git a/arch/i386/pci/init.c b/arch/i386/pci/init.c
index c7650a7e0b07..51087a9d9172 100644
--- a/arch/i386/pci/init.c
+++ b/arch/i386/pci/init.c
@@ -14,8 +14,12 @@ static __init int pci_access_init(void)
14#ifdef CONFIG_PCI_BIOS 14#ifdef CONFIG_PCI_BIOS
15 pci_pcbios_init(); 15 pci_pcbios_init();
16#endif 16#endif
17 if (raw_pci_ops) 17 /*
18 return 0; 18 * don't check for raw_pci_ops here because we want pcbios as last
19 * fallback, yet it's needed to run first to set pcibios_last_bus
20 * in case legacy PCI probing is used. otherwise detecting peer busses
21 * fails.
22 */
19#ifdef CONFIG_PCI_DIRECT 23#ifdef CONFIG_PCI_DIRECT
20 pci_direct_init(); 24 pci_direct_init();
21#endif 25#endif
diff --git a/arch/i386/pci/mmconfig.c b/arch/i386/pci/mmconfig.c
index e545b0992c48..972180f738d9 100644
--- a/arch/i386/pci/mmconfig.c
+++ b/arch/i386/pci/mmconfig.c
@@ -178,7 +178,7 @@ static __init void unreachable_devices(void)
178 pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0)); 178 pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0));
179 if (addr == 0 || 179 if (addr == 0 ||
180 readl((u32 __iomem *)mmcfg_virt_addr) != val1) { 180 readl((u32 __iomem *)mmcfg_virt_addr) != val1) {
181 set_bit(i, fallback_slots); 181 set_bit(i + 32*k, fallback_slots);
182 printk(KERN_NOTICE 182 printk(KERN_NOTICE
183 "PCI: No mmconfig possible on %x:%x\n", k, i); 183 "PCI: No mmconfig possible on %x:%x\n", k, i);
184 } 184 }
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 8a4f0d0d17a3..8f0a16a79a67 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -244,7 +244,8 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
244 244
245 if (scatterlen == 0) 245 if (scatterlen == 0)
246 memcpy(sc->request_buffer, buf, len); 246 memcpy(sc->request_buffer, buf, len);
247 else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) { 247 else for (slp = (struct scatterlist *)sc->request_buffer;
248 scatterlen-- > 0 && len > 0; slp++) {
248 unsigned thislen = min(len, slp->length); 249 unsigned thislen = min(len, slp->length);
249 250
250 memcpy(page_address(slp->page) + slp->offset, buf, thislen); 251 memcpy(page_address(slp->page) + slp->offset, buf, thislen);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 99761b81db44..0176556aeecc 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -55,7 +55,7 @@
55 55
56#define BAD_MADT_ENTRY(entry, end) ( \ 56#define BAD_MADT_ENTRY(entry, end) ( \
57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
58 ((acpi_table_entry_header *)entry)->length != sizeof(*entry)) 58 ((acpi_table_entry_header *)entry)->length < sizeof(*entry))
59 59
60#define PREFIX "ACPI: " 60#define PREFIX "ACPI: "
61 61
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
new file mode 100644
index 000000000000..5f41c1f7a5f3
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -0,0 +1,257 @@
1/*
2 * MPC8540 ADS Device Tree Source
3 *
4 * Copyright 2006 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12
13/ {
14 model = "MPC8540ADS";
15 compatible = "MPC85xxADS";
16 #address-cells = <1>;
17 #size-cells = <1>;
18 linux,phandle = <100>;
19
20 cpus {
21 #cpus = <1>;
22 #address-cells = <1>;
23 #size-cells = <0>;
24 linux,phandle = <200>;
25
26 PowerPC,8540@0 {
27 device_type = "cpu";
28 reg = <0>;
29 d-cache-line-size = <20>; // 32 bytes
30 i-cache-line-size = <20>; // 32 bytes
31 d-cache-size = <8000>; // L1, 32K
32 i-cache-size = <8000>; // L1, 32K
33 timebase-frequency = <0>; // 33 MHz, from uboot
34 bus-frequency = <0>; // 166 MHz
35 clock-frequency = <0>; // 825 MHz, from uboot
36 32-bit;
37 linux,phandle = <201>;
38 };
39 };
40
41 memory {
42 device_type = "memory";
43 linux,phandle = <300>;
44 reg = <00000000 08000000>; // 128M at 0x0
45 };
46
47 soc8540@e0000000 {
48 #address-cells = <1>;
49 #size-cells = <1>;
50 #interrupt-cells = <2>;
51 device_type = "soc";
52 ranges = <0 e0000000 00100000>;
53 reg = <e0000000 00100000>; // CCSRBAR 1M
54 bus-frequency = <0>;
55
56 i2c@3000 {
57 device_type = "i2c";
58 compatible = "fsl-i2c";
59 reg = <3000 100>;
60 interrupts = <1b 2>;
61 interrupt-parent = <40000>;
62 dfsrr;
63 };
64
65 mdio@24520 {
66 #address-cells = <1>;
67 #size-cells = <0>;
68 device_type = "mdio";
69 compatible = "gianfar";
70 reg = <24520 20>;
71 linux,phandle = <24520>;
72 ethernet-phy@0 {
73 linux,phandle = <2452000>;
74 interrupt-parent = <40000>;
75 interrupts = <35 1>;
76 reg = <0>;
77 device_type = "ethernet-phy";
78 };
79 ethernet-phy@1 {
80 linux,phandle = <2452001>;
81 interrupt-parent = <40000>;
82 interrupts = <35 1>;
83 reg = <1>;
84 device_type = "ethernet-phy";
85 };
86 ethernet-phy@3 {
87 linux,phandle = <2452003>;
88 interrupt-parent = <40000>;
89 interrupts = <37 1>;
90 reg = <3>;
91 device_type = "ethernet-phy";
92 };
93 };
94
95 ethernet@24000 {
96 #address-cells = <1>;
97 #size-cells = <0>;
98 device_type = "network";
99 model = "TSEC";
100 compatible = "gianfar";
101 reg = <24000 1000>;
102 address = [ 00 E0 0C 00 73 00 ];
103 local-mac-address = [ 00 E0 0C 00 73 00 ];
104 interrupts = <d 2 e 2 12 2>;
105 interrupt-parent = <40000>;
106 phy-handle = <2452000>;
107 };
108
109 ethernet@25000 {
110 #address-cells = <1>;
111 #size-cells = <0>;
112 device_type = "network";
113 model = "TSEC";
114 compatible = "gianfar";
115 reg = <25000 1000>;
116 address = [ 00 E0 0C 00 73 01 ];
117 local-mac-address = [ 00 E0 0C 00 73 01 ];
118 interrupts = <13 2 14 2 18 2>;
119 interrupt-parent = <40000>;
120 phy-handle = <2452001>;
121 };
122
123 ethernet@26000 {
124 #address-cells = <1>;
125 #size-cells = <0>;
126 device_type = "network";
127 model = "FEC";
128 compatible = "gianfar";
129 reg = <26000 1000>;
130 address = [ 00 E0 0C 00 73 02 ];
131 local-mac-address = [ 00 E0 0C 00 73 02 ];
132 interrupts = <19 2>;
133 interrupt-parent = <40000>;
134 phy-handle = <2452003>;
135 };
136
137 serial@4500 {
138 device_type = "serial";
139 compatible = "ns16550";
140 reg = <4500 100>; // reg base, size
141 clock-frequency = <0>; // should we fill in in uboot?
142 interrupts = <1a 2>;
143 interrupt-parent = <40000>;
144 };
145
146 serial@4600 {
147 device_type = "serial";
148 compatible = "ns16550";
149 reg = <4600 100>; // reg base, size
150 clock-frequency = <0>; // should we fill in in uboot?
151 interrupts = <1a 2>;
152 interrupt-parent = <40000>;
153 };
154 pci@8000 {
155 linux,phandle = <8000>;
156 interrupt-map-mask = <f800 0 0 7>;
157 interrupt-map = <
158
159 /* IDSEL 0x02 */
160 1000 0 0 1 40000 31 1
161 1000 0 0 2 40000 32 1
162 1000 0 0 3 40000 33 1
163 1000 0 0 4 40000 34 1
164
165 /* IDSEL 0x03 */
166 1800 0 0 1 40000 34 1
167 1800 0 0 2 40000 31 1
168 1800 0 0 3 40000 32 1
169 1800 0 0 4 40000 33 1
170
171 /* IDSEL 0x04 */
172 2000 0 0 1 40000 33 1
173 2000 0 0 2 40000 34 1
174 2000 0 0 3 40000 31 1
175 2000 0 0 4 40000 32 1
176
177 /* IDSEL 0x05 */
178 2800 0 0 1 40000 32 1
179 2800 0 0 2 40000 33 1
180 2800 0 0 3 40000 34 1
181 2800 0 0 4 40000 31 1
182
183 /* IDSEL 0x0c */
184 6000 0 0 1 40000 31 1
185 6000 0 0 2 40000 32 1
186 6000 0 0 3 40000 33 1
187 6000 0 0 4 40000 34 1
188
189 /* IDSEL 0x0d */
190 6800 0 0 1 40000 34 1
191 6800 0 0 2 40000 31 1
192 6800 0 0 3 40000 32 1
193 6800 0 0 4 40000 33 1
194
195 /* IDSEL 0x0e */
196 7000 0 0 1 40000 33 1
197 7000 0 0 2 40000 34 1
198 7000 0 0 3 40000 31 1
199 7000 0 0 4 40000 32 1
200
201 /* IDSEL 0x0f */
202 7800 0 0 1 40000 32 1
203 7800 0 0 2 40000 33 1
204 7800 0 0 3 40000 34 1
205 7800 0 0 4 40000 31 1
206
207 /* IDSEL 0x12 */
208 9000 0 0 1 40000 31 1
209 9000 0 0 2 40000 32 1
210 9000 0 0 3 40000 33 1
211 9000 0 0 4 40000 34 1
212
213 /* IDSEL 0x13 */
214 9800 0 0 1 40000 34 1
215 9800 0 0 2 40000 31 1
216 9800 0 0 3 40000 32 1
217 9800 0 0 4 40000 33 1
218
219 /* IDSEL 0x14 */
220 a000 0 0 1 40000 33 1
221 a000 0 0 2 40000 34 1
222 a000 0 0 3 40000 31 1
223 a000 0 0 4 40000 32 1
224
225 /* IDSEL 0x15 */
226 a800 0 0 1 40000 32 1
227 a800 0 0 2 40000 33 1
228 a800 0 0 3 40000 34 1
229 a800 0 0 4 40000 31 1>;
230 interrupt-parent = <40000>;
231 interrupts = <08 2>;
232 bus-range = <0 0>;
233 ranges = <02000000 0 80000000 80000000 0 20000000
234 01000000 0 00000000 e2000000 0 00100000>;
235 clock-frequency = <3f940aa>;
236 #interrupt-cells = <1>;
237 #size-cells = <2>;
238 #address-cells = <3>;
239 reg = <8000 1000>;
240 compatible = "85xx";
241 device_type = "pci";
242 };
243
244 pic@40000 {
245 linux,phandle = <40000>;
246 clock-frequency = <0>;
247 interrupt-controller;
248 #address-cells = <0>;
249 #interrupt-cells = <2>;
250 reg = <40000 40000>;
251 built-in;
252 compatible = "chrp,open-pic";
253 device_type = "open-pic";
254 big-endian;
255 };
256 };
257};
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
new file mode 100644
index 000000000000..7be0bc659e1c
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -0,0 +1,244 @@
1/*
2 * MPC8541 CDS Device Tree Source
3 *
4 * Copyright 2006 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12
13/ {
14 model = "MPC8541CDS";
15 compatible = "MPC85xxCDS";
16 #address-cells = <1>;
17 #size-cells = <1>;
18 linux,phandle = <100>;
19
20 cpus {
21 #cpus = <1>;
22 #address-cells = <1>;
23 #size-cells = <0>;
24 linux,phandle = <200>;
25
26 PowerPC,8541@0 {
27 device_type = "cpu";
28 reg = <0>;
29 d-cache-line-size = <20>; // 32 bytes
30 i-cache-line-size = <20>; // 32 bytes
31 d-cache-size = <8000>; // L1, 32K
32 i-cache-size = <8000>; // L1, 32K
33 timebase-frequency = <0>; // 33 MHz, from uboot
34 bus-frequency = <0>; // 166 MHz
35 clock-frequency = <0>; // 825 MHz, from uboot
36 32-bit;
37 linux,phandle = <201>;
38 };
39 };
40
41 memory {
42 device_type = "memory";
43 linux,phandle = <300>;
44 reg = <00000000 08000000>; // 128M at 0x0
45 };
46
47 soc8541@e0000000 {
48 #address-cells = <1>;
49 #size-cells = <1>;
50 #interrupt-cells = <2>;
51 device_type = "soc";
52 ranges = <0 e0000000 00100000>;
53 reg = <e0000000 00100000>; // CCSRBAR 1M
54 bus-frequency = <0>;
55
56 i2c@3000 {
57 device_type = "i2c";
58 compatible = "fsl-i2c";
59 reg = <3000 100>;
60 interrupts = <1b 2>;
61 interrupt-parent = <40000>;
62 dfsrr;
63 };
64
65 mdio@24520 {
66 #address-cells = <1>;
67 #size-cells = <0>;
68 device_type = "mdio";
69 compatible = "gianfar";
70 reg = <24520 20>;
71 linux,phandle = <24520>;
72 ethernet-phy@0 {
73 linux,phandle = <2452000>;
74 interrupt-parent = <40000>;
75 interrupts = <35 0>;
76 reg = <0>;
77 device_type = "ethernet-phy";
78 };
79 ethernet-phy@1 {
80 linux,phandle = <2452001>;
81 interrupt-parent = <40000>;
82 interrupts = <35 0>;
83 reg = <1>;
84 device_type = "ethernet-phy";
85 };
86 };
87
88 ethernet@24000 {
89 #address-cells = <1>;
90 #size-cells = <0>;
91 device_type = "network";
92 model = "TSEC";
93 compatible = "gianfar";
94 reg = <24000 1000>;
95 local-mac-address = [ 00 E0 0C 00 73 00 ];
96 interrupts = <d 2 e 2 12 2>;
97 interrupt-parent = <40000>;
98 phy-handle = <2452000>;
99 };
100
101 ethernet@25000 {
102 #address-cells = <1>;
103 #size-cells = <0>;
104 device_type = "network";
105 model = "TSEC";
106 compatible = "gianfar";
107 reg = <25000 1000>;
108 local-mac-address = [ 00 E0 0C 00 73 01 ];
109 interrupts = <13 2 14 2 18 2>;
110 interrupt-parent = <40000>;
111 phy-handle = <2452001>;
112 };
113
114 serial@4500 {
115 device_type = "serial";
116 compatible = "ns16550";
117 reg = <4500 100>; // reg base, size
118 clock-frequency = <0>; // should we fill in in uboot?
119 interrupts = <1a 2>;
120 interrupt-parent = <40000>;
121 };
122
123 serial@4600 {
124 device_type = "serial";
125 compatible = "ns16550";
126 reg = <4600 100>; // reg base, size
127 clock-frequency = <0>; // should we fill in in uboot?
128 interrupts = <1a 2>;
129 interrupt-parent = <40000>;
130 };
131
132 pci@8000 {
133 linux,phandle = <8000>;
134 interrupt-map-mask = <1f800 0 0 7>;
135 interrupt-map = <
136
137 /* IDSEL 0x10 */
138 08000 0 0 1 40000 30 1
139 08000 0 0 2 40000 31 1
140 08000 0 0 3 40000 32 1
141 08000 0 0 4 40000 33 1
142
143 /* IDSEL 0x11 */
144 08800 0 0 1 40000 30 1
145 08800 0 0 2 40000 31 1
146 08800 0 0 3 40000 32 1
147 08800 0 0 4 40000 33 1
148
149 /* IDSEL 0x12 (Slot 1) */
150 09000 0 0 1 40000 30 1
151 09000 0 0 2 40000 31 1
152 09000 0 0 3 40000 32 1
153 09000 0 0 4 40000 33 1
154
155 /* IDSEL 0x13 (Slot 2) */
156 09800 0 0 1 40000 31 1
157 09800 0 0 2 40000 32 1
158 09800 0 0 3 40000 33 1
159 09800 0 0 4 40000 30 1
160
161 /* IDSEL 0x14 (Slot 3) */
162 0a000 0 0 1 40000 32 1
163 0a000 0 0 2 40000 33 1
164 0a000 0 0 3 40000 30 1
165 0a000 0 0 4 40000 31 1
166
167 /* IDSEL 0x15 (Slot 4) */
168 0a800 0 0 1 40000 33 1
169 0a800 0 0 2 40000 30 1
170 0a800 0 0 3 40000 31 1
171 0a800 0 0 4 40000 32 1
172
173 /* Bus 1 (Tundra Bridge) */
174 /* IDSEL 0x12 (ISA bridge) */
175 19000 0 0 1 40000 30 1
176 19000 0 0 2 40000 31 1
177 19000 0 0 3 40000 32 1
178 19000 0 0 4 40000 33 1>;
179 interrupt-parent = <40000>;
180 interrupts = <08 2>;
181 bus-range = <0 0>;
182 ranges = <02000000 0 80000000 80000000 0 20000000
183 01000000 0 00000000 e2000000 0 00100000>;
184 clock-frequency = <3f940aa>;
185 #interrupt-cells = <1>;
186 #size-cells = <2>;
187 #address-cells = <3>;
188 reg = <8000 1000>;
189 compatible = "85xx";
190 device_type = "pci";
191
192 i8259@19000 {
193 clock-frequency = <0>;
194 interrupt-controller;
195 device_type = "interrupt-controller";
196 reg = <19000 0 0 0 1>;
197 #address-cells = <0>;
198 #interrupt-cells = <2>;
199 built-in;
200 compatible = "chrp,iic";
201 big-endian;
202 interrupts = <1>;
203 interrupt-parent = <8000>;
204 };
205 };
206
207 pci@9000 {
208 linux,phandle = <9000>;
209 interrupt-map-mask = <f800 0 0 7>;
210 interrupt-map = <
211
212 /* IDSEL 0x15 */
213 a800 0 0 1 40000 3b 1
214 a800 0 0 2 40000 3b 1
215 a800 0 0 3 40000 3b 1
216 a800 0 0 4 40000 3b 1>;
217 interrupt-parent = <40000>;
218 interrupts = <09 2>;
219 bus-range = <0 0>;
220 ranges = <02000000 0 a0000000 a0000000 0 20000000
221 01000000 0 00000000 e3000000 0 00100000>;
222 clock-frequency = <3f940aa>;
223 #interrupt-cells = <1>;
224 #size-cells = <2>;
225 #address-cells = <3>;
226 reg = <9000 1000>;
227 compatible = "85xx";
228 device_type = "pci";
229 };
230
231 pic@40000 {
232 linux,phandle = <40000>;
233 clock-frequency = <0>;
234 interrupt-controller;
235 #address-cells = <0>;
236 #interrupt-cells = <2>;
237 reg = <40000 40000>;
238 built-in;
239 compatible = "chrp,open-pic";
240 device_type = "open-pic";
241 big-endian;
242 };
243 };
244};
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
new file mode 100644
index 000000000000..893d7957c174
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -0,0 +1,287 @@
1/*
2 * MPC8548 CDS Device Tree Source
3 *
4 * Copyright 2006 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12
13/ {
14 model = "MPC8548CDS";
15 compatible = "MPC85xxCDS";
16 #address-cells = <1>;
17 #size-cells = <1>;
18 linux,phandle = <100>;
19
20 cpus {
21 #cpus = <1>;
22 #address-cells = <1>;
23 #size-cells = <0>;
24 linux,phandle = <200>;
25
26 PowerPC,8548@0 {
27 device_type = "cpu";
28 reg = <0>;
29 d-cache-line-size = <20>; // 32 bytes
30 i-cache-line-size = <20>; // 32 bytes
31 d-cache-size = <8000>; // L1, 32K
32 i-cache-size = <8000>; // L1, 32K
33 timebase-frequency = <0>; // 33 MHz, from uboot
34 bus-frequency = <0>; // 166 MHz
35 clock-frequency = <0>; // 825 MHz, from uboot
36 32-bit;
37 linux,phandle = <201>;
38 };
39 };
40
41 memory {
42 device_type = "memory";
43 linux,phandle = <300>;
44 reg = <00000000 08000000>; // 128M at 0x0
45 };
46
47 soc8548@e0000000 {
48 #address-cells = <1>;
49 #size-cells = <1>;
50 #interrupt-cells = <2>;
51 device_type = "soc";
52 ranges = <0 e0000000 00100000>;
53 reg = <e0000000 00100000>; // CCSRBAR 1M
54 bus-frequency = <0>;
55
56 i2c@3000 {
57 device_type = "i2c";
58 compatible = "fsl-i2c";
59 reg = <3000 100>;
60 interrupts = <1b 2>;
61 interrupt-parent = <40000>;
62 dfsrr;
63 };
64
65 mdio@24520 {
66 #address-cells = <1>;
67 #size-cells = <0>;
68 device_type = "mdio";
69 compatible = "gianfar";
70 reg = <24520 20>;
71 linux,phandle = <24520>;
72 ethernet-phy@0 {
73 linux,phandle = <2452000>;
74 interrupt-parent = <40000>;
75 interrupts = <35 0>;
76 reg = <0>;
77 device_type = "ethernet-phy";
78 };
79 ethernet-phy@1 {
80 linux,phandle = <2452001>;
81 interrupt-parent = <40000>;
82 interrupts = <35 0>;
83 reg = <1>;
84 device_type = "ethernet-phy";
85 };
86
87 ethernet-phy@2 {
88 linux,phandle = <2452002>;
89 interrupt-parent = <40000>;
90 interrupts = <35 0>;
91 reg = <2>;
92 device_type = "ethernet-phy";
93 };
94 ethernet-phy@3 {
95 linux,phandle = <2452003>;
96 interrupt-parent = <40000>;
97 interrupts = <35 0>;
98 reg = <3>;
99 device_type = "ethernet-phy";
100 };
101 };
102
103 ethernet@24000 {
104 #address-cells = <1>;
105 #size-cells = <0>;
106 device_type = "network";
107 model = "eTSEC";
108 compatible = "gianfar";
109 reg = <24000 1000>;
110 local-mac-address = [ 00 E0 0C 00 73 00 ];
111 interrupts = <d 2 e 2 12 2>;
112 interrupt-parent = <40000>;
113 phy-handle = <2452000>;
114 };
115
116 ethernet@25000 {
117 #address-cells = <1>;
118 #size-cells = <0>;
119 device_type = "network";
120 model = "eTSEC";
121 compatible = "gianfar";
122 reg = <25000 1000>;
123 local-mac-address = [ 00 E0 0C 00 73 01 ];
124 interrupts = <13 2 14 2 18 2>;
125 interrupt-parent = <40000>;
126 phy-handle = <2452001>;
127 };
128
129 ethernet@26000 {
130 #address-cells = <1>;
131 #size-cells = <0>;
132 device_type = "network";
133 model = "eTSEC";
134 compatible = "gianfar";
135 reg = <26000 1000>;
136 local-mac-address = [ 00 E0 0C 00 73 02 ];
137 interrupts = <f 2 10 2 11 2>;
138 interrupt-parent = <40000>;
139 phy-handle = <2452001>;
140 };
141
142/* eTSEC 4 is currently broken
143 ethernet@27000 {
144 #address-cells = <1>;
145 #size-cells = <0>;
146 device_type = "network";
147 model = "eTSEC";
148 compatible = "gianfar";
149 reg = <27000 1000>;
150 local-mac-address = [ 00 E0 0C 00 73 03 ];
151 interrupts = <15 2 16 2 17 2>;
152 interrupt-parent = <40000>;
153 phy-handle = <2452001>;
154 };
155 */
156
157 serial@4500 {
158 device_type = "serial";
159 compatible = "ns16550";
160 reg = <4500 100>; // reg base, size
161 clock-frequency = <0>; // should we fill in in uboot?
162 interrupts = <1a 2>;
163 interrupt-parent = <40000>;
164 };
165
166 serial@4600 {
167 device_type = "serial";
168 compatible = "ns16550";
169 reg = <4600 100>; // reg base, size
170 clock-frequency = <0>; // should we fill in in uboot?
171 interrupts = <1a 2>;
172 interrupt-parent = <40000>;
173 };
174
175 pci@8000 {
176 linux,phandle = <8000>;
177 interrupt-map-mask = <1f800 0 0 7>;
178 interrupt-map = <
179
180 /* IDSEL 0x10 */
181 08000 0 0 1 40000 30 1
182 08000 0 0 2 40000 31 1
183 08000 0 0 3 40000 32 1
184 08000 0 0 4 40000 33 1
185
186 /* IDSEL 0x11 */
187 08800 0 0 1 40000 30 1
188 08800 0 0 2 40000 31 1
189 08800 0 0 3 40000 32 1
190 08800 0 0 4 40000 33 1
191
192 /* IDSEL 0x12 (Slot 1) */
193 09000 0 0 1 40000 30 1
194 09000 0 0 2 40000 31 1
195 09000 0 0 3 40000 32 1
196 09000 0 0 4 40000 33 1
197
198 /* IDSEL 0x13 (Slot 2) */
199 09800 0 0 1 40000 31 1
200 09800 0 0 2 40000 32 1
201 09800 0 0 3 40000 33 1
202 09800 0 0 4 40000 30 1
203
204 /* IDSEL 0x14 (Slot 3) */
205 0a000 0 0 1 40000 32 1
206 0a000 0 0 2 40000 33 1
207 0a000 0 0 3 40000 30 1
208 0a000 0 0 4 40000 31 1
209
210 /* IDSEL 0x15 (Slot 4) */
211 0a800 0 0 1 40000 33 1
212 0a800 0 0 2 40000 30 1
213 0a800 0 0 3 40000 31 1
214 0a800 0 0 4 40000 32 1
215
216 /* Bus 1 (Tundra Bridge) */
217 /* IDSEL 0x12 (ISA bridge) */
218 19000 0 0 1 40000 30 1
219 19000 0 0 2 40000 31 1
220 19000 0 0 3 40000 32 1
221 19000 0 0 4 40000 33 1>;
222 interrupt-parent = <40000>;
223 interrupts = <08 2>;
224 bus-range = <0 0>;
225 ranges = <02000000 0 80000000 80000000 0 20000000
226 01000000 0 00000000 e2000000 0 00100000>;
227 clock-frequency = <3f940aa>;
228 #interrupt-cells = <1>;
229 #size-cells = <2>;
230 #address-cells = <3>;
231 reg = <8000 1000>;
232 compatible = "85xx";
233 device_type = "pci";
234
235 i8259@19000 {
236 clock-frequency = <0>;
237 interrupt-controller;
238 device_type = "interrupt-controller";
239 reg = <19000 0 0 0 1>;
240 #address-cells = <0>;
241 #interrupt-cells = <2>;
242 built-in;
243 compatible = "chrp,iic";
244 big-endian;
245 interrupts = <1>;
246 interrupt-parent = <8000>;
247 };
248 };
249
250 pci@9000 {
251 linux,phandle = <9000>;
252 interrupt-map-mask = <f800 0 0 7>;
253 interrupt-map = <
254
255 /* IDSEL 0x15 */
256 a800 0 0 1 40000 3b 1
257 a800 0 0 2 40000 3b 1
258 a800 0 0 3 40000 3b 1
259 a800 0 0 4 40000 3b 1>;
260 interrupt-parent = <40000>;
261 interrupts = <09 2>;
262 bus-range = <0 0>;
263 ranges = <02000000 0 a0000000 a0000000 0 20000000
264 01000000 0 00000000 e3000000 0 00100000>;
265 clock-frequency = <3f940aa>;
266 #interrupt-cells = <1>;
267 #size-cells = <2>;
268 #address-cells = <3>;
269 reg = <9000 1000>;
270 compatible = "85xx";
271 device_type = "pci";
272 };
273
274 pic@40000 {
275 linux,phandle = <40000>;
276 clock-frequency = <0>;
277 interrupt-controller;
278 #address-cells = <0>;
279 #interrupt-cells = <2>;
280 reg = <40000 40000>;
281 built-in;
282 compatible = "chrp,open-pic";
283 device_type = "open-pic";
284 big-endian;
285 };
286 };
287};
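A note on the interrupt-map entries above (the mpc8555cds file that follows uses the same layout): each row is <PCI unit address (3 cells), interrupt pin (1 cell), interrupt-parent phandle, parent specifier (2 cells)>. The first cell carries the device number in bits 11-15, so IDSEL 0x12 shows up as 09000, and interrupt-map-mask <1f800 0 0 7> keeps only the bus/device bits and the pin. The four slot rows rotate INTA..INTD across MPIC inputs 0x30..0x33. The sketch below is for this note only; the helper is invented here, and the reading of parent sense value 1 as level/active-low follows the usual open-pic two-cell convention rather than anything stated in the patch.

    /* Illustration only: decoding one slot entry such as "09000 0 0 1 40000 30 1". */
    #include <stdio.h>

    int main(void)
    {
        unsigned int unit_addr = 0x09000;                 /* first cell of the entry  */
        unsigned int device = (unit_addr >> 11) & 0x1f;   /* IDSEL / device number    */
        unsigned int pin = 1;                             /* INTA..INTD == 1..4       */
        /* Slots at IDSEL 0x12..0x15 rotate INTA..INTD over MPIC inputs 0x30..0x33. */
        unsigned int mpic_input = 0x30 + ((device - 0x12) + (pin - 1)) % 4;

        printf("IDSEL 0x%x, INT%c -> interrupt-parent <40000>, input 0x%x\n",
               device, 'A' + pin - 1, mpic_input);
        return 0;
    }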
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
new file mode 100644
index 000000000000..118f5a887651
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -0,0 +1,244 @@
1/*
2 * MPC8555 CDS Device Tree Source
3 *
4 * Copyright 2006 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12
13/ {
14 model = "MPC8555CDS";
15 compatible = "MPC85xxCDS";
16 #address-cells = <1>;
17 #size-cells = <1>;
18 linux,phandle = <100>;
19
20 cpus {
21 #cpus = <1>;
22 #address-cells = <1>;
23 #size-cells = <0>;
24 linux,phandle = <200>;
25
26 PowerPC,8555@0 {
27 device_type = "cpu";
28 reg = <0>;
29 d-cache-line-size = <20>; // 32 bytes
30 i-cache-line-size = <20>; // 32 bytes
31 d-cache-size = <8000>; // L1, 32K
32 i-cache-size = <8000>; // L1, 32K
33 timebase-frequency = <0>; // 33 MHz, from uboot
34 bus-frequency = <0>; // 166 MHz
35 clock-frequency = <0>; // 825 MHz, from uboot
36 32-bit;
37 linux,phandle = <201>;
38 };
39 };
40
41 memory {
42 device_type = "memory";
43 linux,phandle = <300>;
44 reg = <00000000 08000000>; // 128M at 0x0
45 };
46
47 soc8555@e0000000 {
48 #address-cells = <1>;
49 #size-cells = <1>;
50 #interrupt-cells = <2>;
51 device_type = "soc";
52 ranges = <0 e0000000 00100000>;
53 reg = <e0000000 00100000>; // CCSRBAR 1M
54 bus-frequency = <0>;
55
56 i2c@3000 {
57 device_type = "i2c";
58 compatible = "fsl-i2c";
59 reg = <3000 100>;
60 interrupts = <1b 2>;
61 interrupt-parent = <40000>;
62 dfsrr;
63 };
64
65 mdio@24520 {
66 #address-cells = <1>;
67 #size-cells = <0>;
68 device_type = "mdio";
69 compatible = "gianfar";
70 reg = <24520 20>;
71 linux,phandle = <24520>;
72 ethernet-phy@0 {
73 linux,phandle = <2452000>;
74 interrupt-parent = <40000>;
75 interrupts = <35 0>;
76 reg = <0>;
77 device_type = "ethernet-phy";
78 };
79 ethernet-phy@1 {
80 linux,phandle = <2452001>;
81 interrupt-parent = <40000>;
82 interrupts = <35 0>;
83 reg = <1>;
84 device_type = "ethernet-phy";
85 };
86 };
87
88 ethernet@24000 {
89 #address-cells = <1>;
90 #size-cells = <0>;
91 device_type = "network";
92 model = "TSEC";
93 compatible = "gianfar";
94 reg = <24000 1000>;
95 local-mac-address = [ 00 E0 0C 00 73 00 ];
96 interrupts = <0d 2 0e 2 12 2>;
97 interrupt-parent = <40000>;
98 phy-handle = <2452000>;
99 };
100
101 ethernet@25000 {
102 #address-cells = <1>;
103 #size-cells = <0>;
104 device_type = "network";
105 model = "TSEC";
106 compatible = "gianfar";
107 reg = <25000 1000>;
108 local-mac-address = [ 00 E0 0C 00 73 01 ];
109 interrupts = <13 2 14 2 18 2>;
110 interrupt-parent = <40000>;
111 phy-handle = <2452001>;
112 };
113
114 serial@4500 {
115 device_type = "serial";
116 compatible = "ns16550";
117 reg = <4500 100>; // reg base, size
118 clock-frequency = <0>; // should this be filled in by uboot?
119 interrupts = <1a 2>;
120 interrupt-parent = <40000>;
121 };
122
123 serial@4600 {
124 device_type = "serial";
125 compatible = "ns16550";
126 reg = <4600 100>; // reg base, size
127 clock-frequency = <0>; // should this be filled in by uboot?
128 interrupts = <1a 2>;
129 interrupt-parent = <40000>;
130 };
131
132 pci@8000 {
133 linux,phandle = <8000>;
134 interrupt-map-mask = <1f800 0 0 7>;
135 interrupt-map = <
136
137 /* IDSEL 0x10 */
138 08000 0 0 1 40000 30 1
139 08000 0 0 2 40000 31 1
140 08000 0 0 3 40000 32 1
141 08000 0 0 4 40000 33 1
142
143 /* IDSEL 0x11 */
144 08800 0 0 1 40000 30 1
145 08800 0 0 2 40000 31 1
146 08800 0 0 3 40000 32 1
147 08800 0 0 4 40000 33 1
148
149 /* IDSEL 0x12 (Slot 1) */
150 09000 0 0 1 40000 30 1
151 09000 0 0 2 40000 31 1
152 09000 0 0 3 40000 32 1
153 09000 0 0 4 40000 33 1
154
155 /* IDSEL 0x13 (Slot 2) */
156 09800 0 0 1 40000 31 1
157 09800 0 0 2 40000 32 1
158 09800 0 0 3 40000 33 1
159 09800 0 0 4 40000 30 1
160
161 /* IDSEL 0x14 (Slot 3) */
162 0a000 0 0 1 40000 32 1
163 0a000 0 0 2 40000 33 1
164 0a000 0 0 3 40000 30 1
165 0a000 0 0 4 40000 31 1
166
167 /* IDSEL 0x15 (Slot 4) */
168 0a800 0 0 1 40000 33 1
169 0a800 0 0 2 40000 30 1
170 0a800 0 0 3 40000 31 1
171 0a800 0 0 4 40000 32 1
172
173 /* Bus 1 (Tundra Bridge) */
174 /* IDSEL 0x12 (ISA bridge) */
175 19000 0 0 1 40000 30 1
176 19000 0 0 2 40000 31 1
177 19000 0 0 3 40000 32 1
178 19000 0 0 4 40000 33 1>;
179 interrupt-parent = <40000>;
180 interrupts = <08 2>;
181 bus-range = <0 0>;
182 ranges = <02000000 0 80000000 80000000 0 20000000
183 01000000 0 00000000 e2000000 0 00100000>;
184 clock-frequency = <3f940aa>;
185 #interrupt-cells = <1>;
186 #size-cells = <2>;
187 #address-cells = <3>;
188 reg = <8000 1000>;
189 compatible = "85xx";
190 device_type = "pci";
191
192 i8259@19000 {
193 clock-frequency = <0>;
194 interrupt-controller;
195 device_type = "interrupt-controller";
196 reg = <19000 0 0 0 1>;
197 #address-cells = <0>;
198 #interrupt-cells = <2>;
199 built-in;
200 compatible = "chrp,iic";
201 big-endian;
202 interrupts = <1>;
203 interrupt-parent = <8000>;
204 };
205 };
206
207 pci@9000 {
208 linux,phandle = <9000>;
209 interrupt-map-mask = <f800 0 0 7>;
210 interrupt-map = <
211
212 /* IDSEL 0x15 */
213 a800 0 0 1 40000 3b 1
214 a800 0 0 2 40000 3b 1
215 a800 0 0 3 40000 3b 1
216 a800 0 0 4 40000 3b 1>;
217 interrupt-parent = <40000>;
218 interrupts = <09 2>;
219 bus-range = <0 0>;
220 ranges = <02000000 0 a0000000 a0000000 0 20000000
221 01000000 0 00000000 e3000000 0 00100000>;
222 clock-frequency = <3f940aa>;
223 #interrupt-cells = <1>;
224 #size-cells = <2>;
225 #address-cells = <3>;
226 reg = <9000 1000>;
227 compatible = "85xx";
228 device_type = "pci";
229 };
230
231 pic@40000 {
232 linux,phandle = <40000>;
233 clock-frequency = <0>;
234 interrupt-controller;
235 #address-cells = <0>;
236 #interrupt-cells = <2>;
237 reg = <40000 40000>;
238 built-in;
239 compatible = "chrp,open-pic";
240 device_type = "open-pic";
241 big-endian;
242 };
243 };
244};
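The ranges properties in both pci nodes follow the standard OF PCI layout: <PCI space code and address (3 cells), parent address (1 cell), size (2 cells)>, with bits 24-25 of the first cell selecting the space (01 = I/O, 10 = 32-bit memory). The entries above therefore publish a 512 MB memory window at 0x80000000 (pci@8000) or 0xa0000000 (pci@9000) and a 1 MB I/O window at 0xe2000000 or 0xe3000000. A small decode, for illustration only:

    #include <stdio.h>

    int main(void)
    {
        /* The two ranges entries from pci@9000 above. */
        unsigned int ranges[2][6] = {
            { 0x02000000, 0, 0xa0000000, 0xa0000000, 0, 0x20000000 },
            { 0x01000000, 0, 0x00000000, 0xe3000000, 0, 0x00100000 },
        };
        for (int i = 0; i < 2; i++) {
            unsigned int space = (ranges[i][0] >> 24) & 0x3;
            printf("%s PCI 0x%08x -> CPU 0x%08x, size 0x%08x\n",
                   space == 1 ? "I/O space:     " : "32-bit memory: ",
                   ranges[i][2], ranges[i][3], ranges[i][5]);
        }
        return 0;
    }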
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 359ab89748e0..40a39291861f 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -115,6 +115,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
115 u64 addr; 115 u64 addr;
116 u32 *addrp; 116 u32 *addrp;
117 upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; 117 upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
118 struct device_node *tsi = of_get_parent(np);
118 119
119 /* We only support ports that have a clock frequency properly 120 /* We only support ports that have a clock frequency properly
120 * encoded in the device-tree. 121 * encoded in the device-tree.
@@ -134,7 +135,10 @@ static int __init add_legacy_soc_port(struct device_node *np,
134 /* Add port, irq will be dealt with later. We passed a translated 135 /* Add port, irq will be dealt with later. We passed a translated
135 * IO port value. It will be fixed up later along with the irq 136 * IO port value. It will be fixed up later along with the irq
136 */ 137 */
137 return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0); 138 if (tsi && !strcmp(tsi->type, "tsi-bridge"))
139 return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0);
140 else
141 return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
138} 142}
139 143
140static int __init add_legacy_isa_port(struct device_node *np, 144static int __init add_legacy_isa_port(struct device_node *np,
@@ -464,7 +468,7 @@ static int __init serial_dev_init(void)
464 fixup_port_irq(i, np, port); 468 fixup_port_irq(i, np, port);
465 if (port->iotype == UPIO_PORT) 469 if (port->iotype == UPIO_PORT)
466 fixup_port_pio(i, np, port); 470 fixup_port_pio(i, np, port);
467 if (port->iotype == UPIO_MEM) 471 if ((port->iotype == UPIO_MEM) || (port->iotype == UPIO_TSI))
468 fixup_port_mmio(i, np, port); 472 fixup_port_mmio(i, np, port);
469 } 473 }
470 474
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 6a7e997c401d..11052c212ad5 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -598,11 +598,6 @@ static struct device_node *of_irq_find_parent(struct device_node *child)
598 return p; 598 return p;
599} 599}
600 600
601static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
602{
603 return (((pin - 1) + slot) % 4) + 1;
604}
605
606/* This doesn't need to be called if you don't have any special workaround 601/* This doesn't need to be called if you don't have any special workaround
607 * flags to pass 602 * flags to pass
608 */ 603 */
@@ -891,6 +886,12 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
891} 886}
892EXPORT_SYMBOL_GPL(of_irq_map_one); 887EXPORT_SYMBOL_GPL(of_irq_map_one);
893 888
889#ifdef CONFIG_PCI
890static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
891{
892 return (((pin - 1) + slot) % 4) + 1;
893}
894
894int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) 895int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
895{ 896{
896 struct device_node *dn, *ppnode; 897 struct device_node *dn, *ppnode;
@@ -967,4 +968,4 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
967 return of_irq_map_raw(ppnode, &lspec, laddr, out_irq); 968 return of_irq_map_raw(ppnode, &lspec, laddr, out_irq);
968} 969}
969EXPORT_SYMBOL_GPL(of_irq_map_pci); 970EXPORT_SYMBOL_GPL(of_irq_map_pci);
970 971#endif /* CONFIG_PCI */
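of_irq_pci_swizzle() only moves here, under CONFIG_PCI; the arithmetic itself is the standard PCI-to-PCI bridge swizzle, in which each step up in device number rotates INTA..INTD by one position. A worked example with a stand-alone copy of the formula (the wrapper and the program around it are invented for this note):

    #include <stdio.h>

    static unsigned char swizzle(unsigned char slot, unsigned char pin)
    {
        return (((pin - 1) + slot) % 4) + 1;   /* same arithmetic as of_irq_pci_swizzle() */
    }

    int main(void)
    {
        /* Device 3 asserting INTB (pin 2) shows up as INTA (pin 1) on the parent bus. */
        printf("slot 3, INTB -> INT%c upstream\n", 'A' + swizzle(3, 2) - 1);
        return 0;
    }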
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 774c0a3c5019..18e59e43d2b3 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -417,7 +417,7 @@ static __inline__ void timer_check_rtc(void)
417/* 417/*
418 * This version of gettimeofday has microsecond resolution. 418 * This version of gettimeofday has microsecond resolution.
419 */ 419 */
420static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val) 420static inline void __do_gettimeofday(struct timeval *tv)
421{ 421{
422 unsigned long sec, usec; 422 unsigned long sec, usec;
423 u64 tb_ticks, xsec; 423 u64 tb_ticks, xsec;
@@ -431,7 +431,12 @@ static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
431 * without a divide (and in fact, without a multiply) 431 * without a divide (and in fact, without a multiply)
432 */ 432 */
433 temp_varp = do_gtod.varp; 433 temp_varp = do_gtod.varp;
434 tb_ticks = tb_val - temp_varp->tb_orig_stamp; 434
435 /* Sampling the time base must be done after loading
436 * do_gtod.varp in order to avoid racing with update_gtod.
437 */
438 data_barrier(temp_varp);
439 tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
435 temp_tb_to_xs = temp_varp->tb_to_xs; 440 temp_tb_to_xs = temp_varp->tb_to_xs;
436 temp_stamp_xsec = temp_varp->stamp_xsec; 441 temp_stamp_xsec = temp_varp->stamp_xsec;
437 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs); 442 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
@@ -464,7 +469,7 @@ void do_gettimeofday(struct timeval *tv)
464 tv->tv_usec = usec; 469 tv->tv_usec = usec;
465 return; 470 return;
466 } 471 }
467 __do_gettimeofday(tv, get_tb()); 472 __do_gettimeofday(tv);
468} 473}
469 474
470EXPORT_SYMBOL(do_gettimeofday); 475EXPORT_SYMBOL(do_gettimeofday);
@@ -650,6 +655,7 @@ void timer_interrupt(struct pt_regs * regs)
650 int next_dec; 655 int next_dec;
651 int cpu = smp_processor_id(); 656 int cpu = smp_processor_id();
652 unsigned long ticks; 657 unsigned long ticks;
658 u64 tb_next_jiffy;
653 659
654#ifdef CONFIG_PPC32 660#ifdef CONFIG_PPC32
655 if (atomic_read(&ppc_n_lost_interrupts) != 0) 661 if (atomic_read(&ppc_n_lost_interrupts) != 0)
@@ -691,11 +697,14 @@ void timer_interrupt(struct pt_regs * regs)
691 continue; 697 continue;
692 698
693 write_seqlock(&xtime_lock); 699 write_seqlock(&xtime_lock);
694 tb_last_jiffy += tb_ticks_per_jiffy; 700 tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
695 tb_last_stamp = per_cpu(last_jiffy, cpu); 701 if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
696 do_timer(regs); 702 tb_last_jiffy = tb_next_jiffy;
697 timer_recalc_offset(tb_last_jiffy); 703 tb_last_stamp = per_cpu(last_jiffy, cpu);
698 timer_check_rtc(); 704 do_timer(regs);
705 timer_recalc_offset(tb_last_jiffy);
706 timer_check_rtc();
707 }
699 write_sequnlock(&xtime_lock); 708 write_sequnlock(&xtime_lock);
700 } 709 }
701 710
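Two things change in time.c. First, __do_gettimeofday() used to receive a timebase value sampled by its caller; if update_gtod() republished do_gtod.varp between that sample and the load of the pointer, tb_orig_stamp could be newer than the sampled timebase and the subtraction would run backwards. Reading the timebase only after the pointer, with data_barrier() keeping the read from being hoisted, keeps the pair consistent. Second, timer_interrupt() now advances tb_last_jiffy and calls do_timer() only once the per-CPU last_jiffy stamp has actually reached the next jiffy boundary, so a tick another CPU already accounted for is not counted twice. A minimal user-space sketch of the ordering (the names and the compiler-barrier stand-in for data_barrier() are mine, not the kernel's):

    #include <stdio.h>

    #define data_barrier(x) __asm__ __volatile__("" : : "r" (x) : "memory")

    struct snapshot { unsigned long long tb_orig_stamp; };

    static struct snapshot cur = { 1000 };
    static struct snapshot *published = &cur;       /* re-pointed by the timer tick */

    static unsigned long long read_timebase(void) { return 2500; }   /* stand-in */

    int main(void)
    {
        struct snapshot *p = published;   /* 1: latch the current snapshot         */
        data_barrier(p);                  /* 2: the timebase read must stay below  */
        printf("ticks = %llu\n", read_timebase() - p->tb_orig_stamp);  /* 3: >= 0  */
        return 0;
    }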
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index e4d1713e8aea..9b352bd0a460 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -585,14 +585,14 @@ static void parse_fpe(struct pt_regs *regs)
585#define INST_MFSPR_PVR_MASK 0xfc1fffff 585#define INST_MFSPR_PVR_MASK 0xfc1fffff
586 586
587#define INST_DCBA 0x7c0005ec 587#define INST_DCBA 0x7c0005ec
588#define INST_DCBA_MASK 0x7c0007fe 588#define INST_DCBA_MASK 0xfc0007fe
589 589
590#define INST_MCRXR 0x7c000400 590#define INST_MCRXR 0x7c000400
591#define INST_MCRXR_MASK 0x7c0007fe 591#define INST_MCRXR_MASK 0xfc0007fe
592 592
593#define INST_STRING 0x7c00042a 593#define INST_STRING 0x7c00042a
594#define INST_STRING_MASK 0x7c0007fe 594#define INST_STRING_MASK 0xfc0007fe
595#define INST_STRING_GEN_MASK 0x7c00067e 595#define INST_STRING_GEN_MASK 0xfc00067e
596#define INST_LSWI 0x7c0004aa 596#define INST_LSWI 0x7c0004aa
597#define INST_LSWX 0x7c00042a 597#define INST_LSWX 0x7c00042a
598#define INST_STSWI 0x7c0005aa 598#define INST_STSWI 0x7c0005aa
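The mask widening matters because the primary opcode occupies the top six bits of the instruction word; masking with 0x7c0007fe dropped the most significant of those bits, so a word with primary opcode 63 but the same extended-opcode field would be mistaken for, say, mcrxr (primary opcode 31). A quick check (the 0xfc000400 word is made up for this note):

    #include <stdio.h>

    int main(void)
    {
        unsigned int mcrxr = 0x7c000400;   /* INST_MCRXR                         */
        unsigned int other = 0xfc000400;   /* primary opcode 63, same minor bits */

        printf("old mask 0x7c0007fe: %s\n",
               (other & 0x7c0007fe) == mcrxr ? "false match" : "no match");
        printf("new mask 0xfc0007fe: %s\n",
               (other & 0xfc0007fe) == mcrxr ? "false match" : "no match");
        return 0;
    }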
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 266b8b2ceac9..5615acc29527 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -153,7 +153,7 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
153 hpdp->pd = 0; 153 hpdp->pd = 0;
154 tlb->need_flush = 1; 154 tlb->need_flush = 1;
155 pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM, 155 pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
156 HUGEPTE_TABLE_SIZE-1)); 156 PGF_CACHENUM_MASK));
157} 157}
158 158
159#ifdef CONFIG_PPC_64K_PAGES 159#ifdef CONFIG_PPC_64K_PAGES
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 454fc53289ab..c3268d9877e4 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -14,7 +14,6 @@ config MPC8540_ADS
14config MPC85xx_CDS 14config MPC85xx_CDS
15 bool "Freescale MPC85xx CDS" 15 bool "Freescale MPC85xx CDS"
16 select DEFAULT_UIMAGE 16 select DEFAULT_UIMAGE
17 select PPC_I8259 if PCI
18 help 17 help
19 This option enables support for the MPC85xx CDS board 18 This option enables support for the MPC85xx CDS board
20 19
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 06a497676c99..9d2acfbbeccd 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -37,79 +37,7 @@ unsigned long isa_io_base = 0;
37unsigned long isa_mem_base = 0; 37unsigned long isa_mem_base = 0;
38#endif 38#endif
39 39
40/*
41 * Internal interrupts are all Level Sensitive, and Positive Polarity
42 *
43 * Note: Likely, this table and the following function should be
44 * obtained and derived from the OF Device Tree.
45 */
46static u_char mpc85xx_ads_openpic_initsenses[] __initdata = {
47 MPC85XX_INTERNAL_IRQ_SENSES,
48 0x0, /* External 0: */
49#if defined(CONFIG_PCI)
50 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 1: PCI slot 0 */
51 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 2: PCI slot 1 */
52 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 3: PCI slot 2 */
53 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 4: PCI slot 3 */
54#else
55 0x0, /* External 1: */
56 0x0, /* External 2: */
57 0x0, /* External 3: */
58 0x0, /* External 4: */
59#endif
60 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */
61 0x0, /* External 6: */
62 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 7: PHY */
63 0x0, /* External 8: */
64 0x0, /* External 9: */
65 0x0, /* External 10: */
66 0x0, /* External 11: */
67};
68
69#ifdef CONFIG_PCI 40#ifdef CONFIG_PCI
70/*
71 * interrupt routing
72 */
73
74int
75mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
76{
77 static char pci_irq_table[][4] =
78 /*
79 * This is little evil, but works around the fact
80 * that revA boards have IDSEL starting at 18
81 * and others boards (older) start at 12
82 *
83 * PCI IDSEL/INTPIN->INTLINE
84 * A B C D
85 */
86 {
87 {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 2 */
88 {PIRQD, PIRQA, PIRQB, PIRQC},
89 {PIRQC, PIRQD, PIRQA, PIRQB},
90 {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 5 */
91 {0, 0, 0, 0}, /* -- */
92 {0, 0, 0, 0}, /* -- */
93 {0, 0, 0, 0}, /* -- */
94 {0, 0, 0, 0}, /* -- */
95 {0, 0, 0, 0}, /* -- */
96 {0, 0, 0, 0}, /* -- */
97 {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 12 */
98 {PIRQD, PIRQA, PIRQB, PIRQC},
99 {PIRQC, PIRQD, PIRQA, PIRQB},
100 {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 15 */
101 {0, 0, 0, 0}, /* -- */
102 {0, 0, 0, 0}, /* -- */
103 {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 18 */
104 {PIRQD, PIRQA, PIRQB, PIRQC},
105 {PIRQC, PIRQD, PIRQA, PIRQB},
106 {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 21 */
107 };
108
109 const long min_idsel = 2, max_idsel = 21, irqs_per_slot = 4;
110 return PCI_IRQ_TABLE_LOOKUP;
111}
112
113int 41int
114mpc85xx_exclude_device(u_char bus, u_char devfn) 42mpc85xx_exclude_device(u_char bus, u_char devfn)
115{ 43{
@@ -119,44 +47,63 @@ mpc85xx_exclude_device(u_char bus, u_char devfn)
119 return PCIBIOS_SUCCESSFUL; 47 return PCIBIOS_SUCCESSFUL;
120} 48}
121 49
50void __init
51mpc85xx_pcibios_fixup(void)
52{
53 struct pci_dev *dev = NULL;
54
55 for_each_pci_dev(dev)
56 pci_read_irq_line(dev);
57}
122#endif /* CONFIG_PCI */ 58#endif /* CONFIG_PCI */
123 59
124 60
125void __init mpc85xx_ads_pic_init(void) 61void __init mpc85xx_ads_pic_init(void)
126{ 62{
127 struct mpic *mpic1; 63 struct mpic *mpic;
128 phys_addr_t OpenPIC_PAddr; 64 struct resource r;
129 65 struct device_node *np = NULL;
130 /* Determine the Physical Address of the OpenPIC regs */ 66
131 OpenPIC_PAddr = get_immrbase() + MPC85xx_OPENPIC_OFFSET; 67 np = of_find_node_by_type(np, "open-pic");
132 68
133 mpic1 = mpic_alloc(OpenPIC_PAddr, 69 if (np == NULL) {
134 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 70 printk(KERN_ERR "Could not find open-pic node\n");
135 4, MPC85xx_OPENPIC_IRQ_OFFSET, 0, 250, 71 return;
136 mpc85xx_ads_openpic_initsenses, 72 }
137 sizeof(mpc85xx_ads_openpic_initsenses), 73
138 " OpenPIC "); 74 if(of_address_to_resource(np, 0, &r)) {
139 BUG_ON(mpic1 == NULL); 75 printk(KERN_ERR "Could not map mpic register space\n");
140 mpic_assign_isu(mpic1, 0, OpenPIC_PAddr + 0x10200); 76 of_node_put(np);
141 mpic_assign_isu(mpic1, 1, OpenPIC_PAddr + 0x10280); 77 return;
142 mpic_assign_isu(mpic1, 2, OpenPIC_PAddr + 0x10300); 78 }
143 mpic_assign_isu(mpic1, 3, OpenPIC_PAddr + 0x10380); 79
144 mpic_assign_isu(mpic1, 4, OpenPIC_PAddr + 0x10400); 80 mpic = mpic_alloc(np, r.start,
145 mpic_assign_isu(mpic1, 5, OpenPIC_PAddr + 0x10480); 81 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
146 mpic_assign_isu(mpic1, 6, OpenPIC_PAddr + 0x10500); 82 4, 0, " OpenPIC ");
147 mpic_assign_isu(mpic1, 7, OpenPIC_PAddr + 0x10580); 83 BUG_ON(mpic == NULL);
148 84 of_node_put(np);
149 /* dummy mappings to get to 48 */ 85
150 mpic_assign_isu(mpic1, 8, OpenPIC_PAddr + 0x10600); 86 mpic_assign_isu(mpic, 0, r.start + 0x10200);
151 mpic_assign_isu(mpic1, 9, OpenPIC_PAddr + 0x10680); 87 mpic_assign_isu(mpic, 1, r.start + 0x10280);
152 mpic_assign_isu(mpic1, 10, OpenPIC_PAddr + 0x10700); 88 mpic_assign_isu(mpic, 2, r.start + 0x10300);
153 mpic_assign_isu(mpic1, 11, OpenPIC_PAddr + 0x10780); 89 mpic_assign_isu(mpic, 3, r.start + 0x10380);
154 90 mpic_assign_isu(mpic, 4, r.start + 0x10400);
155 /* External ints */ 91 mpic_assign_isu(mpic, 5, r.start + 0x10480);
156 mpic_assign_isu(mpic1, 12, OpenPIC_PAddr + 0x10000); 92 mpic_assign_isu(mpic, 6, r.start + 0x10500);
157 mpic_assign_isu(mpic1, 13, OpenPIC_PAddr + 0x10080); 93 mpic_assign_isu(mpic, 7, r.start + 0x10580);
158 mpic_assign_isu(mpic1, 14, OpenPIC_PAddr + 0x10100); 94
159 mpic_init(mpic1); 95 /* Unused on this platform (leave room for 8548) */
96 mpic_assign_isu(mpic, 8, r.start + 0x10600);
97 mpic_assign_isu(mpic, 9, r.start + 0x10680);
98 mpic_assign_isu(mpic, 10, r.start + 0x10700);
99 mpic_assign_isu(mpic, 11, r.start + 0x10780);
100
101 /* External Interrupts */
102 mpic_assign_isu(mpic, 12, r.start + 0x10000);
103 mpic_assign_isu(mpic, 13, r.start + 0x10080);
104 mpic_assign_isu(mpic, 14, r.start + 0x10100);
105
106 mpic_init(mpic);
160} 107}
161 108
162/* 109/*
@@ -165,7 +112,9 @@ void __init mpc85xx_ads_pic_init(void)
165static void __init mpc85xx_ads_setup_arch(void) 112static void __init mpc85xx_ads_setup_arch(void)
166{ 113{
167 struct device_node *cpu; 114 struct device_node *cpu;
115#ifdef CONFIG_PCI
168 struct device_node *np; 116 struct device_node *np;
117#endif
169 118
170 if (ppc_md.progress) 119 if (ppc_md.progress)
171 ppc_md.progress("mpc85xx_ads_setup_arch()", 0); 120 ppc_md.progress("mpc85xx_ads_setup_arch()", 0);
@@ -186,8 +135,7 @@ static void __init mpc85xx_ads_setup_arch(void)
186 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) 135 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
187 add_bridge(np); 136 add_bridge(np);
188 137
189 ppc_md.pci_swizzle = common_swizzle; 138 ppc_md.pcibios_fixup = mpc85xx_pcibios_fixup;
190 ppc_md.pci_map_irq = mpc85xx_map_irq;
191 ppc_md.pci_exclude_device = mpc85xx_exclude_device; 139 ppc_md.pci_exclude_device = mpc85xx_exclude_device;
192#endif 140#endif
193 141
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 18e6e11f7020..1d357d32a29f 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -57,94 +57,8 @@ unsigned long isa_mem_base = 0;
57static int cds_pci_slot = 2; 57static int cds_pci_slot = 2;
58static volatile u8 *cadmus; 58static volatile u8 *cadmus;
59 59
60/*
61 * Internal interrupts are all Level Sensitive, and Positive Polarity
62 *
63 * Note: Likely, this table and the following function should be
64 * obtained and derived from the OF Device Tree.
65 */
66static u_char mpc85xx_cds_openpic_initsenses[] __initdata = {
67 MPC85XX_INTERNAL_IRQ_SENSES,
68#if defined(CONFIG_PCI)
69 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Ext 0: PCI slot 0 */
70 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 1: PCI slot 1 */
71 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 2: PCI slot 2 */
72 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 3: PCI slot 3 */
73#else
74 0x0, /* External 0: */
75 0x0, /* External 1: */
76 0x0, /* External 2: */
77 0x0, /* External 3: */
78#endif
79 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */
80 0x0, /* External 6: */
81 0x0, /* External 7: */
82 0x0, /* External 8: */
83 0x0, /* External 9: */
84 0x0, /* External 10: */
85#ifdef CONFIG_PCI
86 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 11: PCI2 slot 0 */
87#else
88 0x0, /* External 11: */
89#endif
90};
91
92 60
93#ifdef CONFIG_PCI 61#ifdef CONFIG_PCI
94/*
95 * interrupt routing
96 */
97int
98mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
99{
100 struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
101
102 if (!hose->index)
103 {
104 /* Handle PCI1 interrupts */
105 char pci_irq_table[][4] =
106 /*
107 * PCI IDSEL/INTPIN->INTLINE
108 * A B C D
109 */
110
111 /* Note IRQ assignment for slots is based on which slot the elysium is
112 * in -- in this setup elysium is in slot #2 (this PIRQA as first
113 * interrupt on slot */
114 {
115 { 0, 1, 2, 3 }, /* 16 - PMC */
116 { 0, 1, 2, 3 }, /* 17 P2P (Tsi320) */
117 { 0, 1, 2, 3 }, /* 18 - Slot 1 */
118 { 1, 2, 3, 0 }, /* 19 - Slot 2 */
119 { 2, 3, 0, 1 }, /* 20 - Slot 3 */
120 { 3, 0, 1, 2 }, /* 21 - Slot 4 */
121 };
122
123 const long min_idsel = 16, max_idsel = 21, irqs_per_slot = 4;
124 int i, j;
125
126 for (i = 0; i < 6; i++)
127 for (j = 0; j < 4; j++)
128 pci_irq_table[i][j] =
129 ((pci_irq_table[i][j] + 5 -
130 cds_pci_slot) & 0x3) + PIRQ0A;
131
132 return PCI_IRQ_TABLE_LOOKUP;
133 } else {
134 /* Handle PCI2 interrupts (if we have one) */
135 char pci_irq_table[][4] =
136 {
137 /*
138 * We only have one slot and one interrupt
139 * going to PIRQA - PIRQD */
140 { PIRQ1A, PIRQ1A, PIRQ1A, PIRQ1A }, /* 21 - slot 0 */
141 };
142
143 const long min_idsel = 21, max_idsel = 21, irqs_per_slot = 4;
144
145 return PCI_IRQ_TABLE_LOOKUP;
146 }
147}
148 62
149#define ARCADIA_HOST_BRIDGE_IDSEL 17 63#define ARCADIA_HOST_BRIDGE_IDSEL 17
150#define ARCADIA_2ND_BRIDGE_IDSEL 3 64#define ARCADIA_2ND_BRIDGE_IDSEL 3
@@ -210,50 +124,104 @@ mpc85xx_cds_pcibios_fixup(void)
210 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11); 124 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
211 pci_dev_put(dev); 125 pci_dev_put(dev);
212 } 126 }
127
128 /* Now map all the PCI irqs */
129 dev = NULL;
130 for_each_pci_dev(dev)
131 pci_read_irq_line(dev);
132}
133
134#ifdef CONFIG_PPC_I8259
135#warning The i8259 PIC support is currently broken
136static void mpc85xx_8259_cascade(unsigned int irq, struct
137 irq_desc *desc, struct pt_regs *regs)
138{
139 unsigned int cascade_irq = i8259_irq(regs);
140
141 if (cascade_irq != NO_IRQ)
142 generic_handle_irq(cascade_irq, regs);
143
144 desc->chip->eoi(irq);
213} 145}
146#endif /* PPC_I8259 */
214#endif /* CONFIG_PCI */ 147#endif /* CONFIG_PCI */
215 148
216void __init mpc85xx_cds_pic_init(void) 149void __init mpc85xx_cds_pic_init(void)
217{ 150{
218 struct mpic *mpic1; 151 struct mpic *mpic;
219 phys_addr_t OpenPIC_PAddr; 152 struct resource r;
153 struct device_node *np = NULL;
154 struct device_node *cascade_node = NULL;
155 int cascade_irq;
220 156
221 /* Determine the Physical Address of the OpenPIC regs */ 157 np = of_find_node_by_type(np, "open-pic");
222 OpenPIC_PAddr = get_immrbase() + MPC85xx_OPENPIC_OFFSET; 158
159 if (np == NULL) {
160 printk(KERN_ERR "Could not find open-pic node\n");
161 return;
162 }
223 163
224 mpic1 = mpic_alloc(OpenPIC_PAddr, 164 if (of_address_to_resource(np, 0, &r)) {
165 printk(KERN_ERR "Failed to map mpic register space\n");
166 of_node_put(np);
167 return;
168 }
169
170 mpic = mpic_alloc(np, r.start,
225 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 171 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
226 4, MPC85xx_OPENPIC_IRQ_OFFSET, 0, 250, 172 4, 0, " OpenPIC ");
227 mpc85xx_cds_openpic_initsenses, 173 BUG_ON(mpic == NULL);
228 sizeof(mpc85xx_cds_openpic_initsenses), " OpenPIC "); 174
229 BUG_ON(mpic1 == NULL); 175 /* Return the mpic node */
230 mpic_assign_isu(mpic1, 0, OpenPIC_PAddr + 0x10200); 176 of_node_put(np);
231 mpic_assign_isu(mpic1, 1, OpenPIC_PAddr + 0x10280); 177
232 mpic_assign_isu(mpic1, 2, OpenPIC_PAddr + 0x10300); 178 mpic_assign_isu(mpic, 0, r.start + 0x10200);
233 mpic_assign_isu(mpic1, 3, OpenPIC_PAddr + 0x10380); 179 mpic_assign_isu(mpic, 1, r.start + 0x10280);
234 mpic_assign_isu(mpic1, 4, OpenPIC_PAddr + 0x10400); 180 mpic_assign_isu(mpic, 2, r.start + 0x10300);
235 mpic_assign_isu(mpic1, 5, OpenPIC_PAddr + 0x10480); 181 mpic_assign_isu(mpic, 3, r.start + 0x10380);
236 mpic_assign_isu(mpic1, 6, OpenPIC_PAddr + 0x10500); 182 mpic_assign_isu(mpic, 4, r.start + 0x10400);
237 mpic_assign_isu(mpic1, 7, OpenPIC_PAddr + 0x10580); 183 mpic_assign_isu(mpic, 5, r.start + 0x10480);
238 184 mpic_assign_isu(mpic, 6, r.start + 0x10500);
239 /* dummy mappings to get to 48 */ 185 mpic_assign_isu(mpic, 7, r.start + 0x10580);
240 mpic_assign_isu(mpic1, 8, OpenPIC_PAddr + 0x10600); 186
241 mpic_assign_isu(mpic1, 9, OpenPIC_PAddr + 0x10680); 187 /* Used only for 8548 so far, but no harm in
242 mpic_assign_isu(mpic1, 10, OpenPIC_PAddr + 0x10700); 188 * allocating them for everyone */
243 mpic_assign_isu(mpic1, 11, OpenPIC_PAddr + 0x10780); 189 mpic_assign_isu(mpic, 8, r.start + 0x10600);
244 190 mpic_assign_isu(mpic, 9, r.start + 0x10680);
245 /* External ints */ 191 mpic_assign_isu(mpic, 10, r.start + 0x10700);
246 mpic_assign_isu(mpic1, 12, OpenPIC_PAddr + 0x10000); 192 mpic_assign_isu(mpic, 11, r.start + 0x10780);
247 mpic_assign_isu(mpic1, 13, OpenPIC_PAddr + 0x10080); 193
248 mpic_assign_isu(mpic1, 14, OpenPIC_PAddr + 0x10100); 194 /* External Interrupts */
249 195 mpic_assign_isu(mpic, 12, r.start + 0x10000);
250 mpic_init(mpic1); 196 mpic_assign_isu(mpic, 13, r.start + 0x10080);
197 mpic_assign_isu(mpic, 14, r.start + 0x10100);
198
199 mpic_init(mpic);
200
201#ifdef CONFIG_PPC_I8259
202 /* Initialize the i8259 controller */
203 for_each_node_by_type(np, "interrupt-controller")
204 if (device_is_compatible(np, "chrp,iic")) {
205 cascade_node = np;
206 break;
207 }
208
209 if (cascade_node == NULL) {
210 printk(KERN_DEBUG "Could not find i8259 PIC\n");
211 return;
212 }
251 213
252#ifdef CONFIG_PCI 214 cascade_irq = irq_of_parse_and_map(cascade_node, 0);
253 mpic_setup_cascade(PIRQ0A, i8259_irq_cascade, NULL); 215 if (cascade_irq == NO_IRQ) {
216 printk(KERN_ERR "Failed to map cascade interrupt\n");
217 return;
218 }
254 219
255 i8259_init(0,0); 220 i8259_init(cascade_node, 0);
256#endif 221 of_node_put(cascade_node);
222
223 set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade);
224#endif /* CONFIG_PPC_I8259 */
257} 225}
258 226
259 227
@@ -298,8 +266,6 @@ mpc85xx_cds_setup_arch(void)
298 add_bridge(np); 266 add_bridge(np);
299 267
300 ppc_md.pcibios_fixup = mpc85xx_cds_pcibios_fixup; 268 ppc_md.pcibios_fixup = mpc85xx_cds_pcibios_fixup;
301 ppc_md.pci_swizzle = common_swizzle;
302 ppc_md.pci_map_irq = mpc85xx_map_irq;
303 ppc_md.pci_exclude_device = mpc85xx_exclude_device; 269 ppc_md.pci_exclude_device = mpc85xx_exclude_device;
304#endif 270#endif
305 271
diff --git a/arch/powerpc/platforms/86xx/mpc8641_hpcn.h b/arch/powerpc/platforms/86xx/mpc8641_hpcn.h
index 5d2bcf78cef7..41e554c4af94 100644
--- a/arch/powerpc/platforms/86xx/mpc8641_hpcn.h
+++ b/arch/powerpc/platforms/86xx/mpc8641_hpcn.h
@@ -16,38 +16,6 @@
16 16
17#include <linux/init.h> 17#include <linux/init.h>
18 18
19/* PCI interrupt controller */
20#define PIRQA 3
21#define PIRQB 4
22#define PIRQC 5
23#define PIRQD 6
24#define PIRQ7 7
25#define PIRQE 9
26#define PIRQF 10
27#define PIRQG 11
28#define PIRQH 12
29
30/* PCI-Express memory map */
31#define MPC86XX_PCIE_LOWER_IO 0x00000000
32#define MPC86XX_PCIE_UPPER_IO 0x00ffffff
33
34#define MPC86XX_PCIE_LOWER_MEM 0x80000000
35#define MPC86XX_PCIE_UPPER_MEM 0x9fffffff
36
37#define MPC86XX_PCIE_IO_BASE 0xe2000000
38#define MPC86XX_PCIE_MEM_OFFSET 0x00000000
39
40#define MPC86XX_PCIE_IO_SIZE 0x01000000
41
42#define PCIE1_CFG_ADDR_OFFSET (0x8000)
43#define PCIE1_CFG_DATA_OFFSET (0x8004)
44
45#define PCIE2_CFG_ADDR_OFFSET (0x9000)
46#define PCIE2_CFG_DATA_OFFSET (0x9004)
47
48#define MPC86xx_PCIE_OFFSET PCIE1_CFG_ADDR_OFFSET
49#define MPC86xx_PCIE_SIZE (0x1000)
50
51#define MPC86XX_RSTCR_OFFSET (0xe00b0) /* Reset Control Register */ 19#define MPC86XX_RSTCR_OFFSET (0xe00b0) /* Reset Control Register */
52 20
53#endif /* __MPC8641_HPCN_H__ */ 21#endif /* __MPC8641_HPCN_H__ */
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index ebae73eb0063..146da3001c67 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -37,6 +37,14 @@
37#include "mpc86xx.h" 37#include "mpc86xx.h"
38#include "mpc8641_hpcn.h" 38#include "mpc8641_hpcn.h"
39 39
40#undef DEBUG
41
42#ifdef DEBUG
43#define DBG(fmt...) do { printk(KERN_ERR fmt); } while(0)
44#else
45#define DBG(fmt...) do { } while(0)
46#endif
47
40#ifndef CONFIG_PCI 48#ifndef CONFIG_PCI
41unsigned long isa_io_base = 0; 49unsigned long isa_io_base = 0;
42unsigned long isa_mem_base = 0; 50unsigned long isa_mem_base = 0;
@@ -44,205 +52,215 @@ unsigned long pci_dram_offset = 0;
44#endif 52#endif
45 53
46 54
47/* 55static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc,
48 * Internal interrupts are all Level Sensitive, and Positive Polarity 56 struct pt_regs *regs)
49 */ 57{
50 58 unsigned int cascade_irq = i8259_irq(regs);
51static u_char mpc86xx_hpcn_openpic_initsenses[] __initdata = { 59 if (cascade_irq != NO_IRQ)
52 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: Reserved */ 60 generic_handle_irq(cascade_irq, regs);
53 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: MCM */ 61 desc->chip->eoi(irq);
54 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */ 62}
55 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */
56 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */
57 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */
58 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */
59 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */
60 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCIE1 */
61 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: PCIE2 */
62 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: Reserved */
63 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: Reserved */
64 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: DUART2 */
65 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 1 Transmit */
66 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 1 Receive */
67 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: TSEC 3 transmit */
68 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: TSEC 3 receive */
69 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: TSEC 3 error */
70 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 1 Receive/Transmit Error */
71 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 2 Transmit */
72 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 2 Receive */
73 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: TSEC 4 transmit */
74 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: TSEC 4 receive */
75 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: TSEC 4 error */
76 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 2 Receive/Transmit Error */
77 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Unused */
78 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART1 */
79 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */
80 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */
81 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */
82 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: Unused */
83 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */
84 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 32: SRIO error/write-port unit */
85 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 33: SRIO outbound doorbell */
86 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 34: SRIO inbound doorbell */
87 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 35: Unused */
88 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 36: Unused */
89 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 37: SRIO outbound message unit 1 */
90 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 38: SRIO inbound message unit 1 */
91 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 39: SRIO outbound message unit 2 */
92 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 40: SRIO inbound message unit 2 */
93 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 41: Unused */
94 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 42: Unused */
95 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 43: Unused */
96 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 44: Unused */
97 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 45: Unused */
98 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 46: Unused */
99 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 47: Unused */
100 0x0, /* External 0: */
101 0x0, /* External 1: */
102 0x0, /* External 2: */
103 0x0, /* External 3: */
104 0x0, /* External 4: */
105 0x0, /* External 5: */
106 0x0, /* External 6: */
107 0x0, /* External 7: */
108 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 8: Pixis FPGA */
109 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* External 9: ULI 8259 INTR Cascade */
110 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 10: Quad ETH PHY */
111 0x0, /* External 11: */
112 0x0,
113 0x0,
114 0x0,
115 0x0,
116};
117
118 63
119void __init 64void __init
120mpc86xx_hpcn_init_irq(void) 65mpc86xx_hpcn_init_irq(void)
121{ 66{
122 struct mpic *mpic1; 67 struct mpic *mpic1;
68 struct device_node *np, *cascade_node = NULL;
69 int cascade_irq;
123 phys_addr_t openpic_paddr; 70 phys_addr_t openpic_paddr;
124 71
72 np = of_find_node_by_type(NULL, "open-pic");
73 if (np == NULL)
74 return;
75
125 /* Determine the Physical Address of the OpenPIC regs */ 76 /* Determine the Physical Address of the OpenPIC regs */
126 openpic_paddr = get_immrbase() + MPC86xx_OPENPIC_OFFSET; 77 openpic_paddr = get_immrbase() + MPC86xx_OPENPIC_OFFSET;
127 78
128 /* Alloc mpic structure and per isu has 16 INT entries. */ 79 /* Alloc mpic structure and per isu has 16 INT entries. */
129 mpic1 = mpic_alloc(openpic_paddr, 80 mpic1 = mpic_alloc(np, openpic_paddr,
130 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 81 MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
131 16, MPC86xx_OPENPIC_IRQ_OFFSET, 0, 250, 82 16, NR_IRQS - 4,
132 mpc86xx_hpcn_openpic_initsenses,
133 sizeof(mpc86xx_hpcn_openpic_initsenses),
134 " MPIC "); 83 " MPIC ");
135 BUG_ON(mpic1 == NULL); 84 BUG_ON(mpic1 == NULL);
136 85
86 mpic_assign_isu(mpic1, 0, openpic_paddr + 0x10000);
87
137 /* 48 Internal Interrupts */ 88 /* 48 Internal Interrupts */
138 mpic_assign_isu(mpic1, 0, openpic_paddr + 0x10200); 89 mpic_assign_isu(mpic1, 1, openpic_paddr + 0x10200);
139 mpic_assign_isu(mpic1, 1, openpic_paddr + 0x10400); 90 mpic_assign_isu(mpic1, 2, openpic_paddr + 0x10400);
140 mpic_assign_isu(mpic1, 2, openpic_paddr + 0x10600); 91 mpic_assign_isu(mpic1, 3, openpic_paddr + 0x10600);
141 92
142 /* 16 External interrupts */ 93 /* 16 External interrupts
143 mpic_assign_isu(mpic1, 3, openpic_paddr + 0x10000); 94 * Moving them from [0 - 15] to [64 - 79]
95 */
96 mpic_assign_isu(mpic1, 4, openpic_paddr + 0x10000);
144 97
145 mpic_init(mpic1); 98 mpic_init(mpic1);
146 99
147#ifdef CONFIG_PCI 100#ifdef CONFIG_PCI
148 mpic_setup_cascade(MPC86xx_IRQ_EXT9, i8259_irq_cascade, NULL); 101 /* Initialize i8259 controller */
149 i8259_init(0, I8259_OFFSET); 102 for_each_node_by_type(np, "interrupt-controller")
150#endif 103 if (device_is_compatible(np, "chrp,iic")) {
151} 104 cascade_node = np;
105 break;
106 }
107 if (cascade_node == NULL) {
108 printk(KERN_DEBUG "mpc86xxhpcn: no ISA interrupt controller\n");
109 return;
110 }
152 111
112 cascade_irq = irq_of_parse_and_map(cascade_node, 0);
113 if (cascade_irq == NO_IRQ) {
114 printk(KERN_ERR "mpc86xxhpcn: failed to map cascade interrupt");
115 return;
116 }
117 DBG("mpc86xxhpcn: cascade mapped to irq %d\n", cascade_irq);
153 118
119 i8259_init(cascade_node, 0);
120 set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade);
121#endif
122}
154 123
155#ifdef CONFIG_PCI 124#ifdef CONFIG_PCI
156/*
157 * interrupt routing
158 */
159 125
160int 126enum pirq{PIRQA = 8, PIRQB, PIRQC, PIRQD, PIRQE, PIRQF, PIRQG, PIRQH};
161mpc86xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) 127const unsigned char uli1575_irq_route_table[16] = {
128 0, /* 0: Reserved */
129 0x8, /* 1: 0b1000 */
130 0, /* 2: Reserved */
131 0x2, /* 3: 0b0010 */
132 0x4, /* 4: 0b0100 */
133 0x5, /* 5: 0b0101 */
134 0x7, /* 6: 0b0111 */
135 0x6, /* 7: 0b0110 */
136 0, /* 8: Reserved */
137 0x1, /* 9: 0b0001 */
138 0x3, /* 10: 0b0011 */
139 0x9, /* 11: 0b1001 */
140 0xb, /* 12: 0b1011 */
141 0, /* 13: Reserved */
142 0xd, /* 14, 0b1101 */
143 0xf, /* 15, 0b1111 */
144};
145
146static int __devinit
147get_pci_irq_from_of(struct pci_controller *hose, int slot, int pin)
162{ 148{
163 static char pci_irq_table[][4] = { 149 struct of_irq oirq;
164 /* 150 u32 laddr[3];
165 * PCI IDSEL/INTPIN->INTLINE 151 struct device_node *hosenode = hose ? hose->arch_data : NULL;
166 * A B C D 152
167 */ 153 if (!hosenode) return -EINVAL;
168 {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 17 -- PCI Slot 1 */ 154
169 {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 18 -- PCI Slot 2 */ 155 laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(slot, 0) << 8);
170 {0, 0, 0, 0}, /* IDSEL 19 */ 156 laddr[1] = laddr[2] = 0;
171 {0, 0, 0, 0}, /* IDSEL 20 */ 157 of_irq_map_raw(hosenode, &pin, laddr, &oirq);
172 {0, 0, 0, 0}, /* IDSEL 21 */ 158 DBG("mpc86xx_hpcn: pci irq addr %x, slot %d, pin %d, irq %d\n",
173 {0, 0, 0, 0}, /* IDSEL 22 */ 159 laddr[0], slot, pin, oirq.specifier[0]);
174 {0, 0, 0, 0}, /* IDSEL 23 */ 160 return oirq.specifier[0];
175 {0, 0, 0, 0}, /* IDSEL 24 */
176 {0, 0, 0, 0}, /* IDSEL 25 */
177 {PIRQD, PIRQA, PIRQB, PIRQC}, /* IDSEL 26 -- PCI Bridge*/
178 {PIRQC, 0, 0, 0}, /* IDSEL 27 -- LAN */
179 {PIRQE, PIRQF, PIRQH, PIRQ7}, /* IDSEL 28 -- USB 1.1 */
180 {PIRQE, PIRQF, PIRQG, 0}, /* IDSEL 29 -- Audio & Modem */
181 {PIRQH, 0, 0, 0}, /* IDSEL 30 -- LPC & PMU*/
182 {PIRQD, 0, 0, 0}, /* IDSEL 31 -- ATA */
183 };
184
185 const long min_idsel = 17, max_idsel = 31, irqs_per_slot = 4;
186 return PCI_IRQ_TABLE_LOOKUP + I8259_OFFSET;
187} 161}
188 162
189static void __devinit quirk_ali1575(struct pci_dev *dev) 163static void __devinit quirk_uli1575(struct pci_dev *dev)
190{ 164{
191 unsigned short temp; 165 unsigned short temp;
166 struct pci_controller *hose = pci_bus_to_host(dev->bus);
167 unsigned char irq2pin[16];
168 unsigned long pirq_map_word = 0;
169 u32 irq;
170 int i;
192 171
193 /* 172 /*
194 * ALI1575 interrupts route table setup: 173 * ULI1575 interrupts route setup
174 */
175 memset(irq2pin, 0, 16); /* Initialize default value 0 */
176
177 /*
178 * PIRQA -> PIRQD mapping read from OF-tree
179 *
180 * interrupts for PCI slot0 -- PIRQA / PIRQB / PIRQC / PIRQD
181 * PCI slot1 -- PIRQB / PIRQC / PIRQD / PIRQA
182 */
183 for (i = 0; i < 4; i++) {
184 irq = get_pci_irq_from_of(hose, 17, i + 1);
185 if (irq > 0 && irq < 16)
186 irq2pin[irq] = PIRQA + i;
187 else
188 printk(KERN_WARNING "ULI1575 device"
189 "(slot %d, pin %d) irq %d is invalid.\n",
190 17, i + 1, irq);
191 }
192
193 /*
194 * PIRQE -> PIRQH mapping set manually
195 * 195 *
196 * IRQ pin IRQ# 196 * IRQ pin IRQ#
197 * PIRQA ---- 3
198 * PIRQB ---- 4
199 * PIRQC ---- 5
200 * PIRQD ---- 6
201 * PIRQE ---- 9 197 * PIRQE ---- 9
202 * PIRQF ---- 10 198 * PIRQF ---- 10
203 * PIRQG ---- 11 199 * PIRQG ---- 11
204 * PIRQH ---- 12 200 * PIRQH ---- 12
205 *
206 * interrupts for PCI slot0 -- PIRQA / PIRQB / PIRQC / PIRQD
207 * PCI slot1 -- PIRQB / PIRQC / PIRQD / PIRQA
208 */ 201 */
209 pci_write_config_dword(dev, 0x48, 0xb9317542); 202 for (i = 0; i < 4; i++) irq2pin[i + 9] = PIRQE + i;
203
204 /* Set IRQ-PIRQ Mapping to ULI1575 */
205 for (i = 0; i < 16; i++)
206 if (irq2pin[i])
207 pirq_map_word |= (uli1575_irq_route_table[i] & 0xf)
208 << ((irq2pin[i] - PIRQA) * 4);
210 209
211 /* USB 1.1 OHCI controller 1, interrupt: PIRQE */ 210 /* ULI1575 IRQ mapping conf register default value is 0xb9317542 */
212 pci_write_config_byte(dev, 0x86, 0x0c); 211 DBG("Setup ULI1575 IRQ mapping configuration register value = 0x%x\n",
212 pirq_map_word);
213 pci_write_config_dword(dev, 0x48, pirq_map_word);
213 214
214 /* USB 1.1 OHCI controller 2, interrupt: PIRQF */ 215#define ULI1575_SET_DEV_IRQ(slot, pin, reg) \
215 pci_write_config_byte(dev, 0x87, 0x0d); 216 do { \
217 int irq; \
218 irq = get_pci_irq_from_of(hose, slot, pin); \
219 if (irq > 0 && irq < 16) \
220 pci_write_config_byte(dev, reg, irq2pin[irq]); \
221 else \
222 printk(KERN_WARNING "ULI1575 device" \
223 "(slot %d, pin %d) irq %d is invalid.\n", \
224 slot, pin, irq); \
225 } while(0)
216 226
217 /* USB 1.1 OHCI controller 3, interrupt: PIRQH */ 227 /* USB 1.1 OHCI controller 1, slot 28, pin 1 */
218 pci_write_config_byte(dev, 0x88, 0x0f); 228 ULI1575_SET_DEV_IRQ(28, 1, 0x86);
219 229
220 /* USB 2.0 controller, interrupt: PIRQ7 */ 230 /* USB 1.1 OHCI controller 2, slot 28, pin 2 */
221 pci_write_config_byte(dev, 0x74, 0x06); 231 ULI1575_SET_DEV_IRQ(28, 2, 0x87);
222 232
223 /* Audio controller, interrupt: PIRQE */ 233 /* USB 1.1 OHCI controller 3, slot 28, pin 3 */
224 pci_write_config_byte(dev, 0x8a, 0x0c); 234 ULI1575_SET_DEV_IRQ(28, 3, 0x88);
225 235
226 /* Modem controller, interrupt: PIRQF */ 236 /* USB 2.0 controller, slot 28, pin 4 */
227 pci_write_config_byte(dev, 0x8b, 0x0d); 237 irq = get_pci_irq_from_of(hose, 28, 4);
238 if (irq >= 0 && irq <= 15)
239 pci_write_config_dword(dev, 0x74, uli1575_irq_route_table[irq]);
228 240
229 /* HD audio controller, interrupt: PIRQG */ 241 /* Audio controller, slot 29, pin 1 */
230 pci_write_config_byte(dev, 0x8c, 0x0e); 242 ULI1575_SET_DEV_IRQ(29, 1, 0x8a);
231 243
232 /* Serial ATA interrupt: PIRQD */ 244 /* Modem controller, slot 29, pin 2 */
233 pci_write_config_byte(dev, 0x8d, 0x0b); 245 ULI1575_SET_DEV_IRQ(29, 2, 0x8b);
234 246
235 /* SMB interrupt: PIRQH */ 247 /* HD audio controller, slot 29, pin 3 */
236 pci_write_config_byte(dev, 0x8e, 0x0f); 248 ULI1575_SET_DEV_IRQ(29, 3, 0x8c);
237 249
238 /* PMU ACPI SCI interrupt: PIRQH */ 250 /* SMB interrupt: slot 30, pin 1 */
239 pci_write_config_byte(dev, 0x8f, 0x0f); 251 ULI1575_SET_DEV_IRQ(30, 1, 0x8e);
252
253 /* PMU ACPI SCI interrupt: slot 30, pin 2 */
254 ULI1575_SET_DEV_IRQ(30, 2, 0x8f);
255
256 /* Serial ATA interrupt: slot 31, pin 1 */
257 ULI1575_SET_DEV_IRQ(31, 1, 0x8d);
240 258
241 /* Primary PATA IDE IRQ: 14 259 /* Primary PATA IDE IRQ: 14
242 * Secondary PATA IDE IRQ: 15 260 * Secondary PATA IDE IRQ: 15
243 */ 261 */
244 pci_write_config_byte(dev, 0x44, 0x3d); 262 pci_write_config_byte(dev, 0x44, 0x30 | uli1575_irq_route_table[14]);
245 pci_write_config_byte(dev, 0x75, 0x0f); 263 pci_write_config_byte(dev, 0x75, uli1575_irq_route_table[15]);
246 264
247 /* Set IRQ14 and IRQ15 to legacy IRQs */ 265 /* Set IRQ14 and IRQ15 to legacy IRQs */
248 pci_read_config_word(dev, 0x46, &temp); 266 pci_read_config_word(dev, 0x46, &temp);
@@ -264,6 +282,8 @@ static void __devinit quirk_ali1575(struct pci_dev *dev)
264 */ 282 */
265 outb(0xfa, 0x4d0); 283 outb(0xfa, 0x4d0);
266 outb(0x1e, 0x4d1); 284 outb(0x1e, 0x4d1);
285
286#undef ULI1575_SET_DEV_IRQ
267} 287}
268 288
269static void __devinit quirk_uli5288(struct pci_dev *dev) 289static void __devinit quirk_uli5288(struct pci_dev *dev)
@@ -306,7 +326,7 @@ static void __devinit early_uli5249(struct pci_dev *dev)
306 dev->class |= 0x1; 326 dev->class |= 0x1;
307} 327}
308 328
309DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_ali1575); 329DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_uli1575);
310DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288); 330DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288);
311DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229); 331DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
312DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249); 332DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249);
@@ -337,8 +357,6 @@ mpc86xx_hpcn_setup_arch(void)
337 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;) 357 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
338 add_bridge(np); 358 add_bridge(np);
339 359
340 ppc_md.pci_swizzle = common_swizzle;
341 ppc_md.pci_map_irq = mpc86xx_map_irq;
342 ppc_md.pci_exclude_device = mpc86xx_exclude_device; 360 ppc_md.pci_exclude_device = mpc86xx_exclude_device;
343#endif 361#endif
344 362
@@ -377,6 +395,15 @@ mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
377} 395}
378 396
379 397
398void __init mpc86xx_hpcn_pcibios_fixup(void)
399{
400 struct pci_dev *dev = NULL;
401
402 for_each_pci_dev(dev)
403 pci_read_irq_line(dev);
404}
405
406
380/* 407/*
381 * Called very early, device-tree isn't unflattened 408 * Called very early, device-tree isn't unflattened
382 */ 409 */
@@ -431,6 +458,7 @@ define_machine(mpc86xx_hpcn) {
431 .setup_arch = mpc86xx_hpcn_setup_arch, 458 .setup_arch = mpc86xx_hpcn_setup_arch,
432 .init_IRQ = mpc86xx_hpcn_init_irq, 459 .init_IRQ = mpc86xx_hpcn_init_irq,
433 .show_cpuinfo = mpc86xx_hpcn_show_cpuinfo, 460 .show_cpuinfo = mpc86xx_hpcn_show_cpuinfo,
461 .pcibios_fixup = mpc86xx_hpcn_pcibios_fixup,
434 .get_irq = mpic_get_irq, 462 .get_irq = mpic_get_irq,
435 .restart = mpc86xx_restart, 463 .restart = mpc86xx_restart,
436 .time_init = mpc86xx_time_init, 464 .time_init = mpc86xx_time_init,
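The nibble packing in quirk_uli1575() can be checked against the documented default: taking the legacy PIRQA..PIRQH -> IRQ 3, 4, 5, 6, 9, 10, 11, 12 routing from the removed comment and writing one 4-bit code from uli1575_irq_route_table per PIRQ nibble of register 0x48 reproduces exactly the 0xb9317542 default noted above. A stand-alone check, for illustration only:

    #include <stdio.h>

    static const unsigned char route[16] = {
        0, 0x8, 0, 0x2, 0x4, 0x5, 0x7, 0x6, 0, 0x1, 0x3, 0x9, 0xb, 0, 0xd, 0xf,
    };

    int main(void)
    {
        const int legacy_irq[8] = { 3, 4, 5, 6, 9, 10, 11, 12 };   /* PIRQA..PIRQH */
        unsigned int word = 0;
        int pin;

        for (pin = 0; pin < 8; pin++)
            word |= (unsigned int)(route[legacy_irq[pin]] & 0xf) << (pin * 4);

        printf("0x%x\n", word);   /* prints 0xb9317542 */
        return 0;
    }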
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index d7a4fc7ca238..ed00ed2455dd 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * mpc7448_hpc2.c 2 * mpc7448_hpc2.c
3 * 3 *
4 * Board setup routines for the Freescale Taiga platform 4 * Board setup routines for the Freescale mpc7448hpc2 (Taiga) platform
5 * 5 *
6 * Author: Jacob Pan 6 * Author: Jacob Pan
7 * jacob.pan@freescale.com 7 * jacob.pan@freescale.com
@@ -12,10 +12,10 @@
12 * 12 *
13 * Copyright 2004-2006 Freescale Semiconductor, Inc. 13 * Copyright 2004-2006 Freescale Semiconductor, Inc.
14 * 14 *
15 * This file is licensed under 15 * This program is free software; you can redistribute it and/or
16 * the terms of the GNU General Public License version 2. This program 16 * modify it under the terms of the GNU General Public License
17 * is licensed "as is" without any warranty of any kind, whether express 17 * as published by the Free Software Foundation; either version
18 * or implied. 18 * 2 of the License, or (at your option) any later version.
19 */ 19 */
20 20
21#include <linux/config.h> 21#include <linux/config.h>
@@ -62,43 +62,8 @@ pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET;
62extern int tsi108_setup_pci(struct device_node *dev); 62extern int tsi108_setup_pci(struct device_node *dev);
63extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); 63extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
64extern void tsi108_pci_int_init(void); 64extern void tsi108_pci_int_init(void);
65extern int tsi108_irq_cascade(struct pt_regs *regs, void *unused); 65extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc,
66 66 struct pt_regs *regs);
67/*
68 * Define all of the IRQ senses and polarities. Taken from the
69 * mpc7448hpc manual.
70 * Note: Likely, this table and the following function should be
71 * obtained and derived from the OF Device Tree.
72 */
73
74static u_char mpc7448_hpc2_pic_initsenses[] __initdata = {
75 /* External on-board sources */
76 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[0] XINT0 from FPGA */
77 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[1] XINT1 from FPGA */
78 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[2] PHY_INT from both GIGE */
79 (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[3] RESERVED */
80 /* Internal Tsi108/109 interrupt sources */
81 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
82 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
83 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
84 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
85 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA0 */
86 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA1 */
87 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA2 */
88 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA3 */
89 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* UART0 */
90 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* UART1 */
91 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* I2C */
92 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* GPIO */
93 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* GIGE0 */
94 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* GIGE1 */
95 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
96 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* HLP */
97 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* SDC */
98 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Processor IF */
99 (IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
100 (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* PCI/X block */
101};
102 67
103int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn) 68int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
104{ 69{
@@ -229,6 +194,8 @@ static void __init mpc7448_hpc2_init_IRQ(void)
229{ 194{
230 struct mpic *mpic; 195 struct mpic *mpic;
231 phys_addr_t mpic_paddr = 0; 196 phys_addr_t mpic_paddr = 0;
197 unsigned int cascade_pci_irq;
198 struct device_node *tsi_pci;
232 struct device_node *tsi_pic; 199 struct device_node *tsi_pic;
233 200
234 tsi_pic = of_find_node_by_type(NULL, "open-pic"); 201 tsi_pic = of_find_node_by_type(NULL, "open-pic");
@@ -246,24 +213,31 @@ static void __init mpc7448_hpc2_init_IRQ(void)
246 DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__, 213 DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__,
247 (u32) mpic_paddr); 214 (u32) mpic_paddr);
248 215
249 mpic = mpic_alloc(mpic_paddr, 216 mpic = mpic_alloc(tsi_pic, mpic_paddr,
250 MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET | 217 MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
251 MPIC_SPV_EOI | MPIC_MOD_ID(MPIC_ID_TSI108), 218 MPIC_SPV_EOI | MPIC_MOD_ID(MPIC_ID_TSI108),
252 0, /* num_sources used */ 219 0, /* num_sources used */
253 TSI108_IRQ_BASE,
254 0, /* num_sources used */ 220 0, /* num_sources used */
255 NR_IRQS - 4 /* XXXX */, 221 "Tsi108_PIC");
256 mpc7448_hpc2_pic_initsenses,
257 sizeof(mpc7448_hpc2_pic_initsenses), "Tsi108_PIC");
258 222
259 BUG_ON(mpic == NULL); /* XXXX */ 223 BUG_ON(mpic == NULL); /* XXXX */
260
261 mpic_init(mpic); 224 mpic_init(mpic);
262 mpic_setup_cascade(IRQ_TSI108_PCI, tsi108_irq_cascade, mpic); 225
226 tsi_pci = of_find_node_by_type(NULL, "pci");
227 if (tsi_pci == 0) {
228 printk("%s: No tsi108 pci node found !\n", __FUNCTION__);
229 return;
230 }
231
232 cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
233 set_irq_data(cascade_pci_irq, mpic);
234 set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
235
263 tsi108_pci_int_init(); 236 tsi108_pci_int_init();
264 237
265 /* Configure MPIC outputs to CPU0 */ 238 /* Configure MPIC outputs to CPU0 */
266 tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); 239 tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
240 of_node_put(tsi_pic);
267} 241}
268 242
269void mpc7448_hpc2_show_cpuinfo(struct seq_file *m) 243void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
@@ -320,6 +294,7 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
320 return 0; 294 return 0;
321 295
322} 296}
297
323define_machine(mpc7448_hpc2){ 298define_machine(mpc7448_hpc2){
324 .name = "MPC7448 HPC2", 299 .name = "MPC7448 HPC2",
325 .probe = mpc7448_hpc2_probe, 300 .probe = mpc7448_hpc2_probe,
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 6a026c733f6a..9d73d0234c5d 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -411,8 +411,15 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
411 DBG("End of boot params: %x\n", mem_end); 411 DBG("End of boot params: %x\n", mem_end);
412 rsvmap[0] = mem_start; 412 rsvmap[0] = mem_start;
413 rsvmap[1] = mem_end; 413 rsvmap[1] = mem_end;
414 rsvmap[2] = 0; 414 if (bootx_info->ramDisk) {
415 rsvmap[3] = 0; 415 rsvmap[2] = ((unsigned long)bootx_info) + bootx_info->ramDisk;
416 rsvmap[3] = rsvmap[2] + bootx_info->ramDiskSize;
417 rsvmap[4] = 0;
418 rsvmap[5] = 0;
419 } else {
420 rsvmap[2] = 0;
421 rsvmap[3] = 0;
422 }
416 423
417 return (unsigned long)hdr; 424 return (unsigned long)hdr;
418} 425}
@@ -543,12 +550,12 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
543 */ 550 */
544 if (bi->version < 5) { 551 if (bi->version < 5) {
545 space = bi->deviceTreeOffset + bi->deviceTreeSize; 552 space = bi->deviceTreeOffset + bi->deviceTreeSize;
546 if (bi->ramDisk) 553 if (bi->ramDisk >= space)
547 space = bi->ramDisk + bi->ramDiskSize; 554 space = bi->ramDisk + bi->ramDiskSize;
548 } else 555 } else
549 space = bi->totalParamsSize; 556 space = bi->totalParamsSize;
550 557
551 bootx_printf("Total space used by parameters & ramdisk: %x \n", space); 558 bootx_printf("Total space used by parameters & ramdisk: 0x%x \n", space);
552 559
553 /* New BootX will have flushed all TLBs and enters kernel with 560 /* New BootX will have flushed all TLBs and enters kernel with
554 * MMU switched OFF, so this should not be useful anymore. 561 * MMU switched OFF, so this should not be useful anymore.
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 12b65609c072..ef10bcf2d943 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -85,11 +85,8 @@ static int __init gfar_mdio_of_init(void)
85 mdio_data.irq[k] = -1; 85 mdio_data.irq[k] = -1;
86 86
87 while ((child = of_get_next_child(np, child)) != NULL) { 87 while ((child = of_get_next_child(np, child)) != NULL) {
88 if (child->n_intrs) { 88 u32 *id = get_property(child, "reg", NULL);
89 u32 *id = 89 mdio_data.irq[*id] = irq_of_parse_and_map(child, 0);
90 (u32 *) get_property(child, "reg", NULL);
91 mdio_data.irq[*id] = child->intrs[0].line;
92 }
93 } 90 }
94 91
95 ret = 92 ret =
@@ -131,6 +128,7 @@ static int __init gfar_of_init(void)
131 char *model; 128 char *model;
132 void *mac_addr; 129 void *mac_addr;
133 phandle *ph; 130 phandle *ph;
131 int n_res = 1;
134 132
135 memset(r, 0, sizeof(r)); 133 memset(r, 0, sizeof(r));
136 memset(&gfar_data, 0, sizeof(gfar_data)); 134 memset(&gfar_data, 0, sizeof(gfar_data));
@@ -139,8 +137,7 @@ static int __init gfar_of_init(void)
139 if (ret) 137 if (ret)
140 goto err; 138 goto err;
141 139
142 r[1].start = np->intrs[0].line; 140 r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
143 r[1].end = np->intrs[0].line;
144 r[1].flags = IORESOURCE_IRQ; 141 r[1].flags = IORESOURCE_IRQ;
145 142
146 model = get_property(np, "model", NULL); 143 model = get_property(np, "model", NULL);
@@ -150,19 +147,19 @@ static int __init gfar_of_init(void)
150 r[1].name = gfar_tx_intr; 147 r[1].name = gfar_tx_intr;
151 148
152 r[2].name = gfar_rx_intr; 149 r[2].name = gfar_rx_intr;
153 r[2].start = np->intrs[1].line; 150 r[2].start = r[2].end = irq_of_parse_and_map(np, 1);
154 r[2].end = np->intrs[1].line;
155 r[2].flags = IORESOURCE_IRQ; 151 r[2].flags = IORESOURCE_IRQ;
156 152
157 r[3].name = gfar_err_intr; 153 r[3].name = gfar_err_intr;
158 r[3].start = np->intrs[2].line; 154 r[3].start = r[3].end = irq_of_parse_and_map(np, 2);
159 r[3].end = np->intrs[2].line;
160 r[3].flags = IORESOURCE_IRQ; 155 r[3].flags = IORESOURCE_IRQ;
156
157 n_res += 2;
161 } 158 }
162 159
163 gfar_dev = 160 gfar_dev =
164 platform_device_register_simple("fsl-gianfar", i, &r[0], 161 platform_device_register_simple("fsl-gianfar", i, &r[0],
165 np->n_intrs + 1); 162 n_res + 1);
166 163
167 if (IS_ERR(gfar_dev)) { 164 if (IS_ERR(gfar_dev)) {
168 ret = PTR_ERR(gfar_dev); 165 ret = PTR_ERR(gfar_dev);
@@ -259,8 +256,7 @@ static int __init fsl_i2c_of_init(void)
259 if (ret) 256 if (ret)
260 goto err; 257 goto err;
261 258
262 r[1].start = np->intrs[0].line; 259 r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
263 r[1].end = np->intrs[0].line;
264 r[1].flags = IORESOURCE_IRQ; 260 r[1].flags = IORESOURCE_IRQ;
265 261
266 i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2); 262 i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2);
@@ -396,8 +392,7 @@ static int __init fsl_usb_of_init(void)
396 if (ret) 392 if (ret)
397 goto err; 393 goto err;
398 394
399 r[1].start = np->intrs[0].line; 395 r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
400 r[1].end = np->intrs[0].line;
401 r[1].flags = IORESOURCE_IRQ; 396 r[1].flags = IORESOURCE_IRQ;
402 397
403 usb_dev_mph = 398 usb_dev_mph =
@@ -445,8 +440,7 @@ static int __init fsl_usb_of_init(void)
445 if (ret) 440 if (ret)
446 goto unreg_mph; 441 goto unreg_mph;
447 442
448 r[1].start = np->intrs[0].line; 443 r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
449 r[1].end = np->intrs[0].line;
450 r[1].flags = IORESOURCE_IRQ; 444 r[1].flags = IORESOURCE_IRQ;
451 445
452 usb_dev_dr = 446 usb_dev_dr =
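
All of the fsl_soc.c hunks above follow one pattern: the platform code stops reading interrupt numbers straight out of np->intrs[] and instead asks the irq mapping layer for a virtual irq with irq_of_parse_and_map() before filling the IORESOURCE_IRQ slot. A minimal sketch of that shape is shown below, assuming the same-era powerpc APIs used in the patch; the device name "example-dev", the fixed register window and the example_* function name are invented for illustration only.

	#include <linux/init.h>
	#include <linux/string.h>
	#include <linux/errno.h>
	#include <linux/err.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <asm/prom.h>

	/* Hypothetical helper: register one device described by an OF node. */
	static int __init example_of_init(struct device_node *np)
	{
		struct resource r[2];
		struct platform_device *pdev;

		memset(r, 0, sizeof(r));

		/* Memory window: hypothetical fixed values just for the sketch. */
		r[0].start = 0xe0000000;
		r[0].end   = 0xe0000fff;
		r[0].flags = IORESOURCE_MEM;

		/* Interrupts are mapped through the irq host, not np->intrs[]. */
		r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
		r[1].flags = IORESOURCE_IRQ;

		pdev = platform_device_register_simple("example-dev", 0, r, 2);
		return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
	}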
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index 26a0cc820cde..f3038461d4c0 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -93,13 +93,15 @@ static int __init tsi108_eth_of_init(void)
93 goto err; 93 goto err;
94 94
95 r[1].name = "tx"; 95 r[1].name = "tx";
96 r[1].start = np->intrs[0].line; 96 r[1].start = irq_of_parse_and_map(np, 0);
97 r[1].end = np->intrs[0].line; 97 r[1].end = irq_of_parse_and_map(np, 0);
98 r[1].flags = IORESOURCE_IRQ; 98 r[1].flags = IORESOURCE_IRQ;
99 DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
100 __FUNCTION__,r[1].name, r[1].start, r[1].end);
99 101
100 tsi_eth_dev = 102 tsi_eth_dev =
101 platform_device_register_simple("tsi-ethernet", i, &r[0], 103 platform_device_register_simple("tsi-ethernet", i, &r[0],
102 np->n_intrs + 1); 104 1);
103 105
104 if (IS_ERR(tsi_eth_dev)) { 106 if (IS_ERR(tsi_eth_dev)) {
105 ret = PTR_ERR(tsi_eth_dev); 107 ret = PTR_ERR(tsi_eth_dev);
@@ -127,7 +129,7 @@ static int __init tsi108_eth_of_init(void)
127 tsi_eth_data.regs = r[0].start; 129 tsi_eth_data.regs = r[0].start;
128 tsi_eth_data.phyregs = res.start; 130 tsi_eth_data.phyregs = res.start;
129 tsi_eth_data.phy = *phy_id; 131 tsi_eth_data.phy = *phy_id;
130 tsi_eth_data.irq_num = np->intrs[0].line; 132 tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0);
131 of_node_put(phy); 133 of_node_put(phy);
132 ret = 134 ret =
133 platform_device_add_data(tsi_eth_dev, &tsi_eth_data, 135 platform_device_add_data(tsi_eth_dev, &tsi_eth_data,
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 3265d54c82ed..2ab06ed3ae73 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -26,7 +26,6 @@
26#include <linux/irq.h> 26#include <linux/irq.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28 28
29
30#include <asm/byteorder.h> 29#include <asm/byteorder.h>
31#include <asm/io.h> 30#include <asm/io.h>
32#include <asm/irq.h> 31#include <asm/irq.h>
@@ -228,7 +227,7 @@ int __init tsi108_setup_pci(struct device_node *dev)
228 227
229 (hose)->ops = &tsi108_direct_pci_ops; 228 (hose)->ops = &tsi108_direct_pci_ops;
230 229
231 printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08lx. " 230 printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. "
232 "Firmware bus number: %d->%d\n", 231 "Firmware bus number: %d->%d\n",
233 rsrc.start, hose->first_busno, hose->last_busno); 232 rsrc.start, hose->first_busno, hose->last_busno);
234 233
@@ -278,7 +277,7 @@ static void init_pci_source(void)
278 mb(); 277 mb();
279} 278}
280 279
281static inline int get_pci_source(void) 280static inline unsigned int get_pci_source(void)
282{ 281{
283 u_int temp = 0; 282 u_int temp = 0;
284 int irq = -1; 283 int irq = -1;
@@ -371,12 +370,12 @@ static void tsi108_pci_irq_end(u_int irq)
371 * Interrupt controller descriptor for cascaded PCI interrupt controller. 370 * Interrupt controller descriptor for cascaded PCI interrupt controller.
372 */ 371 */
373 372
374struct hw_interrupt_type tsi108_pci_irq = { 373static struct irq_chip tsi108_pci_irq = {
375 .typename = "tsi108_PCI_int", 374 .typename = "tsi108_PCI_int",
376 .enable = tsi108_pci_irq_enable, 375 .mask = tsi108_pci_irq_disable,
377 .disable = tsi108_pci_irq_disable,
378 .ack = tsi108_pci_irq_ack, 376 .ack = tsi108_pci_irq_ack,
379 .end = tsi108_pci_irq_end, 377 .end = tsi108_pci_irq_end,
378 .unmask = tsi108_pci_irq_enable,
380}; 379};
381 380
382/* 381/*
@@ -399,14 +398,18 @@ void __init tsi108_pci_int_init(void)
399 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); 398 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
400 399
401 for (i = 0; i < NUM_PCI_IRQS; i++) { 400 for (i = 0; i < NUM_PCI_IRQS; i++) {
402 irq_desc[i + IRQ_PCI_INTAD_BASE].handler = &tsi108_pci_irq; 401 irq_desc[i + IRQ_PCI_INTAD_BASE].chip = &tsi108_pci_irq;
403 irq_desc[i + IRQ_PCI_INTAD_BASE].status |= IRQ_LEVEL; 402 irq_desc[i + IRQ_PCI_INTAD_BASE].status |= IRQ_LEVEL;
404 } 403 }
405 404
406 init_pci_source(); 405 init_pci_source();
407} 406}
408 407
409int tsi108_irq_cascade(struct pt_regs *regs, void *unused) 408void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc,
409 struct pt_regs *regs)
410{ 410{
411 return get_pci_source(); 411 unsigned int cascade_irq = get_pci_source();
412 if (cascade_irq != NO_IRQ)
413 generic_handle_irq(cascade_irq, regs);
414 desc->chip->eoi(irq);
412} 415}
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index 35488d6c7457..0251cab4708b 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -348,9 +348,9 @@ void __init setup_arch(char **cmdline_p)
348 init_mm.context = (unsigned long) NO_CONTEXT; 348 init_mm.context = (unsigned long) NO_CONTEXT;
349 init_task.thread.kregs = &fake_swapper_regs; 349 init_task.thread.kregs = &fake_swapper_regs;
350 350
351 smp_setup_cpu_possible_map();
352
353 paging_init(); 351 paging_init();
352
353 smp_setup_cpu_possible_map();
354} 354}
355 355
356static int __init set_preferred_console(void) 356static int __init set_preferred_console(void)
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index e311ade1b490..276f22881d0f 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -34,7 +34,6 @@
34#include <asm/tlbflush.h> 34#include <asm/tlbflush.h>
35#include <asm/cpudata.h> 35#include <asm/cpudata.h>
36 36
37volatile int smp_processors_ready = 0;
38int smp_num_cpus = 1; 37int smp_num_cpus = 1;
39volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,}; 38volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
40unsigned char boot_cpu_id = 0; 39unsigned char boot_cpu_id = 0;
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index ba843f6a2832..3ff4edd32815 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -42,7 +42,7 @@ extern ctxd_t *srmmu_ctx_table_phys;
42 42
43extern void calibrate_delay(void); 43extern void calibrate_delay(void);
44 44
45extern volatile int smp_processors_ready; 45static volatile int smp_processors_ready = 0;
46static int smp_highest_cpu; 46static int smp_highest_cpu;
47extern volatile unsigned long cpu_callin_map[NR_CPUS]; 47extern volatile unsigned long cpu_callin_map[NR_CPUS];
48extern cpuinfo_sparc cpu_data[NR_CPUS]; 48extern cpuinfo_sparc cpu_data[NR_CPUS];
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 3b32096134aa..7d4a649138f6 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -39,7 +39,6 @@ extern ctxd_t *srmmu_ctx_table_phys;
39 39
40extern void calibrate_delay(void); 40extern void calibrate_delay(void);
41 41
42extern volatile int smp_processors_ready;
43extern volatile unsigned long cpu_callin_map[NR_CPUS]; 42extern volatile unsigned long cpu_callin_map[NR_CPUS];
44extern unsigned char boot_cpu_id; 43extern unsigned char boot_cpu_id;
45 44
@@ -217,7 +216,6 @@ void __init smp4m_smp_done(void)
217 } 216 }
218 217
219 /* Ok, they are spinning and ready to go. */ 218 /* Ok, they are spinning and ready to go. */
220 smp_processors_ready = 1;
221} 219}
222 220
223/* At each hardware IRQ, we get this called to forward IRQ reception 221/* At each hardware IRQ, we get this called to forward IRQ reception
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index aae3123bf3ee..3a3aee08ec5f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1561,7 +1561,7 @@ restart:
1561 /* ->key must be copied to avoid race with cfq_exit_queue() */ 1561 /* ->key must be copied to avoid race with cfq_exit_queue() */
1562 k = __cic->key; 1562 k = __cic->key;
1563 if (unlikely(!k)) { 1563 if (unlikely(!k)) {
1564 cfq_drop_dead_cic(ioc, cic); 1564 cfq_drop_dead_cic(ioc, __cic);
1565 goto restart; 1565 goto restart;
1566 } 1566 }
1567 1567
diff --git a/block/elevator.c b/block/elevator.c
index bc7baeec0d10..9b72dc7c8a5c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -765,7 +765,8 @@ void elv_unregister(struct elevator_type *e)
765 read_lock(&tasklist_lock); 765 read_lock(&tasklist_lock);
766 do_each_thread(g, p) { 766 do_each_thread(g, p) {
767 task_lock(p); 767 task_lock(p);
768 e->ops.trim(p->io_context); 768 if (p->io_context)
769 e->ops.trim(p->io_context);
769 task_unlock(p); 770 task_unlock(p);
770 } while_each_thread(g, p); 771 } while_each_thread(g, p);
771 read_unlock(&tasklist_lock); 772 read_unlock(&tasklist_lock);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 61d6b3c65b66..ddd9253f9d55 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3628,6 +3628,8 @@ struct io_context *current_io_context(gfp_t gfp_flags)
3628 ret->nr_batch_requests = 0; /* because this is 0 */ 3628 ret->nr_batch_requests = 0; /* because this is 0 */
3629 ret->aic = NULL; 3629 ret->aic = NULL;
3630 ret->cic_root.rb_node = NULL; 3630 ret->cic_root.rb_node = NULL;
3631 /* make sure set_task_ioprio() sees the settings above */
3632 smp_wmb();
3631 tsk->io_context = ret; 3633 tsk->io_context = ret;
3632 } 3634 }
3633 3635
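
The smp_wmb() added above is the write half of a publish pattern: the io_context fields are fully initialised before the pointer becomes visible through tsk->io_context, so a reader such as set_task_ioprio() that observes the pointer also observes the initialised fields, given the corresponding read-side ordering. A generic sketch of the idea follows; the struct and function names are invented for illustration, and the barrier header reflects where smp_wmb() lived in this era.

	#include <linux/kernel.h>
	#include <asm/system.h>		/* smp_wmb() in kernels of this vintage */

	struct example_ctx {
		int ready_flag;
		void *private;
	};

	static struct example_ctx *example_published;

	static void example_publish(struct example_ctx *ctx)
	{
		ctx->ready_flag = 1;
		ctx->private = NULL;

		/* Order the initialising stores before the pointer publish. */
		smp_wmb();

		example_published = ctx;
	}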
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 96309b9660da..11abc7bf777e 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -285,6 +285,8 @@ static int __init acpi_ac_init(void)
285{ 285{
286 int result; 286 int result;
287 287
288 if (acpi_disabled)
289 return -ENODEV;
288 290
289 acpi_ac_dir = acpi_lock_ac_dir(); 291 acpi_ac_dir = acpi_lock_ac_dir();
290 if (!acpi_ac_dir) 292 if (!acpi_ac_dir)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index b0d4b147b19e..1dda370f402b 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -484,10 +484,8 @@ acpi_memory_register_notify_handler(acpi_handle handle,
484 484
485 485
486 status = is_memory_device(handle); 486 status = is_memory_device(handle);
487 if (ACPI_FAILURE(status)){ 487 if (ACPI_FAILURE(status))
488 ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
489 return AE_OK; /* continue */ 488 return AE_OK; /* continue */
490 }
491 489
492 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, 490 status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
493 acpi_memory_device_notify, NULL); 491 acpi_memory_device_notify, NULL);
@@ -503,10 +501,8 @@ acpi_memory_deregister_notify_handler(acpi_handle handle,
503 501
504 502
505 status = is_memory_device(handle); 503 status = is_memory_device(handle);
506 if (ACPI_FAILURE(status)){ 504 if (ACPI_FAILURE(status))
507 ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
508 return AE_OK; /* continue */ 505 return AE_OK; /* continue */
509 }
510 506
511 status = acpi_remove_notify_handler(handle, 507 status = acpi_remove_notify_handler(handle,
512 ACPI_SYSTEM_NOTIFY, 508 ACPI_SYSTEM_NOTIFY,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6e5221707d97..9810e2a55d0a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -757,6 +757,9 @@ static int __init acpi_battery_init(void)
757{ 757{
758 int result; 758 int result;
759 759
760 if (acpi_disabled)
761 return -ENODEV;
762
760 acpi_battery_dir = acpi_lock_battery_dir(); 763 acpi_battery_dir = acpi_lock_battery_dir();
761 if (!acpi_battery_dir) 764 if (!acpi_battery_dir)
762 return -ENODEV; 765 return -ENODEV;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index b2977695e120..279c4bac92e5 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -25,6 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/ioport.h> 27#include <linux/ioport.h>
28#include <linux/kernel.h>
28#include <linux/list.h> 29#include <linux/list.h>
29#include <linux/sched.h> 30#include <linux/sched.h>
30#include <linux/pm.h> 31#include <linux/pm.h>
@@ -68,7 +69,8 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
68 69
69 status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device); 70 status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
70 if (ACPI_FAILURE(status) || !*device) { 71 if (ACPI_FAILURE(status) || !*device) {
71 ACPI_EXCEPTION((AE_INFO, status, "No context for object [%p]", handle)); 72 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
73 handle));
72 return -ENODEV; 74 return -ENODEV;
73 } 75 }
74 76
@@ -192,7 +194,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
192 /* Make sure this is a valid target state */ 194 /* Make sure this is a valid target state */
193 195
194 if (!device->flags.power_manageable) { 196 if (!device->flags.power_manageable) {
195 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable", 197 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
196 device->kobj.name)); 198 device->kobj.name));
197 return -ENODEV; 199 return -ENODEV;
198 } 200 }
@@ -738,7 +740,10 @@ static int __init acpi_init(void)
738 return -ENODEV; 740 return -ENODEV;
739 } 741 }
740 742
741 firmware_register(&acpi_subsys); 743 result = firmware_register(&acpi_subsys);
744 if (result < 0)
745 printk(KERN_WARNING "%s: firmware_register error: %d\n",
746 __FUNCTION__, result);
742 747
743 result = acpi_bus_init(); 748 result = acpi_bus_init();
744 749
diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c
index 32c9d88fd196..1ba2db671865 100644
--- a/drivers/acpi/hotkey.c
+++ b/drivers/acpi/hotkey.c
@@ -91,6 +91,14 @@ enum {
91 HK_EVENT_ENTERRING_S5, 91 HK_EVENT_ENTERRING_S5,
92}; 92};
93 93
94enum conf_entry_enum {
95 bus_handle = 0,
96 bus_method = 1,
97 action_handle = 2,
98 method = 3,
99 LAST_CONF_ENTRY
100};
101
94/* procdir we use */ 102/* procdir we use */
95static struct proc_dir_entry *hotkey_proc_dir; 103static struct proc_dir_entry *hotkey_proc_dir;
96static struct proc_dir_entry *hotkey_config; 104static struct proc_dir_entry *hotkey_config;
@@ -244,19 +252,15 @@ static int hotkey_info_open_fs(struct inode *inode, struct file *file)
244 252
245static char *format_result(union acpi_object *object) 253static char *format_result(union acpi_object *object)
246{ 254{
247 char *buf = NULL; 255 char *buf;
248
249 buf = (char *)kmalloc(RESULT_STR_LEN, GFP_KERNEL);
250 if (buf)
251 memset(buf, 0, RESULT_STR_LEN);
252 else
253 goto do_fail;
254 256
257 buf = kzalloc(RESULT_STR_LEN, GFP_KERNEL);
258 if (!buf)
259 return NULL;
255 /* Now, just support integer type */ 260 /* Now, just support integer type */
256 if (object->type == ACPI_TYPE_INTEGER) 261 if (object->type == ACPI_TYPE_INTEGER)
257 sprintf(buf, "%d\n", (u32) object->integer.value); 262 sprintf(buf, "%d\n", (u32) object->integer.value);
258 do_fail: 263 return buf;
259 return (buf);
260} 264}
261 265
262static int hotkey_polling_seq_show(struct seq_file *seq, void *offset) 266static int hotkey_polling_seq_show(struct seq_file *seq, void *offset)
@@ -486,98 +490,102 @@ static void free_hotkey_device(union acpi_hotkey *key)
486 490
487static void free_hotkey_buffer(union acpi_hotkey *key) 491static void free_hotkey_buffer(union acpi_hotkey *key)
488{ 492{
493 /* key would never be null, action method could be */
489 kfree(key->event_hotkey.action_method); 494 kfree(key->event_hotkey.action_method);
490} 495}
491 496
492static void free_poll_hotkey_buffer(union acpi_hotkey *key) 497static void free_poll_hotkey_buffer(union acpi_hotkey *key)
493{ 498{
499 /* key would never be null, others could be */
494 kfree(key->poll_hotkey.action_method); 500 kfree(key->poll_hotkey.action_method);
495 kfree(key->poll_hotkey.poll_method); 501 kfree(key->poll_hotkey.poll_method);
496 kfree(key->poll_hotkey.poll_result); 502 kfree(key->poll_hotkey.poll_result);
497} 503}
498static int 504static int
499init_hotkey_device(union acpi_hotkey *key, char *bus_str, char *action_str, 505init_hotkey_device(union acpi_hotkey *key, char **config_entry,
500 char *method, int std_num, int external_num) 506 int std_num, int external_num)
501{ 507{
502 acpi_handle tmp_handle; 508 acpi_handle tmp_handle;
503 acpi_status status = AE_OK; 509 acpi_status status = AE_OK;
504 510
505
506 if (std_num < 0 || IS_POLL(std_num) || !key) 511 if (std_num < 0 || IS_POLL(std_num) || !key)
507 goto do_fail; 512 goto do_fail;
508 513
509 if (!bus_str || !action_str || !method) 514 if (!config_entry[bus_handle] || !config_entry[action_handle]
515 || !config_entry[method])
510 goto do_fail; 516 goto do_fail;
511 517
512 key->link.hotkey_type = ACPI_HOTKEY_EVENT; 518 key->link.hotkey_type = ACPI_HOTKEY_EVENT;
513 key->link.hotkey_standard_num = std_num; 519 key->link.hotkey_standard_num = std_num;
514 key->event_hotkey.flag = 0; 520 key->event_hotkey.flag = 0;
515 key->event_hotkey.action_method = method; 521 key->event_hotkey.action_method = config_entry[method];
516 522
517 status = 523 status = acpi_get_handle(NULL, config_entry[bus_handle],
518 acpi_get_handle(NULL, bus_str, &(key->event_hotkey.bus_handle)); 524 &(key->event_hotkey.bus_handle));
519 if (ACPI_FAILURE(status)) 525 if (ACPI_FAILURE(status))
520 goto do_fail; 526 goto do_fail_zero;
521 key->event_hotkey.external_hotkey_num = external_num; 527 key->event_hotkey.external_hotkey_num = external_num;
522 status = 528 status = acpi_get_handle(NULL, config_entry[action_handle],
523 acpi_get_handle(NULL, action_str,
524 &(key->event_hotkey.action_handle)); 529 &(key->event_hotkey.action_handle));
525 if (ACPI_FAILURE(status)) 530 if (ACPI_FAILURE(status))
526 goto do_fail; 531 goto do_fail_zero;
527 status = acpi_get_handle(key->event_hotkey.action_handle, 532 status = acpi_get_handle(key->event_hotkey.action_handle,
528 method, &tmp_handle); 533 config_entry[method], &tmp_handle);
529 if (ACPI_FAILURE(status)) 534 if (ACPI_FAILURE(status))
530 goto do_fail; 535 goto do_fail_zero;
531 return AE_OK; 536 return AE_OK;
532 do_fail: 537do_fail_zero:
538 key->event_hotkey.action_method = NULL;
539do_fail:
533 return -ENODEV; 540 return -ENODEV;
534} 541}
535 542
536static int 543static int
537init_poll_hotkey_device(union acpi_hotkey *key, 544init_poll_hotkey_device(union acpi_hotkey *key, char **config_entry,
538 char *poll_str, 545 int std_num)
539 char *poll_method,
540 char *action_str, char *action_method, int std_num)
541{ 546{
542 acpi_status status = AE_OK; 547 acpi_status status = AE_OK;
543 acpi_handle tmp_handle; 548 acpi_handle tmp_handle;
544 549
545
546 if (std_num < 0 || IS_EVENT(std_num) || !key) 550 if (std_num < 0 || IS_EVENT(std_num) || !key)
547 goto do_fail; 551 goto do_fail;
548 552 if (!config_entry[bus_handle] ||!config_entry[bus_method] ||
549 if (!poll_str || !poll_method || !action_str || !action_method) 553 !config_entry[action_handle] || !config_entry[method])
550 goto do_fail; 554 goto do_fail;
551 555
552 key->link.hotkey_type = ACPI_HOTKEY_POLLING; 556 key->link.hotkey_type = ACPI_HOTKEY_POLLING;
553 key->link.hotkey_standard_num = std_num; 557 key->link.hotkey_standard_num = std_num;
554 key->poll_hotkey.flag = 0; 558 key->poll_hotkey.flag = 0;
555 key->poll_hotkey.poll_method = poll_method; 559 key->poll_hotkey.poll_method = config_entry[bus_method];
556 key->poll_hotkey.action_method = action_method; 560 key->poll_hotkey.action_method = config_entry[method];
557 561
558 status = 562 status = acpi_get_handle(NULL, config_entry[bus_handle],
559 acpi_get_handle(NULL, poll_str, &(key->poll_hotkey.poll_handle)); 563 &(key->poll_hotkey.poll_handle));
560 if (ACPI_FAILURE(status)) 564 if (ACPI_FAILURE(status))
561 goto do_fail; 565 goto do_fail_zero;
562 status = acpi_get_handle(key->poll_hotkey.poll_handle, 566 status = acpi_get_handle(key->poll_hotkey.poll_handle,
563 poll_method, &tmp_handle); 567 config_entry[bus_method], &tmp_handle);
564 if (ACPI_FAILURE(status)) 568 if (ACPI_FAILURE(status))
565 goto do_fail; 569 goto do_fail_zero;
566 status = 570 status =
567 acpi_get_handle(NULL, action_str, 571 acpi_get_handle(NULL, config_entry[action_handle],
568 &(key->poll_hotkey.action_handle)); 572 &(key->poll_hotkey.action_handle));
569 if (ACPI_FAILURE(status)) 573 if (ACPI_FAILURE(status))
570 goto do_fail; 574 goto do_fail_zero;
571 status = acpi_get_handle(key->poll_hotkey.action_handle, 575 status = acpi_get_handle(key->poll_hotkey.action_handle,
572 action_method, &tmp_handle); 576 config_entry[method], &tmp_handle);
573 if (ACPI_FAILURE(status)) 577 if (ACPI_FAILURE(status))
574 goto do_fail; 578 goto do_fail_zero;
575 key->poll_hotkey.poll_result = 579 key->poll_hotkey.poll_result =
576 (union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL); 580 (union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL);
577 if (!key->poll_hotkey.poll_result) 581 if (!key->poll_hotkey.poll_result)
578 goto do_fail; 582 goto do_fail_zero;
579 return AE_OK; 583 return AE_OK;
580 do_fail: 584
585do_fail_zero:
586 key->poll_hotkey.poll_method = NULL;
587 key->poll_hotkey.action_method = NULL;
588do_fail:
581 return -ENODEV; 589 return -ENODEV;
582} 590}
583 591
@@ -652,17 +660,18 @@ static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset)
652} 660}
653 661
654static int 662static int
655get_parms(char *config_record, 663get_parms(char *config_record, int *cmd, char **config_entry,
656 int *cmd, 664 int *internal_event_num, int *external_event_num)
657 char **bus_handle,
658 char **bus_method,
659 char **action_handle,
660 char **method, int *internal_event_num, int *external_event_num)
661{ 665{
666/* the format of *config_record =
667 * "1:\d+:*" : "cmd:internal_event_num"
668 * "\d+:\w+:\w+:\w+:\w+:\d+:\d+" :
669 * "cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num"
670 */
662 char *tmp, *tmp1, count; 671 char *tmp, *tmp1, count;
672 int i;
663 673
664 sscanf(config_record, "%d", cmd); 674 sscanf(config_record, "%d", cmd);
665
666 if (*cmd == 1) { 675 if (*cmd == 1) {
667 if (sscanf(config_record, "%d:%d", cmd, internal_event_num) != 676 if (sscanf(config_record, "%d:%d", cmd, internal_event_num) !=
668 2) 677 2)
@@ -674,59 +683,27 @@ get_parms(char *config_record,
674 if (!tmp) 683 if (!tmp)
675 goto do_fail; 684 goto do_fail;
676 tmp++; 685 tmp++;
677 tmp1 = strchr(tmp, ':'); 686 for (i = 0; i < LAST_CONF_ENTRY; i++) {
678 if (!tmp1) 687 tmp1 = strchr(tmp, ':');
679 goto do_fail; 688 if (!tmp1) {
680 689 goto do_fail;
681 count = tmp1 - tmp; 690 }
682 *bus_handle = (char *)kmalloc(count + 1, GFP_KERNEL); 691 count = tmp1 - tmp;
683 if (!*bus_handle) 692 config_entry[i] = kzalloc(count + 1, GFP_KERNEL);
684 goto do_fail; 693 if (!config_entry[i])
685 strncpy(*bus_handle, tmp, count); 694 goto handle_failure;
686 *(*bus_handle + count) = 0; 695 strncpy(config_entry[i], tmp, count);
687 696 tmp = tmp1 + 1;
688 tmp = tmp1; 697 }
689 tmp++; 698 if (sscanf(tmp, "%d:%d", internal_event_num, external_event_num) <= 0)
690 tmp1 = strchr(tmp, ':'); 699 goto handle_failure;
691 if (!tmp1) 700 if (!IS_OTHERS(*internal_event_num)) {
692 goto do_fail; 701 return 6;
693 count = tmp1 - tmp; 702 }
694 *bus_method = (char *)kmalloc(count + 1, GFP_KERNEL); 703handle_failure:
695 if (!*bus_method) 704 while (i-- > 0)
696 goto do_fail; 705 kfree(config_entry[i]);
697 strncpy(*bus_method, tmp, count); 706do_fail:
698 *(*bus_method + count) = 0;
699
700 tmp = tmp1;
701 tmp++;
702 tmp1 = strchr(tmp, ':');
703 if (!tmp1)
704 goto do_fail;
705 count = tmp1 - tmp;
706 *action_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
707 if (!*action_handle)
708 goto do_fail;
709 strncpy(*action_handle, tmp, count);
710 *(*action_handle + count) = 0;
711
712 tmp = tmp1;
713 tmp++;
714 tmp1 = strchr(tmp, ':');
715 if (!tmp1)
716 goto do_fail;
717 count = tmp1 - tmp;
718 *method = (char *)kmalloc(count + 1, GFP_KERNEL);
719 if (!*method)
720 goto do_fail;
721 strncpy(*method, tmp, count);
722 *(*method + count) = 0;
723
724 if (sscanf(tmp1 + 1, "%d:%d", internal_event_num, external_event_num) <=
725 0)
726 goto do_fail;
727
728 return 6;
729 do_fail:
730 return -1; 707 return -1;
731} 708}
732 709
@@ -736,50 +713,34 @@ static ssize_t hotkey_write_config(struct file *file,
736 size_t count, loff_t * data) 713 size_t count, loff_t * data)
737{ 714{
738 char *config_record = NULL; 715 char *config_record = NULL;
739 char *bus_handle = NULL; 716 char *config_entry[LAST_CONF_ENTRY];
740 char *bus_method = NULL;
741 char *action_handle = NULL;
742 char *method = NULL;
743 int cmd, internal_event_num, external_event_num; 717 int cmd, internal_event_num, external_event_num;
744 int ret = 0; 718 int ret = 0;
745 union acpi_hotkey *key = NULL; 719 union acpi_hotkey *key = kzalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
746 720
721 if (!key)
722 return -ENOMEM;
747 723
748 config_record = (char *)kmalloc(count + 1, GFP_KERNEL); 724 config_record = kzalloc(count + 1, GFP_KERNEL);
749 if (!config_record) 725 if (!config_record) {
726 kfree(key);
750 return -ENOMEM; 727 return -ENOMEM;
728 }
751 729
752 if (copy_from_user(config_record, buffer, count)) { 730 if (copy_from_user(config_record, buffer, count)) {
753 kfree(config_record); 731 kfree(config_record);
732 kfree(key);
754 printk(KERN_ERR PREFIX "Invalid data\n"); 733 printk(KERN_ERR PREFIX "Invalid data\n");
755 return -EINVAL; 734 return -EINVAL;
756 } 735 }
757 config_record[count] = 0; 736 ret = get_parms(config_record, &cmd, config_entry,
758 737 &internal_event_num, &external_event_num);
759 ret = get_parms(config_record,
760 &cmd,
761 &bus_handle,
762 &bus_method,
763 &action_handle,
764 &method, &internal_event_num, &external_event_num);
765
766 kfree(config_record); 738 kfree(config_record);
767 if (IS_OTHERS(internal_event_num))
768 goto do_fail;
769 if (ret != 6) { 739 if (ret != 6) {
770 do_fail:
771 kfree(bus_handle);
772 kfree(bus_method);
773 kfree(action_handle);
774 kfree(method);
775 printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret); 740 printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret);
776 return -EINVAL; 741 return -EINVAL;
777 } 742 }
778 743
779 key = kmalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
780 if (!key)
781 goto do_fail;
782 memset(key, 0, sizeof(union acpi_hotkey));
783 if (cmd == 1) { 744 if (cmd == 1) {
784 union acpi_hotkey *tmp = NULL; 745 union acpi_hotkey *tmp = NULL;
785 tmp = get_hotkey_by_event(&global_hotkey_list, 746 tmp = get_hotkey_by_event(&global_hotkey_list,
@@ -791,34 +752,19 @@ static ssize_t hotkey_write_config(struct file *file,
791 goto cont_cmd; 752 goto cont_cmd;
792 } 753 }
793 if (IS_EVENT(internal_event_num)) { 754 if (IS_EVENT(internal_event_num)) {
794 kfree(bus_method); 755 if (init_hotkey_device(key, config_entry,
795 ret = init_hotkey_device(key, bus_handle, action_handle, method, 756 internal_event_num, external_event_num))
796 internal_event_num, 757 goto init_hotkey_fail;
797 external_event_num); 758 } else {
798 } else 759 if (init_poll_hotkey_device(key, config_entry,
799 ret = init_poll_hotkey_device(key, bus_handle, bus_method, 760 internal_event_num))
800 action_handle, method, 761 goto init_poll_hotkey_fail;
801 internal_event_num);
802 if (ret) {
803 kfree(bus_handle);
804 kfree(action_handle);
805 if (IS_EVENT(internal_event_num))
806 free_hotkey_buffer(key);
807 else
808 free_poll_hotkey_buffer(key);
809 kfree(key);
810 printk(KERN_ERR PREFIX "Invalid hotkey\n");
811 return -EINVAL;
812 } 762 }
813 763cont_cmd:
814 cont_cmd:
815 kfree(bus_handle);
816 kfree(action_handle);
817
818 switch (cmd) { 764 switch (cmd) {
819 case 0: 765 case 0:
820 if (get_hotkey_by_event 766 if (get_hotkey_by_event(&global_hotkey_list,
821 (&global_hotkey_list, key->link.hotkey_standard_num)) 767 key->link.hotkey_standard_num))
822 goto fail_out; 768 goto fail_out;
823 else 769 else
824 hotkey_add(key); 770 hotkey_add(key);
@@ -827,6 +773,7 @@ static ssize_t hotkey_write_config(struct file *file,
827 hotkey_remove(key); 773 hotkey_remove(key);
828 break; 774 break;
829 case 2: 775 case 2:
776 /* key is kfree()ed if matched*/
830 if (hotkey_update(key)) 777 if (hotkey_update(key))
831 goto fail_out; 778 goto fail_out;
832 break; 779 break;
@@ -835,11 +782,22 @@ static ssize_t hotkey_write_config(struct file *file,
835 break; 782 break;
836 } 783 }
837 return count; 784 return count;
838 fail_out: 785
839 if (IS_EVENT(internal_event_num)) 786init_poll_hotkey_fail: /* failed init_poll_hotkey_device */
840 free_hotkey_buffer(key); 787 kfree(config_entry[bus_method]);
841 else 788 config_entry[bus_method] = NULL;
842 free_poll_hotkey_buffer(key); 789init_hotkey_fail: /* failed init_hotkey_device */
790 kfree(config_entry[method]);
791fail_out:
792 kfree(config_entry[bus_handle]);
793 kfree(config_entry[action_handle]);
794 /* No double free since elements are NULL for error cases */
795 if (IS_EVENT(internal_event_num)) {
796 if (config_entry[bus_method])
797 kfree(config_entry[bus_method]);
798 free_hotkey_buffer(key); /* frees [method] */
799 } else
800 free_poll_hotkey_buffer(key); /* frees [bus_method]+[method] */
843 kfree(key); 801 kfree(key);
844 printk(KERN_ERR PREFIX "invalid key\n"); 802 printk(KERN_ERR PREFIX "invalid key\n");
845 return -EINVAL; 803 return -EINVAL;
@@ -923,10 +881,9 @@ static ssize_t hotkey_execute_aml_method(struct file *file,
923 union acpi_hotkey *key; 881 union acpi_hotkey *key;
924 882
925 883
926 arg = (char *)kmalloc(count + 1, GFP_KERNEL); 884 arg = kzalloc(count + 1, GFP_KERNEL);
927 if (!arg) 885 if (!arg)
928 return -ENOMEM; 886 return -ENOMEM;
929 arg[count] = 0;
930 887
931 if (copy_from_user(arg, buffer, count)) { 888 if (copy_from_user(arg, buffer, count)) {
932 kfree(arg); 889 kfree(arg);
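
For reference, the rewritten get_parms() still expects the colon-separated record documented in its new comment, i.e. cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num. The record below is a purely hypothetical example of that format; none of the ACPI handles, method names or event numbers are taken from a real platform.

	"0:\_SB.PCI0.EC0:_Q10:\_SB.PCI0.EC0:HKEY:2:3"

	cmd                = 0    (add a new hotkey entry)
	bus_handle         = \_SB.PCI0.EC0
	bus_method         = _Q10
	action_handle      = \_SB.PCI0.EC0
	method             = HKEY
	internal_event_num = 2
	external_event_num = 3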
diff --git a/drivers/acpi/i2c_ec.c b/drivers/acpi/i2c_ec.c
index 84239d51dc0c..6809c283ec58 100644
--- a/drivers/acpi/i2c_ec.c
+++ b/drivers/acpi/i2c_ec.c
@@ -330,7 +330,7 @@ static int acpi_ec_hc_add(struct acpi_device *device)
330 status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val); 330 status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val);
331 if (ACPI_FAILURE(status)) { 331 if (ACPI_FAILURE(status)) {
332 ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n")); 332 ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n"));
333 kfree(ec_hc->smbus); 333 kfree(ec_hc);
334 kfree(smbus); 334 kfree(smbus);
335 return -EIO; 335 return -EIO;
336 } 336 }
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b7d1514cd199..507f051d1cef 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -746,6 +746,16 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
746 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 746 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
747 handle, units, timeout)); 747 handle, units, timeout));
748 748
749 /*
750 * This can be called during resume with interrupts off.
751 * Like boot-time, we should be single threaded and will
752 * always get the lock if we try -- timeout or not.
753 * If this doesn't succeed, then we will oops courtesy of
754 * might_sleep() in down().
755 */
756 if (!down_trylock(sem))
757 return AE_OK;
758
749 switch (timeout) { 759 switch (timeout) {
750 /* 760 /*
751 * No Wait: 761 * No Wait:
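
The comment added to acpi_os_wait_semaphore() above captures the trick: during resume, with interrupts off, the semaphore should always be free, so a non-sleeping down_trylock() attempt is made before falling back to the blocking paths. A standalone sketch of that try-then-wait shape, with a hypothetical semaphore and no timeout handling, might look like this:

	#include <asm/semaphore.h>

	/* Hypothetical helper: take "sem", avoiding sleep when possible. */
	static int example_take(struct semaphore *sem)
	{
		/* Non-blocking attempt first; returns 0 when acquired. */
		if (!down_trylock(sem))
			return 0;

		/* Otherwise use the ordinary, possibly sleeping path. */
		down(sem);
		return 0;
	}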
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index db7b350a5035..62bef0b3b614 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -1714,6 +1714,9 @@ static int __init acpi_sbs_init(void)
1714{ 1714{
1715 int result = 0; 1715 int result = 0;
1716 1716
1717 if (acpi_disabled)
1718 return -ENODEV;
1719
1717 init_MUTEX(&sbs_sem); 1720 init_MUTEX(&sbs_sem);
1718 1721
1719 if (capacity_mode != DEF_CAPACITY_UNIT 1722 if (capacity_mode != DEF_CAPACITY_UNIT
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5fcb50c7b778..698a1540e303 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -4,6 +4,7 @@
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/kernel.h>
7#include <linux/acpi.h> 8#include <linux/acpi.h>
8 9
9#include <acpi/acpi_drivers.h> 10#include <acpi/acpi_drivers.h>
@@ -113,6 +114,8 @@ static struct kset acpi_namespace_kset = {
113static void acpi_device_register(struct acpi_device *device, 114static void acpi_device_register(struct acpi_device *device,
114 struct acpi_device *parent) 115 struct acpi_device *parent)
115{ 116{
117 int err;
118
116 /* 119 /*
117 * Linkage 120 * Linkage
118 * ------- 121 * -------
@@ -138,7 +141,10 @@ static void acpi_device_register(struct acpi_device *device,
138 device->kobj.parent = &parent->kobj; 141 device->kobj.parent = &parent->kobj;
139 device->kobj.ktype = &ktype_acpi_ns; 142 device->kobj.ktype = &ktype_acpi_ns;
140 device->kobj.kset = &acpi_namespace_kset; 143 device->kobj.kset = &acpi_namespace_kset;
141 kobject_register(&device->kobj); 144 err = kobject_register(&device->kobj);
145 if (err < 0)
146 printk(KERN_WARNING "%s: kobject_register error: %d\n",
147 __FUNCTION__, err);
142 create_sysfs_device_files(device); 148 create_sysfs_device_files(device);
143} 149}
144 150
@@ -1450,7 +1456,9 @@ static int __init acpi_scan_init(void)
1450 if (acpi_disabled) 1456 if (acpi_disabled)
1451 return 0; 1457 return 0;
1452 1458
1453 kset_register(&acpi_namespace_kset); 1459 result = kset_register(&acpi_namespace_kset);
1460 if (result < 0)
1461 printk(KERN_ERR PREFIX "kset_register error: %d\n", result);
1454 1462
1455 result = bus_register(&acpi_bus_type); 1463 result = bus_register(&acpi_bus_type);
1456 if (result) { 1464 if (result) {
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f48227f4c8c9..d0d84c43a9d4 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -262,7 +262,7 @@ acpi_evaluate_integer(acpi_handle handle,
262 if (!data) 262 if (!data)
263 return AE_BAD_PARAMETER; 263 return AE_BAD_PARAMETER;
264 264
265 element = kmalloc(sizeof(union acpi_object), GFP_KERNEL); 265 element = kmalloc(sizeof(union acpi_object), irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
266 if (!element) 266 if (!element)
267 return AE_NO_MEMORY; 267 return AE_NO_MEMORY;
268 268
diff --git a/drivers/base/node.c b/drivers/base/node.c
index d7de1753e094..e9b0957f15d1 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -64,7 +64,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
64 "Node %d Mapped: %8lu kB\n" 64 "Node %d Mapped: %8lu kB\n"
65 "Node %d AnonPages: %8lu kB\n" 65 "Node %d AnonPages: %8lu kB\n"
66 "Node %d PageTables: %8lu kB\n" 66 "Node %d PageTables: %8lu kB\n"
67 "Node %d NFS Unstable: %8lu kB\n" 67 "Node %d NFS_Unstable: %8lu kB\n"
68 "Node %d Bounce: %8lu kB\n" 68 "Node %d Bounce: %8lu kB\n"
69 "Node %d Slab: %8lu kB\n", 69 "Node %d Slab: %8lu kB\n",
70 nid, K(i.totalram), 70 nid, K(i.totalram),
diff --git a/drivers/cdrom/gscd.c b/drivers/cdrom/gscd.c
index b6ee50a2916d..fa7082489765 100644
--- a/drivers/cdrom/gscd.c
+++ b/drivers/cdrom/gscd.c
@@ -266,7 +266,7 @@ repeat:
266 goto out; 266 goto out;
267 267
268 if (req->cmd != READ) { 268 if (req->cmd != READ) {
269 printk("GSCD: bad cmd %lu\n", rq_data_dir(req)); 269 printk("GSCD: bad cmd %u\n", rq_data_dir(req));
270 end_request(req, 0); 270 end_request(req, 0);
271 goto repeat; 271 goto repeat;
272 } 272 }
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 4ea7bd5f4f56..a369dd6877d8 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -142,6 +142,7 @@ typedef struct _moxa_board_conf {
142 142
143static moxa_board_conf moxa_boards[MAX_BOARDS]; 143static moxa_board_conf moxa_boards[MAX_BOARDS];
144static void __iomem *moxaBaseAddr[MAX_BOARDS]; 144static void __iomem *moxaBaseAddr[MAX_BOARDS];
145static int loadstat[MAX_BOARDS];
145 146
146struct moxa_str { 147struct moxa_str {
147 int type; 148 int type;
@@ -1688,6 +1689,8 @@ int MoxaDriverPoll(void)
1688 if (moxaCard == 0) 1689 if (moxaCard == 0)
1689 return (-1); 1690 return (-1);
1690 for (card = 0; card < MAX_BOARDS; card++) { 1691 for (card = 0; card < MAX_BOARDS; card++) {
1692 if (loadstat[card] == 0)
1693 continue;
1691 if ((ports = moxa_boards[card].numPorts) == 0) 1694 if ((ports = moxa_boards[card].numPorts) == 0)
1692 continue; 1695 continue;
1693 if (readb(moxaIntPend[card]) == 0xff) { 1696 if (readb(moxaIntPend[card]) == 0xff) {
@@ -2903,6 +2906,7 @@ static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
2903 } 2906 }
2904 break; 2907 break;
2905 } 2908 }
2909 loadstat[cardno] = 1;
2906 return (0); 2910 return (0);
2907} 2911}
2908 2912
@@ -2920,7 +2924,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2920 len1 = len >> 1; 2924 len1 = len >> 1;
2921 ptr = (ushort *) moxaBuff; 2925 ptr = (ushort *) moxaBuff;
2922 for (i = 0; i < len1; i++) 2926 for (i = 0; i < len1; i++)
2923 usum += *(ptr + i); 2927 usum += le16_to_cpu(*(ptr + i));
2924 retry = 0; 2928 retry = 0;
2925 do { 2929 do {
2926 len1 = len >> 1; 2930 len1 = len >> 1;
@@ -2992,7 +2996,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2992 wlen = len >> 1; 2996 wlen = len >> 1;
2993 uptr = (ushort *) moxaBuff; 2997 uptr = (ushort *) moxaBuff;
2994 for (i = 0; i < wlen; i++) 2998 for (i = 0; i < wlen; i++)
2995 usum += uptr[i]; 2999 usum += le16_to_cpu(uptr[i]);
2996 retry = 0; 3000 retry = 0;
2997 j = 0; 3001 j = 0;
2998 do { 3002 do {
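
The moxa.c checksum loops above now convert each firmware word from little-endian before summing, so the result matches on big-endian hosts as well. A small endian-safe checksum sketch, with invented names and a buffer assumed to hold little-endian 16-bit words, is shown below:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Sum 16-bit little-endian words in a CPU-endianness-safe way. */
	static unsigned short example_fw_csum(const __le16 *buf, int len_bytes)
	{
		unsigned short sum = 0;
		int i;

		for (i = 0; i < (len_bytes >> 1); i++)
			sum += le16_to_cpu(buf[i]);
		return sum;
	}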
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index bfdb90242a90..bb0d9199e994 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -153,6 +153,15 @@ int tty_ioctl(struct inode * inode, struct file * file,
153static int tty_fasync(int fd, struct file * filp, int on); 153static int tty_fasync(int fd, struct file * filp, int on);
154static void release_mem(struct tty_struct *tty, int idx); 154static void release_mem(struct tty_struct *tty, int idx);
155 155
156/**
157 * alloc_tty_struct - allocate a tty object
158 *
159 * Return a new empty tty structure. The data fields have not
160 * been initialized in any way but have been zeroed
161 *
162 * Locking: none
163 * FIXME: use kzalloc
164 */
156 165
157static struct tty_struct *alloc_tty_struct(void) 166static struct tty_struct *alloc_tty_struct(void)
158{ 167{
@@ -166,6 +175,15 @@ static struct tty_struct *alloc_tty_struct(void)
166 175
167static void tty_buffer_free_all(struct tty_struct *); 176static void tty_buffer_free_all(struct tty_struct *);
168 177
178/**
179 * free_tty_struct - free a disused tty
180 * @tty: tty struct to free
181 *
182 * Free the write buffers, tty queue and tty memory itself.
183 *
184 * Locking: none. Must be called after tty is definitely unused
185 */
186
169static inline void free_tty_struct(struct tty_struct *tty) 187static inline void free_tty_struct(struct tty_struct *tty)
170{ 188{
171 kfree(tty->write_buf); 189 kfree(tty->write_buf);
@@ -175,6 +193,17 @@ static inline void free_tty_struct(struct tty_struct *tty)
175 193
176#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base) 194#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
177 195
196/**
197 * tty_name - return tty naming
198 * @tty: tty structure
199 * @buf: buffer for output
200 *
201 * Convert a tty structure into a name. The name reflects the kernel
202 * naming policy and if udev is in use may not reflect user space
203 *
204 * Locking: none
205 */
206
178char *tty_name(struct tty_struct *tty, char *buf) 207char *tty_name(struct tty_struct *tty, char *buf)
179{ 208{
180 if (!tty) /* Hmm. NULL pointer. That's fun. */ 209 if (!tty) /* Hmm. NULL pointer. That's fun. */
@@ -235,6 +264,28 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
235 * Tty buffer allocation management 264 * Tty buffer allocation management
236 */ 265 */
237 266
267
268/**
269 * tty_buffer_free_all - free buffers used by a tty
270 * @tty: tty to free from
271 *
272 * Remove all the buffers pending on a tty whether queued with data
273 * or in the free ring. Must be called when the tty is no longer in use
274 *
275 * Locking: none
276 */
277
278
279/**
280 * tty_buffer_free_all - free buffers used by a tty
281 * @tty: tty to free from
282 *
283 * Remove all the buffers pending on a tty whether queued with data
284 * or in the free ring. Must be called when the tty is no longer in use
285 *
286 * Locking: none
287 */
288
238static void tty_buffer_free_all(struct tty_struct *tty) 289static void tty_buffer_free_all(struct tty_struct *tty)
239{ 290{
240 struct tty_buffer *thead; 291 struct tty_buffer *thead;
@@ -247,19 +298,47 @@ static void tty_buffer_free_all(struct tty_struct *tty)
247 kfree(thead); 298 kfree(thead);
248 } 299 }
249 tty->buf.tail = NULL; 300 tty->buf.tail = NULL;
301 tty->buf.memory_used = 0;
250} 302}
251 303
304/**
305 * tty_buffer_init - prepare a tty buffer structure
306 * @tty: tty to initialise
307 *
308 * Set up the initial state of the buffer management for a tty device.
309 * Must be called before the other tty buffer functions are used.
310 *
311 * Locking: none
312 */
313
252static void tty_buffer_init(struct tty_struct *tty) 314static void tty_buffer_init(struct tty_struct *tty)
253{ 315{
254 spin_lock_init(&tty->buf.lock); 316 spin_lock_init(&tty->buf.lock);
255 tty->buf.head = NULL; 317 tty->buf.head = NULL;
256 tty->buf.tail = NULL; 318 tty->buf.tail = NULL;
257 tty->buf.free = NULL; 319 tty->buf.free = NULL;
320 tty->buf.memory_used = 0;
258} 321}
259 322
260static struct tty_buffer *tty_buffer_alloc(size_t size) 323/**
324 * tty_buffer_alloc - allocate a tty buffer
325 * @tty: tty device
326 * @size: desired size (characters)
327 *
328 * Allocate a new tty buffer to hold the desired number of characters.
329 * Return NULL if out of memory or the allocation would exceed the
330 * per device queue
331 *
332 * Locking: Caller must hold tty->buf.lock
333 */
334
335static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
261{ 336{
262 struct tty_buffer *p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); 337 struct tty_buffer *p;
338
339 if (tty->buf.memory_used + size > 65536)
340 return NULL;
341 p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
263 if(p == NULL) 342 if(p == NULL)
264 return NULL; 343 return NULL;
265 p->used = 0; 344 p->used = 0;
@@ -269,17 +348,27 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
269 p->read = 0; 348 p->read = 0;
270 p->char_buf_ptr = (char *)(p->data); 349 p->char_buf_ptr = (char *)(p->data);
271 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size; 350 p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
272/* printk("Flip create %p\n", p); */ 351 tty->buf.memory_used += size;
273 return p; 352 return p;
274} 353}
275 354
276/* Must be called with the tty_read lock held. This needs to acquire strategy 355/**
277 code to decide if we should kfree or relink a given expired buffer */ 356 * tty_buffer_free - free a tty buffer
357 * @tty: tty owning the buffer
358 * @b: the buffer to free
359 *
360 * Free a tty buffer, or add it to the free list according to our
361 * internal strategy
362 *
363 * Locking: Caller must hold tty->buf.lock
364 */
278 365
279static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) 366static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
280{ 367{
281 /* Dumb strategy for now - should keep some stats */ 368 /* Dumb strategy for now - should keep some stats */
282/* printk("Flip dispose %p\n", b); */ 369 tty->buf.memory_used -= b->size;
370 WARN_ON(tty->buf.memory_used < 0);
371
283 if(b->size >= 512) 372 if(b->size >= 512)
284 kfree(b); 373 kfree(b);
285 else { 374 else {
@@ -288,6 +377,18 @@ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
288 } 377 }
289} 378}
290 379
380/**
381 * tty_buffer_find - find a free tty buffer
382 * @tty: tty owning the buffer
383 * @size: characters wanted
384 *
385 * Locate an existing suitable tty buffer or if we are lacking one then
386 * allocate a new one. We round our buffers off in 256 character chunks
387 * to get better allocation behaviour.
388 *
389 * Locking: Caller must hold tty->buf.lock
390 */
391
291static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) 392static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
292{ 393{
293 struct tty_buffer **tbh = &tty->buf.free; 394 struct tty_buffer **tbh = &tty->buf.free;
@@ -299,20 +400,28 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
299 t->used = 0; 400 t->used = 0;
300 t->commit = 0; 401 t->commit = 0;
301 t->read = 0; 402 t->read = 0;
302 /* DEBUG ONLY */ 403 tty->buf.memory_used += t->size;
303/* memset(t->data, '*', size); */
304/* printk("Flip recycle %p\n", t); */
305 return t; 404 return t;
306 } 405 }
307 tbh = &((*tbh)->next); 406 tbh = &((*tbh)->next);
308 } 407 }
309 /* Round the buffer size out */ 408 /* Round the buffer size out */
310 size = (size + 0xFF) & ~ 0xFF; 409 size = (size + 0xFF) & ~ 0xFF;
311 return tty_buffer_alloc(size); 410 return tty_buffer_alloc(tty, size);
312 /* Should possibly check if this fails for the largest buffer we 411 /* Should possibly check if this fails for the largest buffer we
313 have queued and recycle that ? */ 412 have queued and recycle that ? */
314} 413}
315 414
415/**
416 * tty_buffer_request_room - grow tty buffer if needed
417 * @tty: tty structure
418 * @size: size desired
419 *
420 * Make at least size bytes of linear space available for the tty
421 * buffer. If we fail return the size we managed to find.
422 *
423 * Locking: Takes tty->buf.lock
424 */
316int tty_buffer_request_room(struct tty_struct *tty, size_t size) 425int tty_buffer_request_room(struct tty_struct *tty, size_t size)
317{ 426{
318 struct tty_buffer *b, *n; 427 struct tty_buffer *b, *n;
@@ -347,6 +456,18 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
347} 456}
348EXPORT_SYMBOL_GPL(tty_buffer_request_room); 457EXPORT_SYMBOL_GPL(tty_buffer_request_room);
349 458
459/**
460 * tty_insert_flip_string - Add characters to the tty buffer
461 * @tty: tty structure
462 * @chars: characters
463 * @size: size
464 *
465 * Queue a series of bytes to the tty buffering. All the characters
466 * passed are marked as without error. Returns the number added.
467 *
468 * Locking: Called functions may take tty->buf.lock
469 */
470
350int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, 471int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
351 size_t size) 472 size_t size)
352{ 473{
@@ -370,6 +491,20 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
370} 491}
371EXPORT_SYMBOL(tty_insert_flip_string); 492EXPORT_SYMBOL(tty_insert_flip_string);
372 493
494/**
495 * tty_insert_flip_string_flags - Add characters to the tty buffer
496 * @tty: tty structure
497 * @chars: characters
498 * @flags: flag bytes
499 * @size: size
500 *
501 * Queue a series of bytes to the tty buffering. For each character
502 * the flags array indicates the status of the character. Returns the
503 * number added.
504 *
505 * Locking: Called functions may take tty->buf.lock
506 */
507
373int tty_insert_flip_string_flags(struct tty_struct *tty, 508int tty_insert_flip_string_flags(struct tty_struct *tty,
374 const unsigned char *chars, const char *flags, size_t size) 509 const unsigned char *chars, const char *flags, size_t size)
375{ 510{
@@ -394,6 +529,17 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
394} 529}
395EXPORT_SYMBOL(tty_insert_flip_string_flags); 530EXPORT_SYMBOL(tty_insert_flip_string_flags);
396 531
532/**
533 * tty_schedule_flip - push characters to ldisc
534 * @tty: tty to push from
535 *
536 * Takes any pending buffers and transfers their ownership to the
537 * ldisc side of the queue. It then schedules those characters for
538 * processing by the line discipline.
539 *
540 * Locking: Takes tty->buf.lock
541 */
542
397void tty_schedule_flip(struct tty_struct *tty) 543void tty_schedule_flip(struct tty_struct *tty)
398{ 544{
399 unsigned long flags; 545 unsigned long flags;
@@ -405,12 +551,19 @@ void tty_schedule_flip(struct tty_struct *tty)
405} 551}
406EXPORT_SYMBOL(tty_schedule_flip); 552EXPORT_SYMBOL(tty_schedule_flip);
407 553
408/* 554/**
555 * tty_prepare_flip_string - make room for characters
556 * @tty: tty
557 * @chars: return pointer for character write area
558 * @size: desired size
559 *
409 * Prepare a block of space in the buffer for data. Returns the length 560 * Prepare a block of space in the buffer for data. Returns the length
410 * available and buffer pointer to the space which is now allocated and 561 * available and buffer pointer to the space which is now allocated and
411 * accounted for as ready for normal characters. This is used for drivers 562 * accounted for as ready for normal characters. This is used for drivers
412 * that need their own block copy routines into the buffer. There is no 563 * that need their own block copy routines into the buffer. There is no
413 * guarantee the buffer is a DMA target! 564 * guarantee the buffer is a DMA target!
565 *
566 * Locking: May call functions taking tty->buf.lock
414 */ 567 */
415 568
416int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) 569int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size)
@@ -427,12 +580,20 @@ int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_
427 580
428EXPORT_SYMBOL_GPL(tty_prepare_flip_string); 581EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
429 582
430/* 583/**
584 * tty_prepare_flip_string_flags - make room for characters
585 * @tty: tty
586 * @chars: return pointer for character write area
587 * @flags: return pointer for status flag write area
588 * @size: desired size
589 *
431 * Prepare a block of space in the buffer for data. Returns the length 590 * Prepare a block of space in the buffer for data. Returns the length
432 * available and buffer pointer to the space which is now allocated and 591 * available and buffer pointer to the space which is now allocated and
433 * accounted for as ready for characters. This is used for drivers 592 * accounted for as ready for characters. This is used for drivers
434 * that need their own block copy routines into the buffer. There is no 593 * that need their own block copy routines into the buffer. There is no
435 * guarantee the buffer is a DMA target! 594 * guarantee the buffer is a DMA target!
595 *
596 * Locking: May call functions taking tty->buf.lock
436 */ 597 */
437 598
438int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) 599int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size)
@@ -451,10 +612,16 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
451 612
452 613
453 614
454/* 615/**
616 * tty_set_termios_ldisc - set ldisc field
617 * @tty: tty structure
618 * @num: line discipline number
619 *
455 * This is probably overkill for real world processors but 620 * This is probably overkill for real world processors but
456 * they are not on hot paths so a little discipline won't do 621 * they are not on hot paths so a little discipline won't do
457 * any harm. 622 * any harm.
623 *
624 * Locking: takes termios_sem
458 */ 625 */
459 626
460static void tty_set_termios_ldisc(struct tty_struct *tty, int num) 627static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
@@ -474,6 +641,19 @@ static DEFINE_SPINLOCK(tty_ldisc_lock);
474static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); 641static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
475static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */ 642static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */
476 643
644/**
645 * tty_register_ldisc - install a line discipline
646 * @disc: ldisc number
647 * @new_ldisc: pointer to the ldisc object
648 *
649 * Installs a new line discipline into the kernel. The discipline
650 * is set up as unreferenced and then made available to the kernel
651 * from this point onwards.
652 *
653 * Locking:
654 * takes tty_ldisc_lock to guard against ldisc races
655 */
656
477int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc) 657int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
478{ 658{
479 unsigned long flags; 659 unsigned long flags;
@@ -493,6 +673,18 @@ int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
493} 673}
494EXPORT_SYMBOL(tty_register_ldisc); 674EXPORT_SYMBOL(tty_register_ldisc);
495 675
676/**
677 * tty_unregister_ldisc - unload a line discipline
678 * @disc: ldisc number
679 * @new_ldisc: pointer to the ldisc object
680 *
681 * Remove a line discipline from the kernel providing it is not
682 * currently in use.
683 *
684 * Locking:
685 * takes tty_ldisc_lock to guard against ldisc races
686 */
687
496int tty_unregister_ldisc(int disc) 688int tty_unregister_ldisc(int disc)
497{ 689{
498 unsigned long flags; 690 unsigned long flags;
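The kernel-doc added in the two hunks above covers the tty_register_ldisc()/tty_unregister_ldisc() pair. As a rough editorial sketch only (not part of this patch, assuming the 2.6-era struct tty_ldisc layout), a module providing a do-nothing discipline could look like this; the N_MOUSE slot and all "sample_" names are purely illustrative:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_ldisc.h>

#define SAMPLE_LDISC	N_MOUSE		/* illustrative slot, must be unused */

static int sample_open(struct tty_struct *tty)
{
	return 0;			/* accept attachment to any tty */
}

static void sample_close(struct tty_struct *tty)
{
}

static void sample_receive_buf(struct tty_struct *tty,
			       const unsigned char *cp, char *fp, int count)
{
	/* swallow incoming characters; a real ldisc would parse them */
}

static struct tty_ldisc sample_ldisc = {
	.magic		= TTY_LDISC_MAGIC,
	.name		= "sample",
	.owner		= THIS_MODULE,
	.open		= sample_open,
	.close		= sample_close,
	.receive_buf	= sample_receive_buf,
};

static int __init sample_init(void)
{
	return tty_register_ldisc(SAMPLE_LDISC, &sample_ldisc);
}

static void __exit sample_exit(void)
{
	/* fails with -EBUSY if some tty still has the discipline attached */
	if (tty_unregister_ldisc(SAMPLE_LDISC))
		printk(KERN_ERR "sample: line discipline still in use\n");
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");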
@@ -512,6 +704,19 @@ int tty_unregister_ldisc(int disc)
512} 704}
513EXPORT_SYMBOL(tty_unregister_ldisc); 705EXPORT_SYMBOL(tty_unregister_ldisc);
514 706
707/**
708 * tty_ldisc_get - take a reference to an ldisc
709 * @disc: ldisc number
710 *
711 * Takes a reference to a line discipline. Deals with refcounts and
712 * module locking counts. Returns NULL if the discipline is not available.
713 * Returns a pointer to the discipline and bumps the ref count if it is
 714 * available.
715 *
716 * Locking:
717 * takes tty_ldisc_lock to guard against ldisc races
718 */
719
515struct tty_ldisc *tty_ldisc_get(int disc) 720struct tty_ldisc *tty_ldisc_get(int disc)
516{ 721{
517 unsigned long flags; 722 unsigned long flags;
@@ -540,6 +745,17 @@ struct tty_ldisc *tty_ldisc_get(int disc)
540 745
541EXPORT_SYMBOL_GPL(tty_ldisc_get); 746EXPORT_SYMBOL_GPL(tty_ldisc_get);
542 747
748/**
749 * tty_ldisc_put - drop ldisc reference
750 * @disc: ldisc number
751 *
752 * Drop a reference to a line discipline. Manage refcounts and
 753 * module usage counts.
754 *
755 * Locking:
756 * takes tty_ldisc_lock to guard against ldisc races
757 */
758
543void tty_ldisc_put(int disc) 759void tty_ldisc_put(int disc)
544{ 760{
545 struct tty_ldisc *ld; 761 struct tty_ldisc *ld;
@@ -557,6 +773,19 @@ void tty_ldisc_put(int disc)
557 773
558EXPORT_SYMBOL_GPL(tty_ldisc_put); 774EXPORT_SYMBOL_GPL(tty_ldisc_put);
559 775
776/**
777 * tty_ldisc_assign - set ldisc on a tty
778 * @tty: tty to assign
779 * @ld: line discipline
780 *
781 * Install an instance of a line discipline into a tty structure. The
 782 * ldisc must have a reference count above zero to ensure it remains.
783 * The tty instance refcount starts at zero.
784 *
785 * Locking:
786 * Caller must hold references
787 */
788
560static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld) 789static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
561{ 790{
562 tty->ldisc = *ld; 791 tty->ldisc = *ld;
@@ -571,6 +800,8 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
571 * the tty ldisc. Return 0 on failure or 1 on success. This is 800 * the tty ldisc. Return 0 on failure or 1 on success. This is
572 * used to implement both the waiting and non waiting versions 801 * used to implement both the waiting and non waiting versions
573 * of tty_ldisc_ref 802 * of tty_ldisc_ref
803 *
804 * Locking: takes tty_ldisc_lock
574 */ 805 */
575 806
576static int tty_ldisc_try(struct tty_struct *tty) 807static int tty_ldisc_try(struct tty_struct *tty)
@@ -602,6 +833,8 @@ static int tty_ldisc_try(struct tty_struct *tty)
602 * must also be careful not to hold other locks that will deadlock 833 * must also be careful not to hold other locks that will deadlock
603 * against a discipline change, such as an existing ldisc reference 834 * against a discipline change, such as an existing ldisc reference
604 * (which we check for) 835 * (which we check for)
836 *
 837 * Locking: called functions take tty_ldisc_lock
605 */ 838 */
606 839
607struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) 840struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
@@ -622,6 +855,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
622 * Dereference the line discipline for the terminal and take a 855 * Dereference the line discipline for the terminal and take a
623 * reference to it. If the line discipline is in flux then 856 * reference to it. If the line discipline is in flux then
624 * return NULL. Can be called from IRQ and timer functions. 857 * return NULL. Can be called from IRQ and timer functions.
858 *
859 * Locking: called functions take tty_ldisc_lock
625 */ 860 */
626 861
627struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) 862struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
@@ -639,6 +874,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref);
639 * 874 *
640 * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May 875 * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
641 * be called in IRQ context. 876 * be called in IRQ context.
877 *
878 * Locking: takes tty_ldisc_lock
642 */ 879 */
643 880
644void tty_ldisc_deref(struct tty_ldisc *ld) 881void tty_ldisc_deref(struct tty_ldisc *ld)
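The reference rules spelled out in the hunks above (tty_ldisc_ref() may return NULL while the discipline is in flux, tty_ldisc_ref_wait() may not, and every reference must be balanced with tty_ldisc_deref()) are easiest to see from a caller. A hypothetical sketch, not taken from this patch, of a driver handing received bytes straight to the discipline:

#include <linux/tty.h>

/* Hypothetical helper: push already-buffered bytes to the line discipline. */
static void sample_push_to_ldisc(struct tty_struct *tty,
				 const unsigned char *buf, int count)
{
	/* NULL means the ldisc is being changed; safe from IRQ context. */
	struct tty_ldisc *ld = tty_ldisc_ref(tty);

	if (ld != NULL) {
		/* a production driver would also honour ld->receive_room */
		if (ld->receive_buf)
			ld->receive_buf(tty, buf, NULL, count);
		tty_ldisc_deref(ld);	/* always balance the reference */
	}
}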
@@ -683,6 +920,9 @@ static void tty_ldisc_enable(struct tty_struct *tty)
683 * 920 *
684 * Set the discipline of a tty line. Must be called from a process 921 * Set the discipline of a tty line. Must be called from a process
685 * context. 922 * context.
923 *
924 * Locking: takes tty_ldisc_lock.
925 * called functions take termios_sem
686 */ 926 */
687 927
688static int tty_set_ldisc(struct tty_struct *tty, int ldisc) 928static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
@@ -846,9 +1086,17 @@ restart:
846 return retval; 1086 return retval;
847} 1087}
848 1088
849/* 1089/**
850 * This routine returns a tty driver structure, given a device number 1090 * get_tty_driver - find device of a tty
 1091 * @device: device identifier
1092 * @index: returns the index of the tty
1093 *
1094 * This routine returns a tty driver structure, given a device number
1095 * and also passes back the index number.
1096 *
1097 * Locking: caller must hold tty_mutex
851 */ 1098 */
1099
852static struct tty_driver *get_tty_driver(dev_t device, int *index) 1100static struct tty_driver *get_tty_driver(dev_t device, int *index)
853{ 1101{
854 struct tty_driver *p; 1102 struct tty_driver *p;
@@ -863,11 +1111,17 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
863 return NULL; 1111 return NULL;
864} 1112}
865 1113
866/* 1114/**
867 * If we try to write to, or set the state of, a terminal and we're 1115 * tty_check_change - check for POSIX terminal changes
868 * not in the foreground, send a SIGTTOU. If the signal is blocked or 1116 * @tty: tty to check
869 * ignored, go ahead and perform the operation. (POSIX 7.2) 1117 *
1118 * If we try to write to, or set the state of, a terminal and we're
1119 * not in the foreground, send a SIGTTOU. If the signal is blocked or
1120 * ignored, go ahead and perform the operation. (POSIX 7.2)
1121 *
1122 * Locking: none
870 */ 1123 */
1124
871int tty_check_change(struct tty_struct * tty) 1125int tty_check_change(struct tty_struct * tty)
872{ 1126{
873 if (current->signal->tty != tty) 1127 if (current->signal->tty != tty)
@@ -1005,10 +1259,27 @@ void tty_ldisc_flush(struct tty_struct *tty)
1005 1259
1006EXPORT_SYMBOL_GPL(tty_ldisc_flush); 1260EXPORT_SYMBOL_GPL(tty_ldisc_flush);
1007 1261
1008/* 1262/**
1009 * This can be called by the "eventd" kernel thread. That is process synchronous, 1263 * do_tty_hangup - actual handler for hangup events
1010 * but doesn't hold any locks, so we need to make sure we have the appropriate 1264 * @data: tty device
1011 * locks for what we're doing.. 1265 *
1266 * This can be called by the "eventd" kernel thread. That is process
1267 * synchronous but doesn't hold any locks, so we need to make sure we
1268 * have the appropriate locks for what we're doing.
1269 *
1270 * The hangup event clears any pending redirections onto the hung up
1271 * device. It ensures future writes will error and it does the needed
1272 * line discipline hangup and signal delivery. The tty object itself
1273 * remains intact.
1274 *
1275 * Locking:
1276 * BKL
1277 * redirect lock for undoing redirection
1278 * file list lock for manipulating list of ttys
1279 * tty_ldisc_lock from called functions
 1280 * termios_sem when resetting termios data
1281 * tasklist_lock to walk task list for hangup event
1282 *
1012 */ 1283 */
1013static void do_tty_hangup(void *data) 1284static void do_tty_hangup(void *data)
1014{ 1285{
@@ -1133,6 +1404,14 @@ static void do_tty_hangup(void *data)
1133 fput(f); 1404 fput(f);
1134} 1405}
1135 1406
1407/**
1408 * tty_hangup - trigger a hangup event
1409 * @tty: tty to hangup
1410 *
 1411 * A carrier loss (virtual or otherwise) has occurred on this line;
1412 * schedule a hangup sequence to run after this event.
1413 */
1414
1136void tty_hangup(struct tty_struct * tty) 1415void tty_hangup(struct tty_struct * tty)
1137{ 1416{
1138#ifdef TTY_DEBUG_HANGUP 1417#ifdef TTY_DEBUG_HANGUP
@@ -1145,6 +1424,15 @@ void tty_hangup(struct tty_struct * tty)
1145 1424
1146EXPORT_SYMBOL(tty_hangup); 1425EXPORT_SYMBOL(tty_hangup);
1147 1426
1427/**
1428 * tty_vhangup - process vhangup
1429 * @tty: tty to hangup
1430 *
1431 * The user has asked via system call for the terminal to be hung up.
1432 * We do this synchronously so that when the syscall returns the process
 1433 * is complete. That guarantee is necessary for security reasons.
1434 */
1435
1148void tty_vhangup(struct tty_struct * tty) 1436void tty_vhangup(struct tty_struct * tty)
1149{ 1437{
1150#ifdef TTY_DEBUG_HANGUP 1438#ifdef TTY_DEBUG_HANGUP
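tty_hangup() queues the hangup to run asynchronously, while tty_vhangup() below performs it synchronously; the asynchronous form is what a driver would normally call from its modem-status path. An illustrative sketch only, with a made-up port structure and status bit:

#include <linux/tty.h>

struct sample_port {			/* hypothetical driver state */
	struct tty_struct *tty;
};

#define SAMPLE_MSR_DCD	0x08		/* hypothetical "carrier detect" bit */

static void sample_modem_status(struct sample_port *port, unsigned int msr)
{
	/* Carrier dropped and the termios does not say "ignore carrier". */
	if (port->tty && !(msr & SAMPLE_MSR_DCD) && !C_CLOCAL(port->tty))
		tty_hangup(port->tty);	/* scheduled; returns immediately */
}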
@@ -1156,6 +1444,14 @@ void tty_vhangup(struct tty_struct * tty)
1156} 1444}
1157EXPORT_SYMBOL(tty_vhangup); 1445EXPORT_SYMBOL(tty_vhangup);
1158 1446
1447/**
1448 * tty_hung_up_p - was tty hung up
1449 * @filp: file pointer of tty
1450 *
1451 * Return true if the tty has been subject to a vhangup or a carrier
1452 * loss
1453 */
1454
1159int tty_hung_up_p(struct file * filp) 1455int tty_hung_up_p(struct file * filp)
1160{ 1456{
1161 return (filp->f_op == &hung_up_tty_fops); 1457 return (filp->f_op == &hung_up_tty_fops);
@@ -1163,19 +1459,28 @@ int tty_hung_up_p(struct file * filp)
1163 1459
1164EXPORT_SYMBOL(tty_hung_up_p); 1460EXPORT_SYMBOL(tty_hung_up_p);
1165 1461
1166/* 1462/**
1167 * This function is typically called only by the session leader, when 1463 * disassociate_ctty - disconnect controlling tty
1168 * it wants to disassociate itself from its controlling tty. 1464 * @on_exit: true if exiting so need to "hang up" the session
1465 *
1466 * This function is typically called only by the session leader, when
1467 * it wants to disassociate itself from its controlling tty.
1169 * 1468 *
1170 * It performs the following functions: 1469 * It performs the following functions:
1171 * (1) Sends a SIGHUP and SIGCONT to the foreground process group 1470 * (1) Sends a SIGHUP and SIGCONT to the foreground process group
1172 * (2) Clears the tty from being controlling the session 1471 * (2) Clears the tty from being controlling the session
1173 * (3) Clears the controlling tty for all processes in the 1472 * (3) Clears the controlling tty for all processes in the
1174 * session group. 1473 * session group.
1175 * 1474 *
1176 * The argument on_exit is set to 1 if called when a process is 1475 * The argument on_exit is set to 1 if called when a process is
1177 * exiting; it is 0 if called by the ioctl TIOCNOTTY. 1476 * exiting; it is 0 if called by the ioctl TIOCNOTTY.
1477 *
1478 * Locking: tty_mutex is taken to protect current->signal->tty
1479 * BKL is taken for hysterical raisins
1480 * Tasklist lock is taken (under tty_mutex) to walk process
1481 * lists for the session.
1178 */ 1482 */
1483
1179void disassociate_ctty(int on_exit) 1484void disassociate_ctty(int on_exit)
1180{ 1485{
1181 struct tty_struct *tty; 1486 struct tty_struct *tty;
@@ -1222,6 +1527,25 @@ void disassociate_ctty(int on_exit)
1222 unlock_kernel(); 1527 unlock_kernel();
1223} 1528}
1224 1529
1530
1531/**
 1532 * stop_tty - propagate flow control
1533 * @tty: tty to stop
1534 *
1535 * Perform flow control to the driver. For PTY/TTY pairs we
 1536 * must also propagate the TIOCPKT status. May be called
1537 * on an already stopped device and will not re-call the driver
1538 * method.
1539 *
1540 * This functionality is used by both the line disciplines for
1541 * halting incoming flow and by the driver. It may therefore be
1542 * called from any context, may be under the tty atomic_write_lock
1543 * but not always.
1544 *
1545 * Locking:
1546 * Broken. Relies on BKL which is unsafe here.
1547 */
1548
1225void stop_tty(struct tty_struct *tty) 1549void stop_tty(struct tty_struct *tty)
1226{ 1550{
1227 if (tty->stopped) 1551 if (tty->stopped)
@@ -1238,6 +1562,19 @@ void stop_tty(struct tty_struct *tty)
1238 1562
1239EXPORT_SYMBOL(stop_tty); 1563EXPORT_SYMBOL(stop_tty);
1240 1564
1565/**
 1566 * start_tty - propagate flow control
1567 * @tty: tty to start
1568 *
1569 * Start a tty that has been stopped if at all possible. Perform
 1570 * any necessary wakeups and propagate the TIOCPKT status. If this
 1571 * tty was previously stopped and is being started then the
1572 * driver start method is invoked and the line discipline woken.
1573 *
1574 * Locking:
1575 * Broken. Relies on BKL which is unsafe here.
1576 */
1577
1241void start_tty(struct tty_struct *tty) 1578void start_tty(struct tty_struct *tty)
1242{ 1579{
1243 if (!tty->stopped || tty->flow_stopped) 1580 if (!tty->stopped || tty->flow_stopped)
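Both helpers are idempotent in the sense described above: stop_tty() on a stopped tty and start_tty() on a running one return without calling the driver. A hedged sketch of in-band flow control in a driver receive path (real drivers normally leave this to the N_TTY discipline; shown only to illustrate the pairing):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void sample_rx_char(struct tty_struct *tty, unsigned char ch)
{
	if (I_IXON(tty) && ch == STOP_CHAR(tty))
		stop_tty(tty);		/* hold further transmission */
	else if (I_IXON(tty) && ch == START_CHAR(tty))
		start_tty(tty);		/* resume and wake up writers */
	else
		tty_insert_flip_char(tty, ch, TTY_NORMAL);
}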
@@ -1258,6 +1595,23 @@ void start_tty(struct tty_struct *tty)
1258 1595
1259EXPORT_SYMBOL(start_tty); 1596EXPORT_SYMBOL(start_tty);
1260 1597
1598/**
1599 * tty_read - read method for tty device files
1600 * @file: pointer to tty file
1601 * @buf: user buffer
1602 * @count: size of user buffer
1603 * @ppos: unused
1604 *
1605 * Perform the read system call function on this terminal device. Checks
1606 * for hung up devices before calling the line discipline method.
1607 *
1608 * Locking:
1609 * Locks the line discipline internally while needed
1610 * For historical reasons the line discipline read method is
1611 * invoked under the BKL. This will go away in time so do not rely on it
1612 * in new code. Multiple read calls may be outstanding in parallel.
1613 */
1614
1261static ssize_t tty_read(struct file * file, char __user * buf, size_t count, 1615static ssize_t tty_read(struct file * file, char __user * buf, size_t count,
1262 loff_t *ppos) 1616 loff_t *ppos)
1263{ 1617{
@@ -1302,6 +1656,7 @@ static inline ssize_t do_tty_write(
1302 ssize_t ret = 0, written = 0; 1656 ssize_t ret = 0, written = 0;
1303 unsigned int chunk; 1657 unsigned int chunk;
1304 1658
1659 /* FIXME: O_NDELAY ... */
1305 if (mutex_lock_interruptible(&tty->atomic_write_lock)) { 1660 if (mutex_lock_interruptible(&tty->atomic_write_lock)) {
1306 return -ERESTARTSYS; 1661 return -ERESTARTSYS;
1307 } 1662 }
@@ -1318,6 +1673,9 @@ static inline ssize_t do_tty_write(
1318 * layer has problems with bigger chunks. It will 1673 * layer has problems with bigger chunks. It will
1319 * claim to be able to handle more characters than 1674 * claim to be able to handle more characters than
1320 * it actually does. 1675 * it actually does.
1676 *
1677 * FIXME: This can probably go away now except that 64K chunks
1678 * are too likely to fail unless switched to vmalloc...
1321 */ 1679 */
1322 chunk = 2048; 1680 chunk = 2048;
1323 if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags)) 1681 if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
@@ -1375,6 +1733,24 @@ static inline ssize_t do_tty_write(
1375} 1733}
1376 1734
1377 1735
1736/**
1737 * tty_write - write method for tty device file
1738 * @file: tty file pointer
1739 * @buf: user data to write
1740 * @count: bytes to write
1741 * @ppos: unused
1742 *
1743 * Write data to a tty device via the line discipline.
1744 *
1745 * Locking:
1746 * Locks the line discipline as required
1747 * Writes to the tty driver are serialized by the atomic_write_lock
1748 * and are then processed in chunks to the device. The line discipline
 1749 * write method will not be invoked in parallel for each device.
1750 * The line discipline write method is called under the big
1751 * kernel lock for historical reasons. New code should not rely on this.
1752 */
1753
1378static ssize_t tty_write(struct file * file, const char __user * buf, size_t count, 1754static ssize_t tty_write(struct file * file, const char __user * buf, size_t count,
1379 loff_t *ppos) 1755 loff_t *ppos)
1380{ 1756{
@@ -1422,7 +1798,18 @@ ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t
1422 1798
1423static char ptychar[] = "pqrstuvwxyzabcde"; 1799static char ptychar[] = "pqrstuvwxyzabcde";
1424 1800
1425static inline void pty_line_name(struct tty_driver *driver, int index, char *p) 1801/**
1802 * pty_line_name - generate name for a pty
1803 * @driver: the tty driver in use
1804 * @index: the minor number
1805 * @p: output buffer of at least 6 bytes
1806 *
1807 * Generate a name from a driver reference and write it to the output
1808 * buffer.
1809 *
1810 * Locking: None
1811 */
1812static void pty_line_name(struct tty_driver *driver, int index, char *p)
1426{ 1813{
1427 int i = index + driver->name_base; 1814 int i = index + driver->name_base;
1428 /* ->name is initialized to "ttyp", but "tty" is expected */ 1815 /* ->name is initialized to "ttyp", but "tty" is expected */
@@ -1431,24 +1818,53 @@ static inline void pty_line_name(struct tty_driver *driver, int index, char *p)
1431 ptychar[i >> 4 & 0xf], i & 0xf); 1818 ptychar[i >> 4 & 0xf], i & 0xf);
1432} 1819}
1433 1820
1434static inline void tty_line_name(struct tty_driver *driver, int index, char *p) 1821/**
 1822 * tty_line_name - generate name for a tty
1823 * @driver: the tty driver in use
1824 * @index: the minor number
1825 * @p: output buffer of at least 7 bytes
1826 *
1827 * Generate a name from a driver reference and write it to the output
1828 * buffer.
1829 *
1830 * Locking: None
1831 */
1832static void tty_line_name(struct tty_driver *driver, int index, char *p)
1435{ 1833{
1436 sprintf(p, "%s%d", driver->name, index + driver->name_base); 1834 sprintf(p, "%s%d", driver->name, index + driver->name_base);
1437} 1835}
1438 1836
1439/* 1837/**
1838 * init_dev - initialise a tty device
1839 * @driver: tty driver we are opening a device on
1840 * @idx: device index
1841 * @tty: returned tty structure
1842 *
1843 * Prepare a tty device. This may not be a "new" clean device but
1844 * could also be an active device. The pty drivers require special
1845 * handling because of this.
1846 *
1847 * Locking:
1848 * The function is called under the tty_mutex, which
1849 * protects us from the tty struct or driver itself going away.
1850 *
1851 * On exit the tty device has the line discipline attached and
1852 * a reference count of 1. If a pair was created for pty/tty use
1853 * and the other was a pty master then it too has a reference count of 1.
1854 *
1440 * WSH 06/09/97: Rewritten to remove races and properly clean up after a 1855 * WSH 06/09/97: Rewritten to remove races and properly clean up after a
1441 * failed open. The new code protects the open with a mutex, so it's 1856 * failed open. The new code protects the open with a mutex, so it's
1442 * really quite straightforward. The mutex locking can probably be 1857 * really quite straightforward. The mutex locking can probably be
1443 * relaxed for the (most common) case of reopening a tty. 1858 * relaxed for the (most common) case of reopening a tty.
1444 */ 1859 */
1860
1445static int init_dev(struct tty_driver *driver, int idx, 1861static int init_dev(struct tty_driver *driver, int idx,
1446 struct tty_struct **ret_tty) 1862 struct tty_struct **ret_tty)
1447{ 1863{
1448 struct tty_struct *tty, *o_tty; 1864 struct tty_struct *tty, *o_tty;
1449 struct termios *tp, **tp_loc, *o_tp, **o_tp_loc; 1865 struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
1450 struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc; 1866 struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
1451 int retval=0; 1867 int retval = 0;
1452 1868
1453 /* check whether we're reopening an existing tty */ 1869 /* check whether we're reopening an existing tty */
1454 if (driver->flags & TTY_DRIVER_DEVPTS_MEM) { 1870 if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
@@ -1662,10 +2078,20 @@ release_mem_out:
1662 goto end_init; 2078 goto end_init;
1663} 2079}
1664 2080
1665/* 2081/**
1666 * Releases memory associated with a tty structure, and clears out the 2082 * release_mem - release tty structure memory
1667 * driver table slots. 2083 *
2084 * Releases memory associated with a tty structure, and clears out the
2085 * driver table slots. This function is called when a device is no longer
2086 * in use. It also gets called when setup of a device fails.
2087 *
2088 * Locking:
2089 * tty_mutex - sometimes only
2090 * takes the file list lock internally when working on the list
2091 * of ttys that the driver keeps.
2092 * FIXME: should we require tty_mutex is held here ??
1668 */ 2093 */
2094
1669static void release_mem(struct tty_struct *tty, int idx) 2095static void release_mem(struct tty_struct *tty, int idx)
1670{ 2096{
1671 struct tty_struct *o_tty; 2097 struct tty_struct *o_tty;
@@ -2006,18 +2432,27 @@ static void release_dev(struct file * filp)
2006 2432
2007} 2433}
2008 2434
2009/* 2435/**
2010 * tty_open and tty_release keep up the tty count that contains the 2436 * tty_open - open a tty device
2011 * number of opens done on a tty. We cannot use the inode-count, as 2437 * @inode: inode of device file
2012 * different inodes might point to the same tty. 2438 * @filp: file pointer to tty
2439 *
2440 * tty_open and tty_release keep up the tty count that contains the
2441 * number of opens done on a tty. We cannot use the inode-count, as
2442 * different inodes might point to the same tty.
2013 * 2443 *
2014 * Open-counting is needed for pty masters, as well as for keeping 2444 * Open-counting is needed for pty masters, as well as for keeping
2015 * track of serial lines: DTR is dropped when the last close happens. 2445 * track of serial lines: DTR is dropped when the last close happens.
2016 * (This is not done solely through tty->count, now. - Ted 1/27/92) 2446 * (This is not done solely through tty->count, now. - Ted 1/27/92)
2017 * 2447 *
2018 * The termios state of a pty is reset on first open so that 2448 * The termios state of a pty is reset on first open so that
2019 * settings don't persist across reuse. 2449 * settings don't persist across reuse.
2450 *
2451 * Locking: tty_mutex protects current->signal->tty, get_tty_driver and
2452 * init_dev work. tty->count should protect the rest.
2453 * task_lock is held to update task details for sessions
2020 */ 2454 */
2455
2021static int tty_open(struct inode * inode, struct file * filp) 2456static int tty_open(struct inode * inode, struct file * filp)
2022{ 2457{
2023 struct tty_struct *tty; 2458 struct tty_struct *tty;
@@ -2132,6 +2567,18 @@ got_driver:
2132} 2567}
2133 2568
2134#ifdef CONFIG_UNIX98_PTYS 2569#ifdef CONFIG_UNIX98_PTYS
2570/**
2571 * ptmx_open - open a unix 98 pty master
2572 * @inode: inode of device file
2573 * @filp: file pointer to tty
2574 *
2575 * Allocate a unix98 pty master device from the ptmx driver.
2576 *
 2577 * Locking: tty_mutex protects the init_dev work. tty->count should
 2578 * protect the rest.
2579 * allocated_ptys_lock handles the list of free pty numbers
2580 */
2581
2135static int ptmx_open(struct inode * inode, struct file * filp) 2582static int ptmx_open(struct inode * inode, struct file * filp)
2136{ 2583{
2137 struct tty_struct *tty; 2584 struct tty_struct *tty;
@@ -2191,6 +2638,18 @@ out:
2191} 2638}
2192#endif 2639#endif
2193 2640
2641/**
2642 * tty_release - vfs callback for close
2643 * @inode: inode of tty
2644 * @filp: file pointer for handle to tty
2645 *
2646 * Called the last time each file handle is closed that references
2647 * this tty. There may however be several such references.
2648 *
2649 * Locking:
 2650 * Takes the BKL. See release_dev
2651 */
2652
2194static int tty_release(struct inode * inode, struct file * filp) 2653static int tty_release(struct inode * inode, struct file * filp)
2195{ 2654{
2196 lock_kernel(); 2655 lock_kernel();
@@ -2199,7 +2658,18 @@ static int tty_release(struct inode * inode, struct file * filp)
2199 return 0; 2658 return 0;
2200} 2659}
2201 2660
2202/* No kernel lock held - fine */ 2661/**
2662 * tty_poll - check tty status
2663 * @filp: file being polled
2664 * @wait: poll wait structures to update
2665 *
2666 * Call the line discipline polling method to obtain the poll
2667 * status of the device.
2668 *
 2669 * Locking: locks the called line discipline but the ldisc poll method
2670 * may be re-entered freely by other callers.
2671 */
2672
2203static unsigned int tty_poll(struct file * filp, poll_table * wait) 2673static unsigned int tty_poll(struct file * filp, poll_table * wait)
2204{ 2674{
2205 struct tty_struct * tty; 2675 struct tty_struct * tty;
@@ -2243,6 +2713,21 @@ static int tty_fasync(int fd, struct file * filp, int on)
2243 return 0; 2713 return 0;
2244} 2714}
2245 2715
2716/**
2717 * tiocsti - fake input character
2718 * @tty: tty to fake input into
2719 * @p: pointer to character
2720 *
 2721 * Fake input to a tty device. Does the necessary locking and
2722 * input management.
2723 *
2724 * FIXME: does not honour flow control ??
2725 *
2726 * Locking:
2727 * Called functions take tty_ldisc_lock
2728 * current->signal->tty check is safe without locks
2729 */
2730
2246static int tiocsti(struct tty_struct *tty, char __user *p) 2731static int tiocsti(struct tty_struct *tty, char __user *p)
2247{ 2732{
2248 char ch, mbz = 0; 2733 char ch, mbz = 0;
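From user space the operation documented above is reached with the TIOCSTI ioctl; a small, hypothetical demonstration (the injected character is arbitrary):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	char ch = '!';

	/* Push one byte into our controlling terminal's input queue. */
	if (ioctl(STDIN_FILENO, TIOCSTI, &ch) == -1) {
		perror("TIOCSTI");
		return 1;
	}
	return 0;
}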
@@ -2258,6 +2743,18 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2258 return 0; 2743 return 0;
2259} 2744}
2260 2745
2746/**
2747 * tiocgwinsz - implement window query ioctl
 2748 * @tty: tty
2749 * @arg: user buffer for result
2750 *
2751 * Copies the kernel idea of the window size into the user buffer. No
2752 * locking is done.
2753 *
 2754 * FIXME: Returning random values racing a window size set is wrong;
 2755 * we should lock here against that
2756 */
2757
2261static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) 2758static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
2262{ 2759{
2263 if (copy_to_user(arg, &tty->winsize, sizeof(*arg))) 2760 if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
@@ -2265,6 +2762,24 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
2265 return 0; 2762 return 0;
2266} 2763}
2267 2764
2765/**
2766 * tiocswinsz - implement window size set ioctl
 2767 * @tty: tty
2768 * @arg: user buffer for result
2769 *
2770 * Copies the user idea of the window size to the kernel. Traditionally
2771 * this is just advisory information but for the Linux console it
2772 * actually has driver level meaning and triggers a VC resize.
2773 *
2774 * Locking:
2775 * The console_sem is used to ensure we do not try and resize
2776 * the console twice at once.
2777 * FIXME: Two racing size sets may leave the console and kernel
 2778 * parameters disagreeing. Is this exploitable?
 2779 * FIXME: Random values racing a window size get is wrong;
 2780 * we should lock here against that
2781 */
2782
2268static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, 2783static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2269 struct winsize __user * arg) 2784 struct winsize __user * arg)
2270{ 2785{
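The window-size pair is easiest to see from user space; a hypothetical round trip that reads the size with TIOCGWINSZ and writes the same values back with TIOCSWINSZ (a no-op resize):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;

	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == -1) {
		perror("TIOCGWINSZ");
		return 1;
	}
	printf("%d rows x %d cols\n", ws.ws_row, ws.ws_col);

	/* Writing the same values back changes nothing and raises no SIGWINCH. */
	if (ioctl(STDIN_FILENO, TIOCSWINSZ, &ws) == -1) {
		perror("TIOCSWINSZ");
		return 1;
	}
	return 0;
}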
@@ -2294,6 +2809,15 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2294 return 0; 2809 return 0;
2295} 2810}
2296 2811
2812/**
2813 * tioccons - allow admin to move logical console
2814 * @file: the file to become console
2815 *
 2816 * Allow the administrator to move the redirected console device
2817 *
2818 * Locking: uses redirect_lock to guard the redirect information
2819 */
2820
2297static int tioccons(struct file *file) 2821static int tioccons(struct file *file)
2298{ 2822{
2299 if (!capable(CAP_SYS_ADMIN)) 2823 if (!capable(CAP_SYS_ADMIN))
@@ -2319,6 +2843,17 @@ static int tioccons(struct file *file)
2319 return 0; 2843 return 0;
2320} 2844}
2321 2845
2846/**
2847 * fionbio - non blocking ioctl
2848 * @file: file to set blocking value
2849 * @p: user parameter
2850 *
2851 * Historical tty interfaces had a blocking control ioctl before
2852 * the generic functionality existed. This piece of history is preserved
 2853 * in the expected tty API of POSIX operating systems.
2854 *
 2855 * Locking: none, the open file handle ensures it won't go away.
2856 */
2322 2857
2323static int fionbio(struct file *file, int __user *p) 2858static int fionbio(struct file *file, int __user *p)
2324{ 2859{
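A hypothetical user-space illustration of the historical interface described above, toggling non-blocking mode with FIONBIO rather than fcntl(F_SETFL, O_NONBLOCK):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int on = 1;

	if (ioctl(STDIN_FILENO, FIONBIO, &on) == -1) {
		perror("FIONBIO");
		return 1;
	}
	/* ... reads now fail with EAGAIN instead of blocking ... */

	on = 0;
	ioctl(STDIN_FILENO, FIONBIO, &on);	/* restore blocking mode */
	return 0;
}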
@@ -2334,6 +2869,23 @@ static int fionbio(struct file *file, int __user *p)
2334 return 0; 2869 return 0;
2335} 2870}
2336 2871
2872/**
2873 * tiocsctty - set controlling tty
2874 * @tty: tty structure
2875 * @arg: user argument
2876 *
2877 * This ioctl is used to manage job control. It permits a session
2878 * leader to set this tty as the controlling tty for the session.
2879 *
2880 * Locking:
2881 * Takes tasklist lock internally to walk sessions
2882 * Takes task_lock() when updating signal->tty
2883 *
2884 * FIXME: tty_mutex is needed to protect signal->tty references.
2885 * FIXME: why task_lock on the signal->tty reference ??
2886 *
2887 */
2888
2337static int tiocsctty(struct tty_struct *tty, int arg) 2889static int tiocsctty(struct tty_struct *tty, int arg)
2338{ 2890{
2339 struct task_struct *p; 2891 struct task_struct *p;
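The session-leader requirement above is why acquiring a controlling terminal from user space is always a setsid()-then-ioctl sequence; a hedged sketch follows, where the device path is illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd;

	if (setsid() == -1)		/* must not already be a session leader */
		perror("setsid");

	fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
	if (fd == -1) {
		perror("open");
		return 1;
	}
	/* An argument of 1 would steal the tty from another session
	   (needs CAP_SYS_ADMIN); 0 is the normal case. */
	if (ioctl(fd, TIOCSCTTY, 0) == -1) {
		perror("TIOCSCTTY");
		return 1;
	}
	return 0;
}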
@@ -2374,6 +2926,18 @@ static int tiocsctty(struct tty_struct *tty, int arg)
2374 return 0; 2926 return 0;
2375} 2927}
2376 2928
2929/**
2930 * tiocgpgrp - get process group
2931 * @tty: tty passed by user
 2932 * @real_tty: tty side of the tty passed by the user if a pty else the tty
2933 * @p: returned pid
2934 *
2935 * Obtain the process group of the tty. If there is no process group
2936 * return an error.
2937 *
2938 * Locking: none. Reference to ->signal->tty is safe.
2939 */
2940
2377static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 2941static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2378{ 2942{
2379 /* 2943 /*
@@ -2385,6 +2949,20 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
2385 return put_user(real_tty->pgrp, p); 2949 return put_user(real_tty->pgrp, p);
2386} 2950}
2387 2951
2952/**
2953 * tiocspgrp - attempt to set process group
2954 * @tty: tty passed by user
2955 * @real_tty: tty side device matching tty passed by user
2956 * @p: pid pointer
2957 *
 2958 * Set the process group of the tty to the process group passed. Only
2959 * permitted where the tty session is our session.
2960 *
2961 * Locking: None
2962 *
2963 * FIXME: current->signal->tty referencing is unsafe.
2964 */
2965
2388static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 2966static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2389{ 2967{
2390 pid_t pgrp; 2968 pid_t pgrp;
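tiocgpgrp()/tiocspgrp() back the libc helpers tcgetpgrp() and tcsetpgrp(); the raw ioctls can also be issued directly, as in this hypothetical snippet:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	pid_t pgrp;

	if (ioctl(STDIN_FILENO, TIOCGPGRP, &pgrp) == -1) {
		perror("TIOCGPGRP");
		return 1;
	}
	printf("foreground process group: %d\n", (int)pgrp);

	/* Hand the terminal to our own process group (usually a no-op). */
	pgrp = getpgrp();
	if (ioctl(STDIN_FILENO, TIOCSPGRP, &pgrp) == -1) {
		perror("TIOCSPGRP");
		return 1;
	}
	return 0;
}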
@@ -2408,6 +2986,18 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
2408 return 0; 2986 return 0;
2409} 2987}
2410 2988
2989/**
2990 * tiocgsid - get session id
2991 * @tty: tty passed by user
 2992 * @real_tty: tty side of the tty passed by the user if a pty else the tty
2993 * @p: pointer to returned session id
2994 *
2995 * Obtain the session id of the tty. If there is no session
2996 * return an error.
2997 *
2998 * Locking: none. Reference to ->signal->tty is safe.
2999 */
3000
2411static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) 3001static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
2412{ 3002{
2413 /* 3003 /*
@@ -2421,6 +3011,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
2421 return put_user(real_tty->session, p); 3011 return put_user(real_tty->session, p);
2422} 3012}
2423 3013
3014/**
3015 * tiocsetd - set line discipline
3016 * @tty: tty device
3017 * @p: pointer to user data
3018 *
3019 * Set the line discipline according to user request.
3020 *
3021 * Locking: see tty_set_ldisc, this function is just a helper
3022 */
3023
2424static int tiocsetd(struct tty_struct *tty, int __user *p) 3024static int tiocsetd(struct tty_struct *tty, int __user *p)
2425{ 3025{
2426 int ldisc; 3026 int ldisc;
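Because tiocsetd() is only a helper around tty_set_ldisc(), the whole mechanism is visible from user space through TIOCGETD/TIOCSETD; a hypothetical example that reads the current discipline back and re-sets it:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int ldisc;

	if (ioctl(STDIN_FILENO, TIOCGETD, &ldisc) == -1) {
		perror("TIOCGETD");
		return 1;
	}
	printf("current line discipline: %d\n", ldisc);	/* 0 is N_TTY */

	/* Setting the same discipline again exercises tty_set_ldisc(). */
	if (ioctl(STDIN_FILENO, TIOCSETD, &ldisc) == -1) {
		perror("TIOCSETD");
		return 1;
	}
	return 0;
}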
@@ -2430,6 +3030,21 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
2430 return tty_set_ldisc(tty, ldisc); 3030 return tty_set_ldisc(tty, ldisc);
2431} 3031}
2432 3032
3033/**
 3034 * send_break - perform a timed break
3035 * @tty: device to break on
 3036 * @duration: timeout in milliseconds
3037 *
3038 * Perform a timed break on hardware that lacks its own driver level
3039 * timed break functionality.
3040 *
3041 * Locking:
3042 * None
3043 *
3044 * FIXME:
3045 * What if two overlap
3046 */
3047
2433static int send_break(struct tty_struct *tty, unsigned int duration) 3048static int send_break(struct tty_struct *tty, unsigned int duration)
2434{ 3049{
2435 tty->driver->break_ctl(tty, -1); 3050 tty->driver->break_ctl(tty, -1);
@@ -2442,8 +3057,19 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
2442 return 0; 3057 return 0;
2443} 3058}
2444 3059
2445static int 3060/**
2446tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p) 3061 * tiocmget - get modem status
3062 * @tty: tty device
3063 * @file: user file pointer
3064 * @p: pointer to result
3065 *
3066 * Obtain the modem status bits from the tty driver if the feature
3067 * is supported. Return -EINVAL if it is not available.
3068 *
3069 * Locking: none (up to the driver)
3070 */
3071
3072static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
2447{ 3073{
2448 int retval = -EINVAL; 3074 int retval = -EINVAL;
2449 3075
@@ -2456,8 +3082,20 @@ tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
2456 return retval; 3082 return retval;
2457} 3083}
2458 3084
2459static int 3085/**
2460tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd, 3086 * tiocmset - set modem status
3087 * @tty: tty device
3088 * @file: user file pointer
3089 * @cmd: command - clear bits, set bits or set all
3090 * @p: pointer to desired bits
3091 *
3092 * Set the modem status bits from the tty driver if the feature
3093 * is supported. Return -EINVAL if it is not available.
3094 *
3095 * Locking: none (up to the driver)
3096 */
3097
3098static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
2461 unsigned __user *p) 3099 unsigned __user *p)
2462{ 3100{
2463 int retval = -EINVAL; 3101 int retval = -EINVAL;
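Whether the feature is available depends entirely on the driver, as the comments above note; serial ports generally support it. A hypothetical user-space sketch that pulses DTR (the device path is illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd, bits;

	fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY | O_NDELAY);
	if (fd == -1) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCMGET, &bits) == -1) {
		perror("TIOCMGET");
		return 1;
	}
	bits &= ~TIOCM_DTR;
	ioctl(fd, TIOCMSET, &bits);	/* drop DTR ... */
	sleep(1);
	bits |= TIOCM_DTR;
	ioctl(fd, TIOCMSET, &bits);	/* ... and raise it again */
	close(fd);
	return 0;
}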
@@ -2573,6 +3211,7 @@ int tty_ioctl(struct inode * inode, struct file * file,
2573 clear_bit(TTY_EXCLUSIVE, &tty->flags); 3211 clear_bit(TTY_EXCLUSIVE, &tty->flags);
2574 return 0; 3212 return 0;
2575 case TIOCNOTTY: 3213 case TIOCNOTTY:
 3214 /* FIXME: task lock or tty_mutex? */
2576 if (current->signal->tty != tty) 3215 if (current->signal->tty != tty)
2577 return -ENOTTY; 3216 return -ENOTTY;
2578 if (current->signal->leader) 3217 if (current->signal->leader)
@@ -2753,9 +3392,16 @@ void do_SAK(struct tty_struct *tty)
2753 3392
2754EXPORT_SYMBOL(do_SAK); 3393EXPORT_SYMBOL(do_SAK);
2755 3394
2756/* 3395/**
 2757 * This routine is called out of the software interrupt to flush data 3396 * flush_to_ldisc - flush data to the line discipline
2758 * from the buffer chain to the line discipline. 3397 * @private_: tty structure passed from work queue.
3398 *
3399 * This routine is called out of the software interrupt to flush data
3400 * from the buffer chain to the line discipline.
3401 *
3402 * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
3403 * while invoking the line discipline receive_buf method. The
3404 * receive_buf method is single threaded for each tty instance.
2759 */ 3405 */
2760 3406
2761static void flush_to_ldisc(void *private_) 3407static void flush_to_ldisc(void *private_)
@@ -2831,6 +3477,8 @@ static int n_baud_table = ARRAY_SIZE(baud_table);
2831 * Convert termios baud rate data into a speed. This should be called 3477 * Convert termios baud rate data into a speed. This should be called
2832 * with the termios lock held if this termios is a terminal termios 3478 * with the termios lock held if this termios is a terminal termios
2833 * structure. May change the termios data. 3479 * structure. May change the termios data.
3480 *
3481 * Locking: none
2834 */ 3482 */
2835 3483
2836int tty_termios_baud_rate(struct termios *termios) 3484int tty_termios_baud_rate(struct termios *termios)
@@ -2859,6 +3507,8 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
2859 * Returns the baud rate as an integer for this terminal. The 3507 * Returns the baud rate as an integer for this terminal. The
2860 * termios lock must be held by the caller and the terminal bit 3508 * termios lock must be held by the caller and the terminal bit
2861 * flags may be updated. 3509 * flags may be updated.
3510 *
3511 * Locking: none
2862 */ 3512 */
2863 3513
2864int tty_get_baud_rate(struct tty_struct *tty) 3514int tty_get_baud_rate(struct tty_struct *tty)
@@ -2888,6 +3538,8 @@ EXPORT_SYMBOL(tty_get_baud_rate);
2888 * 3538 *
2889 * In the event of the queue being busy for flipping the work will be 3539 * In the event of the queue being busy for flipping the work will be
2890 * held off and retried later. 3540 * held off and retried later.
3541 *
3542 * Locking: tty buffer lock. Driver locks in low latency mode.
2891 */ 3543 */
2892 3544
2893void tty_flip_buffer_push(struct tty_struct *tty) 3545void tty_flip_buffer_push(struct tty_struct *tty)
@@ -2907,9 +3559,16 @@ void tty_flip_buffer_push(struct tty_struct *tty)
2907EXPORT_SYMBOL(tty_flip_buffer_push); 3559EXPORT_SYMBOL(tty_flip_buffer_push);
2908 3560
2909 3561
2910/* 3562/**
 2911 * This subroutine initializes a tty structure. 3563 * initialize_tty_struct - initialize a tty structure
3564 * @tty: tty to initialize
3565 *
3566 * This subroutine initializes a tty structure that has been newly
3567 * allocated.
3568 *
3569 * Locking: none - tty in question must not be exposed at this point
2912 */ 3570 */
3571
2913static void initialize_tty_struct(struct tty_struct *tty) 3572static void initialize_tty_struct(struct tty_struct *tty)
2914{ 3573{
2915 memset(tty, 0, sizeof(struct tty_struct)); 3574 memset(tty, 0, sizeof(struct tty_struct));
@@ -2935,6 +3594,7 @@ static void initialize_tty_struct(struct tty_struct *tty)
2935/* 3594/*
2936 * The default put_char routine if the driver did not define one. 3595 * The default put_char routine if the driver did not define one.
2937 */ 3596 */
3597
2938static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) 3598static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
2939{ 3599{
2940 tty->driver->write(tty, &ch, 1); 3600 tty->driver->write(tty, &ch, 1);
@@ -2943,19 +3603,23 @@ static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
2943static struct class *tty_class; 3603static struct class *tty_class;
2944 3604
2945/** 3605/**
2946 * tty_register_device - register a tty device 3606 * tty_register_device - register a tty device
2947 * @driver: the tty driver that describes the tty device 3607 * @driver: the tty driver that describes the tty device
2948 * @index: the index in the tty driver for this tty device 3608 * @index: the index in the tty driver for this tty device
2949 * @device: a struct device that is associated with this tty device. 3609 * @device: a struct device that is associated with this tty device.
2950 * This field is optional, if there is no known struct device for this 3610 * This field is optional, if there is no known struct device
2951 * tty device it can be set to NULL safely. 3611 * for this tty device it can be set to NULL safely.
2952 * 3612 *
2953 * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error). 3613 * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error).
2954 * 3614 *
2955 * This call is required to be made to register an individual tty device if 3615 * This call is required to be made to register an individual tty device
2956 * the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If that 3616 * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
2957 * bit is not set, this function should not be called by a tty driver. 3617 * that bit is not set, this function should not be called by a tty
3618 * driver.
3619 *
3620 * Locking: ??
2958 */ 3621 */
3622
2959struct class_device *tty_register_device(struct tty_driver *driver, 3623struct class_device *tty_register_device(struct tty_driver *driver,
2960 unsigned index, struct device *device) 3624 unsigned index, struct device *device)
2961{ 3625{
@@ -2977,13 +3641,16 @@ struct class_device *tty_register_device(struct tty_driver *driver,
2977} 3641}
2978 3642
2979/** 3643/**
2980 * tty_unregister_device - unregister a tty device 3644 * tty_unregister_device - unregister a tty device
2981 * @driver: the tty driver that describes the tty device 3645 * @driver: the tty driver that describes the tty device
2982 * @index: the index in the tty driver for this tty device 3646 * @index: the index in the tty driver for this tty device
2983 * 3647 *
2984 * If a tty device is registered with a call to tty_register_device() then 3648 * If a tty device is registered with a call to tty_register_device() then
2985 * this function must be made when the tty device is gone. 3649 * this function must be called when the tty device is gone.
3650 *
3651 * Locking: ??
2986 */ 3652 */
3653
2987void tty_unregister_device(struct tty_driver *driver, unsigned index) 3654void tty_unregister_device(struct tty_driver *driver, unsigned index)
2988{ 3655{
2989 class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index); 3656 class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
@@ -3094,7 +3761,6 @@ int tty_register_driver(struct tty_driver *driver)
3094 driver->cdev.owner = driver->owner; 3761 driver->cdev.owner = driver->owner;
3095 error = cdev_add(&driver->cdev, dev, driver->num); 3762 error = cdev_add(&driver->cdev, dev, driver->num);
3096 if (error) { 3763 if (error) {
3097 cdev_del(&driver->cdev);
3098 unregister_chrdev_region(dev, driver->num); 3764 unregister_chrdev_region(dev, driver->num);
3099 driver->ttys = NULL; 3765 driver->ttys = NULL;
3100 driver->termios = driver->termios_locked = NULL; 3766 driver->termios = driver->termios_locked = NULL;
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index f19cf9d7792d..4ad47d321bd4 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -36,6 +36,18 @@
36#define TERMIOS_WAIT 2 36#define TERMIOS_WAIT 2
37#define TERMIOS_TERMIO 4 37#define TERMIOS_TERMIO 4
38 38
39
40/**
41 * tty_wait_until_sent - wait for I/O to finish
42 * @tty: tty we are waiting for
43 * @timeout: how long we will wait
44 *
45 * Wait for characters pending in a tty driver to hit the wire, or
46 * for a timeout to occur (eg due to flow control)
47 *
48 * Locking: none
49 */
50
39void tty_wait_until_sent(struct tty_struct * tty, long timeout) 51void tty_wait_until_sent(struct tty_struct * tty, long timeout)
40{ 52{
41 DECLARE_WAITQUEUE(wait, current); 53 DECLARE_WAITQUEUE(wait, current);
@@ -94,6 +106,18 @@ static void unset_locked_termios(struct termios *termios,
94 old->c_cc[i] : termios->c_cc[i]; 106 old->c_cc[i] : termios->c_cc[i];
95} 107}
96 108
109/**
110 * change_termios - update termios values
111 * @tty: tty to update
112 * @new_termios: desired new value
113 *
114 * Perform updates to the termios values set on this terminal. There
 115 * is a bit of a layering violation here with n_tty in terms of the
116 * internal knowledge of this function.
117 *
118 * Locking: termios_sem
119 */
120
97static void change_termios(struct tty_struct * tty, struct termios * new_termios) 121static void change_termios(struct tty_struct * tty, struct termios * new_termios)
98{ 122{
99 int canon_change; 123 int canon_change;
@@ -155,6 +179,19 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios
155 up(&tty->termios_sem); 179 up(&tty->termios_sem);
156} 180}
157 181
182/**
183 * set_termios - set termios values for a tty
184 * @tty: terminal device
185 * @arg: user data
186 * @opt: option information
187 *
 188 * Helper function to prepare termios data and run necessary other
189 * functions before using change_termios to do the actual changes.
190 *
191 * Locking:
192 * Called functions take ldisc and termios_sem locks
193 */
194
158static int set_termios(struct tty_struct * tty, void __user *arg, int opt) 195static int set_termios(struct tty_struct * tty, void __user *arg, int opt)
159{ 196{
160 struct termios tmp_termios; 197 struct termios tmp_termios;
@@ -284,6 +321,17 @@ static void set_sgflags(struct termios * termios, int flags)
284 } 321 }
285} 322}
286 323
324/**
325 * set_sgttyb - set legacy terminal values
326 * @tty: tty structure
327 * @sgttyb: pointer to old style terminal structure
328 *
329 * Updates a terminal from the legacy BSD style terminal information
330 * structure.
331 *
332 * Locking: termios_sem
333 */
334
287static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) 335static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb)
288{ 336{
289 int retval; 337 int retval;
@@ -369,9 +417,16 @@ static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars)
369} 417}
370#endif 418#endif
371 419
372/* 420/**
373 * Send a high priority character to the tty. 421 * send_prio_char - send priority character
422 *
423 * Send a high priority character to the tty even if stopped
424 *
425 * Locking: none
426 *
427 * FIXME: overlapping calls with start/stop tty lose state of tty
374 */ 428 */
429
375static void send_prio_char(struct tty_struct *tty, char ch) 430static void send_prio_char(struct tty_struct *tty, char ch)
376{ 431{
377 int was_stopped = tty->stopped; 432 int was_stopped = tty->stopped;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index eccffaf26faa..a5628a8b6620 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -1011,6 +1011,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
1011 return -EPERM; 1011 return -EPERM;
1012 vt_dont_switch = 0; 1012 vt_dont_switch = 0;
1013 return 0; 1013 return 0;
1014 case VT_GETHIFONTMASK:
1015 return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg);
1014 default: 1016 default:
1015 return -ENOIOCTLCMD; 1017 return -ENOIOCTLCMD;
1016 } 1018 }
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index cc15c4f2e9ec..35ad1b032726 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -26,6 +26,7 @@
26#include <linux/jiffies.h> 26#include <linux/jiffies.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/err.h> 28#include <linux/err.h>
29#include <linux/delay.h>
29#include <linux/platform_device.h> 30#include <linux/platform_device.h>
30#include <linux/hwmon.h> 31#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h> 32#include <linux/hwmon-sysfs.h>
@@ -64,17 +65,17 @@
64#define ABIT_UGURU_IN_SENSOR 0 65#define ABIT_UGURU_IN_SENSOR 0
65#define ABIT_UGURU_TEMP_SENSOR 1 66#define ABIT_UGURU_TEMP_SENSOR 1
66#define ABIT_UGURU_NC 2 67#define ABIT_UGURU_NC 2
67/* Timeouts / Retries, if these turn out to need a lot of fiddling we could 68/* In many cases we need to wait for the uGuru to reach a certain status, most
68 convert them to params. */ 69 of the time it will reach this status within 30 - 90 ISA reads, and thus we
69/* 250 was determined by trial and error, 200 works most of the time, but not 70 can best busy wait. This define gives the total amount of reads to try. */
70 always. I assume this is cpu-speed independent, since the ISA-bus and not 71#define ABIT_UGURU_WAIT_TIMEOUT 125
71 the CPU should be the bottleneck. Note that 250 sometimes is still not 72/* However sometimes older versions of the uGuru seem to be distracted and they
72 enough (only reported on AN7 mb) this is handled by a higher layer. */ 73 do not respond for a long time. To handle this we sleep before each of the
73#define ABIT_UGURU_WAIT_TIMEOUT 250 74 last ABIT_UGURU_WAIT_TIMEOUT_SLEEP tries. */
75#define ABIT_UGURU_WAIT_TIMEOUT_SLEEP 5
74/* Normally all expected status in abituguru_ready, are reported after the 76/* Normally all expected status in abituguru_ready, are reported after the
75 first read, but sometimes not and we need to poll, 5 polls was not enough 77 first read, but sometimes not and we need to poll. */
76 50 sofar is. */ 78#define ABIT_UGURU_READY_TIMEOUT 5
77#define ABIT_UGURU_READY_TIMEOUT 50
78/* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */ 79/* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */
79#define ABIT_UGURU_MAX_RETRIES 3 80#define ABIT_UGURU_MAX_RETRIES 3
80#define ABIT_UGURU_RETRY_DELAY (HZ/5) 81#define ABIT_UGURU_RETRY_DELAY (HZ/5)
@@ -226,6 +227,10 @@ static int abituguru_wait(struct abituguru_data *data, u8 state)
226 timeout--; 227 timeout--;
227 if (timeout == 0) 228 if (timeout == 0)
228 return -EBUSY; 229 return -EBUSY;
 230 /* sleep a bit before our last few tries, see the comment where
 231 ABIT_UGURU_WAIT_TIMEOUT_SLEEP is defined. */
232 if (timeout <= ABIT_UGURU_WAIT_TIMEOUT_SLEEP)
233 msleep(0);
229 } 234 }
230 return 0; 235 return 0;
231} 236}
@@ -256,6 +261,7 @@ static int abituguru_ready(struct abituguru_data *data)
256 "CMD reg does not hold 0xAC after ready command\n"); 261 "CMD reg does not hold 0xAC after ready command\n");
257 return -EIO; 262 return -EIO;
258 } 263 }
264 msleep(0);
259 } 265 }
260 266
261 /* After this the ABIT_UGURU_DATA port should contain 267 /* After this the ABIT_UGURU_DATA port should contain
@@ -268,6 +274,7 @@ static int abituguru_ready(struct abituguru_data *data)
268 "state != more input after ready command\n"); 274 "state != more input after ready command\n");
269 return -EIO; 275 return -EIO;
270 } 276 }
277 msleep(0);
271 } 278 }
272 279
273 data->uguru_ready = 1; 280 data->uguru_ready = 1;
@@ -331,7 +338,8 @@ static int abituguru_read(struct abituguru_data *data,
331 /* And read the data */ 338 /* And read the data */
332 for (i = 0; i < count; i++) { 339 for (i = 0; i < count; i++) {
333 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { 340 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
334 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for " 341 ABIT_UGURU_DEBUG(retries ? 1 : 3,
342 "timeout exceeded waiting for "
335 "read state (bank: %d, sensor: %d)\n", 343 "read state (bank: %d, sensor: %d)\n",
336 (int)bank_addr, (int)sensor_addr); 344 (int)bank_addr, (int)sensor_addr);
337 break; 345 break;
@@ -350,7 +358,9 @@ static int abituguru_read(struct abituguru_data *data,
350static int abituguru_write(struct abituguru_data *data, 358static int abituguru_write(struct abituguru_data *data,
351 u8 bank_addr, u8 sensor_addr, u8 *buf, int count) 359 u8 bank_addr, u8 sensor_addr, u8 *buf, int count)
352{ 360{
353 int i; 361 /* We use the ready timeout as we have to wait for 0xAC just like the
362 ready function */
363 int i, timeout = ABIT_UGURU_READY_TIMEOUT;
354 364
355 /* Send the address */ 365 /* Send the address */
356 i = abituguru_send_address(data, bank_addr, sensor_addr, 366 i = abituguru_send_address(data, bank_addr, sensor_addr,
@@ -370,7 +380,8 @@ static int abituguru_write(struct abituguru_data *data,
370 } 380 }
371 381
372 /* Now we need to wait till the chip is ready to be read again, 382 /* Now we need to wait till the chip is ready to be read again,
373 don't ask why */ 383 so that we can read 0xAC as confirmation that our write has
384 succeeded. */
374 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) { 385 if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
375 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state " 386 ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state "
376 "after write (bank: %d, sensor: %d)\n", (int)bank_addr, 387 "after write (bank: %d, sensor: %d)\n", (int)bank_addr,
@@ -379,11 +390,15 @@ static int abituguru_write(struct abituguru_data *data,
379 } 390 }
380 391
381 /* Cmd port MUST be read now and should contain 0xAC */ 392 /* Cmd port MUST be read now and should contain 0xAC */
382 if (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) { 393 while (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) {
383 ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after write " 394 timeout--;
384 "(bank: %d, sensor: %d)\n", (int)bank_addr, 395 if (timeout == 0) {
385 (int)sensor_addr); 396 ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after "
386 return -EIO; 397 "write (bank: %d, sensor: %d)\n",
398 (int)bank_addr, (int)sensor_addr);
399 return -EIO;
400 }
401 msleep(0);
387 } 402 }
388 403
389 /* Last put the chip back in ready state */ 404 /* Last put the chip back in ready state */
@@ -403,7 +418,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
403 u8 sensor_addr) 418 u8 sensor_addr)
404{ 419{
405 u8 val, buf[3]; 420 u8 val, buf[3];
 406 u8 val, buf[3]; 421 int i, ret = -ENODEV; /* error is the most commonly used retval :| */
407 422
408 /* If overriden by the user return the user selected type */ 423 /* If overriden by the user return the user selected type */
409 if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR && 424 if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR &&
@@ -439,7 +454,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
439 buf[2] = 250; 454 buf[2] = 250;
440 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 455 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
441 buf, 3) != 3) 456 buf, 3) != 3)
442 return -ENODEV; 457 goto abituguru_detect_bank1_sensor_type_exit;
443 /* Now we need 20 ms to give the uguru time to read the sensors 458 /* Now we need 20 ms to give the uguru time to read the sensors
444 and raise a voltage alarm */ 459 and raise a voltage alarm */
445 set_current_state(TASK_UNINTERRUPTIBLE); 460 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -447,21 +462,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
447 /* Check for alarm and check the alarm is a volt low alarm. */ 462 /* Check for alarm and check the alarm is a volt low alarm. */
448 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, 463 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
449 ABIT_UGURU_MAX_RETRIES) != 3) 464 ABIT_UGURU_MAX_RETRIES) != 3)
450 return -ENODEV; 465 goto abituguru_detect_bank1_sensor_type_exit;
451 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { 466 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
452 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, 467 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
453 sensor_addr, buf, 3, 468 sensor_addr, buf, 3,
454 ABIT_UGURU_MAX_RETRIES) != 3) 469 ABIT_UGURU_MAX_RETRIES) != 3)
455 return -ENODEV; 470 goto abituguru_detect_bank1_sensor_type_exit;
456 if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) { 471 if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) {
457 /* Restore original settings */
458 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
459 sensor_addr,
460 data->bank1_settings[sensor_addr],
461 3) != 3)
462 return -ENODEV;
463 ABIT_UGURU_DEBUG(2, " found volt sensor\n"); 472 ABIT_UGURU_DEBUG(2, " found volt sensor\n");
464 return ABIT_UGURU_IN_SENSOR; 473 ret = ABIT_UGURU_IN_SENSOR;
474 goto abituguru_detect_bank1_sensor_type_exit;
465 } else 475 } else
466 ABIT_UGURU_DEBUG(2, " alarm raised during volt " 476 ABIT_UGURU_DEBUG(2, " alarm raised during volt "
467 "sensor test, but volt low flag not set\n"); 477 "sensor test, but volt low flag not set\n");
@@ -477,7 +487,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
477 buf[2] = 10; 487 buf[2] = 10;
478 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 488 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
479 buf, 3) != 3) 489 buf, 3) != 3)
480 return -ENODEV; 490 goto abituguru_detect_bank1_sensor_type_exit;
481 /* Now we need 50 ms to give the uguru time to read the sensors 491 /* Now we need 50 ms to give the uguru time to read the sensors
482 and raise a temp alarm */ 492 and raise a temp alarm */
483 set_current_state(TASK_UNINTERRUPTIBLE); 493 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -485,15 +495,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
485 /* Check for alarm and check the alarm is a temp high alarm. */ 495 /* Check for alarm and check the alarm is a temp high alarm. */
486 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3, 496 if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
487 ABIT_UGURU_MAX_RETRIES) != 3) 497 ABIT_UGURU_MAX_RETRIES) != 3)
488 return -ENODEV; 498 goto abituguru_detect_bank1_sensor_type_exit;
489 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) { 499 if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
490 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1, 500 if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
491 sensor_addr, buf, 3, 501 sensor_addr, buf, 3,
492 ABIT_UGURU_MAX_RETRIES) != 3) 502 ABIT_UGURU_MAX_RETRIES) != 3)
493 return -ENODEV; 503 goto abituguru_detect_bank1_sensor_type_exit;
494 if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) { 504 if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) {
495 ret = ABIT_UGURU_TEMP_SENSOR;
496 ABIT_UGURU_DEBUG(2, " found temp sensor\n"); 505 ABIT_UGURU_DEBUG(2, " found temp sensor\n");
506 ret = ABIT_UGURU_TEMP_SENSOR;
507 goto abituguru_detect_bank1_sensor_type_exit;
497 } else 508 } else
498 ABIT_UGURU_DEBUG(2, " alarm raised during temp " 509 ABIT_UGURU_DEBUG(2, " alarm raised during temp "
499 "sensor test, but temp high flag not set\n"); 510 "sensor test, but temp high flag not set\n");
@@ -501,11 +512,23 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
501 ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor " 512 ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor "
502 "test\n"); 513 "test\n");
503 514
504 /* Restore original settings */ 515 ret = ABIT_UGURU_NC;
505 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr, 516abituguru_detect_bank1_sensor_type_exit:
506 data->bank1_settings[sensor_addr], 3) != 3) 517 /* Restore original settings, failing here is really BAD, it has been
518 reported that some BIOS-es hang when entering the uGuru menu with
519 invalid settings present in the uGuru, so we try this 3 times. */
520 for (i = 0; i < 3; i++)
521 if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
522 sensor_addr, data->bank1_settings[sensor_addr],
523 3) == 3)
524 break;
525 if (i == 3) {
526 printk(KERN_ERR ABIT_UGURU_NAME
527 ": Fatal error could not restore original settings. "
528 "This should never happen please report this to the "
529 "abituguru maintainer (see MAINTAINERS)\n");
507 return -ENODEV; 530 return -ENODEV;
508 531 }
509 return ret; 532 return ret;
510} 533}
511 534
@@ -1305,7 +1328,7 @@ static struct abituguru_data *abituguru_update_device(struct device *dev)
1305 data->update_timeouts = 0; 1328 data->update_timeouts = 0;
1306LEAVE_UPDATE: 1329LEAVE_UPDATE:
1307 /* handle timeout condition */ 1330 /* handle timeout condition */
1308 if (err == -EBUSY) { 1331 if (!success && (err == -EBUSY || err >= 0)) {
1309 /* No overflow please */ 1332 /* No overflow please */
1310 if (data->update_timeouts < 255u) 1333 if (data->update_timeouts < 255u)
1311 data->update_timeouts++; 1334 data->update_timeouts++;
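The abituguru hunk above restores the original bank1 settings in a bounded retry loop, because (as the new comment notes) leaving invalid settings in the uGuru has reportedly hung some BIOSes. Purely as an illustration of that bounded-retry-then-fail pattern, here is a minimal user-space sketch; the helper name write_settings is hypothetical and stands in for the real register write.

#include <stdio.h>

/* Hypothetical stand-in for the hardware write; returns 0 on success. */
static int write_settings(int addr, const unsigned char *saved, int len)
{
        /* ... talk to the device here ... */
        return 0;
}

static int restore_settings(int addr, const unsigned char *saved, int len)
{
        int i;

        /* Failing to restore is serious, so retry a fixed number of
         * times before reporting a fatal error, as the driver does. */
        for (i = 0; i < 3; i++)
                if (write_settings(addr, saved, len) == 0)
                        return 0;

        fprintf(stderr, "could not restore original settings\n");
        return -1;
}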
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index e7e27049fbfa..0be6fd6a267d 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -43,13 +43,12 @@
43/*-------------------------------------------------------------------------*/ 43/*-------------------------------------------------------------------------*/
44 44
45#define DRIVER_VERSION "2 May 2005" 45#define DRIVER_VERSION "2 May 2005"
46#define DRIVER_NAME (tps65010_driver.name) 46#define DRIVER_NAME (tps65010_driver.driver.name)
47 47
48MODULE_DESCRIPTION("TPS6501x Power Management Driver"); 48MODULE_DESCRIPTION("TPS6501x Power Management Driver");
49MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
50 50
51static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END }; 51static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END };
52static unsigned short normal_i2c_range[] = { I2C_CLIENT_END };
53 52
54I2C_CLIENT_INSMOD; 53I2C_CLIENT_INSMOD;
55 54
@@ -100,7 +99,7 @@ struct tps65010 {
100 /* not currently tracking GPIO state */ 99 /* not currently tracking GPIO state */
101}; 100};
102 101
103#define POWER_POLL_DELAY msecs_to_jiffies(800) 102#define POWER_POLL_DELAY msecs_to_jiffies(5000)
104 103
105/*-------------------------------------------------------------------------*/ 104/*-------------------------------------------------------------------------*/
106 105
@@ -520,8 +519,11 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
520 goto fail1; 519 goto fail1;
521 } 520 }
522 521
522 /* the IRQ is active low, but many gpio lines can't support that
523 * so this driver can use falling-edge triggers instead.
524 */
525 irqflags = IRQF_SAMPLE_RANDOM;
523#ifdef CONFIG_ARM 526#ifdef CONFIG_ARM
524 irqflags = IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_LOW;
525 if (machine_is_omap_h2()) { 527 if (machine_is_omap_h2()) {
526 tps->model = TPS65010; 528 tps->model = TPS65010;
527 omap_cfg_reg(W4_GPIO58); 529 omap_cfg_reg(W4_GPIO58);
@@ -543,8 +545,6 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
543 545
544 // FIXME set up this board's IRQ ... 546 // FIXME set up this board's IRQ ...
545 } 547 }
546#else
547 irqflags = IRQF_SAMPLE_RANDOM;
548#endif 548#endif
549 549
550 if (tps->irq > 0) { 550 if (tps->irq > 0) {
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d4bad6704bbe..448df2773377 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -3552,6 +3552,8 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
3552 3552
3553static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) 3553static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3554{ 3554{
3555 pci_save_state(pdev);
3556
3555#ifdef CONFIG_PPC_PMAC 3557#ifdef CONFIG_PPC_PMAC
3556 if (machine_is(powermac)) { 3558 if (machine_is(powermac)) {
3557 struct device_node *of_node; 3559 struct device_node *of_node;
@@ -3563,8 +3565,6 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3563 } 3565 }
3564#endif 3566#endif
3565 3567
3566 pci_save_state(pdev);
3567
3568 return 0; 3568 return 0;
3569} 3569}
3570 3570
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index e05ca2cdc73f..75313ade2e0d 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -301,7 +301,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
301 event->event == IB_EVENT_PORT_ACTIVE || 301 event->event == IB_EVENT_PORT_ACTIVE ||
302 event->event == IB_EVENT_LID_CHANGE || 302 event->event == IB_EVENT_LID_CHANGE ||
303 event->event == IB_EVENT_PKEY_CHANGE || 303 event->event == IB_EVENT_PKEY_CHANGE ||
304 event->event == IB_EVENT_SM_CHANGE) { 304 event->event == IB_EVENT_SM_CHANGE ||
305 event->event == IB_EVENT_CLIENT_REREGISTER) {
305 work = kmalloc(sizeof *work, GFP_ATOMIC); 306 work = kmalloc(sizeof *work, GFP_ATOMIC);
306 if (work) { 307 if (work) {
307 INIT_WORK(&work->work, ib_cache_task, work); 308 INIT_WORK(&work->work, ib_cache_task, work);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index aeda484ffd82..d6b84226bba7 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -405,7 +405,8 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
405 event->event == IB_EVENT_PORT_ACTIVE || 405 event->event == IB_EVENT_PORT_ACTIVE ||
406 event->event == IB_EVENT_LID_CHANGE || 406 event->event == IB_EVENT_LID_CHANGE ||
407 event->event == IB_EVENT_PKEY_CHANGE || 407 event->event == IB_EVENT_PKEY_CHANGE ||
408 event->event == IB_EVENT_SM_CHANGE) { 408 event->event == IB_EVENT_SM_CHANGE ||
409 event->event == IB_EVENT_CLIENT_REREGISTER) {
409 struct ib_sa_device *sa_dev; 410 struct ib_sa_device *sa_dev;
410 sa_dev = container_of(handler, typeof(*sa_dev), event_handler); 411 sa_dev = container_of(handler, typeof(*sa_dev), event_handler);
411 412
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 557cde3a4563..7b82c1907f04 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -967,12 +967,12 @@ static struct {
967} mthca_hca_table[] = { 967} mthca_hca_table[] = {
968 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0), 968 [TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
969 .flags = 0 }, 969 .flags = 0 },
970 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400), 970 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600),
971 .flags = MTHCA_FLAG_PCIE }, 971 .flags = MTHCA_FLAG_PCIE },
972 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0), 972 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400),
973 .flags = MTHCA_FLAG_MEMFREE | 973 .flags = MTHCA_FLAG_MEMFREE |
974 MTHCA_FLAG_PCIE }, 974 MTHCA_FLAG_PCIE },
975 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800), 975 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0),
976 .flags = MTHCA_FLAG_MEMFREE | 976 .flags = MTHCA_FLAG_MEMFREE |
977 MTHCA_FLAG_PCIE | 977 MTHCA_FLAG_PCIE |
978 MTHCA_FLAG_SINAI_OPT } 978 MTHCA_FLAG_SINAI_OPT }
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 230ae21db8fd..265b1d1c4a62 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1287,11 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev)
1287 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 1287 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1288 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 1288 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1289 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 1289 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
1290 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 1290 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
1291 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1292 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1293 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1294 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1295 dev->ib_dev.node_type = IB_NODE_CA; 1291 dev->ib_dev.node_type = IB_NODE_CA;
1296 dev->ib_dev.phys_port_cnt = dev->limits.num_ports; 1292 dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
1297 dev->ib_dev.dma_device = &dev->pdev->dev; 1293 dev->ib_dev.dma_device = &dev->pdev->dev;
@@ -1316,6 +1312,11 @@ int mthca_register_device(struct mthca_dev *dev)
1316 dev->ib_dev.modify_srq = mthca_modify_srq; 1312 dev->ib_dev.modify_srq = mthca_modify_srq;
1317 dev->ib_dev.query_srq = mthca_query_srq; 1313 dev->ib_dev.query_srq = mthca_query_srq;
1318 dev->ib_dev.destroy_srq = mthca_destroy_srq; 1314 dev->ib_dev.destroy_srq = mthca_destroy_srq;
1315 dev->ib_dev.uverbs_cmd_mask |=
1316 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1317 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1318 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1319 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
1319 1320
1320 if (mthca_is_memfree(dev)) 1321 if (mthca_is_memfree(dev))
1321 dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; 1322 dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 8de2887ba15c..9a5bece3fa5c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -136,8 +136,8 @@ struct mthca_ah {
136 * We have one global lock that protects dev->cq/qp_table. Each 136 * We have one global lock that protects dev->cq/qp_table. Each
137 * struct mthca_cq/qp also has its own lock. An individual qp lock 137 * struct mthca_cq/qp also has its own lock. An individual qp lock
138 * may be taken inside of an individual cq lock. Both cqs attached to 138 * may be taken inside of an individual cq lock. Both cqs attached to
139 * a qp may be locked, with the send cq locked first. No other 139 * a qp may be locked, with the cq with the lower cqn locked first.
140 * nesting should be done. 140 * No other nesting should be done.
141 * 141 *
142 * Each struct mthca_cq/qp also has an ref count, protected by the 142 * Each struct mthca_cq/qp also has an ref count, protected by the
143 * corresponding table lock. The pointer from the cq/qp_table to the 143 * corresponding table lock. The pointer from the cq/qp_table to the
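The updated locking comment above states the classic deadlock-avoidance rule: when two CQ locks may be held at once, always acquire them in a fixed global order, here by CQ number. As a hedged user-space sketch of the same idea (the cq struct and cqn field below are illustrative, not the driver's types), with pthreads standing in for the kernel spinlocks:

#include <pthread.h>

struct cq {
        unsigned int cqn;       /* unique number used to order acquisition */
        pthread_mutex_t lock;
};

/* Lock two CQs without risking an ABBA deadlock: if they are distinct,
 * always take the one with the lower cqn first. */
static void lock_cq_pair(struct cq *a, struct cq *b)
{
        if (a == b) {
                pthread_mutex_lock(&a->lock);
        } else if (a->cqn < b->cqn) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

/* Release in reverse acquisition order, mirroring the lock side. */
static void unlock_cq_pair(struct cq *a, struct cq *b)
{
        if (a == b) {
                pthread_mutex_unlock(&a->lock);
        } else if (a->cqn < b->cqn) {
                pthread_mutex_unlock(&b->lock);
                pthread_mutex_unlock(&a->lock);
        } else {
                pthread_mutex_unlock(&a->lock);
                pthread_mutex_unlock(&b->lock);
        }
}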
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index cd8b6721ac9c..2e8f6f36e0a5 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -99,6 +99,10 @@ enum {
99 MTHCA_QP_BIT_RSC = 1 << 3 99 MTHCA_QP_BIT_RSC = 1 << 3
100}; 100};
101 101
102enum {
103 MTHCA_SEND_DOORBELL_FENCE = 1 << 5
104};
105
102struct mthca_qp_path { 106struct mthca_qp_path {
103 __be32 port_pkey; 107 __be32 port_pkey;
104 u8 rnr_retry; 108 u8 rnr_retry;
@@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev,
1259 return 0; 1263 return 0;
1260} 1264}
1261 1265
1266static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1267{
1268 if (send_cq == recv_cq)
1269 spin_lock_irq(&send_cq->lock);
1270 else if (send_cq->cqn < recv_cq->cqn) {
1271 spin_lock_irq(&send_cq->lock);
1272 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1273 } else {
1274 spin_lock_irq(&recv_cq->lock);
1275 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1276 }
1277}
1278
1279static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1280{
1281 if (send_cq == recv_cq)
1282 spin_unlock_irq(&send_cq->lock);
1283 else if (send_cq->cqn < recv_cq->cqn) {
1284 spin_unlock(&recv_cq->lock);
1285 spin_unlock_irq(&send_cq->lock);
1286 } else {
1287 spin_unlock(&send_cq->lock);
1288 spin_unlock_irq(&recv_cq->lock);
1289 }
1290}
1291
1262int mthca_alloc_sqp(struct mthca_dev *dev, 1292int mthca_alloc_sqp(struct mthca_dev *dev,
1263 struct mthca_pd *pd, 1293 struct mthca_pd *pd,
1264 struct mthca_cq *send_cq, 1294 struct mthca_cq *send_cq,
@@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
1311 * Lock CQs here, so that CQ polling code can do QP lookup 1341 * Lock CQs here, so that CQ polling code can do QP lookup
1312 * without taking a lock. 1342 * without taking a lock.
1313 */ 1343 */
1314 spin_lock_irq(&send_cq->lock); 1344 mthca_lock_cqs(send_cq, recv_cq);
1315 if (send_cq != recv_cq)
1316 spin_lock(&recv_cq->lock);
1317 1345
1318 spin_lock(&dev->qp_table.lock); 1346 spin_lock(&dev->qp_table.lock);
1319 mthca_array_clear(&dev->qp_table.qp, mqpn); 1347 mthca_array_clear(&dev->qp_table.qp, mqpn);
1320 spin_unlock(&dev->qp_table.lock); 1348 spin_unlock(&dev->qp_table.lock);
1321 1349
1322 if (send_cq != recv_cq) 1350 mthca_unlock_cqs(send_cq, recv_cq);
1323 spin_unlock(&recv_cq->lock);
1324 spin_unlock_irq(&send_cq->lock);
1325 1351
1326 err_out: 1352 err_out:
1327 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, 1353 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev,
1355 * Lock CQs here, so that CQ polling code can do QP lookup 1381 * Lock CQs here, so that CQ polling code can do QP lookup
1356 * without taking a lock. 1382 * without taking a lock.
1357 */ 1383 */
1358 spin_lock_irq(&send_cq->lock); 1384 mthca_lock_cqs(send_cq, recv_cq);
1359 if (send_cq != recv_cq)
1360 spin_lock(&recv_cq->lock);
1361 1385
1362 spin_lock(&dev->qp_table.lock); 1386 spin_lock(&dev->qp_table.lock);
1363 mthca_array_clear(&dev->qp_table.qp, 1387 mthca_array_clear(&dev->qp_table.qp,
@@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev,
1365 --qp->refcount; 1389 --qp->refcount;
1366 spin_unlock(&dev->qp_table.lock); 1390 spin_unlock(&dev->qp_table.lock);
1367 1391
1368 if (send_cq != recv_cq) 1392 mthca_unlock_cqs(send_cq, recv_cq);
1369 spin_unlock(&recv_cq->lock);
1370 spin_unlock_irq(&send_cq->lock);
1371 1393
1372 wait_event(qp->wait, !get_qp_refcount(dev, qp)); 1394 wait_event(qp->wait, !get_qp_refcount(dev, qp));
1373 1395
@@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1502 int i; 1524 int i;
1503 int size; 1525 int size;
1504 int size0 = 0; 1526 int size0 = 0;
1505 u32 f0 = 0; 1527 u32 f0;
1506 int ind; 1528 int ind;
1507 u8 op0 = 0; 1529 u8 op0 = 0;
1508 1530
@@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1686 if (!size0) { 1708 if (!size0) {
1687 size0 = size; 1709 size0 = size;
1688 op0 = mthca_opcode[wr->opcode]; 1710 op0 = mthca_opcode[wr->opcode];
1711 f0 = wr->send_flags & IB_SEND_FENCE ?
1712 MTHCA_SEND_DOORBELL_FENCE : 0;
1689 } 1713 }
1690 1714
1691 ++ind; 1715 ++ind;
@@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1843 int i; 1867 int i;
1844 int size; 1868 int size;
1845 int size0 = 0; 1869 int size0 = 0;
1846 u32 f0 = 0; 1870 u32 f0;
1847 int ind; 1871 int ind;
1848 u8 op0 = 0; 1872 u8 op0 = 0;
1849 1873
@@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2051 if (!size0) { 2075 if (!size0) {
2052 size0 = size; 2076 size0 = size;
2053 op0 = mthca_opcode[wr->opcode]; 2077 op0 = mthca_opcode[wr->opcode];
2078 f0 = wr->send_flags & IB_SEND_FENCE ?
2079 MTHCA_SEND_DOORBELL_FENCE : 0;
2054 } 2080 }
2055 2081
2056 ++ind; 2082 ++ind;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 34b0da5cfa0a..1437d7ee3b19 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
378 return iser_conn_set_full_featured_mode(conn); 378 return iser_conn_set_full_featured_mode(conn);
379} 379}
380 380
381static void
382iscsi_iser_conn_terminate(struct iscsi_conn *conn)
383{
384 struct iscsi_iser_conn *iser_conn = conn->dd_data;
385 struct iser_conn *ib_conn = iser_conn->ib_conn;
386
387 BUG_ON(!ib_conn);
388 /* starts conn teardown process, waits until all previously *
389 * posted buffers get flushed, deallocates all conn resources */
390 iser_conn_terminate(ib_conn);
391 iser_conn->ib_conn = NULL;
392 conn->recv_lock = NULL;
393}
394
395
396static struct iscsi_transport iscsi_iser_transport; 381static struct iscsi_transport iscsi_iser_transport;
397 382
398static struct iscsi_cls_session * 383static struct iscsi_cls_session *
@@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
555static void 540static void
556iscsi_iser_ep_disconnect(__u64 ep_handle) 541iscsi_iser_ep_disconnect(__u64 ep_handle)
557{ 542{
558 struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle); 543 struct iser_conn *ib_conn;
559 544
545 ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
560 if (!ib_conn) 546 if (!ib_conn)
561 return; 547 return;
562 548
563 iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); 549 iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
564
565 iser_conn_terminate(ib_conn); 550 iser_conn_terminate(ib_conn);
566} 551}
567 552
@@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = {
614 .get_session_param = iscsi_session_get_param, 599 .get_session_param = iscsi_session_get_param,
615 .start_conn = iscsi_iser_conn_start, 600 .start_conn = iscsi_iser_conn_start,
616 .stop_conn = iscsi_conn_stop, 601 .stop_conn = iscsi_conn_stop,
617 /* these are called as part of conn recovery */
618 .suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? */
619 .terminate_conn = iscsi_iser_conn_terminate,
620 /* IO */ 602 /* IO */
621 .send_pdu = iscsi_conn_send_pdu, 603 .send_pdu = iscsi_conn_send_pdu,
622 .get_stats = iscsi_iser_conn_get_stats, 604 .get_stats = iscsi_iser_conn_get_stats,
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6bfa0cf4b1d2..a86afd0a5ef1 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -498,7 +498,7 @@ static int atkbd_set_repeat_rate(struct atkbd *atkbd)
498 i++; 498 i++;
499 dev->rep[REP_PERIOD] = period[i]; 499 dev->rep[REP_PERIOD] = period[i];
500 500
501 while (j < ARRAY_SIZE(period) - 1 && delay[j] < dev->rep[REP_DELAY]) 501 while (j < ARRAY_SIZE(delay) - 1 && delay[j] < dev->rep[REP_DELAY])
502 j++; 502 j++;
503 dev->rep[REP_DELAY] = delay[j]; 503 dev->rep[REP_DELAY] = delay[j];
504 504
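The atkbd fix above bounds the second lookup loop by the delay table rather than the period table, so the index can never run past the array it actually indexes. A small stand-alone sketch of that clamp-to-nearest-table-entry idiom (the table contents are illustrative):

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int delay_ms[] = { 250, 500, 750, 1000 };

/* Walk the table until the requested value or the last entry is reached;
 * the loop bound always comes from the table being indexed. */
static unsigned int clamp_delay(unsigned int requested)
{
        size_t j = 0;

        while (j < ARRAY_SIZE(delay_ms) - 1 && delay_ms[j] < requested)
                j++;
        return delay_ms[j];
}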
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index a8efc1af36cb..de0f46dd9692 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -259,11 +259,11 @@ static int __init dmi_matched(struct dmi_system_id *dmi)
259 return 1; 259 return 1;
260} 260}
261 261
262static struct key_entry keymap_empty[] __initdata = { 262static struct key_entry keymap_empty[] = {
263 { KE_END, 0 } 263 { KE_END, 0 }
264}; 264};
265 265
266static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = { 266static struct key_entry keymap_fs_amilo_pro_v2000[] = {
267 { KE_KEY, 0x01, KEY_HELP }, 267 { KE_KEY, 0x01, KEY_HELP },
268 { KE_KEY, 0x11, KEY_PROG1 }, 268 { KE_KEY, 0x11, KEY_PROG1 },
269 { KE_KEY, 0x12, KEY_PROG2 }, 269 { KE_KEY, 0x12, KEY_PROG2 },
@@ -273,7 +273,7 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = {
273 { KE_END, 0 } 273 { KE_END, 0 }
274}; 274};
275 275
276static struct key_entry keymap_fujitsu_n3510[] __initdata = { 276static struct key_entry keymap_fujitsu_n3510[] = {
277 { KE_KEY, 0x11, KEY_PROG1 }, 277 { KE_KEY, 0x11, KEY_PROG1 },
278 { KE_KEY, 0x12, KEY_PROG2 }, 278 { KE_KEY, 0x12, KEY_PROG2 },
279 { KE_KEY, 0x36, KEY_WWW }, 279 { KE_KEY, 0x36, KEY_WWW },
@@ -285,7 +285,7 @@ static struct key_entry keymap_fujitsu_n3510[] __initdata = {
285 { KE_END, 0 } 285 { KE_END, 0 }
286}; 286};
287 287
288static struct key_entry keymap_wistron_ms2111[] __initdata = { 288static struct key_entry keymap_wistron_ms2111[] = {
289 { KE_KEY, 0x11, KEY_PROG1 }, 289 { KE_KEY, 0x11, KEY_PROG1 },
290 { KE_KEY, 0x12, KEY_PROG2 }, 290 { KE_KEY, 0x12, KEY_PROG2 },
291 { KE_KEY, 0x13, KEY_PROG3 }, 291 { KE_KEY, 0x13, KEY_PROG3 },
@@ -294,7 +294,7 @@ static struct key_entry keymap_wistron_ms2111[] __initdata = {
294 { KE_END, 0 } 294 { KE_END, 0 }
295}; 295};
296 296
297static struct key_entry keymap_wistron_ms2141[] __initdata = { 297static struct key_entry keymap_wistron_ms2141[] = {
298 { KE_KEY, 0x11, KEY_PROG1 }, 298 { KE_KEY, 0x11, KEY_PROG1 },
299 { KE_KEY, 0x12, KEY_PROG2 }, 299 { KE_KEY, 0x12, KEY_PROG2 },
300 { KE_WIFI, 0x30, 0 }, 300 { KE_WIFI, 0x30, 0 },
@@ -307,7 +307,7 @@ static struct key_entry keymap_wistron_ms2141[] __initdata = {
307 { KE_END, 0 } 307 { KE_END, 0 }
308}; 308};
309 309
310static struct key_entry keymap_acer_aspire_1500[] __initdata = { 310static struct key_entry keymap_acer_aspire_1500[] = {
311 { KE_KEY, 0x11, KEY_PROG1 }, 311 { KE_KEY, 0x11, KEY_PROG1 },
312 { KE_KEY, 0x12, KEY_PROG2 }, 312 { KE_KEY, 0x12, KEY_PROG2 },
313 { KE_WIFI, 0x30, 0 }, 313 { KE_WIFI, 0x30, 0 },
@@ -317,7 +317,7 @@ static struct key_entry keymap_acer_aspire_1500[] __initdata = {
317 { KE_END, 0 } 317 { KE_END, 0 }
318}; 318};
319 319
320static struct key_entry keymap_acer_travelmate_240[] __initdata = { 320static struct key_entry keymap_acer_travelmate_240[] = {
321 { KE_KEY, 0x31, KEY_MAIL }, 321 { KE_KEY, 0x31, KEY_MAIL },
322 { KE_KEY, 0x36, KEY_WWW }, 322 { KE_KEY, 0x36, KEY_WWW },
323 { KE_KEY, 0x11, KEY_PROG1 }, 323 { KE_KEY, 0x11, KEY_PROG1 },
@@ -327,7 +327,7 @@ static struct key_entry keymap_acer_travelmate_240[] __initdata = {
327 { KE_END, 0 } 327 { KE_END, 0 }
328}; 328};
329 329
330static struct key_entry keymap_aopen_1559as[] __initdata = { 330static struct key_entry keymap_aopen_1559as[] = {
331 { KE_KEY, 0x01, KEY_HELP }, 331 { KE_KEY, 0x01, KEY_HELP },
332 { KE_KEY, 0x06, KEY_PROG3 }, 332 { KE_KEY, 0x06, KEY_PROG3 },
333 { KE_KEY, 0x11, KEY_PROG1 }, 333 { KE_KEY, 0x11, KEY_PROG1 },
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 8bc9f51ae6c2..343afa38f4c2 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -485,13 +485,6 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties)
485 param[0] = 40; 485 param[0] = 40;
486 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE); 486 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
487 487
488 param[0] = 200;
489 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
490 param[0] = 200;
491 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
492 param[0] = 60;
493 ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
494
495 if (set_properties) { 488 if (set_properties) {
496 set_bit(BTN_MIDDLE, psmouse->dev->keybit); 489 set_bit(BTN_MIDDLE, psmouse->dev->keybit);
497 set_bit(REL_WHEEL, psmouse->dev->relbit); 490 set_bit(REL_WHEEL, psmouse->dev->relbit);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index be48cedf986b..c54de989eb00 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -255,7 +255,9 @@ static struct region *__rh_alloc(struct region_hash *rh, region_t region)
255 struct region *reg, *nreg; 255 struct region *reg, *nreg;
256 256
257 read_unlock(&rh->hash_lock); 257 read_unlock(&rh->hash_lock);
258 nreg = mempool_alloc(rh->region_pool, GFP_NOIO); 258 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
259 if (unlikely(!nreg))
260 nreg = kmalloc(sizeof(struct region), GFP_NOIO);
259 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? 261 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
260 RH_CLEAN : RH_NOSYNC; 262 RH_CLEAN : RH_NOSYNC;
261 nreg->rh = rh; 263 nreg->rh = rh;
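The dm-raid1 hunk above tries a non-blocking pool allocation first and only falls back to a general-purpose allocation when the pool is exhausted. A minimal sketch of that try-the-fast-path-then-fall-back pattern in plain C (the pool structure and names are illustrative, not the kernel mempool API):

#include <stdlib.h>

struct pool {
        void **objs;            /* stack of preallocated objects */
        int nfree;
        size_t objsize;
};

/* Non-blocking: hand out a preallocated object, or NULL if none left. */
static void *pool_alloc_fast(struct pool *p)
{
        return p->nfree ? p->objs[--p->nfree] : NULL;
}

/* Try the cheap path first, then fall back to the general allocator,
 * mirroring the mempool_alloc()/kmalloc() fallback above. */
static void *region_alloc(struct pool *p)
{
        void *obj = pool_alloc_fast(p);

        if (!obj)
                obj = malloc(p->objsize);
        return obj;
}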
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b6d16022a53e..8dbab2ef3885 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1597,6 +1597,19 @@ void md_update_sb(mddev_t * mddev)
1597 1597
1598repeat: 1598repeat:
1599 spin_lock_irq(&mddev->write_lock); 1599 spin_lock_irq(&mddev->write_lock);
1600
1601 if (mddev->degraded && mddev->sb_dirty == 3)
1602 /* If the array is degraded, then skipping spares is both
1603 * dangerous and fairly pointless.
1604 * Dangerous because a device that was removed from the array
1605 * might have a event_count that still looks up-to-date,
1606 * so it can be re-added without a resync.
1607 * Pointless because if there are any spares to skip,
1608 * then a recovery will happen and soon that array won't
1609 * be degraded any more and the spare can go back to sleep then.
1610 */
1611 mddev->sb_dirty = 1;
1612
1600 sync_req = mddev->in_sync; 1613 sync_req = mddev->in_sync;
1601 mddev->utime = get_seconds(); 1614 mddev->utime = get_seconds();
1602 if (mddev->sb_dirty == 3) 1615 if (mddev->sb_dirty == 3)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1efe22a2d041..87bfe9e7d8ca 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1625,15 +1625,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1625 return 0; 1625 return 0;
1626 } 1626 }
1627 1627
1628 /* before building a request, check if we can skip these blocks..
1629 * This call the bitmap_start_sync doesn't actually record anything
1630 */
1631 if (mddev->bitmap == NULL && 1628 if (mddev->bitmap == NULL &&
1632 mddev->recovery_cp == MaxSector && 1629 mddev->recovery_cp == MaxSector &&
1630 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
1633 conf->fullsync == 0) { 1631 conf->fullsync == 0) {
1634 *skipped = 1; 1632 *skipped = 1;
1635 return max_sector - sector_nr; 1633 return max_sector - sector_nr;
1636 } 1634 }
1635 /* before building a request, check if we can skip these blocks..
1636 * This call the bitmap_start_sync doesn't actually record anything
1637 */
1637 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 1638 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
1638 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 1639 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1639 /* We can skip this block, and probably several more */ 1640 /* We can skip this block, and probably several more */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d4cb144ab402..c537d71c18e4 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -640,7 +640,6 @@ typedef struct _MPT_ADAPTER
640 struct work_struct fc_setup_reset_work; 640 struct work_struct fc_setup_reset_work;
641 struct list_head fc_rports; 641 struct list_head fc_rports;
642 spinlock_t fc_rescan_work_lock; 642 spinlock_t fc_rescan_work_lock;
643 int fc_rescan_work_count;
644 struct work_struct fc_rescan_work; 643 struct work_struct fc_rescan_work;
645 char fc_rescan_work_q_name[KOBJ_NAME_LEN]; 644 char fc_rescan_work_q_name[KOBJ_NAME_LEN];
646 struct workqueue_struct *fc_rescan_work_q; 645 struct workqueue_struct *fc_rescan_work_q;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 90da7d63b08e..85696f34c310 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -669,7 +669,10 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
669 * if still doing discovery, 669 * if still doing discovery,
670 * hang loose a while until finished 670 * hang loose a while until finished
671 */ 671 */
672 if (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) { 672 if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
673 (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
674 (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
675 == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
673 if (count-- > 0) { 676 if (count-- > 0) {
674 msleep(100); 677 msleep(100);
675 goto try_again; 678 goto try_again;
@@ -895,59 +898,45 @@ mptfc_rescan_devices(void *arg)
895{ 898{
896 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; 899 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
897 int ii; 900 int ii;
898 int work_to_do;
899 u64 pn; 901 u64 pn;
900 unsigned long flags;
901 struct mptfc_rport_info *ri; 902 struct mptfc_rport_info *ri;
902 903
903 do { 904 /* start by tagging all ports as missing */
904 /* start by tagging all ports as missing */ 905 list_for_each_entry(ri, &ioc->fc_rports, list) {
905 list_for_each_entry(ri, &ioc->fc_rports, list) { 906 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
906 if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { 907 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
907 ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
908 }
909 } 908 }
909 }
910 910
911 /* 911 /*
912 * now rescan devices known to adapter, 912 * now rescan devices known to adapter,
913 * will reregister existing rports 913 * will reregister existing rports
914 */ 914 */
915 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { 915 for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
916 (void) mptfc_GetFcPortPage0(ioc, ii); 916 (void) mptfc_GetFcPortPage0(ioc, ii);
917 mptfc_init_host_attr(ioc,ii); /* refresh */ 917 mptfc_init_host_attr(ioc, ii); /* refresh */
918 mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); 918 mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev);
919 } 919 }
920 920
921 /* delete devices still missing */ 921 /* delete devices still missing */
922 list_for_each_entry(ri, &ioc->fc_rports, list) { 922 list_for_each_entry(ri, &ioc->fc_rports, list) {
923 /* if newly missing, delete it */ 923 /* if newly missing, delete it */
924 if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { 924 if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
925 925
926 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| 926 ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
927 MPT_RPORT_INFO_FLAGS_MISSING); 927 MPT_RPORT_INFO_FLAGS_MISSING);
928 fc_remote_port_delete(ri->rport); /* won't sleep */ 928 fc_remote_port_delete(ri->rport); /* won't sleep */
929 ri->rport = NULL; 929 ri->rport = NULL;
930 930
931 pn = (u64)ri->pg0.WWPN.High << 32 | 931 pn = (u64)ri->pg0.WWPN.High << 32 |
932 (u64)ri->pg0.WWPN.Low; 932 (u64)ri->pg0.WWPN.Low;
933 dfcprintk ((MYIOC_s_INFO_FMT 933 dfcprintk ((MYIOC_s_INFO_FMT
934 "mptfc_rescan.%d: %llx deleted\n", 934 "mptfc_rescan.%d: %llx deleted\n",
935 ioc->name, 935 ioc->name,
936 ioc->sh->host_no, 936 ioc->sh->host_no,
937 (unsigned long long)pn)); 937 (unsigned long long)pn));
938 }
939 } 938 }
940 939 }
941 /*
942 * allow multiple passes as target state
943 * might have changed during scan
944 */
945 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
946 if (ioc->fc_rescan_work_count > 2) /* only need one more */
947 ioc->fc_rescan_work_count = 2;
948 work_to_do = --ioc->fc_rescan_work_count;
949 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
950 } while (work_to_do);
951} 940}
952 941
953static int 942static int
@@ -1159,7 +1148,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1159 * by doing it via the workqueue, some locking is eliminated 1148 * by doing it via the workqueue, some locking is eliminated
1160 */ 1149 */
1161 1150
1162 ioc->fc_rescan_work_count = 1;
1163 queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); 1151 queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
1164 flush_workqueue(ioc->fc_rescan_work_q); 1152 flush_workqueue(ioc->fc_rescan_work_q);
1165 1153
@@ -1202,10 +1190,8 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
1202 case MPI_EVENT_RESCAN: 1190 case MPI_EVENT_RESCAN:
1203 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 1191 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
1204 if (ioc->fc_rescan_work_q) { 1192 if (ioc->fc_rescan_work_q) {
1205 if (ioc->fc_rescan_work_count++ == 0) { 1193 queue_work(ioc->fc_rescan_work_q,
1206 queue_work(ioc->fc_rescan_work_q, 1194 &ioc->fc_rescan_work);
1207 &ioc->fc_rescan_work);
1208 }
1209 } 1195 }
1210 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 1196 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
1211 break; 1197 break;
@@ -1248,10 +1234,8 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
1248 mptfc_SetFcPortPage1_defaults(ioc); 1234 mptfc_SetFcPortPage1_defaults(ioc);
1249 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); 1235 spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
1250 if (ioc->fc_rescan_work_q) { 1236 if (ioc->fc_rescan_work_q) {
1251 if (ioc->fc_rescan_work_count++ == 0) { 1237 queue_work(ioc->fc_rescan_work_q,
1252 queue_work(ioc->fc_rescan_work_q, 1238 &ioc->fc_rescan_work);
1253 &ioc->fc_rescan_work);
1254 }
1255 } 1239 }
1256 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); 1240 spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
1257 } 1241 }
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index d7897dc6b3c8..a0ba07c36ee9 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -130,11 +130,13 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
130 if (ctrl & NAND_CTRL_CHANGE) { 130 if (ctrl & NAND_CTRL_CHANGE) {
131 unsigned long bits; 131 unsigned long bits;
132 132
133 bits = (~ctrl & NAND_NCE) << 2; 133 bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0;
134 bits |= (ctrl & NAND_CLE) << 7; 134 bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0;
135 bits |= (ctrl & NAND_ALE) << 6; 135 bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0;
136 136
137 ams_delta_latch2_write(0xC2, bits); 137 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE |
138 AMS_DELTA_LATCH2_NAND_ALE |
139 AMS_DELTA_LATCH2_NAND_NCE, bits);
138 } 140 }
139 141
140 if (cmd != NAND_CMD_NONE) 142 if (cmd != NAND_CMD_NONE)
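The ams-delta change above replaces magic shift arithmetic with explicit tests against named latch flags, making the control-line mapping self-documenting. As a hedged sketch of the same mapping style, with made-up flag values that are not the board's real latch layout:

#include <stdint.h>

#define LATCH_NAND_NCE  0x04
#define LATCH_NAND_CLE  0x80
#define LATCH_NAND_ALE  0x40

#define CTRL_NCE        0x01
#define CTRL_CLE        0x02
#define CTRL_ALE        0x04

/* Translate generic control bits into board-specific latch bits, one
 * named flag at a time, instead of relying on shift offsets. */
static uint32_t ctrl_to_latch(uint32_t ctrl)
{
        uint32_t bits = 0;

        bits |= (~ctrl & CTRL_NCE) ? LATCH_NAND_NCE : 0; /* NCE is active low */
        bits |= (ctrl & CTRL_CLE) ? LATCH_NAND_CLE : 0;
        bits |= (ctrl & CTRL_ALE) ? LATCH_NAND_ALE : 0;
        return bits;
}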
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 62b861304e03..c8cbc00243fe 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1093,9 +1093,10 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1093 1093
1094 ret = nand_do_read_ops(mtd, from, &chip->ops); 1094 ret = nand_do_read_ops(mtd, from, &chip->ops);
1095 1095
1096 *retlen = chip->ops.retlen;
1097
1096 nand_release_device(mtd); 1098 nand_release_device(mtd);
1097 1099
1098 *retlen = chip->ops.retlen;
1099 return ret; 1100 return ret;
1100} 1101}
1101 1102
@@ -1691,9 +1692,10 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1691 1692
1692 ret = nand_do_write_ops(mtd, to, &chip->ops); 1693 ret = nand_do_write_ops(mtd, to, &chip->ops);
1693 1694
1695 *retlen = chip->ops.retlen;
1696
1694 nand_release_device(mtd); 1697 nand_release_device(mtd);
1695 1698
1696 *retlen = chip->ops.retlen;
1697 return ret; 1699 return ret;
1698} 1700}
1699 1701
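Both nand_read() and nand_write() hunks above move the copy of retlen to before the device is released: once exclusive access is dropped, another caller may reuse the shared ops structure, so results have to be read out first. A minimal sketch of that copy-before-release rule, with a mutex standing in for device ownership and all names illustrative:

#include <pthread.h>
#include <stddef.h>

struct dev {
        pthread_mutex_t lock;   /* models exclusive device ownership */
        size_t last_retlen;     /* shared scratch, valid only while held */
};

static int do_read(struct dev *d, size_t len, size_t *retlen)
{
        pthread_mutex_lock(&d->lock);

        d->last_retlen = len;   /* ... perform the operation ... */

        /* Copy the result out while we still own the device; after the
         * unlock another thread may overwrite last_retlen. */
        *retlen = d->last_retlen;

        pthread_mutex_unlock(&d->lock);
        return 0;
}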
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 3c148eaf2f4d..8a60f391ffcf 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -76,7 +76,7 @@ config HOTPLUG_PCI_IBM
76 76
77config HOTPLUG_PCI_ACPI 77config HOTPLUG_PCI_ACPI
78 tristate "ACPI PCI Hotplug driver" 78 tristate "ACPI PCI Hotplug driver"
79 depends on ACPI_DOCK && HOTPLUG_PCI 79 depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI)
80 help 80 help
81 Say Y here if you have a system that supports PCI Hotplug using 81 Say Y here if you have a system that supports PCI Hotplug using
82 ACPI. 82 ACPI.
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 02be74caa89f..4afcaffd031c 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -254,8 +254,8 @@ int cpci_led_off(struct slot* slot)
254 254
255int cpci_configure_slot(struct slot* slot) 255int cpci_configure_slot(struct slot* slot)
256{ 256{
257 unsigned char busnr; 257 struct pci_bus *parent;
258 struct pci_bus *child; 258 int fn;
259 259
260 dbg("%s - enter", __FUNCTION__); 260 dbg("%s - enter", __FUNCTION__);
261 261
@@ -276,23 +276,53 @@ int cpci_configure_slot(struct slot* slot)
276 */ 276 */
277 n = pci_scan_slot(slot->bus, slot->devfn); 277 n = pci_scan_slot(slot->bus, slot->devfn);
278 dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n); 278 dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n);
279 if (n > 0)
280 pci_bus_add_devices(slot->bus);
281 slot->dev = pci_get_slot(slot->bus, slot->devfn); 279 slot->dev = pci_get_slot(slot->bus, slot->devfn);
282 if (slot->dev == NULL) { 280 if (slot->dev == NULL) {
283 err("Could not find PCI device for slot %02x", slot->number); 281 err("Could not find PCI device for slot %02x", slot->number);
284 return 1; 282 return -ENODEV;
285 } 283 }
286 } 284 }
287 285 parent = slot->dev->bus;
288 if (slot->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 286
289 pci_read_config_byte(slot->dev, PCI_SECONDARY_BUS, &busnr); 287 for (fn = 0; fn < 8; fn++) {
290 child = pci_add_new_bus(slot->dev->bus, slot->dev, busnr); 288 struct pci_dev *dev;
291 pci_do_scan_bus(child); 289
292 pci_bus_size_bridges(child); 290 dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
291 if (!dev)
292 continue;
293 if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
294 (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
295 /* Find an unused bus number for the new bridge */
296 struct pci_bus *child;
297 unsigned char busnr, start = parent->secondary;
298 unsigned char end = parent->subordinate;
299
300 for (busnr = start; busnr <= end; busnr++) {
301 if (!pci_find_bus(pci_domain_nr(parent),
302 busnr))
303 break;
304 }
305 if (busnr >= end) {
306 err("No free bus for hot-added bridge\n");
307 pci_dev_put(dev);
308 continue;
309 }
310 child = pci_add_new_bus(parent, dev, busnr);
311 if (!child) {
312 err("Cannot add new bus for %s\n",
313 pci_name(dev));
314 pci_dev_put(dev);
315 continue;
316 }
317 child->subordinate = pci_do_scan_bus(child);
318 pci_bus_size_bridges(child);
319 }
320 pci_dev_put(dev);
293 } 321 }
294 322
295 pci_bus_assign_resources(slot->dev->bus); 323 pci_bus_assign_resources(parent);
324 pci_bus_add_devices(parent);
325 pci_enable_bridges(parent);
296 326
297 dbg("%s - exit", __FUNCTION__); 327 dbg("%s - exit", __FUNCTION__);
298 return 0; 328 return 0;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 10e1a905c144..474e9cd0e9e4 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -139,9 +139,8 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
139/** 139/**
140 * pci_match_device - Tell if a PCI device structure has a matching 140 * pci_match_device - Tell if a PCI device structure has a matching
141 * PCI device id structure 141 * PCI device id structure
142 * @ids: array of PCI device id structures to search in
143 * @dev: the PCI device structure to match against
144 * @drv: the PCI driver to match against 142 * @drv: the PCI driver to match against
143 * @dev: the PCI device structure to match against
145 * 144 *
146 * Used by a driver to check whether a PCI device present in the 145 * Used by a driver to check whether a PCI device present in the
147 * system is in its list of supported devices. Returns the matching 146 * system is in its list of supported devices. Returns the matching
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fb08bc951ac0..73177429fe74 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -438,6 +438,7 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
438 pci_read_config_dword(dev, 0x48, &region); 438 pci_read_config_dword(dev, 0x48, &region);
439 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); 439 quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
440} 440}
441DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi );
441DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi ); 442DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi );
442 443
443/* 444/*
@@ -1091,7 +1092,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu
1091DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); 1092DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
1092DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); 1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
1093DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); 1094DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
1094DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc );
1095 1095
1096static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) 1096static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
1097{ 1097{
@@ -1518,6 +1518,63 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
1518} 1518}
1519DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); 1519DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
1520 1520
1521static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
1522{
1523 u16 command;
1524 u32 bar;
1525 u8 __iomem *csr;
1526 u8 cmd_hi;
1527
1528 switch (dev->device) {
1529 /* PCI IDs taken from drivers/net/e100.c */
1530 case 0x1029:
1531 case 0x1030 ... 0x1034:
1532 case 0x1038 ... 0x103E:
1533 case 0x1050 ... 0x1057:
1534 case 0x1059:
1535 case 0x1064 ... 0x106B:
1536 case 0x1091 ... 0x1095:
1537 case 0x1209:
1538 case 0x1229:
1539 case 0x2449:
1540 case 0x2459:
1541 case 0x245D:
1542 case 0x27DC:
1543 break;
1544 default:
1545 return;
1546 }
1547
1548 /*
1549 * Some firmware hands off the e100 with interrupts enabled,
1550 * which can cause a flood of interrupts if packets are
1551 * received before the driver attaches to the device. So
1552 * disable all e100 interrupts here. The driver will
1553 * re-enable them when it's ready.
1554 */
1555 pci_read_config_word(dev, PCI_COMMAND, &command);
1556 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
1557
1558 if (!(command & PCI_COMMAND_MEMORY) || !bar)
1559 return;
1560
1561 csr = ioremap(bar, 8);
1562 if (!csr) {
1563 printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
1564 pci_name(dev));
1565 return;
1566 }
1567
1568 cmd_hi = readb(csr + 3);
1569 if (cmd_hi == 0) {
1570 printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts "
1571 "enabled, disabling\n", pci_name(dev));
1572 writeb(1, csr + 3);
1573 }
1574
1575 iounmap(csr);
1576}
1577DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
1521 1578
1522static void __devinit fixup_rev1_53c810(struct pci_dev* dev) 1579static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
1523{ 1580{
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index d6d1bff52b8e..2c7de79c83b9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -69,12 +69,12 @@ static void s3c_rtc_setaie(int to)
69 69
70 pr_debug("%s: aie=%d\n", __FUNCTION__, to); 70 pr_debug("%s: aie=%d\n", __FUNCTION__, to);
71 71
72 tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; 72 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
73 73
74 if (to) 74 if (to)
75 tmp |= S3C2410_RTCALM_ALMEN; 75 tmp |= S3C2410_RTCALM_ALMEN;
76 76
77 writeb(tmp, S3C2410_RTCALM); 77 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
78} 78}
79 79
80static void s3c_rtc_setpie(int to) 80static void s3c_rtc_setpie(int to)
@@ -84,12 +84,12 @@ static void s3c_rtc_setpie(int to)
84 pr_debug("%s: pie=%d\n", __FUNCTION__, to); 84 pr_debug("%s: pie=%d\n", __FUNCTION__, to);
85 85
86 spin_lock_irq(&s3c_rtc_pie_lock); 86 spin_lock_irq(&s3c_rtc_pie_lock);
87 tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE; 87 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
88 88
89 if (to) 89 if (to)
90 tmp |= S3C2410_TICNT_ENABLE; 90 tmp |= S3C2410_TICNT_ENABLE;
91 91
92 writeb(tmp, S3C2410_TICNT); 92 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
93 spin_unlock_irq(&s3c_rtc_pie_lock); 93 spin_unlock_irq(&s3c_rtc_pie_lock);
94} 94}
95 95
@@ -98,13 +98,13 @@ static void s3c_rtc_setfreq(int freq)
98 unsigned int tmp; 98 unsigned int tmp;
99 99
100 spin_lock_irq(&s3c_rtc_pie_lock); 100 spin_lock_irq(&s3c_rtc_pie_lock);
101 tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE; 101 tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
102 102
103 s3c_rtc_freq = freq; 103 s3c_rtc_freq = freq;
104 104
105 tmp |= (128 / freq)-1; 105 tmp |= (128 / freq)-1;
106 106
107 writeb(tmp, S3C2410_TICNT); 107 writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
108 spin_unlock_irq(&s3c_rtc_pie_lock); 108 spin_unlock_irq(&s3c_rtc_pie_lock);
109} 109}
110 110
@@ -113,14 +113,15 @@ static void s3c_rtc_setfreq(int freq)
113static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) 113static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
114{ 114{
115 unsigned int have_retried = 0; 115 unsigned int have_retried = 0;
116 void __iomem *base = s3c_rtc_base;
116 117
117 retry_get_time: 118 retry_get_time:
118 rtc_tm->tm_min = readb(S3C2410_RTCMIN); 119 rtc_tm->tm_min = readb(base + S3C2410_RTCMIN);
119 rtc_tm->tm_hour = readb(S3C2410_RTCHOUR); 120 rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR);
120 rtc_tm->tm_mday = readb(S3C2410_RTCDATE); 121 rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE);
121 rtc_tm->tm_mon = readb(S3C2410_RTCMON); 122 rtc_tm->tm_mon = readb(base + S3C2410_RTCMON);
122 rtc_tm->tm_year = readb(S3C2410_RTCYEAR); 123 rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR);
123 rtc_tm->tm_sec = readb(S3C2410_RTCSEC); 124 rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC);
124 125
125 /* the only way to work out wether the system was mid-update 126 /* the only way to work out wether the system was mid-update
126 * when we read it is to check the second counter, and if it 127 * when we read it is to check the second counter, and if it
@@ -151,17 +152,26 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
151 152
152static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) 153static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
153{ 154{
154 /* the rtc gets round the y2k problem by just not supporting it */ 155 void __iomem *base = s3c_rtc_base;
156 int year = tm->tm_year - 100;
155 157
156 if (tm->tm_year < 100) 158 pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
159 tm->tm_year, tm->tm_mon, tm->tm_mday,
160 tm->tm_hour, tm->tm_min, tm->tm_sec);
161
162 /* we get around y2k by simply not supporting it */
163
164 if (year < 0 || year >= 100) {
165 dev_err(dev, "rtc only supports 100 years\n");
157 return -EINVAL; 166 return -EINVAL;
167 }
158 168
159 writeb(BIN2BCD(tm->tm_sec), S3C2410_RTCSEC); 169 writeb(BIN2BCD(tm->tm_sec), base + S3C2410_RTCSEC);
160 writeb(BIN2BCD(tm->tm_min), S3C2410_RTCMIN); 170 writeb(BIN2BCD(tm->tm_min), base + S3C2410_RTCMIN);
161 writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR); 171 writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR);
162 writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE); 172 writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE);
163 writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON); 173 writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON);
164 writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR); 174 writeb(BIN2BCD(year), base + S3C2410_RTCYEAR);
165 175
166 return 0; 176 return 0;
167} 177}
@@ -169,16 +179,17 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
169static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) 179static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
170{ 180{
171 struct rtc_time *alm_tm = &alrm->time; 181 struct rtc_time *alm_tm = &alrm->time;
182 void __iomem *base = s3c_rtc_base;
172 unsigned int alm_en; 183 unsigned int alm_en;
173 184
174 alm_tm->tm_sec = readb(S3C2410_ALMSEC); 185 alm_tm->tm_sec = readb(base + S3C2410_ALMSEC);
175 alm_tm->tm_min = readb(S3C2410_ALMMIN); 186 alm_tm->tm_min = readb(base + S3C2410_ALMMIN);
176 alm_tm->tm_hour = readb(S3C2410_ALMHOUR); 187 alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR);
177 alm_tm->tm_mon = readb(S3C2410_ALMMON); 188 alm_tm->tm_mon = readb(base + S3C2410_ALMMON);
178 alm_tm->tm_mday = readb(S3C2410_ALMDATE); 189 alm_tm->tm_mday = readb(base + S3C2410_ALMDATE);
179 alm_tm->tm_year = readb(S3C2410_ALMYEAR); 190 alm_tm->tm_year = readb(base + S3C2410_ALMYEAR);
180 191
181 alm_en = readb(S3C2410_RTCALM); 192 alm_en = readb(base + S3C2410_RTCALM);
182 193
183 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", 194 pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
184 alm_en, 195 alm_en,
@@ -226,6 +237,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
226static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) 237static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
227{ 238{
228 struct rtc_time *tm = &alrm->time; 239 struct rtc_time *tm = &alrm->time;
240 void __iomem *base = s3c_rtc_base;
229 unsigned int alrm_en; 241 unsigned int alrm_en;
230 242
231 pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n", 243 pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
@@ -234,32 +246,32 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
234 tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec); 246 tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
235 247
236 248
237 alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; 249 alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
238 writeb(0x00, S3C2410_RTCALM); 250 writeb(0x00, base + S3C2410_RTCALM);
239 251
240 if (tm->tm_sec < 60 && tm->tm_sec >= 0) { 252 if (tm->tm_sec < 60 && tm->tm_sec >= 0) {
241 alrm_en |= S3C2410_RTCALM_SECEN; 253 alrm_en |= S3C2410_RTCALM_SECEN;
242 writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC); 254 writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC);
243 } 255 }
244 256
245 if (tm->tm_min < 60 && tm->tm_min >= 0) { 257 if (tm->tm_min < 60 && tm->tm_min >= 0) {
246 alrm_en |= S3C2410_RTCALM_MINEN; 258 alrm_en |= S3C2410_RTCALM_MINEN;
247 writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN); 259 writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN);
248 } 260 }
249 261
250 if (tm->tm_hour < 24 && tm->tm_hour >= 0) { 262 if (tm->tm_hour < 24 && tm->tm_hour >= 0) {
251 alrm_en |= S3C2410_RTCALM_HOUREN; 263 alrm_en |= S3C2410_RTCALM_HOUREN;
252 writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR); 264 writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR);
253 } 265 }
254 266
255 pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en); 267 pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
256 268
257 writeb(alrm_en, S3C2410_RTCALM); 269 writeb(alrm_en, base + S3C2410_RTCALM);
258 270
259 if (0) { 271 if (0) {
260 alrm_en = readb(S3C2410_RTCALM); 272 alrm_en = readb(base + S3C2410_RTCALM);
261 alrm_en &= ~S3C2410_RTCALM_ALMEN; 273 alrm_en &= ~S3C2410_RTCALM_ALMEN;
262 writeb(alrm_en, S3C2410_RTCALM); 274 writeb(alrm_en, base + S3C2410_RTCALM);
263 disable_irq_wake(s3c_rtc_alarmno); 275 disable_irq_wake(s3c_rtc_alarmno);
264 } 276 }
265 277
@@ -319,8 +331,8 @@ static int s3c_rtc_ioctl(struct device *dev,
319 331
320static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) 332static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
321{ 333{
322 unsigned int rtcalm = readb(S3C2410_RTCALM); 334 unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM);
323 unsigned int ticnt = readb (S3C2410_TICNT); 335 unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
324 336
325 seq_printf(seq, "alarm_IRQ\t: %s\n", 337 seq_printf(seq, "alarm_IRQ\t: %s\n",
326 (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" ); 338 (rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" );
@@ -387,39 +399,40 @@ static struct rtc_class_ops s3c_rtcops = {
387 399
388static void s3c_rtc_enable(struct platform_device *pdev, int en) 400static void s3c_rtc_enable(struct platform_device *pdev, int en)
389{ 401{
402 void __iomem *base = s3c_rtc_base;
390 unsigned int tmp; 403 unsigned int tmp;
391 404
392 if (s3c_rtc_base == NULL) 405 if (s3c_rtc_base == NULL)
393 return; 406 return;
394 407
395 if (!en) { 408 if (!en) {
396 tmp = readb(S3C2410_RTCCON); 409 tmp = readb(base + S3C2410_RTCCON);
397 writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON); 410 writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON);
398 411
399 tmp = readb(S3C2410_TICNT); 412 tmp = readb(base + S3C2410_TICNT);
400 writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT); 413 writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT);
401 } else { 414 } else {
402 /* re-enable the device, and check it is ok */ 415 /* re-enable the device, and check it is ok */
403 416
404 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){ 417 if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
405 dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); 418 dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
406 419
407 tmp = readb(S3C2410_RTCCON); 420 tmp = readb(base + S3C2410_RTCCON);
408 writeb(tmp | S3C2410_RTCCON_RTCEN , S3C2410_RTCCON); 421 writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
409 } 422 }
410 423
411 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){ 424 if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
412 dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n"); 425 dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
413 426
414 tmp = readb(S3C2410_RTCCON); 427 tmp = readb(base + S3C2410_RTCCON);
415 writeb(tmp& ~S3C2410_RTCCON_CNTSEL , S3C2410_RTCCON); 428 writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
416 } 429 }
417 430
418 if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){ 431 if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
419 dev_info(&pdev->dev, "removing RTCCON_CLKRST\n"); 432 dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
420 433
421 tmp = readb(S3C2410_RTCCON); 434 tmp = readb(base + S3C2410_RTCCON);
422 writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON); 435 writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
423 } 436 }
424 } 437 }
425} 438}
@@ -475,8 +488,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
475 } 488 }
476 489
477 s3c_rtc_mem = request_mem_region(res->start, 490 s3c_rtc_mem = request_mem_region(res->start,
478 res->end-res->start+1, 491 res->end-res->start+1,
479 pdev->name); 492 pdev->name);
480 493
481 if (s3c_rtc_mem == NULL) { 494 if (s3c_rtc_mem == NULL) {
482 dev_err(&pdev->dev, "failed to reserve memory region\n"); 495 dev_err(&pdev->dev, "failed to reserve memory region\n");
@@ -495,7 +508,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
495 508
496 s3c_rtc_enable(pdev, 1); 509 s3c_rtc_enable(pdev, 1);
497 510
498 pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON)); 511 pr_debug("s3c2410_rtc: RTCCON=%02x\n",
512 readb(s3c_rtc_base + S3C2410_RTCCON));
499 513
500 s3c_rtc_setfreq(s3c_rtc_freq); 514 s3c_rtc_setfreq(s3c_rtc_freq);
501 515
@@ -543,7 +557,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
543 557
544 /* save TICNT for anyone using periodic interrupts */ 558 /* save TICNT for anyone using periodic interrupts */
545 559
546 ticnt_save = readb(S3C2410_TICNT); 560 ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
547 561
548 /* calculate time delta for suspend */ 562 /* calculate time delta for suspend */
549 563
@@ -567,7 +581,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
567 rtc_tm_to_time(&tm, &time.tv_sec); 581 rtc_tm_to_time(&tm, &time.tv_sec);
568 restore_time_delta(&s3c_rtc_delta, &time); 582 restore_time_delta(&s3c_rtc_delta, &time);
569 583
570 writeb(ticnt_save, S3C2410_TICNT); 584 writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
571 return 0; 585 return 0;
572} 586}
573#else 587#else
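Throughout the rtc-s3c changes above, registers are addressed as an ioremapped base plus an offset instead of through absolute register macros, so the driver keeps working wherever the mapping ends up. A user-space flavoured sketch of the base-plus-offset accessor pattern (the offsets and the way the base is obtained are illustrative assumptions):

#include <stdint.h>

#define REG_RTCCON      0x40
#define REG_TICNT       0x44

static volatile uint8_t *rtc_base;      /* set up elsewhere, e.g. via mmap() */

static uint8_t reg_read(unsigned int off)
{
        return rtc_base[off];
}

static void reg_write(unsigned int off, uint8_t val)
{
        rtc_base[off] = val;
}

/* Enable the RTC with a read-modify-write on RTCCON, always going
 * through the base pointer rather than a fixed virtual address. */
static void rtc_enable(void)
{
        reg_write(REG_RTCCON, reg_read(REG_RTCCON) | 0x01);
}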
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 9d0c6e1a0e66..9af02c79ce8a 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -54,11 +54,11 @@ struct dasd_devmap {
54 */ 54 */
55struct dasd_server_ssid_map { 55struct dasd_server_ssid_map {
56 struct list_head list; 56 struct list_head list;
57 struct server_id { 57 struct system_id {
58 char vendor[4]; 58 char vendor[4];
59 char serial[15]; 59 char serial[15];
60 __u16 ssid;
60 } sid; 61 } sid;
61 __u16 ssid;
62}; 62};
63 63
64static struct list_head dasd_server_ssid_list; 64static struct list_head dasd_server_ssid_list;
@@ -904,14 +904,14 @@ dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
904 return -ENOMEM; 904 return -ENOMEM;
905 strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1); 905 strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
906 strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1); 906 strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
907 srv->ssid = uid->ssid; 907 srv->sid.ssid = uid->ssid;
908 908
909 /* server is already contained ? */ 909 /* server is already contained ? */
910 spin_lock(&dasd_devmap_lock); 910 spin_lock(&dasd_devmap_lock);
911 devmap->uid = *uid; 911 devmap->uid = *uid;
912 list_for_each_entry(tmp, &dasd_server_ssid_list, list) { 912 list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
913 if (!memcmp(&srv->sid, &tmp->sid, 913 if (!memcmp(&srv->sid, &tmp->sid,
914 sizeof(struct dasd_server_ssid_map))) { 914 sizeof(struct system_id))) {
915 kfree(srv); 915 kfree(srv);
916 srv = NULL; 916 srv = NULL;
917 break; 917 break;
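
The devmap hunks fold the SSID into the embedded identifier, so a single memcmp over the renamed struct system_id now matches vendor, serial and SSID together, instead of comparing the whole dasd_server_ssid_map (which also covered the list_head). A hedged userspace model of that comparison; the kernel entry is allocated zeroed, which is what makes comparing the full struct, padding included, safe:

#include <string.h>

struct system_id {
    char vendor[4];
    char serial[15];
    unsigned short ssid;    /* moved inside the struct by this patch */
};

/* Two devices belong to the same storage server only if vendor, serial
 * and SSID all match; comparing the whole struct assumes both copies
 * were zero-initialised so padding bytes compare equal as well. */
static int same_server(const struct system_id *a, const struct system_id *b)
{
    return memcmp(a, b, sizeof(*a)) == 0;
}
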
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 957ed5db98e4..b7a7fac3f7c3 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device)
607 * Valide storage server of current device. 607 * Valide storage server of current device.
608 */ 608 */
609static int 609static int
610dasd_eckd_validate_server(struct dasd_device *device) 610dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
611{ 611{
612 int rc; 612 int rc;
613 613
@@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device)
616 return 0; 616 return 0;
617 617
618 rc = dasd_eckd_psf_ssc(device); 618 rc = dasd_eckd_psf_ssc(device);
619 if (rc) 619 /* may be requested feature is not available on server,
620 /* may be requested feature is not available on server, 620 * therefore just report error and go ahead */
621 * therefore just report error and go ahead */ 621 DEV_MESSAGE(KERN_INFO, device,
622 DEV_MESSAGE(KERN_INFO, device, 622 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
623 "Perform Subsystem Function returned rc=%d", rc); 623 uid->vendor, uid->serial, uid->ssid, rc);
624 /* RE-Read Configuration Data */ 624 /* RE-Read Configuration Data */
625 return dasd_eckd_read_conf(device); 625 return dasd_eckd_read_conf(device);
626} 626}
@@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
666 return rc; 666 return rc;
667 rc = dasd_set_uid(device->cdev, &uid); 667 rc = dasd_set_uid(device->cdev, &uid);
668 if (rc == 1) /* new server found */ 668 if (rc == 1) /* new server found */
669 rc = dasd_eckd_validate_server(device); 669 rc = dasd_eckd_validate_server(device, &uid);
670 if (rc) 670 if (rc)
671 return rc; 671 return rc;
672 672
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cd789b8acd4..adc9d8f2c28f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count)
112 printk("\n"); 112 printk("\n");
113} 113}
114 114
115
116/****************************************************************/
117/****** Functions to handle the request ID hash table ********/
118/****************************************************************/
119
120#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
121
122static int zfcp_reqlist_init(struct zfcp_adapter *adapter)
123{
124 int i;
125
126 adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
127 GFP_KERNEL);
128
129 if (!adapter->req_list)
130 return -ENOMEM;
131
132 for (i=0; i<REQUEST_LIST_SIZE; i++)
133 INIT_LIST_HEAD(&adapter->req_list[i]);
134
135 return 0;
136}
137
138static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
139{
140 struct zfcp_fsf_req *request, *tmp;
141 unsigned int i;
142
143 for (i=0; i<REQUEST_LIST_SIZE; i++) {
144 if (list_empty(&adapter->req_list[i]))
145 continue;
146
147 list_for_each_entry_safe(request, tmp,
148 &adapter->req_list[i], list)
149 list_del(&request->list);
150 }
151
152 kfree(adapter->req_list);
153}
154
155void zfcp_reqlist_add(struct zfcp_adapter *adapter,
156 struct zfcp_fsf_req *fsf_req)
157{
158 unsigned int i;
159
160 i = fsf_req->req_id % REQUEST_LIST_SIZE;
161 list_add_tail(&fsf_req->list, &adapter->req_list[i]);
162}
163
164void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
165{
166 struct zfcp_fsf_req *request, *tmp;
167 unsigned int i, counter;
168 u64 dbg_tmp[2];
169
170 i = req_id % REQUEST_LIST_SIZE;
171 BUG_ON(list_empty(&adapter->req_list[i]));
172
173 counter = 0;
174 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
175 if (request->req_id == req_id) {
176 dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
177 dbg_tmp[1] = (u64) counter;
178 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
179 list_del(&request->list);
180 break;
181 }
182 counter++;
183 }
184}
185
186struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
187 unsigned long req_id)
188{
189 struct zfcp_fsf_req *request, *tmp;
190 unsigned int i;
191
192 i = req_id % REQUEST_LIST_SIZE;
193
194 list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
195 if (request->req_id == req_id)
196 return request;
197
198 return NULL;
199}
200
201int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
202{
203 unsigned int i;
204
205 for (i=0; i<REQUEST_LIST_SIZE; i++)
206 if (!list_empty(&adapter->req_list[i]))
207 return 0;
208
209 return 1;
210}
211
212#undef ZFCP_LOG_AREA
213
115/****************************************************************/ 214/****************************************************************/
116/************** Uncategorised Functions *************************/ 215/************** Uncategorised Functions *************************/
117/****************************************************************/ 216/****************************************************************/
@@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
961 INIT_LIST_HEAD(&adapter->port_remove_lh); 1060 INIT_LIST_HEAD(&adapter->port_remove_lh);
962 1061
963 /* initialize list of fsf requests */ 1062 /* initialize list of fsf requests */
964 spin_lock_init(&adapter->fsf_req_list_lock); 1063 spin_lock_init(&adapter->req_list_lock);
965 INIT_LIST_HEAD(&adapter->fsf_req_list_head); 1064 retval = zfcp_reqlist_init(adapter);
1065 if (retval) {
1066 ZFCP_LOG_INFO("request list initialization failed\n");
1067 goto failed_low_mem_buffers;
1068 }
966 1069
967 /* initialize debug locks */ 1070 /* initialize debug locks */
968 1071
@@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
1041 * !0 - struct zfcp_adapter data structure could not be removed 1144 * !0 - struct zfcp_adapter data structure could not be removed
1042 * (e.g. still used) 1145 * (e.g. still used)
1043 * locks: adapter list write lock is assumed to be held by caller 1146 * locks: adapter list write lock is assumed to be held by caller
1044 * adapter->fsf_req_list_lock is taken and released within this
1045 * function and must not be held on entry
1046 */ 1147 */
1047void 1148void
1048zfcp_adapter_dequeue(struct zfcp_adapter *adapter) 1149zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
@@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1054 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); 1155 zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
1055 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 1156 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
1056 /* sanity check: no pending FSF requests */ 1157 /* sanity check: no pending FSF requests */
1057 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 1158 spin_lock_irqsave(&adapter->req_list_lock, flags);
1058 retval = !list_empty(&adapter->fsf_req_list_head); 1159 retval = zfcp_reqlist_isempty(adapter);
1059 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 1160 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
1060 if (retval) { 1161 if (!retval) {
1061 ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " 1162 ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
1062 "%i requests outstanding\n", 1163 "%i requests outstanding\n",
1063 zfcp_get_busid_by_adapter(adapter), adapter, 1164 zfcp_get_busid_by_adapter(adapter), adapter,
1064 atomic_read(&adapter->fsf_reqs_active)); 1165 atomic_read(&adapter->reqs_active));
1065 retval = -EBUSY; 1166 retval = -EBUSY;
1066 goto out; 1167 goto out;
1067 } 1168 }
@@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
1087 zfcp_free_low_mem_buffers(adapter); 1188 zfcp_free_low_mem_buffers(adapter);
1088 /* free memory of adapter data structure and queues */ 1189 /* free memory of adapter data structure and queues */
1089 zfcp_qdio_free_queues(adapter); 1190 zfcp_qdio_free_queues(adapter);
1191 zfcp_reqlist_free(adapter);
1090 kfree(adapter->fc_stats); 1192 kfree(adapter->fc_stats);
1091 kfree(adapter->stats_reset_data); 1193 kfree(adapter->stats_reset_data);
1092 ZFCP_LOG_TRACE("freeing adapter structure\n"); 1194 ZFCP_LOG_TRACE("freeing adapter structure\n");
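
The zfcp_aux.c hunk above replaces the single flat list of outstanding FSF requests with REQUEST_LIST_SIZE list heads indexed by request ID, so completion only has to walk one short bucket rather than every pending request. A self-contained sketch of the bucketing; only REQUEST_LIST_SIZE and the modulo hash are taken from the patch, the rest is illustrative:

#include <stdio.h>

#define REQUEST_LIST_SIZE 128

/* Same hash zfcp_reqlist_add()/zfcp_reqlist_ismember() use above:
 * the request ID modulo the number of buckets. */
static unsigned int reqlist_bucket(unsigned long req_id)
{
    return (unsigned int)(req_id % REQUEST_LIST_SIZE);
}

int main(void)
{
    /* IDs are handed out sequentially from adapter->req_no, so
     * consecutive requests spread evenly over the buckets. */
    for (unsigned long id = 126; id < 131; id++)
        printf("req_id %lu -> bucket %u\n", id, reqlist_bucket(id));
    return 0;
}
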
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 57d8e4bfb8d9..fdabadeaa9ee 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
164 retval = zfcp_adapter_scsi_register(adapter); 164 retval = zfcp_adapter_scsi_register(adapter);
165 if (retval) 165 if (retval)
166 goto out_scsi_register; 166 goto out_scsi_register;
167
168 /* initialize request counter */
169 BUG_ON(!zfcp_reqlist_isempty(adapter));
170 adapter->req_no = 0;
171
167 zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, 172 zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
168 ZFCP_SET); 173 ZFCP_SET);
169 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); 174 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2df512a18e2c..94d1b74db356 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -52,7 +52,7 @@
52/********************* GENERAL DEFINES *********************************/ 52/********************* GENERAL DEFINES *********************************/
53 53
54/* zfcp version number, it consists of major, minor, and patch-level number */ 54/* zfcp version number, it consists of major, minor, and patch-level number */
55#define ZFCP_VERSION "4.7.0" 55#define ZFCP_VERSION "4.8.0"
56 56
57/** 57/**
58 * zfcp_sg_to_address - determine kernel address from struct scatterlist 58 * zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
80#define REQUEST_LIST_SIZE 128 80#define REQUEST_LIST_SIZE 128
81 81
82/********************* SCSI SPECIFIC DEFINES *********************************/ 82/********************* SCSI SPECIFIC DEFINES *********************************/
83#define ZFCP_SCSI_ER_TIMEOUT (100*HZ) 83#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
84 84
85/********************* CIO/QDIO SPECIFIC DEFINES *****************************/ 85/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
86 86
@@ -886,11 +886,11 @@ struct zfcp_adapter {
886 struct list_head port_remove_lh; /* head of ports to be 886 struct list_head port_remove_lh; /* head of ports to be
887 removed */ 887 removed */
888 u32 ports; /* number of remote ports */ 888 u32 ports; /* number of remote ports */
889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */ 889 struct timer_list scsi_er_timer; /* SCSI err recovery watch */
890 struct list_head fsf_req_list_head; /* head of FSF req list */ 890 atomic_t reqs_active; /* # active FSF reqs */
891 spinlock_t fsf_req_list_lock; /* lock for ops on list of 891 unsigned long req_no; /* unique FSF req number */
892 FSF requests */ 892 struct list_head *req_list; /* list of pending reqs */
893 atomic_t fsf_reqs_active; /* # active FSF reqs */ 893 spinlock_t req_list_lock; /* request list lock */
894 struct zfcp_qdio_queue request_queue; /* request queue */ 894 struct zfcp_qdio_queue request_queue; /* request queue */
895 u32 fsf_req_seq_no; /* FSF cmnd seq number */ 895 u32 fsf_req_seq_no; /* FSF cmnd seq number */
896 wait_queue_head_t request_wq; /* can be used to wait for 896 wait_queue_head_t request_wq; /* can be used to wait for
@@ -986,6 +986,7 @@ struct zfcp_unit {
986/* FSF request */ 986/* FSF request */
987struct zfcp_fsf_req { 987struct zfcp_fsf_req {
988 struct list_head list; /* list of FSF requests */ 988 struct list_head list; /* list of FSF requests */
989 unsigned long req_id; /* unique request ID */
989 struct zfcp_adapter *adapter; /* adapter request belongs to */ 990 struct zfcp_adapter *adapter; /* adapter request belongs to */
990 u8 sbal_number; /* nr of SBALs free for use */ 991 u8 sbal_number; /* nr of SBALs free for use */
991 u8 sbal_first; /* first SBAL for this request */ 992 u8 sbal_first; /* first SBAL for this request */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 8ec8da0beaa8..7f60b6fdf724 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); 64static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); 65static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); 66static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
67static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); 67static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
68static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); 68static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); 69static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 70static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 71static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); 93static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); 94static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
95 95
96static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); 96static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
97static int zfcp_erp_action_dismiss_port(struct zfcp_port *); 97static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
98static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *); 98static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
99static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
100 99
101static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, 100static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
102 struct zfcp_port *, struct zfcp_unit *); 101 struct zfcp_port *, struct zfcp_unit *);
@@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data)
135 zfcp_erp_adapter_reopen(adapter, 0); 134 zfcp_erp_adapter_reopen(adapter, 0);
136} 135}
137 136
138/* 137/**
139 * function: zfcp_fsf_scsi_er_timeout_handler 138 * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks
140 * 139 *
141 * purpose: This function needs to be called whenever a SCSI error recovery 140 * This function needs to be called whenever a SCSI error recovery
142 * action (abort/reset) does not return. 141 * action (abort/reset) does not return. Re-opening the adapter means
143 * Re-opening the adapter means that the command can be returned 142 * that the abort/reset command can be returned by zfcp. It won't complete
144 * by zfcp (it is guarranteed that it does not return via the 143 * via the adapter anymore (because qdio queues are closed). If ERP is
145 * adapter anymore). The buffer can then be used again. 144 * already running on this adapter it will be stopped.
146 *
147 * returns: sod all
148 */ 145 */
149void 146void zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
150zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
151{ 147{
152 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; 148 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
149 unsigned long flags;
153 150
154 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " 151 ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
155 "Restarting all operations on the adapter %s\n", 152 "Restarting all operations on the adapter %s\n",
156 zfcp_get_busid_by_adapter(adapter)); 153 zfcp_get_busid_by_adapter(adapter));
157 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); 154 debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
158 zfcp_erp_adapter_reopen(adapter, 0);
159 155
160 return; 156 write_lock_irqsave(&adapter->erp_lock, flags);
157 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
158 &adapter->status)) {
159 zfcp_erp_modify_adapter_status(adapter,
160 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
161 ZFCP_CLEAR);
162 zfcp_erp_action_dismiss_adapter(adapter);
163 write_unlock_irqrestore(&adapter->erp_lock, flags);
164 /* dismiss all pending requests including requests for ERP */
165 zfcp_fsf_req_dismiss_all(adapter);
166 adapter->fsf_req_seq_no = 0;
167 } else
168 write_unlock_irqrestore(&adapter->erp_lock, flags);
169 zfcp_erp_adapter_reopen(adapter, 0);
161} 170}
162 171
163/* 172/*
@@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
670 return retval; 679 return retval;
671} 680}
672 681
673/* 682/**
674 * function: 683 * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
675 *
676 * purpose: disable I/O,
677 * return any open requests and clean them up,
678 * aim: no pending and incoming I/O
679 *
680 * returns:
681 */ 684 */
682static void 685static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
683zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
684{ 686{
685 debug_text_event(adapter->erp_dbf, 6, "a_bl"); 687 debug_text_event(adapter->erp_dbf, 6, "a_bl");
686 zfcp_erp_modify_adapter_status(adapter, 688 zfcp_erp_modify_adapter_status(adapter,
@@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
688 clear_mask, ZFCP_CLEAR); 690 clear_mask, ZFCP_CLEAR);
689} 691}
690 692
691/* 693/**
692 * function: 694 * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
693 *
694 * purpose: enable I/O
695 *
696 * returns:
697 */ 695 */
698static void 696static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
699zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
700{ 697{
701 debug_text_event(adapter->erp_dbf, 6, "a_ubl"); 698 debug_text_event(adapter->erp_dbf, 6, "a_ubl");
702 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 699 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
@@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
848 struct zfcp_adapter *adapter = erp_action->adapter; 845 struct zfcp_adapter *adapter = erp_action->adapter;
849 846
850 if (erp_action->fsf_req) { 847 if (erp_action->fsf_req) {
851 /* take lock to ensure that request is not being deleted meanwhile */ 848 /* take lock to ensure that request is not deleted meanwhile */
852 spin_lock(&adapter->fsf_req_list_lock); 849 spin_lock(&adapter->req_list_lock);
853 /* check whether fsf req does still exist */ 850 if ((!zfcp_reqlist_ismember(adapter,
854 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) 851 erp_action->fsf_req->req_id)) &&
855 if (fsf_req == erp_action->fsf_req) 852 (fsf_req->erp_action == erp_action)) {
856 break;
857 if (fsf_req && (fsf_req->erp_action == erp_action)) {
858 /* fsf_req still exists */ 853 /* fsf_req still exists */
859 debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); 854 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
860 debug_event(adapter->erp_dbf, 3, &fsf_req, 855 debug_event(adapter->erp_dbf, 3, &fsf_req,
861 sizeof (unsigned long)); 856 sizeof (unsigned long));
862 /* dismiss fsf_req of timed out or dismissed erp_action */ 857 /* dismiss fsf_req of timed out/dismissed erp_action */
863 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | 858 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
864 ZFCP_STATUS_ERP_TIMEDOUT)) { 859 ZFCP_STATUS_ERP_TIMEDOUT)) {
865 debug_text_event(adapter->erp_dbf, 3, 860 debug_text_event(adapter->erp_dbf, 3,
@@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
892 */ 887 */
893 erp_action->fsf_req = NULL; 888 erp_action->fsf_req = NULL;
894 } 889 }
895 spin_unlock(&adapter->fsf_req_list_lock); 890 spin_unlock(&adapter->req_list_lock);
896 } else 891 } else
897 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); 892 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
898 893
899 return retval; 894 return retval;
900} 895}
901 896
902/* 897/**
903 * purpose: generic handler for asynchronous events related to erp_action events 898 * zfcp_erp_async_handler_nolock - complete erp_action
904 * (normal completion, time-out, dismissing, retry after
905 * low memory condition)
906 *
907 * note: deletion of timer is not required (e.g. in case of a time-out),
908 * but a second try does no harm,
909 * we leave it in here to allow for greater simplification
910 * 899 *
911 * returns: 0 - there was an action to handle 900 * Used for normal completion, time-out, dismissal and failure after
912 * !0 - otherwise 901 * low memory condition.
913 */ 902 */
914static int 903static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
915zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, 904 unsigned long set_mask)
916 unsigned long set_mask)
917{ 905{
918 int retval;
919 struct zfcp_adapter *adapter = erp_action->adapter; 906 struct zfcp_adapter *adapter = erp_action->adapter;
920 907
921 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { 908 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
@@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
926 del_timer(&erp_action->timer); 913 del_timer(&erp_action->timer);
927 erp_action->status |= set_mask; 914 erp_action->status |= set_mask;
928 zfcp_erp_action_ready(erp_action); 915 zfcp_erp_action_ready(erp_action);
929 retval = 0;
930 } else { 916 } else {
931 /* action is ready or gone - nothing to do */ 917 /* action is ready or gone - nothing to do */
932 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); 918 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
933 debug_event(adapter->erp_dbf, 3, &erp_action->action, 919 debug_event(adapter->erp_dbf, 3, &erp_action->action,
934 sizeof (int)); 920 sizeof (int));
935 retval = 1;
936 } 921 }
937
938 return retval;
939} 922}
940 923
941/* 924/**
942 * purpose: generic handler for asynchronous events related to erp_action 925 * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
943 * events (normal completion, time-out, dismissing, retry after
944 * low memory condition)
945 *
946 * note: deletion of timer is not required (e.g. in case of a time-out),
947 * but a second try does no harm,
948 * we leave it in here to allow for greater simplification
949 *
950 * returns: 0 - there was an action to handle
951 * !0 - otherwise
952 */ 926 */
953int 927void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
954zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, 928 unsigned long set_mask)
955 unsigned long set_mask)
956{ 929{
957 struct zfcp_adapter *adapter = erp_action->adapter; 930 struct zfcp_adapter *adapter = erp_action->adapter;
958 unsigned long flags; 931 unsigned long flags;
959 int retval;
960 932
961 write_lock_irqsave(&adapter->erp_lock, flags); 933 write_lock_irqsave(&adapter->erp_lock, flags);
962 retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); 934 zfcp_erp_async_handler_nolock(erp_action, set_mask);
963 write_unlock_irqrestore(&adapter->erp_lock, flags); 935 write_unlock_irqrestore(&adapter->erp_lock, flags);
964
965 return retval;
966} 936}
967 937
968/* 938/*
@@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data)
999 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); 969 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
1000} 970}
1001 971
1002/* 972/**
1003 * purpose: is called for an erp_action which needs to be ended 973 * zfcp_erp_action_dismiss - dismiss an erp_action
1004 * though not being done,
1005 * this is usually required if an higher is generated,
1006 * action gets an appropriate flag and will be processed
1007 * accordingly
1008 * 974 *
1009 * locks: erp_lock held (thus we need to call another handler variant) 975 * adapter->erp_lock must be held
976 *
977 * Dismissal of an erp_action is usually required if an erp_action of
978 * higher priority is generated.
1010 */ 979 */
1011static int 980static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1012zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1013{ 981{
1014 struct zfcp_adapter *adapter = erp_action->adapter; 982 struct zfcp_adapter *adapter = erp_action->adapter;
1015 983
@@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1017 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); 985 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
1018 986
1019 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); 987 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
1020
1021 return 0;
1022} 988}
1023 989
1024int 990int
@@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2074 return retval; 2040 return retval;
2075} 2041}
2076 2042
2077/* 2043/**
2078 * function: zfcp_qdio_cleanup 2044 * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
2079 *
2080 * purpose: cleans up QDIO operation for the specified adapter
2081 *
2082 * returns: 0 - successful cleanup
2083 * !0 - failed cleanup
2084 */ 2045 */
2085int 2046static void
2086zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) 2047zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2087{ 2048{
2088 int retval = ZFCP_ERP_SUCCEEDED;
2089 int first_used; 2049 int first_used;
2090 int used_count; 2050 int used_count;
2091 struct zfcp_adapter *adapter = erp_action->adapter; 2051 struct zfcp_adapter *adapter = erp_action->adapter;
@@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2094 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " 2054 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
2095 "queues on adapter %s\n", 2055 "queues on adapter %s\n",
2096 zfcp_get_busid_by_adapter(adapter)); 2056 zfcp_get_busid_by_adapter(adapter));
2097 retval = ZFCP_ERP_FAILED; 2057 return;
2098 goto out;
2099 } 2058 }
2100 2059
2101 /* 2060 /*
2102 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that 2061 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
2103 * do_QDIO won't be called while qdio_shutdown is in progress. 2062 * do_QDIO won't be called while qdio_shutdown is in progress.
2104 */ 2063 */
2105
2106 write_lock_irq(&adapter->request_queue.queue_lock); 2064 write_lock_irq(&adapter->request_queue.queue_lock);
2107 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 2065 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2108 write_unlock_irq(&adapter->request_queue.queue_lock); 2066 write_unlock_irq(&adapter->request_queue.queue_lock);
@@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2134 adapter->request_queue.free_index = 0; 2092 adapter->request_queue.free_index = 0;
2135 atomic_set(&adapter->request_queue.free_count, 0); 2093 atomic_set(&adapter->request_queue.free_count, 0);
2136 adapter->request_queue.distance_from_int = 0; 2094 adapter->request_queue.distance_from_int = 0;
2137 out:
2138 return retval;
2139} 2095}
2140 2096
2141static int 2097static int
@@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2258 "%s)\n", zfcp_get_busid_by_adapter(adapter)); 2214 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2259 ret = ZFCP_ERP_FAILED; 2215 ret = ZFCP_ERP_FAILED;
2260 } 2216 }
2261 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { 2217
2262 ZFCP_LOG_INFO("error: exchange port data failed (adapter " 2218 /* don't treat as error for the sake of compatibility */
2219 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
2220 ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
2263 "%s\n", zfcp_get_busid_by_adapter(adapter)); 2221 "%s\n", zfcp_get_busid_by_adapter(adapter));
2264 ret = ZFCP_ERP_FAILED;
2265 }
2266 2222
2267 return ret; 2223 return ret;
2268} 2224}
@@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2292 return retval; 2248 return retval;
2293} 2249}
2294 2250
2295/* 2251/**
2296 * function: zfcp_fsf_cleanup 2252 * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
2297 *
2298 * purpose: cleanup FSF operation for specified adapter
2299 *
2300 * returns: 0 - FSF operation successfully cleaned up
2301 * !0 - failed to cleanup FSF operation for this adapter
2302 */ 2253 */
2303static int 2254static void
2304zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) 2255zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2305{ 2256{
2306 int retval = ZFCP_ERP_SUCCEEDED;
2307 struct zfcp_adapter *adapter = erp_action->adapter; 2257 struct zfcp_adapter *adapter = erp_action->adapter;
2308 2258
2309 /* 2259 /*
@@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2317 /* all ports and units are closed */ 2267 /* all ports and units are closed */
2318 zfcp_erp_modify_adapter_status(adapter, 2268 zfcp_erp_modify_adapter_status(adapter,
2319 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 2269 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
2320
2321 return retval;
2322} 2270}
2323 2271
2324/* 2272/*
@@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3293} 3241}
3294 3242
3295 3243
3296static int 3244void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3297zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3298{ 3245{
3299 int retval = 0;
3300 struct zfcp_port *port; 3246 struct zfcp_port *port;
3301 3247
3302 debug_text_event(adapter->erp_dbf, 5, "a_actab"); 3248 debug_text_event(adapter->erp_dbf, 5, "a_actab");
@@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3305 else 3251 else
3306 list_for_each_entry(port, &adapter->port_list_head, list) 3252 list_for_each_entry(port, &adapter->port_list_head, list)
3307 zfcp_erp_action_dismiss_port(port); 3253 zfcp_erp_action_dismiss_port(port);
3308
3309 return retval;
3310} 3254}
3311 3255
3312static int 3256static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3313zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3314{ 3257{
3315 int retval = 0;
3316 struct zfcp_unit *unit; 3258 struct zfcp_unit *unit;
3317 struct zfcp_adapter *adapter = port->adapter; 3259 struct zfcp_adapter *adapter = port->adapter;
3318 3260
@@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3323 else 3265 else
3324 list_for_each_entry(unit, &port->unit_list_head, list) 3266 list_for_each_entry(unit, &port->unit_list_head, list)
3325 zfcp_erp_action_dismiss_unit(unit); 3267 zfcp_erp_action_dismiss_unit(unit);
3326
3327 return retval;
3328} 3268}
3329 3269
3330static int 3270static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3331zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3332{ 3271{
3333 int retval = 0;
3334 struct zfcp_adapter *adapter = unit->port->adapter; 3272 struct zfcp_adapter *adapter = unit->port->adapter;
3335 3273
3336 debug_text_event(adapter->erp_dbf, 5, "u_actab"); 3274 debug_text_event(adapter->erp_dbf, 5, "u_actab");
3337 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); 3275 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
3338 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) 3276 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
3339 zfcp_erp_action_dismiss(&unit->erp_action); 3277 zfcp_erp_action_dismiss(&unit->erp_action);
3340
3341 return retval;
3342} 3278}
3343 3279
3344static inline void 3280static inline void
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index d02366004cdd..146d7a2b4c4a 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
63extern void zfcp_qdio_free_queues(struct zfcp_adapter *); 63extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
64extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, 64extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
65 struct zfcp_fsf_req *); 65 struct zfcp_fsf_req *);
66extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
67 66
68extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req 67extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
69 (struct zfcp_fsf_req *, int, int); 68 (struct zfcp_fsf_req *, int, int);
@@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
140extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); 139extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
141extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); 140extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
142extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); 141extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
142extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
143 143
144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); 144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
145extern int zfcp_erp_port_reopen(struct zfcp_port *, int); 145extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *);
156extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 156extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
157extern int zfcp_erp_thread_kill(struct zfcp_adapter *); 157extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
158extern int zfcp_erp_wait(struct zfcp_adapter *); 158extern int zfcp_erp_wait(struct zfcp_adapter *);
159extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); 159extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
160 160
161extern int zfcp_test_link(struct zfcp_port *); 161extern int zfcp_test_link(struct zfcp_port *);
162 162
@@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
190 struct zfcp_fsf_req *); 190 struct zfcp_fsf_req *);
191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
192 struct scsi_cmnd *); 192 struct scsi_cmnd *);
193extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
194extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
195extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
196 unsigned long);
197extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
193 198
194#endif /* ZFCP_EXT_H */ 199#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 31db2b06faba..ff2eacf5ec8c 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
49static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, 49static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
50 struct fsf_link_down_info *); 50 struct fsf_link_down_info *);
51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); 51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
52static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
53 52
54/* association between FSF command and FSF QTCB type */ 53/* association between FSF command and FSF QTCB type */
55static u32 fsf_qtcb_type[] = { 54static u32 fsf_qtcb_type[] = {
@@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
146 kfree(fsf_req); 145 kfree(fsf_req);
147} 146}
148 147
149/* 148/**
150 * function: 149 * zfcp_fsf_req_dismiss - dismiss a single fsf request
151 *
152 * purpose:
153 *
154 * returns:
155 *
156 * note: qdio queues shall be down (no ongoing inbound processing)
157 */ 150 */
158int 151static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter,
159zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 152 struct zfcp_fsf_req *fsf_req,
153 unsigned int counter)
160{ 154{
161 struct zfcp_fsf_req *fsf_req, *tmp; 155 u64 dbg_tmp[2];
162 unsigned long flags;
163 LIST_HEAD(remove_queue);
164 156
165 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 157 dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
166 list_splice_init(&adapter->fsf_req_list_head, &remove_queue); 158 dbg_tmp[1] = (u64) counter;
167 atomic_set(&adapter->fsf_reqs_active, 0); 159 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
168 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 160 list_del(&fsf_req->list);
169 161 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
170 list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { 162 zfcp_fsf_req_complete(fsf_req);
171 list_del(&fsf_req->list);
172 zfcp_fsf_req_dismiss(fsf_req);
173 }
174
175 return 0;
176} 163}
177 164
178/* 165/**
179 * function: 166 * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests
180 *
181 * purpose:
182 *
183 * returns:
184 */ 167 */
185static void 168int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
186zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req)
187{ 169{
188 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 170 struct zfcp_fsf_req *request, *tmp;
189 zfcp_fsf_req_complete(fsf_req); 171 unsigned long flags;
172 unsigned int i, counter;
173
174 spin_lock_irqsave(&adapter->req_list_lock, flags);
175 atomic_set(&adapter->reqs_active, 0);
176 for (i=0; i<REQUEST_LIST_SIZE; i++) {
177 if (list_empty(&adapter->req_list[i]))
178 continue;
179
180 counter = 0;
181 list_for_each_entry_safe(request, tmp,
182 &adapter->req_list[i], list) {
183 zfcp_fsf_req_dismiss(adapter, request, counter);
184 counter++;
185 }
186 }
187 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
188
189 return 0;
190} 190}
191 191
192/* 192/*
@@ -4592,12 +4592,14 @@ static inline void
4592zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4592zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4593{ 4593{
4594 if (likely(fsf_req->qtcb != NULL)) { 4594 if (likely(fsf_req->qtcb != NULL)) {
4595 fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; 4595 fsf_req->qtcb->prefix.req_seq_no =
4596 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; 4596 fsf_req->adapter->fsf_req_seq_no;
4597 fsf_req->qtcb->prefix.req_id = fsf_req->req_id;
4597 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; 4598 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4598 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; 4599 fsf_req->qtcb->prefix.qtcb_type =
4600 fsf_qtcb_type[fsf_req->fsf_command];
4599 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; 4601 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4600 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; 4602 fsf_req->qtcb->header.req_handle = fsf_req->req_id;
4601 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; 4603 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4602 } 4604 }
4603} 4605}
@@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4654{ 4656{
4655 volatile struct qdio_buffer_element *sbale; 4657 volatile struct qdio_buffer_element *sbale;
4656 struct zfcp_fsf_req *fsf_req = NULL; 4658 struct zfcp_fsf_req *fsf_req = NULL;
4659 unsigned long flags;
4657 int ret = 0; 4660 int ret = 0;
4658 struct zfcp_qdio_queue *req_queue = &adapter->request_queue; 4661 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4659 4662
@@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4668 4671
4669 fsf_req->adapter = adapter; 4672 fsf_req->adapter = adapter;
4670 fsf_req->fsf_command = fsf_cmd; 4673 fsf_req->fsf_command = fsf_cmd;
4674 INIT_LIST_HEAD(&fsf_req->list);
4675
4676 /* unique request id */
4677 spin_lock_irqsave(&adapter->req_list_lock, flags);
4678 fsf_req->req_id = adapter->req_no++;
4679 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
4671 4680
4672 zfcp_fsf_req_qtcb_init(fsf_req); 4681 zfcp_fsf_req_qtcb_init(fsf_req);
4673 4682
@@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4707 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 4716 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
4708 4717
4709 /* setup common SBALE fields */ 4718 /* setup common SBALE fields */
4710 sbale[0].addr = fsf_req; 4719 sbale[0].addr = (void *) fsf_req->req_id;
4711 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 4720 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
4712 if (likely(fsf_req->qtcb != NULL)) { 4721 if (likely(fsf_req->qtcb != NULL)) {
4713 sbale[1].addr = (void *) fsf_req->qtcb; 4722 sbale[1].addr = (void *) fsf_req->qtcb;
@@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4747 volatile struct qdio_buffer_element *sbale; 4756 volatile struct qdio_buffer_element *sbale;
4748 int inc_seq_no; 4757 int inc_seq_no;
4749 int new_distance_from_int; 4758 int new_distance_from_int;
4750 unsigned long flags; 4759 u64 dbg_tmp[2];
4751 int retval = 0; 4760 int retval = 0;
4752 4761
4753 adapter = fsf_req->adapter; 4762 adapter = fsf_req->adapter;
@@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4761 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, 4770 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
4762 sbale[1].length); 4771 sbale[1].length);
4763 4772
4764 /* put allocated FSF request at list tail */ 4773 /* put allocated FSF request into hash table */
4765 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4774 spin_lock(&adapter->req_list_lock);
4766 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); 4775 zfcp_reqlist_add(adapter, fsf_req);
4767 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4776 spin_unlock(&adapter->req_list_lock);
4768 4777
4769 inc_seq_no = (fsf_req->qtcb != NULL); 4778 inc_seq_no = (fsf_req->qtcb != NULL);
4770 4779
@@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4803 QDIO_FLAG_SYNC_OUTPUT, 4812 QDIO_FLAG_SYNC_OUTPUT,
4804 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); 4813 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
4805 4814
4815 dbg_tmp[0] = (unsigned long) sbale[0].addr;
4816 dbg_tmp[1] = (u64) retval;
4817 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
4818
4806 if (unlikely(retval)) { 4819 if (unlikely(retval)) {
4807 /* Queues are down..... */ 4820 /* Queues are down..... */
4808 retval = -EIO; 4821 retval = -EIO;
@@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4812 */ 4825 */
4813 if (timer) 4826 if (timer)
4814 del_timer(timer); 4827 del_timer(timer);
4815 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4828 spin_lock(&adapter->req_list_lock);
4816 list_del(&fsf_req->list); 4829 zfcp_reqlist_remove(adapter, fsf_req->req_id);
4817 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4830 spin_unlock(&adapter->req_list_lock);
4818 /* 4831 /* undo changes in request queue made for this request */
4819 * adjust the number of free SBALs in request queue as well as
4820 * position of first one
4821 */
4822 zfcp_qdio_zero_sbals(req_queue->buffer, 4832 zfcp_qdio_zero_sbals(req_queue->buffer,
4823 fsf_req->sbal_first, fsf_req->sbal_number); 4833 fsf_req->sbal_first, fsf_req->sbal_number);
4824 atomic_add(fsf_req->sbal_number, &req_queue->free_count); 4834 atomic_add(fsf_req->sbal_number, &req_queue->free_count);
4825 req_queue->free_index -= fsf_req->sbal_number; /* increase */ 4835 req_queue->free_index -= fsf_req->sbal_number;
4826 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; 4836 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
4827 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ 4837 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
4828 ZFCP_LOG_DEBUG 4838 zfcp_erp_adapter_reopen(adapter, 0);
4829 ("error: do_QDIO failed. Buffers could not be enqueued "
4830 "to request queue.\n");
4831 } else { 4839 } else {
4832 req_queue->distance_from_int = new_distance_from_int; 4840 req_queue->distance_from_int = new_distance_from_int;
4833 /* 4841 /*
@@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4843 adapter->fsf_req_seq_no++; 4851 adapter->fsf_req_seq_no++;
4844 4852
4845 /* count FSF requests pending */ 4853 /* count FSF requests pending */
4846 atomic_inc(&adapter->fsf_reqs_active); 4854 atomic_inc(&adapter->reqs_active);
4847 } 4855 }
4848 return retval; 4856 return retval;
4849} 4857}
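
The other half of the rework in zfcp_fsf.c: each request is stamped with a sequence number drawn from adapter->req_no under req_list_lock, and that number, not the kernel address of the request, is what goes into the QTCB prefix, the request handle and the SBALE. A hedged userspace sketch of the ID hand-out, with a pthread mutex standing in for the adapter's spinlock:

#include <pthread.h>

struct adapter_model {
    pthread_mutex_t lock;    /* stands in for adapter->req_list_lock */
    unsigned long   req_no;  /* next unique request ID */
};

/* New requests take the next ID under the same lock that protects the
 * request table, so IDs stay unique for the lifetime of the adapter. */
static unsigned long assign_req_id(struct adapter_model *a)
{
    unsigned long id;

    pthread_mutex_lock(&a->lock);
    id = a->req_no++;
    pthread_mutex_unlock(&a->lock);
    return id;
}
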
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 49ea5add4abc..dbd9f48e863e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
282 return; 282 return;
283} 283}
284 284
285/**
286 * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status
287 */
288static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
289 unsigned long req_id)
290{
291 struct zfcp_fsf_req *fsf_req;
292 unsigned long flags;
293
294 debug_long_event(adapter->erp_dbf, 4, req_id);
295
296 spin_lock_irqsave(&adapter->req_list_lock, flags);
297 fsf_req = zfcp_reqlist_ismember(adapter, req_id);
298
299 if (!fsf_req) {
300 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
301 ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id);
302 zfcp_erp_adapter_reopen(adapter, 0);
303 return -EINVAL;
304 }
305
306 zfcp_reqlist_remove(adapter, req_id);
307 atomic_dec(&adapter->reqs_active);
308 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
309
310 /* finish the FSF request */
311 zfcp_fsf_req_complete(fsf_req);
312
313 return 0;
314}
315
285/* 316/*
286 * function: zfcp_qdio_response_handler 317 * function: zfcp_qdio_response_handler
287 * 318 *
@@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
344 /* look for QDIO request identifiers in SB */ 375 /* look for QDIO request identifiers in SB */
345 buffere = &buffer->element[buffere_index]; 376 buffere = &buffer->element[buffere_index];
346 retval = zfcp_qdio_reqid_check(adapter, 377 retval = zfcp_qdio_reqid_check(adapter,
347 (void *) buffere->addr); 378 (unsigned long) buffere->addr);
348 379
349 if (retval) { 380 if (retval) {
350 ZFCP_LOG_NORMAL("bug: unexpected inbound " 381 ZFCP_LOG_NORMAL("bug: unexpected inbound "
@@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
415 return; 446 return;
416} 447}
417 448
418/*
419 * function: zfcp_qdio_reqid_check
420 *
421 * purpose: checks for valid reqids or unsolicited status
422 *
423 * returns: 0 - valid request id or unsolicited status
424 * !0 - otherwise
425 */
426int
427zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
428{
429 struct zfcp_fsf_req *fsf_req;
430 unsigned long flags;
431
432 /* invalid (per convention used in this driver) */
433 if (unlikely(!sbale_addr)) {
434 ZFCP_LOG_NORMAL("bug: invalid reqid\n");
435 return -EINVAL;
436 }
437
438 /* valid request id and thus (hopefully :) valid fsf_req address */
439 fsf_req = (struct zfcp_fsf_req *) sbale_addr;
440
441 /* serialize with zfcp_fsf_req_dismiss_all */
442 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
443 if (list_empty(&adapter->fsf_req_list_head)) {
444 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
445 return 0;
446 }
447 list_del(&fsf_req->list);
448 atomic_dec(&adapter->fsf_reqs_active);
449 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
450
451 if (unlikely(adapter != fsf_req->adapter)) {
452 ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
453 "fsf_req->adapter=%p, adapter=%p)\n",
454 fsf_req, fsf_req->adapter, adapter);
455 return -EINVAL;
456 }
457
458 /* finish the FSF request */
459 zfcp_fsf_req_complete(fsf_req);
460
461 return 0;
462}
463
464/** 449/**
465 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue 450 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
466 * @queue: queue from which SBALE should be returned 451 * @queue: queue from which SBALE should be returned
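
On the inbound side, zfcp_qdio_reqid_check() now treats the value carried in the SBALE as an opaque ID that must be found in the request table; an unknown ID triggers adapter recovery instead of being dereferenced as a request pointer, as the removed code did. A small model of that decision, where a linear scan stands in for the hashed lookup:

#include <stddef.h>
#include <stdio.h>

struct fsf_req_model { unsigned long req_id; };

/* Stand-in for zfcp_reqlist_ismember(): NULL means the ID is unknown. */
static struct fsf_req_model *reqlist_find(struct fsf_req_model *tbl, size_t n,
                                          unsigned long req_id)
{
    for (size_t i = 0; i < n; i++)
        if (tbl[i].req_id == req_id)
            return &tbl[i];
    return NULL;
}

int main(void)
{
    struct fsf_req_model pending[] = { { 7 }, { 8 } };
    unsigned long from_hw = 9;   /* ID reported back by the adapter */

    if (!reqlist_find(pending, 2, from_hw))
        puts("unknown request id -> reopen adapter (error recovery)");
    return 0;
}
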
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 671f4a6a5d18..1bb55086db9f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
30 void (*done) (struct scsi_cmnd *)); 30 void (*done) (struct scsi_cmnd *));
31static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); 31static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
32static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); 32static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
33static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
34static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); 33static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
35static int zfcp_task_management_function(struct zfcp_unit *, u8, 34static int zfcp_task_management_function(struct zfcp_unit *, u8,
36 struct scsi_cmnd *); 35 struct scsi_cmnd *);
@@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = {
46 .scsi_host_template = { 45 .scsi_host_template = {
47 .name = ZFCP_NAME, 46 .name = ZFCP_NAME,
48 .proc_name = "zfcp", 47 .proc_name = "zfcp",
49 .proc_info = NULL,
50 .detect = NULL,
51 .slave_alloc = zfcp_scsi_slave_alloc, 48 .slave_alloc = zfcp_scsi_slave_alloc,
52 .slave_configure = zfcp_scsi_slave_configure, 49 .slave_configure = zfcp_scsi_slave_configure,
53 .slave_destroy = zfcp_scsi_slave_destroy, 50 .slave_destroy = zfcp_scsi_slave_destroy,
54 .queuecommand = zfcp_scsi_queuecommand, 51 .queuecommand = zfcp_scsi_queuecommand,
55 .eh_abort_handler = zfcp_scsi_eh_abort_handler, 52 .eh_abort_handler = zfcp_scsi_eh_abort_handler,
56 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, 53 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
57 .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, 54 .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler,
58 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, 55 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
59 .can_queue = 4096, 56 .can_queue = 4096,
60 .this_id = -1, 57 .this_id = -1,
61 /*
62 * FIXME:
63 * one less? can zfcp_create_sbale cope with it?
64 */
65 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, 58 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
66 .cmd_per_lun = 1, 59 .cmd_per_lun = 1,
67 .unchecked_isa_dma = 0,
68 .use_clustering = 1, 60 .use_clustering = 1,
69 .sdev_attrs = zfcp_sysfs_sdev_attrs, 61 .sdev_attrs = zfcp_sysfs_sdev_attrs,
70 }, 62 },
71 .driver_version = ZFCP_VERSION, 63 .driver_version = ZFCP_VERSION,
72 /* rest initialised with zeros */
73}; 64};
74 65
75/* Find start of Response Information in FCP response unit*/ 66/* Find start of Response Information in FCP response unit*/
@@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp)
176 return retval; 167 return retval;
177} 168}
178 169
179static void 170/**
180zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 171 * zfcp_scsi_slave_destroy - called when scsi device is removed
172 *
173 * Remove reference to associated scsi device for an zfcp_unit.
174 * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs
175 * or a scan for this device might have failed.
176 */
177static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
181{ 178{
182 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 179 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
183 180
@@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
185 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); 182 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
186 sdpnt->hostdata = NULL; 183 sdpnt->hostdata = NULL;
187 unit->device = NULL; 184 unit->device = NULL;
185 zfcp_erp_unit_failed(unit);
188 zfcp_unit_put(unit); 186 zfcp_unit_put(unit);
189 } else { 187 } else {
190 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " 188 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
@@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
549} 547}
550 548
551/** 549/**
552 * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) 550 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
551 *
552 * If ERP is already running it will be stopped.
553 */ 553 */
554int 554int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
555zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
556{ 555{
557 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; 556 struct zfcp_unit *unit;
558 struct zfcp_adapter *adapter = unit->port->adapter; 557 struct zfcp_adapter *adapter;
559 558 unsigned long flags;
560 ZFCP_LOG_NORMAL("bus reset because of problems with "
561 "unit 0x%016Lx\n", unit->fcp_lun);
562 zfcp_erp_adapter_reopen(adapter, 0);
563 zfcp_erp_wait(adapter);
564
565 return SUCCESS;
566}
567 559
568/** 560 unit = (struct zfcp_unit*) scpnt->device->hostdata;
569 * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) 561 adapter = unit->port->adapter;
570 */
571int
572zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
573{
574 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
575 struct zfcp_adapter *adapter = unit->port->adapter;
576 562
577 ZFCP_LOG_NORMAL("host reset because of problems with " 563 ZFCP_LOG_NORMAL("host/bus reset because of problems with "
578 "unit 0x%016Lx\n", unit->fcp_lun); 564 "unit 0x%016Lx\n", unit->fcp_lun);
579 zfcp_erp_adapter_reopen(adapter, 0); 565
580 zfcp_erp_wait(adapter); 566 write_lock_irqsave(&adapter->erp_lock, flags);
567 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
568 &adapter->status)) {
569 zfcp_erp_modify_adapter_status(adapter,
570 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
571 ZFCP_CLEAR);
572 zfcp_erp_action_dismiss_adapter(adapter);
573 write_unlock_irqrestore(&adapter->erp_lock, flags);
574 zfcp_fsf_req_dismiss_all(adapter);
575 adapter->fsf_req_seq_no = 0;
576 zfcp_erp_adapter_reopen(adapter, 0);
577 } else {
578 write_unlock_irqrestore(&adapter->erp_lock, flags);
579 zfcp_erp_adapter_reopen(adapter, 0);
580 zfcp_erp_wait(adapter);
581 }
581 582
582 return SUCCESS; 583 return SUCCESS;
583} 584}
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 5e8afc876980..2d20caf377f5 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -390,7 +390,8 @@ static struct ata_port_info piix_port_info[] = {
390 /* ich5_sata */ 390 /* ich5_sata */
391 { 391 {
392 .sht = &piix_sht, 392 .sht = &piix_sht,
393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, 393 .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
394 PIIX_FLAG_IGNORE_PCS,
394 .pio_mask = 0x1f, /* pio0-4 */ 395 .pio_mask = 0x1f, /* pio0-4 */
395 .mwdma_mask = 0x07, /* mwdma0-2 */ 396 .mwdma_mask = 0x07, /* mwdma0-2 */
396 .udma_mask = 0x7f, /* udma0-6 */ 397 .udma_mask = 0x7f, /* udma0-6 */
@@ -467,6 +468,11 @@ MODULE_LICENSE("GPL");
467MODULE_DEVICE_TABLE(pci, piix_pci_tbl); 468MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
468MODULE_VERSION(DRV_VERSION); 469MODULE_VERSION(DRV_VERSION);
469 470
471static int force_pcs = 0;
472module_param(force_pcs, int, 0444);
473MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
474 "device mis-detection (0=default, 1=ignore PCS, 2=honor PCS)");
475
470/** 476/**
471 * piix_pata_cbl_detect - Probe host controller cable detect info 477 * piix_pata_cbl_detect - Probe host controller cable detect info
472 * @ap: Port for which cable detect info is desired 478 * @ap: Port for which cable detect info is desired
@@ -531,27 +537,25 @@ static void piix_pata_error_handler(struct ata_port *ap)
531} 537}
532 538
533/** 539/**
534 * piix_sata_prereset - prereset for SATA host controller 540 * piix_sata_present_mask - determine present mask for SATA host controller
535 * @ap: Target port 541 * @ap: Target port
536 * 542 *
537 * Reads and configures SATA PCI device's PCI config register 543 * Reads SATA PCI device's PCI config register Port Configuration
538 * Port Configuration and Status (PCS) to determine port and 544 * and Status (PCS) to determine port and device availability.
539 * device availability. Return -ENODEV to skip reset if no
540 * device is present.
541 * 545 *
542 * LOCKING: 546 * LOCKING:
543 * None (inherited from caller). 547 * None (inherited from caller).
544 * 548 *
545 * RETURNS: 549 * RETURNS:
546 * 0 if device is present, -ENODEV otherwise. 550 * determined present_mask
547 */ 551 */
548static int piix_sata_prereset(struct ata_port *ap) 552static unsigned int piix_sata_present_mask(struct ata_port *ap)
549{ 553{
550 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 554 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
551 struct piix_host_priv *hpriv = ap->host_set->private_data; 555 struct piix_host_priv *hpriv = ap->host_set->private_data;
552 const unsigned int *map = hpriv->map; 556 const unsigned int *map = hpriv->map;
553 int base = 2 * ap->hard_port_no; 557 int base = 2 * ap->hard_port_no;
554 unsigned int present = 0; 558 unsigned int present_mask = 0;
555 int port, i; 559 int port, i;
556 u16 pcs; 560 u16 pcs;
557 561
@@ -564,24 +568,52 @@ static int piix_sata_prereset(struct ata_port *ap)
564 continue; 568 continue;
565 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) || 569 if ((ap->flags & PIIX_FLAG_IGNORE_PCS) ||
566 (pcs & 1 << (hpriv->map_db->present_shift + port))) 570 (pcs & 1 << (hpriv->map_db->present_shift + port)))
567 present = 1; 571 present_mask |= 1 << i;
568 } 572 }
569 573
570 DPRINTK("ata%u: LEAVE, pcs=0x%x present=0x%x\n", 574 DPRINTK("ata%u: LEAVE, pcs=0x%x present_mask=0x%x\n",
571 ap->id, pcs, present); 575 ap->id, pcs, present_mask);
572 576
573 if (!present) { 577 return present_mask;
574 ata_port_printk(ap, KERN_INFO, "SATA port has no device.\n"); 578}
575 ap->eh_context.i.action &= ~ATA_EH_RESET_MASK; 579
576 return 0; 580/**
581 * piix_sata_softreset - reset SATA host port via ATA SRST
582 * @ap: port to reset
583 * @classes: resulting classes of attached devices
584 *
585 * Reset SATA host port via ATA SRST. On controllers with
586 * reliable PCS present bits, the bits are used to determine
587 * device presence.
588 *
589 * LOCKING:
590 * Kernel thread context (may sleep)
591 *
592 * RETURNS:
593 * 0 on success, -errno otherwise.
594 */
595static int piix_sata_softreset(struct ata_port *ap, unsigned int *classes)
596{
597 unsigned int present_mask;
598 int i, rc;
599
600 present_mask = piix_sata_present_mask(ap);
601
602 rc = ata_std_softreset(ap, classes);
603 if (rc)
604 return rc;
605
606 for (i = 0; i < ATA_MAX_DEVICES; i++) {
607 if (!(present_mask & (1 << i)))
608 classes[i] = ATA_DEV_NONE;
577 } 609 }
578 610
579 return ata_std_prereset(ap); 611 return 0;
580} 612}
581 613
582static void piix_sata_error_handler(struct ata_port *ap) 614static void piix_sata_error_handler(struct ata_port *ap)
583{ 615{
584 ata_bmdma_drive_eh(ap, piix_sata_prereset, ata_std_softreset, NULL, 616 ata_bmdma_drive_eh(ap, ata_std_prereset, piix_sata_softreset, NULL,
585 ata_std_postreset); 617 ata_std_postreset);
586} 618}
587 619
@@ -785,6 +817,7 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
785} 817}
786 818
787static void __devinit piix_init_pcs(struct pci_dev *pdev, 819static void __devinit piix_init_pcs(struct pci_dev *pdev,
820 struct ata_port_info *pinfo,
788 const struct piix_map_db *map_db) 821 const struct piix_map_db *map_db)
789{ 822{
790 u16 pcs, new_pcs; 823 u16 pcs, new_pcs;
@@ -798,6 +831,18 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
798 pci_write_config_word(pdev, ICH5_PCS, new_pcs); 831 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
799 msleep(150); 832 msleep(150);
800 } 833 }
834
835 if (force_pcs == 1) {
836 dev_printk(KERN_INFO, &pdev->dev,
837 "force ignoring PCS (0x%x)\n", new_pcs);
838 pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS;
839 pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS;
840 } else if (force_pcs == 2) {
841 dev_printk(KERN_INFO, &pdev->dev,
842 "force honoring PCS (0x%x)\n", new_pcs);
843 pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
844 pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
845 }
801} 846}
802 847
803static void __devinit piix_init_sata_map(struct pci_dev *pdev, 848static void __devinit piix_init_sata_map(struct pci_dev *pdev,
@@ -906,7 +951,8 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
906 if (host_flags & ATA_FLAG_SATA) { 951 if (host_flags & ATA_FLAG_SATA) {
907 piix_init_sata_map(pdev, port_info, 952 piix_init_sata_map(pdev, port_info,
908 piix_map_db_table[ent->driver_data]); 953 piix_map_db_table[ent->driver_data]);
909 piix_init_pcs(pdev, piix_map_db_table[ent->driver_data]); 954 piix_init_pcs(pdev, port_info,
955 piix_map_db_table[ent->driver_data]);
910 } 956 }
911 957
912 /* On ICH5, some BIOSen disable the interrupt using the 958 /* On ICH5, some BIOSen disable the interrupt using the
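
Two things change in ata_piix above: device presence is now reported as a per-device mask rather than a single flag, and the new force_pcs module parameter (1 = ignore PCS, 2 = honor PCS, e.g. modprobe ata_piix force_pcs=1) overrides the chipset default at load time. Below is a rough userspace sketch of deriving such a mask from a PCS word; the port map layout and the PRESENT_SHIFT value are invented for the example and do not correspond to any real chipset.

/* Sketch: build a per-device present mask from a PCS-style register. */
#include <stdio.h>

#define PORTS_PER_AP	2
#define PRESENT_SHIFT	4	/* assumed offset of the present bits */

static unsigned int present_mask(unsigned int pcs, const int *map, int base)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < PORTS_PER_AP; i++) {
		int port = map[base + i];

		if (port < 0)
			continue;	/* slot not wired to a SATA port */
		if (pcs & (1u << (PRESENT_SHIFT + port)))
			mask |= 1u << i;
	}
	return mask;
}

int main(void)
{
	const int map[4] = { 0, 2, 1, 3 };	/* hypothetical port map */
	unsigned int pcs = 0x50;		/* ports 0 and 2 present */

	printf("present_mask=0x%x\n", present_mask(pcs, map, 0));
	return 0;
}
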
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 98bd22714d0d..5630868c1b25 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1146,7 +1146,7 @@ static struct sbus_dev sun4_esp_dev;
1146static int __init esp_sun4_probe(struct scsi_host_template *tpnt) 1146static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
1147{ 1147{
1148 if (sun4_esp_physaddr) { 1148 if (sun4_esp_physaddr) {
1149 memset(&sun4_esp_dev, 0, sizeof(esp_dev)); 1149 memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev));
1150 sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; 1150 sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
1151 sun4_esp_dev.irqs[0] = 4; 1151 sun4_esp_dev.irqs[0] = 4;
1152 sun4_esp_dev.resource[0].start = sun4_esp_physaddr; 1152 sun4_esp_dev.resource[0].start = sun4_esp_physaddr;
@@ -1162,6 +1162,7 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
1162 1162
1163static int __devexit esp_sun4_remove(void) 1163static int __devexit esp_sun4_remove(void)
1164{ 1164{
1165 struct of_device *dev = &sun4_esp_dev.ofdev;
1165 struct esp *esp = dev_get_drvdata(&dev->dev); 1166 struct esp *esp = dev_get_drvdata(&dev->dev);
1166 1167
1167 return esp_remove_common(esp); 1168 return esp_remove_common(esp);
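
The esp fix above sizes the memset() by the object actually being cleared rather than by an unrelated symbol. A throwaway standalone example of why that matters (types and sizes invented for the demonstration):

#include <stdio.h>
#include <string.h>

struct small { int a; };
struct big   { int a; char pad[64]; };

int main(void)
{
	struct big dev;

	memset(&dev, 0xff, sizeof(dev));

	/* buggy: sizes the clear by an unrelated, smaller type */
	memset(&dev, 0, sizeof(struct small));
	printf("pad[0] after wrong-size memset: 0x%02x\n",
	       (unsigned char)dev.pad[0]);	/* still 0xff */

	/* correct: size by the object being cleared */
	memset(&dev, 0, sizeof(dev));
	printf("pad[0] after sizeof(dev) memset: 0x%02x\n",
	       (unsigned char)dev.pad[0]);	/* now 0x00 */
	return 0;
}
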
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index ab2f8b267908..bcb3444f1dcf 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -45,10 +45,6 @@ static char driver_name[] = "hptiop";
45static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver"; 45static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
46static const char driver_ver[] = "v1.0 (060426)"; 46static const char driver_ver[] = "v1.0 (060426)";
47 47
48static DEFINE_SPINLOCK(hptiop_hba_list_lock);
49static LIST_HEAD(hptiop_hba_list);
50static int hptiop_cdev_major = -1;
51
52static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag); 48static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
53static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag); 49static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
54static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg); 50static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
@@ -577,7 +573,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
577 if (atomic_xchg(&hba->resetting, 1) == 0) { 573 if (atomic_xchg(&hba->resetting, 1) == 0) {
578 atomic_inc(&hba->reset_count); 574 atomic_inc(&hba->reset_count);
579 writel(IOPMU_INBOUND_MSG0_RESET, 575 writel(IOPMU_INBOUND_MSG0_RESET,
580 &hba->iop->outbound_msgaddr0); 576 &hba->iop->inbound_msgaddr0);
581 hptiop_pci_posting_flush(hba->iop); 577 hptiop_pci_posting_flush(hba->iop);
582 } 578 }
583 579
@@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
620 return queue_depth; 616 return queue_depth;
621} 617}
622 618
623struct hptiop_getinfo {
624 char __user *buffer;
625 loff_t buflength;
626 loff_t bufoffset;
627 loff_t buffillen;
628 loff_t filpos;
629};
630
631static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
632 char *data, int datalen)
633{
634 if (pinfo->filpos < pinfo->bufoffset) {
635 if (pinfo->filpos + datalen <= pinfo->bufoffset) {
636 pinfo->filpos += datalen;
637 return;
638 } else {
639 data += (pinfo->bufoffset - pinfo->filpos);
640 datalen -= (pinfo->bufoffset - pinfo->filpos);
641 pinfo->filpos = pinfo->bufoffset;
642 }
643 }
644
645 pinfo->filpos += datalen;
646 if (pinfo->buffillen == pinfo->buflength)
647 return;
648
649 if (pinfo->buflength - pinfo->buffillen < datalen)
650 datalen = pinfo->buflength - pinfo->buffillen;
651
652 if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
653 return;
654
655 pinfo->buffillen += datalen;
656}
657
658static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
659{
660 va_list args;
661 char buf[128];
662 int len;
663
664 va_start(args, fmt);
665 len = vsnprintf(buf, sizeof(buf), fmt, args);
666 va_end(args);
667 hptiop_copy_mem_info(pinfo, buf, len);
668 return len;
669}
670
671static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
672{
673 arg->done = NULL;
674 wake_up(&arg->hba->ioctl_wq);
675}
676
677static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
678{
679 struct hptiop_hba *hba = arg->hba;
680 u32 val;
681 struct hpt_iop_request_ioctl_command __iomem *req;
682 int ioctl_retry = 0;
683
684 dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);
685
686 /*
687 * check (in + out) buff size from application.
688 * outbuf must be dword aligned.
689 */
690 if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
691 hba->max_request_size
692 - sizeof(struct hpt_iop_request_header)
693 - 4 * sizeof(u32)) {
694 dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
695 hba->host->host_no,
696 arg->inbuf_size, arg->outbuf_size);
697 arg->result = HPT_IOCTL_RESULT_FAILED;
698 return;
699 }
700
701retry:
702 spin_lock_irq(hba->host->host_lock);
703
704 val = readl(&hba->iop->inbound_queue);
705 if (val == IOPMU_QUEUE_EMPTY) {
706 spin_unlock_irq(hba->host->host_lock);
707 dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
708 arg->result = -1;
709 return;
710 }
711
712 req = (struct hpt_iop_request_ioctl_command __iomem *)
713 ((unsigned long)hba->iop + val);
714
715 writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
716 &req->ioctl_code);
717 writel(arg->inbuf_size, &req->inbuf_size);
718 writel(arg->outbuf_size, &req->outbuf_size);
719
720 /*
721 * use the buffer on the IOP local memory first, then copy it
722 * back to host.
723 * the caller's request buffer shoudl be little-endian.
724 */
725 if (arg->inbuf_size)
726 memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
727
728 /* correct the controller ID for IOP */
729 if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
730 arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
731 arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
732 && arg->inbuf_size >= sizeof(u32))
733 writel(0, req->buf);
734
735 writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
736 writel(0, &req->header.flags);
737 writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
738 + arg->inbuf_size, &req->header.size);
739 writel((u32)(unsigned long)arg, &req->header.context);
740 writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
741 &req->header.context_hi32);
742 writel(IOP_RESULT_PENDING, &req->header.result);
743
744 arg->result = HPT_IOCTL_RESULT_FAILED;
745 arg->done = hptiop_ioctl_done;
746
747 writel(val, &hba->iop->inbound_queue);
748 hptiop_pci_posting_flush(hba->iop);
749
750 spin_unlock_irq(hba->host->host_lock);
751
752 wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);
753
754 if (arg->done != NULL) {
755 hptiop_reset_hba(hba);
756 if (ioctl_retry++ < 3)
757 goto retry;
758 }
759
760 dprintk("hpt_iop_ioctl %x result %d\n",
761 arg->ioctl_code, arg->result);
762}
763
764static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
765 u32 insize, void *outbuf, u32 outsize)
766{
767 struct hpt_ioctl_k arg;
768 arg.hba = hba;
769 arg.ioctl_code = code;
770 arg.inbuf = inbuf;
771 arg.outbuf = outbuf;
772 arg.inbuf_size = insize;
773 arg.outbuf_size = outsize;
774 arg.bytes_returned = NULL;
775 hptiop_do_ioctl(&arg);
776 return arg.result;
777}
778
779static inline int hpt_id_valid(__le32 id)
780{
781 return id != 0 && id != cpu_to_le32(0xffffffff);
782}
783
784static int hptiop_get_controller_info(struct hptiop_hba *hba,
785 struct hpt_controller_info *pinfo)
786{
787 int id = 0;
788
789 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
790 &id, sizeof(int), pinfo, sizeof(*pinfo));
791}
792
793
794static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
795 struct hpt_channel_info *pinfo)
796{
797 u32 ids[2];
798
799 ids[0] = 0;
800 ids[1] = bus;
801 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
802 ids, sizeof(ids), pinfo, sizeof(*pinfo));
803
804}
805
806static int hptiop_get_logical_devices(struct hptiop_hba *hba,
807 __le32 *pids, int maxcount)
808{
809 int i;
810 u32 count = maxcount - 1;
811
812 if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
813 &count, sizeof(u32),
814 pids, sizeof(u32) * maxcount))
815 return -1;
816
817 maxcount = le32_to_cpu(pids[0]);
818 for (i = 0; i < maxcount; i++)
819 pids[i] = pids[i+1];
820
821 return maxcount;
822}
823
824static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
825 struct hpt_logical_device_info_v3 *pinfo)
826{
827 return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
828 &id, sizeof(u32),
829 pinfo, sizeof(*pinfo));
830}
831
832static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
833{
834 static char s[64];
835 u32 flags = le32_to_cpu(devinfo->u.array.flags);
836 u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
837 u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
838
839 if (flags & ARRAY_FLAG_DISABLED)
840 return "Disabled";
841 else if (flags & ARRAY_FLAG_TRANSFORMING)
842 sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
843 trans_prog / 100,
844 trans_prog % 100,
845 (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
846 ", Critical" : "",
847 ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
848 !(flags & ARRAY_FLAG_REBUILDING) &&
849 !(flags & ARRAY_FLAG_INITIALIZING))?
850 ", Unintialized" : "");
851 else if ((flags & ARRAY_FLAG_BROKEN) &&
852 devinfo->u.array.array_type != AT_RAID6)
853 return "Critical";
854 else if (flags & ARRAY_FLAG_REBUILDING)
855 sprintf(s,
856 (flags & ARRAY_FLAG_NEEDINITIALIZING)?
857 "%sBackground initializing %d.%d%%" :
858 "%sRebuilding %d.%d%%",
859 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
860 reb_prog / 100,
861 reb_prog % 100);
862 else if (flags & ARRAY_FLAG_VERIFYING)
863 sprintf(s, "%sVerifying %d.%d%%",
864 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
865 reb_prog / 100,
866 reb_prog % 100);
867 else if (flags & ARRAY_FLAG_INITIALIZING)
868 sprintf(s, "%sForground initializing %d.%d%%",
869 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
870 reb_prog / 100,
871 reb_prog % 100);
872 else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
873 sprintf(s,"%s%s%s", "Need Expanding/Migrating",
874 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
875 ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
876 !(flags & ARRAY_FLAG_REBUILDING) &&
877 !(flags & ARRAY_FLAG_INITIALIZING))?
878 ", Unintialized" : "");
879 else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
880 !(flags & ARRAY_FLAG_REBUILDING) &&
881 !(flags & ARRAY_FLAG_INITIALIZING))
882 sprintf(s,"%sUninitialized",
883 (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
884 else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
885 (flags & ARRAY_FLAG_BROKEN))
886 return "Critical";
887 else
888 return "Normal";
889 return s;
890}
891
892static void hptiop_dump_devinfo(struct hptiop_hba *hba,
893 struct hptiop_getinfo *pinfo, __le32 id, int indent)
894{
895 struct hpt_logical_device_info_v3 devinfo;
896 int i;
897 u64 capacity;
898
899 for (i = 0; i < indent; i++)
900 hptiop_copy_info(pinfo, "\t");
901
902 if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
903 hptiop_copy_info(pinfo, "unknown\n");
904 return;
905 }
906
907 switch (devinfo.type) {
908
909 case LDT_DEVICE: {
910 struct hd_driveid *driveid;
911 u32 flags = le32_to_cpu(devinfo.u.device.flags);
912
913 driveid = (struct hd_driveid *)devinfo.u.device.ident;
914 /* model[] is 40 chars long, but we just want 20 chars here */
915 driveid->model[20] = 0;
916
917 if (indent)
918 if (flags & DEVICE_FLAG_DISABLED)
919 hptiop_copy_info(pinfo,"Missing\n");
920 else
921 hptiop_copy_info(pinfo, "CH%d %s\n",
922 devinfo.u.device.path_id + 1,
923 driveid->model);
924 else {
925 capacity = le64_to_cpu(devinfo.capacity) * 512;
926 do_div(capacity, 1000000);
927 hptiop_copy_info(pinfo,
928 "CH%d %s, %lluMB, %s %s%s%s%s\n",
929 devinfo.u.device.path_id + 1,
930 driveid->model,
931 capacity,
932 (flags & DEVICE_FLAG_DISABLED)?
933 "Disabled" : "Normal",
934 devinfo.u.device.read_ahead_enabled?
935 "[RA]" : "",
936 devinfo.u.device.write_cache_enabled?
937 "[WC]" : "",
938 devinfo.u.device.TCQ_enabled?
939 "[TCQ]" : "",
940 devinfo.u.device.NCQ_enabled?
941 "[NCQ]" : ""
942 );
943 }
944 break;
945 }
946
947 case LDT_ARRAY:
948 if (devinfo.target_id != INVALID_TARGET_ID)
949 hptiop_copy_info(pinfo, "[DISK %d_%d] ",
950 devinfo.vbus_id, devinfo.target_id);
951
952 capacity = le64_to_cpu(devinfo.capacity) * 512;
953 do_div(capacity, 1000000);
954 hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
955 devinfo.u.array.name,
956 devinfo.u.array.array_type==AT_RAID0? "RAID0" :
957 devinfo.u.array.array_type==AT_RAID1? "RAID1" :
958 devinfo.u.array.array_type==AT_RAID5? "RAID5" :
959 devinfo.u.array.array_type==AT_RAID6? "RAID6" :
960 devinfo.u.array.array_type==AT_JBOD? "JBOD" :
961 "unknown",
962 capacity,
963 get_array_status(&devinfo));
964 for (i = 0; i < devinfo.u.array.ndisk; i++) {
965 if (hpt_id_valid(devinfo.u.array.members[i])) {
966 if (cpu_to_le16(1<<i) &
967 devinfo.u.array.critical_members)
968 hptiop_copy_info(pinfo, "\t*");
969 hptiop_dump_devinfo(hba, pinfo,
970 devinfo.u.array.members[i], indent+1);
971 }
972 else
973 hptiop_copy_info(pinfo, "\tMissing\n");
974 }
975 if (id == devinfo.u.array.transform_source) {
976 hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
977 hptiop_dump_devinfo(hba, pinfo,
978 devinfo.u.array.transform_target, indent+1);
979 }
980 break;
981 }
982}
983
984static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf) 619static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
985{ 620{
986 return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver); 621 return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
987} 622}
988 623
989static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
990 size_t count, loff_t *ppos)
991{
992 struct hptiop_hba *hba = filp->private_data;
993 struct hptiop_getinfo info;
994 int i, j, ndev;
995 struct hpt_controller_info con_info;
996 struct hpt_channel_info chan_info;
997 __le32 ids[32];
998
999 info.buffer = buf;
1000 info.buflength = count;
1001 info.bufoffset = ppos ? *ppos : 0;
1002 info.filpos = 0;
1003 info.buffillen = 0;
1004
1005 if (hptiop_get_controller_info(hba, &con_info))
1006 return -EIO;
1007
1008 for (i = 0; i < con_info.num_buses; i++) {
1009 if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
1010 if (hpt_id_valid(chan_info.devices[0]))
1011 hptiop_dump_devinfo(hba, &info,
1012 chan_info.devices[0], 0);
1013 if (hpt_id_valid(chan_info.devices[1]))
1014 hptiop_dump_devinfo(hba, &info,
1015 chan_info.devices[1], 0);
1016 }
1017 }
1018
1019 ndev = hptiop_get_logical_devices(hba, ids,
1020 sizeof(ids) / sizeof(ids[0]));
1021
1022 /*
1023 * if hptiop_get_logical_devices fails, ndev==-1 and it just
1024 * output nothing here
1025 */
1026 for (j = 0; j < ndev; j++)
1027 hptiop_dump_devinfo(hba, &info, ids[j], 0);
1028
1029 if (ppos)
1030 *ppos += info.buffillen;
1031
1032 return info.buffillen;
1033}
1034
1035static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
1036 unsigned int cmd, unsigned long arg)
1037{
1038 struct hptiop_hba *hba = file->private_data;
1039 struct hpt_ioctl_u ioctl_u;
1040 struct hpt_ioctl_k ioctl_k;
1041 u32 bytes_returned;
1042 int err = -EINVAL;
1043
1044 if (copy_from_user(&ioctl_u,
1045 (void __user *)arg, sizeof(struct hpt_ioctl_u)))
1046 return -EINVAL;
1047
1048 if (ioctl_u.magic != HPT_IOCTL_MAGIC)
1049 return -EINVAL;
1050
1051 ioctl_k.ioctl_code = ioctl_u.ioctl_code;
1052 ioctl_k.inbuf = NULL;
1053 ioctl_k.inbuf_size = ioctl_u.inbuf_size;
1054 ioctl_k.outbuf = NULL;
1055 ioctl_k.outbuf_size = ioctl_u.outbuf_size;
1056 ioctl_k.hba = hba;
1057 ioctl_k.bytes_returned = &bytes_returned;
1058
1059 /* verify user buffer */
1060 if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
1061 ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
1062 (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
1063 ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
1064 (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
1065 ioctl_u.bytes_returned, sizeof(u32))) ||
1066 ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {
1067
1068 dprintk("scsi%d: got bad user address\n", hba->host->host_no);
1069 return -EINVAL;
1070 }
1071
1072 /* map buffer to kernel. */
1073 if (ioctl_k.inbuf_size) {
1074 ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
1075 if (!ioctl_k.inbuf) {
1076 dprintk("scsi%d: fail to alloc inbuf\n",
1077 hba->host->host_no);
1078 err = -ENOMEM;
1079 goto err_exit;
1080 }
1081
1082 if (copy_from_user(ioctl_k.inbuf,
1083 ioctl_u.inbuf, ioctl_k.inbuf_size)) {
1084 goto err_exit;
1085 }
1086 }
1087
1088 if (ioctl_k.outbuf_size) {
1089 ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
1090 if (!ioctl_k.outbuf) {
1091 dprintk("scsi%d: fail to alloc outbuf\n",
1092 hba->host->host_no);
1093 err = -ENOMEM;
1094 goto err_exit;
1095 }
1096 }
1097
1098 hptiop_do_ioctl(&ioctl_k);
1099
1100 if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
1101 if (ioctl_k.outbuf_size &&
1102 copy_to_user(ioctl_u.outbuf,
1103 ioctl_k.outbuf, ioctl_k.outbuf_size))
1104 goto err_exit;
1105
1106 if (ioctl_u.bytes_returned &&
1107 copy_to_user(ioctl_u.bytes_returned,
1108 &bytes_returned, sizeof(u32)))
1109 goto err_exit;
1110
1111 err = 0;
1112 }
1113
1114err_exit:
1115 kfree(ioctl_k.inbuf);
1116 kfree(ioctl_k.outbuf);
1117
1118 return err;
1119}
1120
1121static int hptiop_cdev_open(struct inode *inode, struct file *file)
1122{
1123 struct hptiop_hba *hba;
1124 unsigned i = 0, minor = iminor(inode);
1125 int ret = -ENODEV;
1126
1127 spin_lock(&hptiop_hba_list_lock);
1128 list_for_each_entry(hba, &hptiop_hba_list, link) {
1129 if (i == minor) {
1130 file->private_data = hba;
1131 ret = 0;
1132 goto out;
1133 }
1134 i++;
1135 }
1136
1137out:
1138 spin_unlock(&hptiop_hba_list_lock);
1139 return ret;
1140}
1141
1142static struct file_operations hptiop_cdev_fops = {
1143 .owner = THIS_MODULE,
1144 .read = hptiop_cdev_read,
1145 .ioctl = hptiop_cdev_ioctl,
1146 .open = hptiop_cdev_open,
1147};
1148
1149static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf) 624static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
1150{ 625{
1151 struct Scsi_Host *host = class_to_shost(class_dev); 626 struct Scsi_Host *host = class_to_shost(class_dev);
@@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
1296 goto unmap_pci_bar; 771 goto unmap_pci_bar;
1297 } 772 }
1298 773
1299 if (scsi_add_host(host, &pcidev->dev)) {
1300 printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
1301 hba->host->host_no);
1302 goto unmap_pci_bar;
1303 }
1304
1305 pci_set_drvdata(pcidev, host); 774 pci_set_drvdata(pcidev, host);
1306 775
1307 if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED, 776 if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
1308 driver_name, hba)) { 777 driver_name, hba)) {
1309 printk(KERN_ERR "scsi%d: request irq %d failed\n", 778 printk(KERN_ERR "scsi%d: request irq %d failed\n",
1310 hba->host->host_no, pcidev->irq); 779 hba->host->host_no, pcidev->irq);
1311 goto remove_scsi_host; 780 goto unmap_pci_bar;
1312 } 781 }
1313 782
1314 /* Allocate request mem */ 783 /* Allocate request mem */
@@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
1355 if (hptiop_initialize_iop(hba)) 824 if (hptiop_initialize_iop(hba))
1356 goto free_request_mem; 825 goto free_request_mem;
1357 826
1358 spin_lock(&hptiop_hba_list_lock); 827 if (scsi_add_host(host, &pcidev->dev)) {
1359 list_add_tail(&hba->link, &hptiop_hba_list); 828 printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
1360 spin_unlock(&hptiop_hba_list_lock); 829 hba->host->host_no);
830 goto free_request_mem;
831 }
832
1361 833
1362 scsi_scan_host(host); 834 scsi_scan_host(host);
1363 835
@@ -1372,9 +844,6 @@ free_request_mem:
1372free_request_irq: 844free_request_irq:
1373 free_irq(hba->pcidev->irq, hba); 845 free_irq(hba->pcidev->irq, hba);
1374 846
1375remove_scsi_host:
1376 scsi_remove_host(host);
1377
1378unmap_pci_bar: 847unmap_pci_bar:
1379 iounmap(hba->iop); 848 iounmap(hba->iop);
1380 849
@@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev)
1422 891
1423 scsi_remove_host(host); 892 scsi_remove_host(host);
1424 893
1425 spin_lock(&hptiop_hba_list_lock);
1426 list_del_init(&hba->link);
1427 spin_unlock(&hptiop_hba_list_lock);
1428
1429 hptiop_shutdown(pcidev); 894 hptiop_shutdown(pcidev);
1430 895
1431 free_irq(hba->pcidev->irq, hba); 896 free_irq(hba->pcidev->irq, hba);
@@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = {
1462 927
1463static int __init hptiop_module_init(void) 928static int __init hptiop_module_init(void)
1464{ 929{
1465 int error;
1466
1467 printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver); 930 printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
1468 931 return pci_register_driver(&hptiop_pci_driver);
1469 error = pci_register_driver(&hptiop_pci_driver);
1470 if (error < 0)
1471 return error;
1472
1473 hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
1474 if (hptiop_cdev_major < 0) {
1475 printk(KERN_WARNING "unable to register hptiop device.\n");
1476 return hptiop_cdev_major;
1477 }
1478
1479 return 0;
1480} 932}
1481 933
1482static void __exit hptiop_module_exit(void) 934static void __exit hptiop_module_exit(void)
1483{ 935{
1484 dprintk("hptiop_module_exit\n");
1485 unregister_chrdev(hptiop_cdev_major, "hptiop");
1486 pci_unregister_driver(&hptiop_pci_driver); 936 pci_unregister_driver(&hptiop_pci_driver);
1487} 937}
1488 938
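
The hptiop probe path above now exposes the host to the midlayer only after the IOP is initialized, and the error unwind drops the remove_scsi_host label accordingly. Here is a small userspace sketch of the general ordering idea, with goto-based unwind in reverse order of the setup steps; every function below is a stand-in, not the hptiop or SCSI midlayer API.

#include <stdio.h>

static int map_registers(void)      { return 0; }
static int request_interrupt(void)  { return 0; }
static int init_controller(void)    { return -1; }  /* simulate a failure */
static void unmap_registers(void)   { puts("unmap registers"); }
static void free_interrupt(void)    { puts("free interrupt"); }

static int probe(void)
{
	if (map_registers())
		return -1;
	if (request_interrupt())
		goto unmap;
	if (init_controller())
		goto free_irq;

	/* only now is it safe to let the upper layer scan the host */
	puts("add host and scan");
	return 0;

free_irq:
	free_interrupt();
unmap:
	unmap_registers();
	return -1;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}
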
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f7b5d7372d26..94d1de55607f 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
517 /* No more interrupts */ 517 /* No more interrupts */
518 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) 518 if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
519 printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); 519 printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred);
520 local_irq_enable(); 520 local_irq_enable_in_hardirq();
521 if (status.b.check) 521 if (status.b.check)
522 rq->errors++; 522 rq->errors++;
523 idescsi_end_request (drive, 1, 0); 523 idescsi_end_request (drive, 1, 0);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 848fb2aa4ca3..058f094f945a 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -43,13 +43,10 @@
43 43
44#include "iscsi_tcp.h" 44#include "iscsi_tcp.h"
45 45
46#define ISCSI_TCP_VERSION "1.0-595"
47
48MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, " 46MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>"); 47 "Alex Aizman <itn780@yahoo.com>");
50MODULE_DESCRIPTION("iSCSI/TCP data-path"); 48MODULE_DESCRIPTION("iSCSI/TCP data-path");
51MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
52MODULE_VERSION(ISCSI_TCP_VERSION);
53/* #define DEBUG_TCP */ 50/* #define DEBUG_TCP */
54#define DEBUG_ASSERT 51#define DEBUG_ASSERT
55 52
@@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
185 * must be called with session lock 182 * must be called with session lock
186 */ 183 */
187static void 184static void
188__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 185iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
189{ 186{
190 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 187 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
188 struct iscsi_r2t_info *r2t;
191 struct scsi_cmnd *sc; 189 struct scsi_cmnd *sc;
192 190
191 /* flush ctask's r2t queues */
192 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
193 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
194 sizeof(void*));
195 debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
196 }
197
193 sc = ctask->sc; 198 sc = ctask->sc;
194 if (unlikely(!sc)) 199 if (unlikely(!sc))
195 return; 200 return;
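
The cleanup path above drains any R2T requests still queued on the task back into the free pool before the command completes. A toy userspace model of that drain follows, using a tiny ring buffer as a stand-in for the kernel kfifo (this is not the kfifo API):

#include <stdio.h>

#define QSZ 8

struct ring {
	void *slot[QSZ];
	unsigned int head, tail;
};

static int ring_put(struct ring *r, void *p)
{
	if (r->head - r->tail == QSZ)
		return 0;
	r->slot[r->head++ % QSZ] = p;
	return 1;
}

static int ring_get(struct ring *r, void **p)
{
	if (r->head == r->tail)
		return 0;
	*p = r->slot[r->tail++ % QSZ];
	return 1;
}

int main(void)
{
	struct ring pending = { 0 };
	struct ring pool = { 0 };
	int r2t[3];
	void *p;
	int i;

	for (i = 0; i < 3; i++)
		ring_put(&pending, &r2t[i]);

	/* cleanup: drain everything still pending back into the pool */
	while (ring_get(&pending, &p))
		ring_put(&pool, p);

	printf("pool now holds %u entries\n", pool.head - pool.tail);
	return 0;
}
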
@@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
374 spin_unlock(&session->lock); 379 spin_unlock(&session->lock);
375 return 0; 380 return 0;
376 } 381 }
382
377 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*)); 383 rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
378 BUG_ON(!rc); 384 BUG_ON(!rc);
379 385
@@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
399 tcp_ctask->exp_r2tsn = r2tsn + 1; 405 tcp_ctask->exp_r2tsn = r2tsn + 1;
400 tcp_ctask->xmstate |= XMSTATE_SOL_HDR; 406 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
401 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); 407 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
402 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); 408 list_move_tail(&ctask->running, &conn->xmitqueue);
403 409
404 scsi_queue_work(session->host, &conn->xmitwork); 410 scsi_queue_work(session->host, &conn->xmitwork);
405 conn->r2t_pdus_cnt++; 411 conn->r2t_pdus_cnt++;
@@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
477 case ISCSI_OP_SCSI_DATA_IN: 483 case ISCSI_OP_SCSI_DATA_IN:
478 tcp_conn->in.ctask = session->cmds[itt]; 484 tcp_conn->in.ctask = session->cmds[itt];
479 rc = iscsi_data_rsp(conn, tcp_conn->in.ctask); 485 rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
486 if (rc)
487 return rc;
480 /* fall through */ 488 /* fall through */
481 case ISCSI_OP_SCSI_CMD_RSP: 489 case ISCSI_OP_SCSI_CMD_RSP:
482 tcp_conn->in.ctask = session->cmds[itt]; 490 tcp_conn->in.ctask = session->cmds[itt];
@@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
484 goto copy_hdr; 492 goto copy_hdr;
485 493
486 spin_lock(&session->lock); 494 spin_lock(&session->lock);
487 __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); 495 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
488 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); 496 rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
489 spin_unlock(&session->lock); 497 spin_unlock(&session->lock);
490 break; 498 break;
@@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
500 break; 508 break;
501 case ISCSI_OP_LOGIN_RSP: 509 case ISCSI_OP_LOGIN_RSP:
502 case ISCSI_OP_TEXT_RSP: 510 case ISCSI_OP_TEXT_RSP:
503 case ISCSI_OP_LOGOUT_RSP:
504 case ISCSI_OP_NOOP_IN:
505 case ISCSI_OP_REJECT: 511 case ISCSI_OP_REJECT:
506 case ISCSI_OP_ASYNC_EVENT: 512 case ISCSI_OP_ASYNC_EVENT:
513 /*
514 * It is possible that we could get a PDU with a buffer larger
515 * than 8K, but there are no targets that currently do this.
516 * For now we fail until we find a vendor that needs it
517 */
518 if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH <
519 tcp_conn->in.datalen) {
520 printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
521 "but conn buffer is only %u (opcode %0x)\n",
522 tcp_conn->in.datalen,
523 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode);
524 rc = ISCSI_ERR_PROTO;
525 break;
526 }
527
507 if (tcp_conn->in.datalen) 528 if (tcp_conn->in.datalen)
508 goto copy_hdr; 529 goto copy_hdr;
509 /* fall through */ 530 /* fall through */
531 case ISCSI_OP_LOGOUT_RSP:
532 case ISCSI_OP_NOOP_IN:
510 case ISCSI_OP_SCSI_TMFUNC_RSP: 533 case ISCSI_OP_SCSI_TMFUNC_RSP:
511 rc = iscsi_complete_pdu(conn, hdr, NULL, 0); 534 rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
512 break; 535 break;
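
The new check above rejects PDUs whose advertised data segment would not fit the fixed receive buffer instead of copying blindly. A standalone sketch of the same guard; the buffer size and helper names are assumptions for the example only.

#include <stdio.h>
#include <string.h>

#define RECV_BUF_LEN 8192	/* assumed MaxRecvDataSegmentLength */

static char recv_buf[RECV_BUF_LEN];

static int recv_pdu(const char *data, unsigned int datalen)
{
	if (datalen > RECV_BUF_LEN) {
		fprintf(stderr, "pdu of %u bytes exceeds %u byte buffer\n",
			datalen, RECV_BUF_LEN);
		return -1;	/* treat as a protocol error */
	}
	memcpy(recv_buf, data, datalen);
	return 0;
}

int main(void)
{
	char small[16] = "hello";

	printf("ok pdu:  %d\n", recv_pdu(small, sizeof(small)));
	printf("bad pdu: %d\n", recv_pdu(small, RECV_BUF_LEN + 1));
	return 0;
}
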
@@ -523,7 +546,7 @@ copy_hdr:
523 * skbs to complete the command then we have to copy the header 546 * skbs to complete the command then we have to copy the header
524 * for later use 547 * for later use
525 */ 548 */
526 if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy < 549 if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
527 (tcp_conn->in.datalen + tcp_conn->in.padding + 550 (tcp_conn->in.datalen + tcp_conn->in.padding +
528 (conn->datadgst_en ? 4 : 0))) { 551 (conn->datadgst_en ? 4 : 0))) {
529 debug_tcp("Copying header for later use. in.copy %d in.datalen" 552 debug_tcp("Copying header for later use. in.copy %d in.datalen"
@@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
614 * byte counters. 637 * byte counters.
615 **/ 638 **/
616static inline int 639static inline int
617iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn) 640iscsi_tcp_copy(struct iscsi_conn *conn)
618{ 641{
619 void *buf = tcp_conn->data; 642 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
620 int buf_size = tcp_conn->in.datalen; 643 int buf_size = tcp_conn->in.datalen;
621 int buf_left = buf_size - tcp_conn->data_copied; 644 int buf_left = buf_size - tcp_conn->data_copied;
622 int size = min(tcp_conn->in.copy, buf_left); 645 int size = min(tcp_conn->in.copy, buf_left);
@@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
627 BUG_ON(size <= 0); 650 BUG_ON(size <= 0);
628 651
629 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, 652 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
630 (char*)buf + tcp_conn->data_copied, size); 653 (char*)conn->data + tcp_conn->data_copied, size);
631 BUG_ON(rc); 654 BUG_ON(rc);
632 655
633 tcp_conn->in.offset += size; 656 tcp_conn->in.offset += size;
@@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
745done: 768done:
746 /* check for non-exceptional status */ 769 /* check for non-exceptional status */
747 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { 770 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
748 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 771 debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
749 (long)sc, sc->result, ctask->itt); 772 (long)sc, sc->result, ctask->itt,
773 tcp_conn->in.hdr->flags);
750 spin_lock(&conn->session->lock); 774 spin_lock(&conn->session->lock);
751 __iscsi_ctask_cleanup(conn, ctask); 775 iscsi_tcp_cleanup_ctask(conn, ctask);
752 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0); 776 __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
753 spin_unlock(&conn->session->lock); 777 spin_unlock(&conn->session->lock);
754 } 778 }
@@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn)
769 break; 793 break;
770 case ISCSI_OP_SCSI_CMD_RSP: 794 case ISCSI_OP_SCSI_CMD_RSP:
771 spin_lock(&conn->session->lock); 795 spin_lock(&conn->session->lock);
772 __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask); 796 iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
773 spin_unlock(&conn->session->lock); 797 spin_unlock(&conn->session->lock);
774 case ISCSI_OP_TEXT_RSP: 798 case ISCSI_OP_TEXT_RSP:
775 case ISCSI_OP_LOGIN_RSP: 799 case ISCSI_OP_LOGIN_RSP:
776 case ISCSI_OP_NOOP_IN:
777 case ISCSI_OP_ASYNC_EVENT: 800 case ISCSI_OP_ASYNC_EVENT:
778 case ISCSI_OP_REJECT: 801 case ISCSI_OP_REJECT:
779 /* 802 /*
780 * Collect data segment to the connection's data 803 * Collect data segment to the connection's data
781 * placeholder 804 * placeholder
782 */ 805 */
783 if (iscsi_tcp_copy(tcp_conn)) { 806 if (iscsi_tcp_copy(conn)) {
784 rc = -EAGAIN; 807 rc = -EAGAIN;
785 goto exit; 808 goto exit;
786 } 809 }
787 810
788 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data, 811 rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
789 tcp_conn->in.datalen); 812 tcp_conn->in.datalen);
790 if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP) 813 if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
791 iscsi_recv_digest_update(tcp_conn, tcp_conn->data, 814 iscsi_recv_digest_update(tcp_conn, conn->data,
792 tcp_conn->in.datalen); 815 tcp_conn->in.datalen);
793 break; 816 break;
794 default: 817 default:
@@ -843,7 +866,7 @@ more:
843 if (rc == -EAGAIN) 866 if (rc == -EAGAIN)
844 goto nomore; 867 goto nomore;
845 else { 868 else {
846 iscsi_conn_failure(conn, rc); 869 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
847 return 0; 870 return 0;
848 } 871 }
849 } 872 }
@@ -897,7 +920,7 @@ more:
897 if (rc) { 920 if (rc) {
898 if (rc == -EAGAIN) 921 if (rc == -EAGAIN)
899 goto again; 922 goto again;
900 iscsi_conn_failure(conn, rc); 923 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
901 return 0; 924 return 0;
902 } 925 }
903 tcp_conn->in.copy -= tcp_conn->in.padding; 926 tcp_conn->in.copy -= tcp_conn->in.padding;
@@ -1028,9 +1051,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1028} 1051}
1029 1052
1030static void 1053static void
1031iscsi_conn_restore_callbacks(struct iscsi_conn *conn) 1054iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
1032{ 1055{
1033 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1034 struct sock *sk = tcp_conn->sock->sk; 1056 struct sock *sk = tcp_conn->sock->sk;
1035 1057
1036 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */ 1058 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
1308 ctask->imm_count - 1330 ctask->imm_count -
1309 ctask->unsol_count; 1331 ctask->unsol_count;
1310 1332
1311 debug_scsi("cmd [itt %x total %d imm %d imm_data %d " 1333 debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d "
1312 "r2t_data %d]\n", 1334 "r2t_data %d]\n",
1313 ctask->itt, ctask->total_length, ctask->imm_count, 1335 ctask->itt, ctask->total_length, ctask->imm_count,
1314 ctask->unsol_count, tcp_ctask->r2t_data_count); 1336 ctask->unsol_count, tcp_ctask->r2t_data_count);
@@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1636 } 1658 }
1637solicit_again: 1659solicit_again:
1638 /* 1660 /*
1639 * send Data-Out whitnin this R2T sequence. 1661 * send Data-Out within this R2T sequence.
1640 */ 1662 */
1641 if (!r2t->data_count) 1663 if (!r2t->data_count)
1642 goto data_out_done; 1664 goto data_out_done;
@@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1731 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1753 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1732 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1754 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1733 struct iscsi_data_task *dtask = tcp_ctask->dtask; 1755 struct iscsi_data_task *dtask = tcp_ctask->dtask;
1734 int sent, rc; 1756 int sent = 0, rc;
1735 1757
1736 tcp_ctask->xmstate &= ~XMSTATE_W_PAD; 1758 tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
1737 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad, 1759 iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
@@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1900 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 1922 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1901 /* initial operational parameters */ 1923 /* initial operational parameters */
1902 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 1924 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1903 tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
1904
1905 /* allocate initial PDU receive place holder */
1906 if (tcp_conn->data_size <= PAGE_SIZE)
1907 tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL);
1908 else
1909 tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL,
1910 get_order(tcp_conn->data_size));
1911 if (!tcp_conn->data)
1912 goto max_recv_dlenght_alloc_fail;
1913 1925
1914 return cls_conn; 1926 return cls_conn;
1915 1927
1916max_recv_dlenght_alloc_fail:
1917 kfree(tcp_conn);
1918tcp_conn_alloc_fail: 1928tcp_conn_alloc_fail:
1919 iscsi_conn_teardown(cls_conn); 1929 iscsi_conn_teardown(cls_conn);
1920 return NULL; 1930 return NULL;
1921} 1931}
1922 1932
1923static void 1933static void
1934iscsi_tcp_release_conn(struct iscsi_conn *conn)
1935{
1936 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1937
1938 if (!tcp_conn->sock)
1939 return;
1940
1941 sock_hold(tcp_conn->sock->sk);
1942 iscsi_conn_restore_callbacks(tcp_conn);
1943 sock_put(tcp_conn->sock->sk);
1944
1945 sock_release(tcp_conn->sock);
1946 tcp_conn->sock = NULL;
1947 conn->recv_lock = NULL;
1948}
1949
1950static void
1924iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) 1951iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1925{ 1952{
1926 struct iscsi_conn *conn = cls_conn->dd_data; 1953 struct iscsi_conn *conn = cls_conn->dd_data;
@@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1930 if (conn->hdrdgst_en || conn->datadgst_en) 1957 if (conn->hdrdgst_en || conn->datadgst_en)
1931 digest = 1; 1958 digest = 1;
1932 1959
1960 iscsi_tcp_release_conn(conn);
1933 iscsi_conn_teardown(cls_conn); 1961 iscsi_conn_teardown(cls_conn);
1934 1962
1935 /* now free tcp_conn */ 1963 /* now free tcp_conn */
@@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
1944 crypto_free_tfm(tcp_conn->data_rx_tfm); 1972 crypto_free_tfm(tcp_conn->data_rx_tfm);
1945 } 1973 }
1946 1974
1947 /* free conn->data, size = MaxRecvDataSegmentLength */
1948 if (tcp_conn->data_size <= PAGE_SIZE)
1949 kfree(tcp_conn->data);
1950 else
1951 free_pages((unsigned long)tcp_conn->data,
1952 get_order(tcp_conn->data_size));
1953 kfree(tcp_conn); 1975 kfree(tcp_conn);
1954} 1976}
1955 1977
1978static void
1979iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1980{
1981 struct iscsi_conn *conn = cls_conn->dd_data;
1982
1983 iscsi_conn_stop(cls_conn, flag);
1984 iscsi_tcp_release_conn(conn);
1985}
1986
1956static int 1987static int
1957iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, 1988iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1958 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 1989 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
2001 return 0; 2032 return 0;
2002} 2033}
2003 2034
2004static void
2005iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2006{
2007 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
2008 struct iscsi_r2t_info *r2t;
2009
2010 /* flush ctask's r2t queues */
2011 while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)))
2012 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
2013 sizeof(void*));
2014
2015 __iscsi_ctask_cleanup(conn, ctask);
2016}
2017
2018static void
2019iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn)
2020{
2021 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2022 struct sock *sk;
2023
2024 if (!tcp_conn->sock)
2025 return;
2026
2027 sk = tcp_conn->sock->sk;
2028 write_lock_bh(&sk->sk_callback_lock);
2029 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2030 write_unlock_bh(&sk->sk_callback_lock);
2031}
2032
2033static void
2034iscsi_tcp_terminate_conn(struct iscsi_conn *conn)
2035{
2036 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2037
2038 if (!tcp_conn->sock)
2039 return;
2040
2041 sock_hold(tcp_conn->sock->sk);
2042 iscsi_conn_restore_callbacks(conn);
2043 sock_put(tcp_conn->sock->sk);
2044
2045 sock_release(tcp_conn->sock);
2046 tcp_conn->sock = NULL;
2047 conn->recv_lock = NULL;
2048}
2049
2050/* called with host lock */ 2035/* called with host lock */
2051static void 2036static void
2052iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, 2037iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
@@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
2057 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr, 2042 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
2058 sizeof(struct iscsi_hdr)); 2043 sizeof(struct iscsi_hdr));
2059 tcp_mtask->xmstate = XMSTATE_IMM_HDR; 2044 tcp_mtask->xmstate = XMSTATE_IMM_HDR;
2045 tcp_mtask->sent = 0;
2060 2046
2061 if (mtask->data_count) 2047 if (mtask->data_count)
2062 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data, 2048 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
@@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
2138 int value; 2124 int value;
2139 2125
2140 switch(param) { 2126 switch(param) {
2141 case ISCSI_PARAM_MAX_RECV_DLENGTH: {
2142 char *saveptr = tcp_conn->data;
2143 gfp_t flags = GFP_KERNEL;
2144
2145 sscanf(buf, "%d", &value);
2146 if (tcp_conn->data_size >= value) {
2147 iscsi_set_param(cls_conn, param, buf, buflen);
2148 break;
2149 }
2150
2151 spin_lock_bh(&session->lock);
2152 if (conn->stop_stage == STOP_CONN_RECOVER)
2153 flags = GFP_ATOMIC;
2154 spin_unlock_bh(&session->lock);
2155
2156 if (value <= PAGE_SIZE)
2157 tcp_conn->data = kmalloc(value, flags);
2158 else
2159 tcp_conn->data = (void*)__get_free_pages(flags,
2160 get_order(value));
2161 if (tcp_conn->data == NULL) {
2162 tcp_conn->data = saveptr;
2163 return -ENOMEM;
2164 }
2165 if (tcp_conn->data_size <= PAGE_SIZE)
2166 kfree(saveptr);
2167 else
2168 free_pages((unsigned long)saveptr,
2169 get_order(tcp_conn->data_size));
2170 iscsi_set_param(cls_conn, param, buf, buflen);
2171 tcp_conn->data_size = value;
2172 break;
2173 }
2174 case ISCSI_PARAM_HDRDGST_EN: 2127 case ISCSI_PARAM_HDRDGST_EN:
2175 iscsi_set_param(cls_conn, param, buf, buflen); 2128 iscsi_set_param(cls_conn, param, buf, buflen);
2176 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 2129 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
@@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
2361} 2314}
2362 2315
2363static struct scsi_host_template iscsi_sht = { 2316static struct scsi_host_template iscsi_sht = {
2364 .name = "iSCSI Initiator over TCP/IP, v" 2317 .name = "iSCSI Initiator over TCP/IP",
2365 ISCSI_TCP_VERSION,
2366 .queuecommand = iscsi_queuecommand, 2318 .queuecommand = iscsi_queuecommand,
2367 .change_queue_depth = iscsi_change_queue_depth, 2319 .change_queue_depth = iscsi_change_queue_depth,
2368 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 2320 .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
@@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = {
2414 .get_conn_param = iscsi_tcp_conn_get_param, 2366 .get_conn_param = iscsi_tcp_conn_get_param,
2415 .get_session_param = iscsi_session_get_param, 2367 .get_session_param = iscsi_session_get_param,
2416 .start_conn = iscsi_conn_start, 2368 .start_conn = iscsi_conn_start,
2417 .stop_conn = iscsi_conn_stop, 2369 .stop_conn = iscsi_tcp_conn_stop,
2418 /* these are called as part of conn recovery */
2419 .suspend_conn_recv = iscsi_tcp_suspend_conn_rx,
2420 .terminate_conn = iscsi_tcp_terminate_conn,
2421 /* IO */ 2370 /* IO */
2422 .send_pdu = iscsi_conn_send_pdu, 2371 .send_pdu = iscsi_conn_send_pdu,
2423 .get_stats = iscsi_conn_get_stats, 2372 .get_stats = iscsi_conn_get_stats,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 808302832e68..6a4ee704e46e 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -78,8 +78,6 @@ struct iscsi_tcp_conn {
78 char hdrext[4*sizeof(__u16) + 78 char hdrext[4*sizeof(__u16) +
79 sizeof(__u32)]; 79 sizeof(__u32)];
80 int data_copied; 80 int data_copied;
81 char *data; /* data placeholder */
82 int data_size; /* actual recv_dlength */
83 int stop_stage; /* conn_stop() flag: * 81 int stop_stage; /* conn_stop() flag: *
84 * stop to recover, * 82 * stop to recover, *
85 * stop to terminate */ 83 * stop to terminate */
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 16fc2dd8f2f7..73dd6c8deede 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2746,7 +2746,7 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2746 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol))) 2746 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2747 return rc; 2747 return rc;
2748 2748
2749 scontrol = (scontrol & 0x0f0) | 0x302; 2749 scontrol = (scontrol & 0x0f0) | 0x304;
2750 2750
2751 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol))) 2751 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2752 return rc; 2752 return rc;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7e6e031cc41b..5884cd26d53a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session,
189{ 189{
190 struct scsi_cmnd *sc = ctask->sc; 190 struct scsi_cmnd *sc = ctask->sc;
191 191
192 ctask->state = ISCSI_TASK_COMPLETED;
192 ctask->sc = NULL; 193 ctask->sc = NULL;
193 list_del_init(&ctask->running); 194 list_del_init(&ctask->running);
194 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*)); 195 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
@@ -275,6 +276,25 @@ out:
275 return rc; 276 return rc;
276} 277}
277 278
279static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
280{
281 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
282
283 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
284 conn->tmfrsp_pdus_cnt++;
285
286 if (conn->tmabort_state != TMABORT_INITIAL)
287 return;
288
289 if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
290 conn->tmabort_state = TMABORT_SUCCESS;
291 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
292 conn->tmabort_state = TMABORT_NOT_FOUND;
293 else
294 conn->tmabort_state = TMABORT_FAILED;
295 wake_up(&conn->ehwait);
296}
297
278/** 298/**
279 * __iscsi_complete_pdu - complete pdu 299 * __iscsi_complete_pdu - complete pdu
280 * @conn: iscsi conn 300 * @conn: iscsi conn
@@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
340 360
341 switch(opcode) { 361 switch(opcode) {
342 case ISCSI_OP_LOGOUT_RSP: 362 case ISCSI_OP_LOGOUT_RSP:
363 if (datalen) {
364 rc = ISCSI_ERR_PROTO;
365 break;
366 }
343 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 367 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
344 /* fall through */ 368 /* fall through */
345 case ISCSI_OP_LOGIN_RSP: 369 case ISCSI_OP_LOGIN_RSP:
@@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
348 * login related PDU's exp_statsn is handled in 372 * login related PDU's exp_statsn is handled in
349 * userspace 373 * userspace
350 */ 374 */
351 rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); 375 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
376 rc = ISCSI_ERR_CONN_FAILED;
352 list_del(&mtask->running); 377 list_del(&mtask->running);
353 if (conn->login_mtask != mtask) 378 if (conn->login_mtask != mtask)
354 __kfifo_put(session->mgmtpool.queue, 379 __kfifo_put(session->mgmtpool.queue,
@@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
360 break; 385 break;
361 } 386 }
362 387
363 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 388 iscsi_tmf_rsp(conn, hdr);
364 conn->tmfrsp_pdus_cnt++;
365 if (conn->tmabort_state == TMABORT_INITIAL) {
366 conn->tmabort_state =
367 ((struct iscsi_tm_rsp *)hdr)->
368 response == ISCSI_TMF_RSP_COMPLETE ?
369 TMABORT_SUCCESS:TMABORT_FAILED;
370 /* unblock eh_abort() */
371 wake_up(&conn->ehwait);
372 }
373 break; 389 break;
374 case ISCSI_OP_NOOP_IN: 390 case ISCSI_OP_NOOP_IN:
375 if (hdr->ttt != ISCSI_RESERVED_TAG) { 391 if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) {
376 rc = ISCSI_ERR_PROTO; 392 rc = ISCSI_ERR_PROTO;
377 break; 393 break;
378 } 394 }
379 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1; 395 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
380 396
381 rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen); 397 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
398 rc = ISCSI_ERR_CONN_FAILED;
382 list_del(&mtask->running); 399 list_del(&mtask->running);
383 if (conn->login_mtask != mtask) 400 if (conn->login_mtask != mtask)
384 __kfifo_put(session->mgmtpool.queue, 401 __kfifo_put(session->mgmtpool.queue,
@@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
391 } else if (itt == ISCSI_RESERVED_TAG) { 408 } else if (itt == ISCSI_RESERVED_TAG) {
392 switch(opcode) { 409 switch(opcode) {
393 case ISCSI_OP_NOOP_IN: 410 case ISCSI_OP_NOOP_IN:
394 if (!datalen) { 411 if (datalen) {
395 rc = iscsi_check_assign_cmdsn(session,
396 (struct iscsi_nopin*)hdr);
397 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
398 rc = iscsi_recv_pdu(conn->cls_conn,
399 hdr, NULL, 0);
400 } else
401 rc = ISCSI_ERR_PROTO; 412 rc = ISCSI_ERR_PROTO;
413 break;
414 }
415
416 rc = iscsi_check_assign_cmdsn(session,
417 (struct iscsi_nopin*)hdr);
418 if (rc)
419 break;
420
421 if (hdr->ttt == ISCSI_RESERVED_TAG)
422 break;
423
424 if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
425 rc = ISCSI_ERR_CONN_FAILED;
402 break; 426 break;
403 case ISCSI_OP_REJECT: 427 case ISCSI_OP_REJECT:
404 /* we need sth like iscsi_reject_rsp()*/ 428 /* we need sth like iscsi_reject_rsp()*/
@@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
568 } 592 }
569 593
570 /* process command queue */ 594 /* process command queue */
571 while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask, 595 spin_lock_bh(&conn->session->lock);
572 sizeof(void*))) { 596 while (!list_empty(&conn->xmitqueue)) {
573 /* 597 /*
574 * iscsi tcp may readd the task to the xmitqueue to send 598 * iscsi tcp may readd the task to the xmitqueue to send
575 * write data 599 * write data
576 */ 600 */
577 spin_lock_bh(&conn->session->lock); 601 conn->ctask = list_entry(conn->xmitqueue.next,
578 if (list_empty(&conn->ctask->running)) 602 struct iscsi_cmd_task, running);
579 list_add_tail(&conn->ctask->running, &conn->run_list); 603 conn->ctask->state = ISCSI_TASK_RUNNING;
604 list_move_tail(conn->xmitqueue.next, &conn->run_list);
580 spin_unlock_bh(&conn->session->lock); 605 spin_unlock_bh(&conn->session->lock);
606
581 rc = tt->xmit_cmd_task(conn, conn->ctask); 607 rc = tt->xmit_cmd_task(conn, conn->ctask);
582 if (rc) 608 if (rc)
583 goto again; 609 goto again;
610 spin_lock_bh(&conn->session->lock);
584 } 611 }
612 spin_unlock_bh(&conn->session->lock);
585 /* done with this ctask */ 613 /* done with this ctask */
586 conn->ctask = NULL; 614 conn->ctask = NULL;
587 615
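
Above, the transmit queue becomes a linked list and each command task is moved, not copied, from the pending queue to the running list under the session lock. The following is a toy userspace re-implementation of the list_move_tail() idea, not <linux/list.h>; keeping a task on exactly one list at a time is what the loop relies on.

#include <stdio.h>

struct node {
	struct node *prev, *next;
	int id;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_move_tail(struct node *n, struct node *head)
{
	list_del(n);
	list_add_tail(n, head);
}

int main(void)
{
	struct node xmitqueue, run_list, tasks[3];
	int i;

	list_init(&xmitqueue);
	list_init(&run_list);
	for (i = 0; i < 3; i++) {
		tasks[i].id = i;
		list_add_tail(&tasks[i], &xmitqueue);
	}

	/* transmit loop: take the head task, mark it running, move it */
	while (xmitqueue.next != &xmitqueue) {
		struct node *t = xmitqueue.next;

		list_move_tail(t, &run_list);
		printf("task %d now running\n", t->id);
	}
	return 0;
}
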
@@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
691 sc->SCp.phase = session->age; 719 sc->SCp.phase = session->age;
692 sc->SCp.ptr = (char *)ctask; 720 sc->SCp.ptr = (char *)ctask;
693 721
722 ctask->state = ISCSI_TASK_PENDING;
694 ctask->mtask = NULL; 723 ctask->mtask = NULL;
695 ctask->conn = conn; 724 ctask->conn = conn;
696 ctask->sc = sc; 725 ctask->sc = sc;
@@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
700 729
701 session->tt->init_cmd_task(ctask); 730 session->tt->init_cmd_task(ctask);
702 731
703 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*)); 732 list_add_tail(&ctask->running, &conn->xmitqueue);
704 debug_scsi( 733 debug_scsi(
705 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n", 734 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
706 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", 735 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
@@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
977/* 1006/*
978 * xmit mutex and session lock must be held 1007 * xmit mutex and session lock must be held
979 */ 1008 */
980#define iscsi_remove_task(tasktype) \ 1009static struct iscsi_mgmt_task *
981static struct iscsi_##tasktype * \ 1010iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
982iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \ 1011{
983{ \ 1012 int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
984 int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \ 1013 struct iscsi_mgmt_task *task;
985 struct iscsi_##tasktype *task; \
986 \
987 debug_scsi("searching %d tasks\n", nr_tasks); \
988 \
989 for (i = 0; i < nr_tasks; i++) { \
990 __kfifo_get(fifo, (void*)&task, sizeof(void*)); \
991 debug_scsi("check task %u\n", task->itt); \
992 \
993 if (task->itt == itt) { \
994 debug_scsi("matched task\n"); \
995 return task; \
996 } \
997 \
998 __kfifo_put(fifo, (void*)&task, sizeof(void*)); \
999 } \
1000 return NULL; \
1001}
1002 1014
1003iscsi_remove_task(mgmt_task); 1015 debug_scsi("searching %d tasks\n", nr_tasks);
1004iscsi_remove_task(cmd_task); 1016
1017 for (i = 0; i < nr_tasks; i++) {
1018 __kfifo_get(fifo, (void*)&task, sizeof(void*));
1019 debug_scsi("check task %u\n", task->itt);
1020
1021 if (task->itt == itt) {
1022 debug_scsi("matched task\n");
1023 return task;
1024 }
1025
1026 __kfifo_put(fifo, (void*)&task, sizeof(void*));
1027 }
1028 return NULL;
1029}
1005 1030
1006static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask) 1031static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
1007{ 1032{
@@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1027{ 1052{
1028 struct scsi_cmnd *sc; 1053 struct scsi_cmnd *sc;
1029 1054
1030 conn->session->tt->cleanup_cmd_task(conn, ctask);
1031 iscsi_ctask_mtask_cleanup(ctask);
1032
1033 sc = ctask->sc; 1055 sc = ctask->sc;
1034 if (!sc) 1056 if (!sc)
1035 return; 1057 return;
1058
1059 conn->session->tt->cleanup_cmd_task(conn, ctask);
1060 iscsi_ctask_mtask_cleanup(ctask);
1061
1036 sc->result = err; 1062 sc->result = err;
1037 sc->resid = sc->request_bufflen; 1063 sc->resid = sc->request_bufflen;
1038 iscsi_complete_command(conn->session, ctask); 1064 iscsi_complete_command(conn->session, ctask);
@@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1043 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr; 1069 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
1044 struct iscsi_conn *conn = ctask->conn; 1070 struct iscsi_conn *conn = ctask->conn;
1045 struct iscsi_session *session = conn->session; 1071 struct iscsi_session *session = conn->session;
1046 struct iscsi_cmd_task *pending_ctask;
1047 int rc; 1072 int rc;
1048 1073
1049 conn->eh_abort_cnt++; 1074 conn->eh_abort_cnt++;
@@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1061 goto failed; 1086 goto failed;
1062 1087
1063 /* ctask completed before time out */ 1088 /* ctask completed before time out */
1064 if (!ctask->sc) 1089 if (!ctask->sc) {
1065 goto success; 1090 spin_unlock_bh(&session->lock);
1091 debug_scsi("sc completed while abort in progress\n");
1092 goto success_rel_mutex;
1093 }
1066 1094
1067 /* what should we do here ? */ 1095 /* what should we do here ? */
1068 if (conn->ctask == ctask) { 1096 if (conn->ctask == ctask) {
@@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1071 goto failed; 1099 goto failed;
1072 } 1100 }
1073 1101
1074 /* check for the easy pending cmd abort */ 1102 if (ctask->state == ISCSI_TASK_PENDING)
1075 pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt); 1103 goto success_cleanup;
1076 if (pending_ctask) {
1077 /* iscsi_tcp queues write transfers on the xmitqueue */
1078 if (list_empty(&pending_ctask->running)) {
1079 debug_scsi("found pending task\n");
1080 goto success;
1081 } else
1082 __kfifo_put(conn->xmitqueue, (void*)&pending_ctask,
1083 sizeof(void*));
1084 }
1085 1104
1086 conn->tmabort_state = TMABORT_INITIAL; 1105 conn->tmabort_state = TMABORT_INITIAL;
1087 1106
@@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1089 rc = iscsi_exec_abort_task(sc, ctask); 1108 rc = iscsi_exec_abort_task(sc, ctask);
1090 spin_lock_bh(&session->lock); 1109 spin_lock_bh(&session->lock);
1091 1110
1092 iscsi_ctask_mtask_cleanup(ctask);
1093 if (rc || sc->SCp.phase != session->age || 1111 if (rc || sc->SCp.phase != session->age ||
1094 session->state != ISCSI_STATE_LOGGED_IN) 1112 session->state != ISCSI_STATE_LOGGED_IN)
1095 goto failed; 1113 goto failed;
1114 iscsi_ctask_mtask_cleanup(ctask);
1096 1115
1097 /* ctask completed before tmf abort response */ 1116 switch (conn->tmabort_state) {
1098 if (!ctask->sc) { 1117 case TMABORT_SUCCESS:
1099 debug_scsi("sc completed while abort in progress\n"); 1118 goto success_cleanup;
1100 goto success; 1119 case TMABORT_NOT_FOUND:
1101 } 1120 if (!ctask->sc) {
1102 1121 /* ctask completed before tmf abort response */
1103 if (conn->tmabort_state != TMABORT_SUCCESS) { 1122 spin_unlock_bh(&session->lock);
1123 debug_scsi("sc completed while abort in progress\n");
1124 goto success_rel_mutex;
1125 }
1126 /* fall through */
1127 default:
1128 /* timedout or failed */
1104 spin_unlock_bh(&session->lock); 1129 spin_unlock_bh(&session->lock);
1105 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1130 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1106 spin_lock_bh(&session->lock); 1131 spin_lock_bh(&session->lock);
1107 goto failed; 1132 goto failed;
1108 } 1133 }
1109 1134
1110success: 1135success_cleanup:
1111 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); 1136 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
1112 spin_unlock_bh(&session->lock); 1137 spin_unlock_bh(&session->lock);
1113 1138
@@ -1121,6 +1146,7 @@ success:
1121 spin_unlock(&session->lock); 1146 spin_unlock(&session->lock);
1122 write_unlock_bh(conn->recv_lock); 1147 write_unlock_bh(conn->recv_lock);
1123 1148
1149success_rel_mutex:
1124 mutex_unlock(&conn->xmitmutex); 1150 mutex_unlock(&conn->xmitmutex);
1125 return SUCCESS; 1151 return SUCCESS;
1126 1152
@@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1263 if (cmd_task_size) 1289 if (cmd_task_size)
1264 ctask->dd_data = &ctask[1]; 1290 ctask->dd_data = &ctask[1];
1265 ctask->itt = cmd_i; 1291 ctask->itt = cmd_i;
1292 INIT_LIST_HEAD(&ctask->running);
1266 } 1293 }
1267 1294
1268 spin_lock_init(&session->lock); 1295 spin_lock_init(&session->lock);
@@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1282 if (mgmt_task_size) 1309 if (mgmt_task_size)
1283 mtask->dd_data = &mtask[1]; 1310 mtask->dd_data = &mtask[1];
1284 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i; 1311 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
1312 INIT_LIST_HEAD(&mtask->running);
1285 } 1313 }
1286 1314
1287 if (scsi_add_host(shost, NULL)) 1315 if (scsi_add_host(shost, NULL))
@@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1322{ 1350{
1323 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 1351 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1324 struct iscsi_session *session = iscsi_hostdata(shost->hostdata); 1352 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1353 struct module *owner = cls_session->transport->owner;
1325 1354
1326 scsi_remove_host(shost); 1355 scsi_remove_host(shost);
1327 1356
1328 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 1357 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
1329 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 1358 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
1330 1359
1360 kfree(session->targetname);
1361
1331 iscsi_destroy_session(cls_session); 1362 iscsi_destroy_session(cls_session);
1332 scsi_host_put(shost); 1363 scsi_host_put(shost);
1333 module_put(cls_session->transport->owner); 1364 module_put(owner);
1334} 1365}
1335EXPORT_SYMBOL_GPL(iscsi_session_teardown); 1366EXPORT_SYMBOL_GPL(iscsi_session_teardown);
1336 1367
@@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1361 conn->tmabort_state = TMABORT_INITIAL; 1392 conn->tmabort_state = TMABORT_INITIAL;
1362 INIT_LIST_HEAD(&conn->run_list); 1393 INIT_LIST_HEAD(&conn->run_list);
1363 INIT_LIST_HEAD(&conn->mgmt_run_list); 1394 INIT_LIST_HEAD(&conn->mgmt_run_list);
1364 1395 INIT_LIST_HEAD(&conn->xmitqueue);
1365 /* initialize general xmit PDU commands queue */
1366 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
1367 GFP_KERNEL, NULL);
1368 if (conn->xmitqueue == ERR_PTR(-ENOMEM))
1369 goto xmitqueue_alloc_fail;
1370 1396
1371 /* initialize general immediate & non-immediate PDU commands queue */ 1397 /* initialize general immediate & non-immediate PDU commands queue */
1372 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), 1398 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
@@ -1394,7 +1420,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1394 data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL); 1420 data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
1395 if (!data) 1421 if (!data)
1396 goto login_mtask_data_alloc_fail; 1422 goto login_mtask_data_alloc_fail;
1397 conn->login_mtask->data = data; 1423 conn->login_mtask->data = conn->data = data;
1398 1424
1399 init_timer(&conn->tmabort_timer); 1425 init_timer(&conn->tmabort_timer);
1400 mutex_init(&conn->xmitmutex); 1426 mutex_init(&conn->xmitmutex);
@@ -1410,8 +1436,6 @@ login_mtask_alloc_fail:
1410mgmtqueue_alloc_fail: 1436mgmtqueue_alloc_fail:
1411 kfifo_free(conn->immqueue); 1437 kfifo_free(conn->immqueue);
1412immqueue_alloc_fail: 1438immqueue_alloc_fail:
1413 kfifo_free(conn->xmitqueue);
1414xmitqueue_alloc_fail:
1415 iscsi_destroy_conn(cls_conn); 1439 iscsi_destroy_conn(cls_conn);
1416 return NULL; 1440 return NULL;
1417} 1441}
@@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1432 1456
1433 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1457 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1434 mutex_lock(&conn->xmitmutex); 1458 mutex_lock(&conn->xmitmutex);
1435 if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) {
1436 if (session->tt->suspend_conn_recv)
1437 session->tt->suspend_conn_recv(conn);
1438
1439 session->tt->terminate_conn(conn);
1440 }
1441 1459
1442 spin_lock_bh(&session->lock); 1460 spin_lock_bh(&session->lock);
1443 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 1461 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
@@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1474 } 1492 }
1475 1493
1476 spin_lock_bh(&session->lock); 1494 spin_lock_bh(&session->lock);
1477 kfree(conn->login_mtask->data); 1495 kfree(conn->data);
1496 kfree(conn->persistent_address);
1478 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask, 1497 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
1479 sizeof(void*)); 1498 sizeof(void*));
1480 list_del(&conn->item); 1499 list_del(&conn->item);
@@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1489 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1; 1508 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
1490 spin_unlock_bh(&session->lock); 1509 spin_unlock_bh(&session->lock);
1491 1510
1492 kfifo_free(conn->xmitqueue);
1493 kfifo_free(conn->immqueue); 1511 kfifo_free(conn->immqueue);
1494 kfifo_free(conn->mgmtqueue); 1512 kfifo_free(conn->mgmtqueue);
1495 1513
@@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn)
1572 struct iscsi_cmd_task *ctask, *tmp; 1590 struct iscsi_cmd_task *ctask, *tmp;
1573 1591
1574 /* flush pending */ 1592 /* flush pending */
1575 while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) { 1593 list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
1576 debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc, 1594 debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
1577 ctask->itt); 1595 ctask->itt);
1578 fail_command(conn, ctask, DID_BUS_BUSY << 16); 1596 fail_command(conn, ctask, DID_BUS_BUSY << 16);
@@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1615 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1633 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1616 spin_unlock_bh(&session->lock); 1634 spin_unlock_bh(&session->lock);
1617 1635
1618 if (session->tt->suspend_conn_recv) 1636 write_lock_bh(conn->recv_lock);
1619 session->tt->suspend_conn_recv(conn); 1637 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1638 write_unlock_bh(conn->recv_lock);
1620 1639
1621 mutex_lock(&conn->xmitmutex); 1640 mutex_lock(&conn->xmitmutex);
1622 /* 1641 /*
@@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1635 } 1654 }
1636 } 1655 }
1637 1656
1638 session->tt->terminate_conn(conn);
1639 /* 1657 /*
1640 * flush queues. 1658 * flush queues.
1641 */ 1659 */
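
The libiscsi hunks above replace the kfifo-based xmitqueue with a plain struct list_head plus an explicit per-task state (ISCSI_TASK_PENDING / ISCSI_TASK_RUNNING), which is what lets iscsi_eh_abort() spot a still-queued command directly instead of draining and refilling a fifo. Below is a minimal, hedged sketch of that queueing pattern in kernel-style C; the struct and function names are simplified stand-ins, not the driver's real ones, and only the standard <linux/list.h> and <linux/spinlock.h> primitives are assumed.

#include <linux/list.h>
#include <linux/spinlock.h>

enum xmit_state { XMIT_TASK_PENDING, XMIT_TASK_RUNNING };

struct xmit_task {
	enum xmit_state state;
	struct list_head running;	/* links the task into a queue */
};

struct xmit_conn {
	spinlock_t lock;
	struct list_head xmitqueue;	/* tasks waiting to be sent */
	struct list_head run_list;	/* tasks currently being sent */
};

static void xmit_conn_init(struct xmit_conn *conn)
{
	spin_lock_init(&conn->lock);
	INIT_LIST_HEAD(&conn->xmitqueue);
	INIT_LIST_HEAD(&conn->run_list);
}

/* queueing side (caller holds the lock, as queuecommand does):
 * mark the task pending and append it to the xmit queue */
static void xmit_queue_task(struct xmit_conn *conn, struct xmit_task *task)
{
	task->state = XMIT_TASK_PENDING;
	list_add_tail(&task->running, &conn->xmitqueue);
}

/* transmit side: pop entries under the lock, send them outside it */
static void xmit_run_queue(struct xmit_conn *conn,
			   int (*xmit)(struct xmit_task *))
{
	struct xmit_task *task;

	spin_lock_bh(&conn->lock);
	while (!list_empty(&conn->xmitqueue)) {
		task = list_entry(conn->xmitqueue.next,
				  struct xmit_task, running);
		task->state = XMIT_TASK_RUNNING;
		list_move_tail(&task->running, &conn->run_list);
		spin_unlock_bh(&conn->lock);

		if (xmit(task))		/* the driver retries later on failure */
			return;

		spin_lock_bh(&conn->lock);
	}
	spin_unlock_bh(&conn->lock);
}

With the state field, an abort path only needs to test the task's state under the lock; that is what the new "if (ctask->state == ISCSI_TASK_PENDING) goto success_cleanup" branch in iscsi_eh_abort() relies on.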
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c68cdd8736f..d384c16f4a87 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -222,7 +222,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
222 pmboxq->mb.mbxCommand = MBX_DOWN_LINK; 222 pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
223 pmboxq->mb.mbxOwner = OWN_HOST; 223 pmboxq->mb.mbxOwner = OWN_HOST;
224 224
225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 225 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
226 226
227 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { 227 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
228 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 228 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
@@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
884 phba->sysfs_mbox.mbox == NULL ) { 884 phba->sysfs_mbox.mbox == NULL ) {
885 sysfs_mbox_idle(phba); 885 sysfs_mbox_idle(phba);
886 spin_unlock_irq(host->host_lock); 886 spin_unlock_irq(host->host_lock);
887 return -EINVAL; 887 return -EAGAIN;
888 } 888 }
889 } 889 }
890 890
@@ -1000,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1000 spin_unlock_irq(phba->host->host_lock); 1000 spin_unlock_irq(phba->host->host_lock);
1001 rc = lpfc_sli_issue_mbox_wait (phba, 1001 rc = lpfc_sli_issue_mbox_wait (phba,
1002 phba->sysfs_mbox.mbox, 1002 phba->sysfs_mbox.mbox,
1003 phba->fc_ratov * 2); 1003 lpfc_mbox_tmo_val(phba,
1004 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
1004 spin_lock_irq(phba->host->host_lock); 1005 spin_lock_irq(phba->host->host_lock);
1005 } 1006 }
1006 1007
1007 if (rc != MBX_SUCCESS) { 1008 if (rc != MBX_SUCCESS) {
1008 sysfs_mbox_idle(phba); 1009 sysfs_mbox_idle(phba);
1009 spin_unlock_irq(host->host_lock); 1010 spin_unlock_irq(host->host_lock);
1010 return -ENODEV; 1011 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
1011 } 1012 }
1012 phba->sysfs_mbox.state = SMBOX_READING; 1013 phba->sysfs_mbox.state = SMBOX_READING;
1013 } 1014 }
@@ -1016,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
1016 printk(KERN_WARNING "mbox_read: Bad State\n"); 1017 printk(KERN_WARNING "mbox_read: Bad State\n");
1017 sysfs_mbox_idle(phba); 1018 sysfs_mbox_idle(phba);
1018 spin_unlock_irq(host->host_lock); 1019 spin_unlock_irq(host->host_lock);
1019 return -EINVAL; 1020 return -EAGAIN;
1020 } 1021 }
1021 1022
1022 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); 1023 memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
@@ -1210,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
1210 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1211 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
1211 struct lpfc_sli *psli = &phba->sli; 1212 struct lpfc_sli *psli = &phba->sli;
1212 struct fc_host_statistics *hs = &phba->link_stats; 1213 struct fc_host_statistics *hs = &phba->link_stats;
1214 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
1213 LPFC_MBOXQ_t *pmboxq; 1215 LPFC_MBOXQ_t *pmboxq;
1214 MAILBOX_t *pmb; 1216 MAILBOX_t *pmb;
1217 unsigned long seconds;
1215 int rc = 0; 1218 int rc = 0;
1216 1219
1217 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1220 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1272,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost)
1272 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt; 1275 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
1273 hs->error_frames = pmb->un.varRdLnk.crcCnt; 1276 hs->error_frames = pmb->un.varRdLnk.crcCnt;
1274 1277
1278 hs->link_failure_count -= lso->link_failure_count;
1279 hs->loss_of_sync_count -= lso->loss_of_sync_count;
1280 hs->loss_of_signal_count -= lso->loss_of_signal_count;
1281 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
1282 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
1283 hs->invalid_crc_count -= lso->invalid_crc_count;
1284 hs->error_frames -= lso->error_frames;
1285
1275 if (phba->fc_topology == TOPOLOGY_LOOP) { 1286 if (phba->fc_topology == TOPOLOGY_LOOP) {
1276 hs->lip_count = (phba->fc_eventTag >> 1); 1287 hs->lip_count = (phba->fc_eventTag >> 1);
1288 hs->lip_count -= lso->link_events;
1277 hs->nos_count = -1; 1289 hs->nos_count = -1;
1278 } else { 1290 } else {
1279 hs->lip_count = -1; 1291 hs->lip_count = -1;
1280 hs->nos_count = (phba->fc_eventTag >> 1); 1292 hs->nos_count = (phba->fc_eventTag >> 1);
1293 hs->nos_count -= lso->link_events;
1281 } 1294 }
1282 1295
1283 hs->dumped_frames = -1; 1296 hs->dumped_frames = -1;
1284 1297
1285/* FIX ME */ 1298 seconds = get_seconds();
1286 /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/ 1299 if (seconds < psli->stats_start)
1300 hs->seconds_since_last_reset = seconds +
1301 ((unsigned long)-1 - psli->stats_start);
1302 else
1303 hs->seconds_since_last_reset = seconds - psli->stats_start;
1287 1304
1288 return hs; 1305 return hs;
1289} 1306}
1290 1307
1308static void
1309lpfc_reset_stats(struct Scsi_Host *shost)
1310{
1311 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
1312 struct lpfc_sli *psli = &phba->sli;
1313 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
1314 LPFC_MBOXQ_t *pmboxq;
1315 MAILBOX_t *pmb;
1316 int rc = 0;
1317
1318 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1319 if (!pmboxq)
1320 return;
1321 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1322
1323 pmb = &pmboxq->mb;
1324 pmb->mbxCommand = MBX_READ_STATUS;
1325 pmb->mbxOwner = OWN_HOST;
1326 pmb->un.varWords[0] = 0x1; /* reset request */
1327 pmboxq->context1 = NULL;
1328
1329 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
1330 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1331 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1332 else
1333 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1334
1335 if (rc != MBX_SUCCESS) {
1336 if (rc == MBX_TIMEOUT)
1337 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1338 else
1339 mempool_free(pmboxq, phba->mbox_mem_pool);
1340 return;
1341 }
1342
1343 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1344 pmb->mbxCommand = MBX_READ_LNK_STAT;
1345 pmb->mbxOwner = OWN_HOST;
1346 pmboxq->context1 = NULL;
1347
1348 if ((phba->fc_flag & FC_OFFLINE_MODE) ||
1349 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1350 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1351 else
1352 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1353
1354 if (rc != MBX_SUCCESS) {
1355 if (rc == MBX_TIMEOUT)
1356 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1357 else
1358 mempool_free( pmboxq, phba->mbox_mem_pool);
1359 return;
1360 }
1361
1362 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
1363 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
1364 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
1365 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
1366 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
1367 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
1368 lso->error_frames = pmb->un.varRdLnk.crcCnt;
1369 lso->link_events = (phba->fc_eventTag >> 1);
1370
1371 psli->stats_start = get_seconds();
1372
1373 return;
1374}
1291 1375
1292/* 1376/*
1293 * The LPFC driver treats linkdown handling as target loss events so there 1377 * The LPFC driver treats linkdown handling as target loss events so there
@@ -1431,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = {
1431 */ 1515 */
1432 1516
1433 .get_fc_host_stats = lpfc_get_stats, 1517 .get_fc_host_stats = lpfc_get_stats,
1434 1518 .reset_fc_host_stats = lpfc_reset_stats,
1435 /* the LPFC driver doesn't support resetting stats yet */
1436 1519
1437 .dd_fcrport_size = sizeof(struct lpfc_rport_data), 1520 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
1438 .show_rport_maxframe_size = 1, 1521 .show_rport_maxframe_size = 1,
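
The lpfc_attr.c change above implements statistics reset without touching the firmware counters: lpfc_reset_stats() snapshots the raw link counters into psli->lnk_stat_offsets and records stats_start, and lpfc_get_stats() then subtracts those offsets and derives seconds_since_last_reset from get_seconds(), guarding against the seconds counter wrapping past the stored start. A small, hedged sketch of just that arithmetic (the helper names here are illustrative, not part of the driver):

#include <limits.h>

/* value to report: raw counter minus the snapshot taken at reset time */
static unsigned long since_reset(unsigned long raw, unsigned long offset)
{
	return raw - offset;
}

/* elapsed seconds since reset, tolerating wrap of the seconds counter */
static unsigned long elapsed_since(unsigned long now, unsigned long start)
{
	if (now < start)		/* counter wrapped past ULONG_MAX */
		return now + (ULONG_MAX - start);
	return now - start;
}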
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 517e9e4dd461..2a176467f71b 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
127void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 127void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
128void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 128void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
129LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 129LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
130int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
130 131
131int lpfc_mem_alloc(struct lpfc_hba *); 132int lpfc_mem_alloc(struct lpfc_hba *);
132void lpfc_mem_free(struct lpfc_hba *); 133void lpfc_mem_free(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index b65ee57af53e..bbb7310210b0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
131 } 131 }
132 132
133ct_unsol_event_exit_piocbq: 133ct_unsol_event_exit_piocbq:
134 list_del(&head);
134 if (pmbuf) { 135 if (pmbuf) {
135 list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) { 136 list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
136 lpfc_mbuf_free(phba, matp->virt, matp->phys); 137 lpfc_mbuf_free(phba, matp->virt, matp->phys);
@@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
481 if (CTrsp->CommandResponse.bits.CmdRsp == 482 if (CTrsp->CommandResponse.bits.CmdRsp ==
482 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { 483 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
483 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 484 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
484 "%d:0239 NameServer Rsp " 485 "%d:0208 NameServer Rsp "
485 "Data: x%x\n", 486 "Data: x%x\n",
486 phba->brd_no, 487 phba->brd_no,
487 phba->fc_flag); 488 phba->fc_flag);
@@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
588 589
589 lpfc_decode_firmware_rev(phba, fwrev, 0); 590 lpfc_decode_firmware_rev(phba, fwrev, 0);
590 591
591 if (phba->Port[0]) { 592 sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
592 sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName, 593 fwrev, lpfc_release_version);
593 phba->Port, fwrev, lpfc_release_version); 594 return;
594 } else {
595 sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
596 fwrev, lpfc_release_version);
597 }
598} 595}
599 596
600/* 597/*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b89f6cb641e6..3567de613162 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1848,9 +1848,12 @@ static void
1848lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 1848lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1849 struct lpfc_iocbq * rspiocb) 1849 struct lpfc_iocbq * rspiocb)
1850{ 1850{
1851 IOCB_t *irsp;
1851 struct lpfc_nodelist *ndlp; 1852 struct lpfc_nodelist *ndlp;
1852 LPFC_MBOXQ_t *mbox = NULL; 1853 LPFC_MBOXQ_t *mbox = NULL;
1853 1854
1855 irsp = &rspiocb->iocb;
1856
1854 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 1857 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1855 if (cmdiocb->context_un.mbox) 1858 if (cmdiocb->context_un.mbox)
1856 mbox = cmdiocb->context_un.mbox; 1859 mbox = cmdiocb->context_un.mbox;
@@ -1893,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1893 mempool_free( mbox, phba->mbox_mem_pool); 1896 mempool_free( mbox, phba->mbox_mem_pool);
1894 } else { 1897 } else {
1895 mempool_free( mbox, phba->mbox_mem_pool); 1898 mempool_free( mbox, phba->mbox_mem_pool);
1896 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 1899 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
1897 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); 1900 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1898 ndlp = NULL; 1901 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1902 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
1903 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
1904 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1905 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1906 ndlp = NULL;
1907 }
1899 } 1908 }
1900 } 1909 }
1901 } 1910 }
@@ -2839,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2839 2848
2840 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 2849 /* Xmit ELS RPS ACC response tag <ulpIoTag> */
2841 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2850 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2842 "%d:0128 Xmit ELS RPS ACC response tag x%x " 2851 "%d:0118 Xmit ELS RPS ACC response tag x%x "
2843 "Data: x%x x%x x%x x%x x%x\n", 2852 "Data: x%x x%x x%x x%x x%x\n",
2844 phba->brd_no, 2853 phba->brd_no,
2845 elsiocb->iocb.ulpIoTag, 2854 elsiocb->iocb.ulpIoTag,
@@ -2948,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
2948 2957
2949 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 2958 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
2950 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2959 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2951 "%d:0128 Xmit ELS RPL ACC response tag x%x " 2960 "%d:0120 Xmit ELS RPL ACC response tag x%x "
2952 "Data: x%x x%x x%x x%x x%x\n", 2961 "Data: x%x x%x x%x x%x x%x\n",
2953 phba->brd_no, 2962 phba->brd_no,
2954 elsiocb->iocb.ulpIoTag, 2963 elsiocb->iocb.ulpIoTag,
@@ -3109,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
3109 struct lpfc_nodelist *ndlp, *next_ndlp; 3118 struct lpfc_nodelist *ndlp, *next_ndlp;
3110 3119
3111 /* FAN received */ 3120 /* FAN received */
3112 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n", 3121 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
3113 phba->brd_no); 3122 phba->brd_no);
3114 3123
3115 icmd = &cmdiocb->iocb; 3124 icmd = &cmdiocb->iocb;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4d6cf990c4fc..b2f1552f1848 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1557 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1557 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1558 } 1558 }
1559 } 1559 }
1560
1561 spin_lock_irq(phba->host->host_lock);
1560 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1562 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1561 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1563 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1562 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1564 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1569 mempool_free(mb, phba->mbox_mem_pool); 1571 mempool_free(mb, phba->mbox_mem_pool);
1570 } 1572 }
1571 } 1573 }
1574 spin_unlock_irq(phba->host->host_lock);
1572 1575
1573 lpfc_els_abort(phba,ndlp,0); 1576 lpfc_els_abort(phba,ndlp,0);
1574 spin_lock_irq(phba->host->host_lock); 1577 spin_lock_irq(phba->host->host_lock);
@@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1782 /* LOG change to REGLOGIN */ 1785 /* LOG change to REGLOGIN */
1783 /* FIND node DID reglogin */ 1786 /* FIND node DID reglogin */
1784 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1787 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1785 "%d:0931 FIND node DID reglogin" 1788 "%d:0901 FIND node DID reglogin"
1786 " Data: x%p x%x x%x x%x\n", 1789 " Data: x%p x%x x%x x%x\n",
1787 phba->brd_no, 1790 phba->brd_no,
1788 ndlp, ndlp->nlp_DID, 1791 ndlp, ndlp->nlp_DID,
@@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1805 /* LOG change to PRLI */ 1808 /* LOG change to PRLI */
1806 /* FIND node DID prli */ 1809 /* FIND node DID prli */
1807 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1810 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1808 "%d:0931 FIND node DID prli " 1811 "%d:0902 FIND node DID prli "
1809 "Data: x%p x%x x%x x%x\n", 1812 "Data: x%p x%x x%x x%x\n",
1810 phba->brd_no, 1813 phba->brd_no,
1811 ndlp, ndlp->nlp_DID, 1814 ndlp, ndlp->nlp_DID,
@@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1828 /* LOG change to NPR */ 1831 /* LOG change to NPR */
1829 /* FIND node DID npr */ 1832 /* FIND node DID npr */
1830 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1833 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1831 "%d:0931 FIND node DID npr " 1834 "%d:0903 FIND node DID npr "
1832 "Data: x%p x%x x%x x%x\n", 1835 "Data: x%p x%x x%x x%x\n",
1833 phba->brd_no, 1836 phba->brd_no,
1834 ndlp, ndlp->nlp_DID, 1837 ndlp, ndlp->nlp_DID,
@@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1851 /* LOG change to UNUSED */ 1854 /* LOG change to UNUSED */
1852 /* FIND node DID unused */ 1855 /* FIND node DID unused */
1853 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1856 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1854 "%d:0931 FIND node DID unused " 1857 "%d:0905 FIND node DID unused "
1855 "Data: x%p x%x x%x x%x\n", 1858 "Data: x%p x%x x%x x%x\n",
1856 phba->brd_no, 1859 phba->brd_no,
1857 ndlp, ndlp->nlp_DID, 1860 ndlp, ndlp->nlp_DID,
@@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2335 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2338 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2336 if (!initlinkmbox) { 2339 if (!initlinkmbox) {
2337 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2340 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2338 "%d:0226 Device Discovery " 2341 "%d:0206 Device Discovery "
2339 "completion error\n", 2342 "completion error\n",
2340 phba->brd_no); 2343 phba->brd_no);
2341 phba->hba_state = LPFC_HBA_ERROR; 2344 phba->hba_state = LPFC_HBA_ERROR;
@@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2365 if (!clearlambox) { 2368 if (!clearlambox) {
2366 clrlaerr = 1; 2369 clrlaerr = 1;
2367 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2370 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2368 "%d:0226 Device Discovery " 2371 "%d:0207 Device Discovery "
2369 "completion error\n", 2372 "completion error\n",
2370 phba->brd_no); 2373 phba->brd_no);
2371 phba->hba_state = LPFC_HBA_ERROR; 2374 phba->hba_state = LPFC_HBA_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ef47b824cbed..f6948ffe689a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1379,6 +1379,7 @@ lpfc_offline(struct lpfc_hba * phba)
1379 /* stop all timers associated with this hba */ 1379 /* stop all timers associated with this hba */
1380 lpfc_stop_timer(phba); 1380 lpfc_stop_timer(phba);
1381 phba->work_hba_events = 0; 1381 phba->work_hba_events = 0;
1382 phba->work_ha = 0;
1382 1383
1383 lpfc_printf_log(phba, 1384 lpfc_printf_log(phba,
1384 KERN_WARNING, 1385 KERN_WARNING,
@@ -1616,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1616 goto out_free_iocbq; 1617 goto out_free_iocbq;
1617 } 1618 }
1618 1619
1619 /* We can rely on a queue depth attribute only after SLI HBA setup */ 1620 /*
1621 * Set initial can_queue value since 0 is no longer supported and
1622 * scsi_add_host will fail. This will be adjusted later based on the
1623 * max xri value determined in hba setup.
1624 */
1620 host->can_queue = phba->cfg_hba_queue_depth - 10; 1625 host->can_queue = phba->cfg_hba_queue_depth - 10;
1621 1626
1622 /* Tell the midlayer we support 16 byte commands */ 1627 /* Tell the midlayer we support 16 byte commands */
@@ -1656,6 +1661,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1656 goto out_free_irq; 1661 goto out_free_irq;
1657 } 1662 }
1658 1663
1664 /*
1665 * hba setup may have changed the hba_queue_depth so we need to adjust
1666 * the value of can_queue.
1667 */
1668 host->can_queue = phba->cfg_hba_queue_depth - 10;
1669
1659 lpfc_discovery_wait(phba); 1670 lpfc_discovery_wait(phba);
1660 1671
1661 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1672 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e42f22aaf71b..4d016c2a1b26 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba)
651 651
652 return mbq; 652 return mbq;
653} 653}
654
655int
656lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
657{
658 switch (cmd) {
659 case MBX_WRITE_NV: /* 0x03 */
660 case MBX_UPDATE_CFG: /* 0x1B */
661 case MBX_DOWN_LOAD: /* 0x1C */
662 case MBX_DEL_LD_ENTRY: /* 0x1D */
663 case MBX_LOAD_AREA: /* 0x81 */
664 case MBX_FLASH_WR_ULA: /* 0x98 */
665 case MBX_LOAD_EXP_ROM: /* 0x9C */
666 return LPFC_MBOX_TMO_FLASH_CMD;
667 }
668 return LPFC_MBOX_TMO;
669}
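
lpfc_mbox_tmo_val(), added above, picks a per-command mailbox timeout: flash write/erase commands get the long LPFC_MBOX_TMO_FLASH_CMD value, everything else the default LPFC_MBOX_TMO. Later hunks in this patch consume the same value in two ways; as a hedged illustration (standalone constants and helpers, not the driver's own), it feeds both a jiffies-based timer and a millisecond polling budget:

#define MBOX_TMO		30	/* seconds, default mailbox timeout */
#define MBOX_TMO_FLASH_CMD	300	/* seconds, flash write/erase commands */

/* interrupt-driven path: timer expiry "tmo_sec" seconds from now */
static unsigned long mbox_timer_expiry(unsigned long now_jiffies,
				       unsigned long hz, int tmo_sec)
{
	return now_jiffies + hz * tmo_sec;
}

/* polled path: total budget in milliseconds, burned one mdelay(1) at a time */
static int mbox_poll_budget_ms(int tmo_sec)
{
	return tmo_sec * 1000;
}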
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd0b0e293d63..20449a8dd53d 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
179 179
180 /* Abort outstanding I/O on NPort <nlp_DID> */ 180 /* Abort outstanding I/O on NPort <nlp_DID> */
181 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 181 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
182 "%d:0201 Abort outstanding I/O on NPort x%x " 182 "%d:0205 Abort outstanding I/O on NPort x%x "
183 "Data: x%x x%x x%x\n", 183 "Data: x%x x%x x%x\n",
184 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 184 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
185 ndlp->nlp_state, ndlp->nlp_rpi); 185 ndlp->nlp_state, ndlp->nlp_rpi);
@@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
393 mbox->context2 = ndlp; 393 mbox->context2 = ndlp;
394 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); 394 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
395 395
396 /*
397 * If there is an outstanding PLOGI issued, abort it before
398 * sending ACC rsp for received PLOGI. If pending plogi
399 * is not canceled here, the plogi will be rejected by
400 * remote port and will be retried. On a configuration with
401 * single discovery thread, this will cause a huge delay in
402 * discovery. Also this will cause multiple state machines
403 * running in parallel for this node.
404 */
405 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
406 /* software abort outstanding PLOGI */
407 lpfc_els_abort(phba, ndlp, 1);
408 }
409
396 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); 410 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
397 return 1; 411 return 1;
398 412
@@ -1601,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1601 1615
1602 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 1616 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1603 1617
1604 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1618 /*
1619 * Do not start discovery if discovery is about to start
1620 * or discovery in progress for this node. Starting discovery
1621 * here will affect the counting of discovery threads.
1622 */
1623 if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
1624 (ndlp->nlp_flag & NLP_NPR_2B_DISC)){
1605 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1625 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1606 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1626 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1607 ndlp->nlp_state = NLP_STE_ADISC_ISSUE; 1627 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a760a44173df..a8816a8738f8 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/delay.h>
24 25
25#include <scsi/scsi.h> 26#include <scsi/scsi.h>
26#include <scsi/scsi_device.h> 27#include <scsi/scsi_device.h>
@@ -841,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
841 return 0; 842 return 0;
842} 843}
843 844
845static void
846lpfc_block_error_handler(struct scsi_cmnd *cmnd)
847{
848 struct Scsi_Host *shost = cmnd->device->host;
849 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
850
851 spin_lock_irq(shost->host_lock);
852 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
853 spin_unlock_irq(shost->host_lock);
854 msleep(1000);
855 spin_lock_irq(shost->host_lock);
856 }
857 spin_unlock_irq(shost->host_lock);
858 return;
859}
844 860
845static int 861static int
846lpfc_abort_handler(struct scsi_cmnd *cmnd) 862lpfc_abort_handler(struct scsi_cmnd *cmnd)
@@ -855,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
855 unsigned int loop_count = 0; 871 unsigned int loop_count = 0;
856 int ret = SUCCESS; 872 int ret = SUCCESS;
857 873
874 lpfc_block_error_handler(cmnd);
858 spin_lock_irq(shost->host_lock); 875 spin_lock_irq(shost->host_lock);
859 876
860 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 877 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -957,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
957 int ret = FAILED; 974 int ret = FAILED;
958 int cnt, loopcnt; 975 int cnt, loopcnt;
959 976
977 lpfc_block_error_handler(cmnd);
960 spin_lock_irq(shost->host_lock); 978 spin_lock_irq(shost->host_lock);
961 /* 979 /*
962 * If target is not in a MAPPED state, delay the reset until 980 * If target is not in a MAPPED state, delay the reset until
@@ -1073,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1073 int cnt, loopcnt; 1091 int cnt, loopcnt;
1074 struct lpfc_scsi_buf * lpfc_cmd; 1092 struct lpfc_scsi_buf * lpfc_cmd;
1075 1093
1094 lpfc_block_error_handler(cmnd);
1076 spin_lock_irq(shost->host_lock); 1095 spin_lock_irq(shost->host_lock);
1077 1096
1078 lpfc_cmd = lpfc_get_scsi_buf(phba); 1097 lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -1104,7 +1123,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
1104 ndlp->rport->dd_data); 1123 ndlp->rport->dd_data);
1105 if (ret != SUCCESS) { 1124 if (ret != SUCCESS) {
1106 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1125 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1107 "%d:0713 Bus Reset on target %d failed\n", 1126 "%d:0700 Bus Reset on target %d failed\n",
1108 phba->brd_no, i); 1127 phba->brd_no, i);
1109 err_count++; 1128 err_count++;
1110 } 1129 }
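
lpfc_block_error_handler(), added above and called at the top of the abort, LUN-reset and bus-reset handlers, simply waits (dropping the host lock and sleeping one second per iteration) until the command's fc_rport leaves FC_PORTSTATE_BLOCKED, so the SCSI error handlers never run against a temporarily blocked remote port. The shape of that wait loop, reduced to a hedged sketch with a generic predicate standing in for the rport state check:

#include <linux/spinlock.h>
#include <linux/delay.h>

/* Sleep in 1 s steps until cond(arg) becomes false; the lock is dropped
 * around each sleep, mirroring the lpfc_block_error_handler() pattern. */
static void wait_while_blocked(spinlock_t *lock,
			       int (*cond)(void *), void *arg)
{
	spin_lock_irq(lock);
	while (cond(arg)) {
		spin_unlock_irq(lock);
		msleep(1000);
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
}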
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 350a625fa224..70f4d5a1348e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
320 kfree(old_arr); 320 kfree(old_arr);
321 return iotag; 321 return iotag;
322 } 322 }
323 } 323 } else
324 spin_unlock_irq(phba->host->host_lock);
324 325
325 lpfc_printf_log(phba, KERN_ERR,LOG_SLI, 326 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
326 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", 327 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -969,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
969 * resources need to be recovered. 970 * resources need to be recovered.
970 */ 971 */
971 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 972 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
972 printk(KERN_INFO "%s: IOCB cmd 0x%x processed." 973 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
973 " Skipping completion\n", __FUNCTION__, 974 "%d:0314 IOCB cmd 0x%x"
974 irsp->ulpCommand); 975 " processed. Skipping"
976 " completion", phba->brd_no,
977 irsp->ulpCommand);
975 break; 978 break;
976 } 979 }
977 980
@@ -1104,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1104 if (unlikely(irsp->ulpStatus)) { 1107 if (unlikely(irsp->ulpStatus)) {
1105 /* Rsp ring <ringno> error: IOCB */ 1108 /* Rsp ring <ringno> error: IOCB */
1106 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1109 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1107 "%d:0326 Rsp Ring %d error: IOCB Data: " 1110 "%d:0336 Rsp Ring %d error: IOCB Data: "
1108 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1111 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1109 phba->brd_no, pring->ringno, 1112 phba->brd_no, pring->ringno,
1110 irsp->un.ulpWord[0], irsp->un.ulpWord[1], 1113 irsp->un.ulpWord[0], irsp->un.ulpWord[1],
@@ -1122,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1122 * resources need to be recovered. 1125 * resources need to be recovered.
1123 */ 1126 */
1124 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 1127 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1125 printk(KERN_INFO "%s: IOCB cmd 0x%x processed. " 1128 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1126 "Skipping completion\n", __FUNCTION__, 1129 "%d:0333 IOCB cmd 0x%x"
1127 irsp->ulpCommand); 1130 " processed. Skipping"
1131 " completion\n", phba->brd_no,
1132 irsp->ulpCommand);
1128 break; 1133 break;
1129 } 1134 }
1130 1135
@@ -1155,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1155 } else { 1160 } else {
1156 /* Unknown IOCB command */ 1161 /* Unknown IOCB command */
1157 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1162 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1158 "%d:0321 Unknown IOCB command " 1163 "%d:0334 Unknown IOCB command "
1159 "Data: x%x, x%x x%x x%x x%x\n", 1164 "Data: x%x, x%x x%x x%x x%x\n",
1160 phba->brd_no, type, irsp->ulpCommand, 1165 phba->brd_no, type, irsp->ulpCommand,
1161 irsp->ulpStatus, irsp->ulpIoTag, 1166 irsp->ulpStatus, irsp->ulpIoTag,
@@ -1238,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1238 lpfc_printf_log(phba, 1243 lpfc_printf_log(phba,
1239 KERN_ERR, 1244 KERN_ERR,
1240 LOG_SLI, 1245 LOG_SLI,
1241 "%d:0312 Ring %d handler: portRspPut %d " 1246 "%d:0303 Ring %d handler: portRspPut %d "
1242 "is bigger then rsp ring %d\n", 1247 "is bigger then rsp ring %d\n",
1243 phba->brd_no, 1248 phba->brd_no,
1244 pring->ringno, portRspPut, portRspMax); 1249 pring->ringno, portRspPut, portRspMax);
@@ -1383,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1383 lpfc_printf_log(phba, 1388 lpfc_printf_log(phba,
1384 KERN_ERR, 1389 KERN_ERR,
1385 LOG_SLI, 1390 LOG_SLI,
1386 "%d:0321 Unknown IOCB command " 1391 "%d:0335 Unknown IOCB command "
1387 "Data: x%x x%x x%x x%x\n", 1392 "Data: x%x x%x x%x x%x\n",
1388 phba->brd_no, 1393 phba->brd_no,
1389 irsp->ulpCommand, 1394 irsp->ulpCommand,
@@ -1399,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1399 next_iocb, 1404 next_iocb,
1400 &saveq->list, 1405 &saveq->list,
1401 list) { 1406 list) {
1407 list_del(&rspiocbp->list);
1402 lpfc_sli_release_iocbq(phba, 1408 lpfc_sli_release_iocbq(phba,
1403 rspiocbp); 1409 rspiocbp);
1404 } 1410 }
1405 } 1411 }
1406
1407 lpfc_sli_release_iocbq(phba, saveq); 1412 lpfc_sli_release_iocbq(phba, saveq);
1408 } 1413 }
1409 } 1414 }
@@ -1711,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
1711 phba->fc_myDID = 0; 1716 phba->fc_myDID = 0;
1712 phba->fc_prevDID = 0; 1717 phba->fc_prevDID = 0;
1713 1718
1714 psli->sli_flag = 0;
1715
1716 /* Turn off parity checking and serr during the physical reset */ 1719 /* Turn off parity checking and serr during the physical reset */
1717 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 1720 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1718 pci_write_config_word(phba->pcidev, PCI_COMMAND, 1721 pci_write_config_word(phba->pcidev, PCI_COMMAND,
1719 (cfg_value & 1722 (cfg_value &
1720 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 1723 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1721 1724
1722 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 1725 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
1723 /* Now toggle INITFF bit in the Host Control Register */ 1726 /* Now toggle INITFF bit in the Host Control Register */
1724 writel(HC_INITFF, phba->HCregaddr); 1727 writel(HC_INITFF, phba->HCregaddr);
1725 mdelay(1); 1728 mdelay(1);
@@ -1760,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1760 1763
1761 /* Restart HBA */ 1764 /* Restart HBA */
1762 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1765 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1763 "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no, 1766 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
1764 phba->hba_state, psli->sli_flag); 1767 phba->hba_state, psli->sli_flag);
1765 1768
1766 word0 = 0; 1769 word0 = 0;
@@ -1792,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1792 1795
1793 spin_unlock_irq(phba->host->host_lock); 1796 spin_unlock_irq(phba->host->host_lock);
1794 1797
1798 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
1799 psli->stats_start = get_seconds();
1800
1795 if (skip_post) 1801 if (skip_post)
1796 mdelay(100); 1802 mdelay(100);
1797 else 1803 else
@@ -1902,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
1902 } 1908 }
1903 1909
1904 while (resetcount < 2 && !done) { 1910 while (resetcount < 2 && !done) {
1911 spin_lock_irq(phba->host->host_lock);
1912 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
1913 spin_unlock_irq(phba->host->host_lock);
1905 phba->hba_state = LPFC_STATE_UNKNOWN; 1914 phba->hba_state = LPFC_STATE_UNKNOWN;
1906 lpfc_sli_brdrestart(phba); 1915 lpfc_sli_brdrestart(phba);
1907 msleep(2500); 1916 msleep(2500);
@@ -1909,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
1909 if (rc) 1918 if (rc)
1910 break; 1919 break;
1911 1920
1921 spin_lock_irq(phba->host->host_lock);
1922 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1923 spin_unlock_irq(phba->host->host_lock);
1912 resetcount++; 1924 resetcount++;
1913 1925
1914 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 1926 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
@@ -2194,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2194 return (MBX_NOT_FINISHED); 2206 return (MBX_NOT_FINISHED);
2195 } 2207 }
2196 /* timeout active mbox command */ 2208 /* timeout active mbox command */
2197 mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO); 2209 mod_timer(&psli->mbox_tmo, (jiffies +
2210 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2198 } 2211 }
2199 2212
2200 /* Mailbox cmd <cmd> issue */ 2213 /* Mailbox cmd <cmd> issue */
@@ -2254,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2254 break; 2267 break;
2255 2268
2256 case MBX_POLL: 2269 case MBX_POLL:
2257 i = 0;
2258 psli->mbox_active = NULL; 2270 psli->mbox_active = NULL;
2259 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2271 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2260 /* First read mbox status word */ 2272 /* First read mbox status word */
@@ -2268,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2268 /* Read the HBA Host Attention Register */ 2280 /* Read the HBA Host Attention Register */
2269 ha_copy = readl(phba->HAregaddr); 2281 ha_copy = readl(phba->HAregaddr);
2270 2282
2283 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2284 i *= 1000; /* Convert to ms */
2285
2271 /* Wait for command to complete */ 2286 /* Wait for command to complete */
2272 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2287 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2273 (!(ha_copy & HA_MBATT) && 2288 (!(ha_copy & HA_MBATT) &&
2274 (phba->hba_state > LPFC_WARM_START))) { 2289 (phba->hba_state > LPFC_WARM_START))) {
2275 if (i++ >= 100) { 2290 if (i-- <= 0) {
2276 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2291 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2277 spin_unlock_irqrestore(phba->host->host_lock, 2292 spin_unlock_irqrestore(phba->host->host_lock,
2278 drvr_flag); 2293 drvr_flag);
@@ -2290,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2290 2305
2291 /* Can be in interrupt context, do not sleep */ 2306 /* Can be in interrupt context, do not sleep */
2292 /* (or might be called with interrupts disabled) */ 2307 /* (or might be called with interrupts disabled) */
2293 mdelay(i); 2308 mdelay(1);
2294 2309
2295 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2310 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2296 2311
@@ -3005,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3005 3020
3006 if (timeleft == 0) { 3021 if (timeleft == 0) {
3007 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3022 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3008 "%d:0329 IOCB wait timeout error - no " 3023 "%d:0338 IOCB wait timeout error - no "
3009 "wake response Data x%x\n", 3024 "wake response Data x%x\n",
3010 phba->brd_no, timeout); 3025 phba->brd_no, timeout);
3011 retval = IOCB_TIMEDOUT; 3026 retval = IOCB_TIMEDOUT;
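
Among the lpfc_sli.c changes above, the slow-ring handler now calls list_del() on each chained response IOCB before releasing it while walking the chain with list_for_each_entry_safe(). A minimal, hedged sketch of that unlink-then-release idiom, with a generic entry type and kfree() standing in for the driver's iocbq release:

#include <linux/list.h>
#include <linux/slab.h>

struct chained_entry {
	struct list_head list;
	/* ... payload ... */
};

/* Release every entry chained on @head. The _safe iterator caches the
 * next pointer, and list_del() unlinks each node before it is handed
 * back, so no freed element is ever left linked on the list. */
static void release_chained(struct list_head *head)
{
	struct chained_entry *entry, *next;

	list_for_each_entry_safe(entry, next, head, list) {
		list_del(&entry->list);
		kfree(entry);		/* stand-in for the pool release */
	}
}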
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index d8ef0d2894d4..e26de6809358 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -172,6 +172,18 @@ struct lpfc_sli_stat {
172 uint32_t mbox_busy; /* Mailbox cmd busy */ 172 uint32_t mbox_busy; /* Mailbox cmd busy */
173}; 173};
174 174
175/* Structure to store link status values when port stats are reset */
176struct lpfc_lnk_stat {
177 uint32_t link_failure_count;
178 uint32_t loss_of_sync_count;
179 uint32_t loss_of_signal_count;
180 uint32_t prim_seq_protocol_err_count;
181 uint32_t invalid_tx_word_count;
182 uint32_t invalid_crc_count;
183 uint32_t error_frames;
184 uint32_t link_events;
185};
186
175/* Structure used to hold SLI information */ 187/* Structure used to hold SLI information */
176struct lpfc_sli { 188struct lpfc_sli {
177 uint32_t num_rings; 189 uint32_t num_rings;
@@ -201,6 +213,8 @@ struct lpfc_sli {
201 struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ 213 struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
202 size_t iocbq_lookup_len; /* current lengs of the array */ 214 size_t iocbq_lookup_len; /* current lengs of the array */
203 uint16_t last_iotag; /* last allocated IOTAG */ 215 uint16_t last_iotag; /* last allocated IOTAG */
216 unsigned long stats_start; /* in seconds */
217 struct lpfc_lnk_stat lnk_stat_offsets;
204}; 218};
205 219
206/* Given a pointer to the start of the ring, and the slot number of 220/* Given a pointer to the start of the ring, and the slot number of
@@ -211,3 +225,9 @@ struct lpfc_sli {
211 225
212#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 226#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
213 command */ 227 command */
228#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
229 * or erase cmds. This is especially
230 * long because of the potential of
231 * multiple flash erases that can be
232 * spawned.
233 */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 10e89c6ae823..c7091ea29f3f 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.7" 21#define LPFC_DRIVER_VERSION "8.1.9"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 4675343228ad..8cd0bd1d0f7c 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -37,6 +37,12 @@
37#define LSI_MAX_CHANNELS 16 37#define LSI_MAX_CHANNELS 16
38#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) 38#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
39 39
40#define HBA_SIGNATURE_64_BIT 0x299
41#define PCI_CONF_AMISIG64 0xa4
42
43#define MEGA_SCSI_INQ_EVPD 1
44#define MEGA_INVALID_FIELD_IN_CDB 0x24
45
40 46
41/** 47/**
42 * scb_t - scsi command control block 48 * scb_t - scsi command control block
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index bdaee144a1c3..b8aa34202ec3 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -132,6 +132,10 @@ typedef struct uioc {
132/* Driver Data: */ 132/* Driver Data: */
133 void __user * user_data; 133 void __user * user_data;
134 uint32_t user_data_len; 134 uint32_t user_data_len;
135
136 /* 64bit alignment */
137 uint32_t pad_for_64bit_align;
138
135 mraid_passthru_t __user *user_pthru; 139 mraid_passthru_t __user *user_pthru;
136 140
137 mraid_passthru_t *pthru32; 141 mraid_passthru_t *pthru32;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 92715130ac09..cd982c877da0 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mbox.c 12 * FILE : megaraid_mbox.c
13 * Version : v2.20.4.8 (Apr 11 2006) 13 * Version : v2.20.4.9 (Jul 16 2006)
14 * 14 *
15 * Authors: 15 * Authors:
16 * Atul Mukker <Atul.Mukker@lsil.com> 16 * Atul Mukker <Atul.Mukker@lsil.com>
@@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter)
720 struct pci_dev *pdev; 720 struct pci_dev *pdev;
721 mraid_device_t *raid_dev; 721 mraid_device_t *raid_dev;
722 int i; 722 int i;
723 uint32_t magic64;
723 724
724 725
725 adapter->ito = MBOX_TIMEOUT; 726 adapter->ito = MBOX_TIMEOUT;
@@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter)
863 864
864 // Set the DMA mask to 64-bit. All supported controllers as capable of 865 // Set the DMA mask to 64-bit. All supported controllers as capable of
865 // DMA in this range 866 // DMA in this range
866 if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) { 867 pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
867 868
868 con_log(CL_ANN, (KERN_WARNING 869 if (((magic64 == HBA_SIGNATURE_64_BIT) &&
869 "megaraid: could not set DMA mask for 64-bit.\n")); 870 ((adapter->pdev->subsystem_device !=
871 PCI_SUBSYS_ID_MEGARAID_SATA_150_6) ||
872 (adapter->pdev->subsystem_device !=
873 PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
874 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
875 adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
876 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
877 adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
878 (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
879 adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
880 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
881 adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
882 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
883 adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
884 if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
885 con_log(CL_ANN, (KERN_WARNING
886 "megaraid: DMA mask for 64-bit failed\n"));
870 887
871 goto out_free_sysfs_res; 888 if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) {
889 con_log(CL_ANN, (KERN_WARNING
890 "megaraid: 32-bit DMA mask failed\n"));
891 goto out_free_sysfs_res;
892 }
893 }
872 } 894 }
873 895
874 // setup tasklet for DPC 896 // setup tasklet for DPC
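
A minimal sketch of the probe-and-fallback pattern the hunk above introduces: read the adapter's 64-bit signature from PCI config space and only then try the 64-bit DMA mask, dropping to 32-bit addressing if either step says no. PCI_CONF_AMISIG64 and HBA_SIGNATURE_64_BIT mirror the new defines earlier in this patch; the helper name and the simplified capability check are assumptions, not driver code.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    #define PCI_CONF_AMISIG64       0xa4    /* config-space offset, from this patch */
    #define HBA_SIGNATURE_64_BIT    0x299   /* value advertised by 64-bit capable HBAs */

    /* Sketch: pick the widest usable DMA mask for a megaraid-style adapter. */
    static int setup_adapter_dma_mask(struct pci_dev *pdev)
    {
            u32 magic64;

            pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);

            /* 64-bit DMA only when the board says it can and the mask sticks */
            if (magic64 == HBA_SIGNATURE_64_BIT &&
                pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
                    return 0;

            /* otherwise fall back to 32-bit addressing */
            if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
                    return -EIO;

            return 0;
    }
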
@@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1622 rdev->last_disp |= (1L << SCP2CHANNEL(scp)); 1644 rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1623 } 1645 }
1624 1646
1647 if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
1648 scp->sense_buffer[0] = 0x70;
1649 scp->sense_buffer[2] = ILLEGAL_REQUEST;
1650 scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
1651 scp->result = CHECK_CONDITION << 1;
1652 return NULL;
1653 }
1654
1625 /* Fall through */ 1655 /* Fall through */
1626 1656
1627 case READ_CAPACITY: 1657 case READ_CAPACITY:
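
The EVPD hunk above fails the command inside the driver instead of passing it to firmware: it fills in fixed-format sense data (response code 0x70, sense key ILLEGAL REQUEST, ASC 0x24 "invalid field in CDB") and reports CHECK_CONDITION. A hedged sketch of that sense construction; the helper and its return convention are illustrative, only the constants mirror the patch.

    #include <linux/string.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    #define MEGA_SCSI_INQ_EVPD              1       /* EVPD bit in INQUIRY byte 1 */
    #define MEGA_INVALID_FIELD_IN_CDB       0x24    /* additional sense code */

    /* Sketch: reject INQUIRY commands that request vital product data. */
    static int reject_evpd_inquiry(struct scsi_cmnd *scp)
    {
            if (!(scp->cmnd[1] & MEGA_SCSI_INQ_EVPD))
                    return 0;                               /* let it through */

            memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
            scp->sense_buffer[0]  = 0x70;                   /* current, fixed format */
            scp->sense_buffer[2]  = ILLEGAL_REQUEST;        /* sense key */
            scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
            scp->result = CHECK_CONDITION << 1;             /* as the hunk does */

            return 1;                                       /* caller completes scp */
    }
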
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 868fb0ec93e7..2b5a3285f799 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
21#include "megaraid_ioctl.h" 21#include "megaraid_ioctl.h"
22 22
23 23
24#define MEGARAID_VERSION "2.20.4.8" 24#define MEGARAID_VERSION "2.20.4.9"
25#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" 25#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)"
26 26
27 27
28/* 28/*
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index e8f534fb336b..d85b9a8f1b8d 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 * 11 *
12 * FILE : megaraid_mm.c 12 * FILE : megaraid_mm.c
13 * Version : v2.20.2.6 (Mar 7 2005) 13 * Version : v2.20.2.7 (Jul 16 2006)
14 * 14 *
15 * Common management module 15 * Common management module
16 */ 16 */
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index 3d9e67d6849d..c8762b2b8ed1 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -27,9 +27,9 @@
27#include "megaraid_ioctl.h" 27#include "megaraid_ioctl.h"
28 28
29 29
30#define LSI_COMMON_MOD_VERSION "2.20.2.6" 30#define LSI_COMMON_MOD_VERSION "2.20.2.7"
31#define LSI_COMMON_MOD_EXT_VERSION \ 31#define LSI_COMMON_MOD_EXT_VERSION \
32 "(Release Date: Mon Mar 7 00:01:03 EST 2005)" 32 "(Release Date: Sun Jul 16 00:01:03 EST 2006)"
33 33
34 34
35#define LSI_DBGLVL dbglevel 35#define LSI_DBGLVL dbglevel
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index d1f38c32aa15..efc8fff1d250 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -183,7 +183,8 @@ static struct ata_port_info adma_port_info[] = {
183 { 183 {
184 .sht = &adma_ata_sht, 184 .sht = &adma_ata_sht,
185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 185 .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, 186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
187 ATA_FLAG_PIO_POLLING,
187 .pio_mask = 0x10, /* pio4 */ 188 .pio_mask = 0x10, /* pio4 */
188 .udma_mask = 0x1f, /* udma0-4 */ 189 .udma_mask = 0x1f, /* udma0-4 */
189 .port_ops = &adma_ata_ops, 190 .port_ops = &adma_ata_ops,
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 139ea0e27fd7..0930260aec2c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -487,6 +487,7 @@ typedef struct {
487#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */ 487#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */
488#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */ 488#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */
489 /* used. */ 489 /* used. */
490#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */
490#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */ 491#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */
491#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */ 492#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */
492#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */ 493#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9758dba95542..859649160caa 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
3063int 3063int
3064qla2x00_abort_isp(scsi_qla_host_t *ha) 3064qla2x00_abort_isp(scsi_qla_host_t *ha)
3065{ 3065{
3066 int rval;
3066 unsigned long flags = 0; 3067 unsigned long flags = 0;
3067 uint16_t cnt; 3068 uint16_t cnt;
3068 srb_t *sp; 3069 srb_t *sp;
@@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3119 3120
3120 ha->isp_abort_cnt = 0; 3121 ha->isp_abort_cnt = 0;
3121 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3122 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3123
3124 if (ha->eft) {
3125 rval = qla2x00_trace_control(ha, TC_ENABLE,
3126 ha->eft_dma, EFT_NUM_BUFFERS);
3127 if (rval) {
3128 qla_printk(KERN_WARNING, ha,
3129 "Unable to reinitialize EFT "
3130 "(%d).\n", rval);
3131 }
3132 }
3122 } else { /* failed the ISP abort */ 3133 } else { /* failed the ISP abort */
3123 ha->flags.online = 1; 3134 ha->flags.online = 1;
3124 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3135 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2b60a27eff0b..c5b3c610a32a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
471 mrk24->nport_handle = cpu_to_le16(loop_id); 471 mrk24->nport_handle = cpu_to_le16(loop_id);
472 mrk24->lun[1] = LSB(lun); 472 mrk24->lun[1] = LSB(lun);
473 mrk24->lun[2] = MSB(lun); 473 mrk24->lun[2] = MSB(lun);
474 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
474 } else { 475 } else {
475 SET_TARGET_ID(ha, mrk->target, loop_id); 476 SET_TARGET_ID(ha, mrk->target, loop_id);
476 mrk->lun = cpu_to_le16(lun); 477 mrk->lun = cpu_to_le16(lun);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 795bf15b1b8f..de0613135f70 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
587 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 587 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
588 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); 588 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
589 break; 589 break;
590
591 case MBA_TRACE_NOTIFICATION:
592 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
593 ha->host_no, mb[1], mb[2]));
594 break;
590 } 595 }
591} 596}
592 597
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ec7ebb6037e6..65cbe2f5eea2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
744{ 744{
745 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 745 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
746 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 746 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
747 srb_t *sp;
748 int ret; 747 int ret;
749 unsigned int id, lun; 748 unsigned int id, lun;
750 unsigned long serial; 749 unsigned long serial;
@@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
755 lun = cmd->device->lun; 754 lun = cmd->device->lun;
756 serial = cmd->serial_number; 755 serial = cmd->serial_number;
757 756
758 sp = (srb_t *) CMD_SP(cmd); 757 if (!fcport)
759 if (!sp || !fcport)
760 return ret; 758 return ret;
761 759
762 qla_printk(KERN_INFO, ha, 760 qla_printk(KERN_INFO, ha,
@@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
875{ 873{
876 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 874 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
877 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 875 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
878 srb_t *sp;
879 int ret; 876 int ret;
880 unsigned int id, lun; 877 unsigned int id, lun;
881 unsigned long serial; 878 unsigned long serial;
@@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
886 lun = cmd->device->lun; 883 lun = cmd->device->lun;
887 serial = cmd->serial_number; 884 serial = cmd->serial_number;
888 885
889 sp = (srb_t *) CMD_SP(cmd); 886 if (!fcport)
890 if (!sp || !fcport)
891 return ret; 887 return ret;
892 888
893 qla_printk(KERN_INFO, ha, 889 qla_printk(KERN_INFO, ha,
@@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
936{ 932{
937 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 933 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
938 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 934 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
939 srb_t *sp;
940 int ret; 935 int ret;
941 unsigned int id, lun; 936 unsigned int id, lun;
942 unsigned long serial; 937 unsigned long serial;
@@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
947 lun = cmd->device->lun; 942 lun = cmd->device->lun;
948 serial = cmd->serial_number; 943 serial = cmd->serial_number;
949 944
950 sp = (srb_t *) CMD_SP(cmd); 945 if (!fcport)
951 if (!sp || !fcport)
952 return ret; 946 return ret;
953 947
954 qla_printk(KERN_INFO, ha, 948 qla_printk(KERN_INFO, ha,
@@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data)
2244 2238
2245 next_loopid = 0; 2239 next_loopid = 0;
2246 list_for_each_entry(fcport, &ha->fcports, list) { 2240 list_for_each_entry(fcport, &ha->fcports, list) {
2247 if (fcport->port_type != FCT_TARGET)
2248 continue;
2249
2250 /* 2241 /*
2251 * If the port is not ONLINE then try to login 2242 * If the port is not ONLINE then try to login
2252 * to it if we haven't run out of retries. 2243 * to it if we haven't run out of retries.
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d2d683440659..971259032ef7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.05-k3" 10#define QLA2XXX_VERSION "8.01.07-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 1
14#define QLA_DRIVER_PATCH_VER 5 14#define QLA_DRIVER_PATCH_VER 7
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 03baec2191bf..01d40369a8a5 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -74,6 +74,7 @@ enum {
74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 74static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); 75static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77static void vt6420_error_handler(struct ata_port *ap);
77 78
78static const struct pci_device_id svia_pci_tbl[] = { 79static const struct pci_device_id svia_pci_tbl[] = {
79 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, 80 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 },
@@ -107,7 +108,38 @@ static struct scsi_host_template svia_sht = {
107 .bios_param = ata_std_bios_param, 108 .bios_param = ata_std_bios_param,
108}; 109};
109 110
110static const struct ata_port_operations svia_sata_ops = { 111static const struct ata_port_operations vt6420_sata_ops = {
112 .port_disable = ata_port_disable,
113
114 .tf_load = ata_tf_load,
115 .tf_read = ata_tf_read,
116 .check_status = ata_check_status,
117 .exec_command = ata_exec_command,
118 .dev_select = ata_std_dev_select,
119
120 .bmdma_setup = ata_bmdma_setup,
121 .bmdma_start = ata_bmdma_start,
122 .bmdma_stop = ata_bmdma_stop,
123 .bmdma_status = ata_bmdma_status,
124
125 .qc_prep = ata_qc_prep,
126 .qc_issue = ata_qc_issue_prot,
127 .data_xfer = ata_pio_data_xfer,
128
129 .freeze = ata_bmdma_freeze,
130 .thaw = ata_bmdma_thaw,
131 .error_handler = vt6420_error_handler,
132 .post_internal_cmd = ata_bmdma_post_internal_cmd,
133
134 .irq_handler = ata_interrupt,
135 .irq_clear = ata_bmdma_irq_clear,
136
137 .port_start = ata_port_start,
138 .port_stop = ata_port_stop,
139 .host_stop = ata_host_stop,
140};
141
142static const struct ata_port_operations vt6421_sata_ops = {
111 .port_disable = ata_port_disable, 143 .port_disable = ata_port_disable,
112 144
113 .tf_load = ata_tf_load, 145 .tf_load = ata_tf_load,
@@ -141,13 +173,13 @@ static const struct ata_port_operations svia_sata_ops = {
141 .host_stop = ata_host_stop, 173 .host_stop = ata_host_stop,
142}; 174};
143 175
144static struct ata_port_info svia_port_info = { 176static struct ata_port_info vt6420_port_info = {
145 .sht = &svia_sht, 177 .sht = &svia_sht,
146 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 178 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
147 .pio_mask = 0x1f, 179 .pio_mask = 0x1f,
148 .mwdma_mask = 0x07, 180 .mwdma_mask = 0x07,
149 .udma_mask = 0x7f, 181 .udma_mask = 0x7f,
150 .port_ops = &svia_sata_ops, 182 .port_ops = &vt6420_sata_ops,
151}; 183};
152 184
153MODULE_AUTHOR("Jeff Garzik"); 185MODULE_AUTHOR("Jeff Garzik");
@@ -170,6 +202,81 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
170 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); 202 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
171} 203}
172 204
205/**
206 * vt6420_prereset - prereset for vt6420
207 * @ap: target ATA port
208 *
209 * SCR registers on vt6420 are pieces of shit and may hang the
210 * whole machine completely if accessed with the wrong timing.
211 * To avoid such catastrophe, vt6420 doesn't provide generic SCR
212 * access operations, but uses SStatus and SControl only during
213 * boot probing in controlled way.
214 *
215 * As the old (pre EH update) probing code is proven to work, we
216 * strictly follow the access pattern.
217 *
218 * LOCKING:
219 * Kernel thread context (may sleep)
220 *
221 * RETURNS:
222 * 0 on success, -errno otherwise.
223 */
224static int vt6420_prereset(struct ata_port *ap)
225{
226 struct ata_eh_context *ehc = &ap->eh_context;
227 unsigned long timeout = jiffies + (HZ * 5);
228 u32 sstatus, scontrol;
229 int online;
230
231 /* don't do any SCR stuff if we're not loading */
 232 if (!(ap->pflags & ATA_PFLAG_LOADING))
233 goto skip_scr;
234
235 /* Resume phy. This is the old resume sequence from
236 * __sata_phy_reset().
237 */
238 svia_scr_write(ap, SCR_CONTROL, 0x300);
239 svia_scr_read(ap, SCR_CONTROL); /* flush */
240
241 /* wait for phy to become ready, if necessary */
242 do {
243 msleep(200);
244 if ((svia_scr_read(ap, SCR_STATUS) & 0xf) != 1)
245 break;
246 } while (time_before(jiffies, timeout));
247
248 /* open code sata_print_link_status() */
249 sstatus = svia_scr_read(ap, SCR_STATUS);
250 scontrol = svia_scr_read(ap, SCR_CONTROL);
251
252 online = (sstatus & 0xf) == 0x3;
253
254 ata_port_printk(ap, KERN_INFO,
255 "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
256 online ? "up" : "down", sstatus, scontrol);
257
258 /* SStatus is read one more time */
259 svia_scr_read(ap, SCR_STATUS);
260
261 if (!online) {
262 /* tell EH to bail */
263 ehc->i.action &= ~ATA_EH_RESET_MASK;
264 return 0;
265 }
266
267 skip_scr:
268 /* wait for !BSY */
269 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
270
271 return 0;
272}
273
274static void vt6420_error_handler(struct ata_port *ap)
275{
276 return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
277 NULL, ata_std_postreset);
278}
279
173static const unsigned int svia_bar_sizes[] = { 280static const unsigned int svia_bar_sizes[] = {
174 8, 4, 8, 4, 16, 256 281 8, 4, 8, 4, 16, 256
175}; 282};
@@ -210,7 +317,7 @@ static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
210static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) 317static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
211{ 318{
212 struct ata_probe_ent *probe_ent; 319 struct ata_probe_ent *probe_ent;
213 struct ata_port_info *ppi = &svia_port_info; 320 struct ata_port_info *ppi = &vt6420_port_info;
214 321
215 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 322 probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
216 if (!probe_ent) 323 if (!probe_ent)
@@ -239,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
239 346
240 probe_ent->sht = &svia_sht; 347 probe_ent->sht = &svia_sht;
241 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; 348 probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
242 probe_ent->port_ops = &svia_sata_ops; 349 probe_ent->port_ops = &vt6421_sata_ops;
243 probe_ent->n_ports = N_PORTS; 350 probe_ent->n_ports = N_PORTS;
244 probe_ent->irq = pdev->irq; 351 probe_ent->irq = pdev->irq;
245 probe_ent->irq_flags = IRQF_SHARED; 352 probe_ent->irq_flags = IRQF_SHARED;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6a5b731bd5ba..a8ed5a22009d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,7 +460,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
460 * Return value: 460 * Return value:
461 * SUCCESS or FAILED or NEEDS_RETRY 461 * SUCCESS or FAILED or NEEDS_RETRY
462 **/ 462 **/
463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense) 463static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
464 int cmnd_size, int timeout, int copy_sense)
464{ 465{
465 struct scsi_device *sdev = scmd->device; 466 struct scsi_device *sdev = scmd->device;
466 struct Scsi_Host *shost = sdev->host; 467 struct Scsi_Host *shost = sdev->host;
@@ -490,6 +491,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense
490 old_cmd_len = scmd->cmd_len; 491 old_cmd_len = scmd->cmd_len;
491 old_use_sg = scmd->use_sg; 492 old_use_sg = scmd->use_sg;
492 493
494 memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
495 memcpy(scmd->cmnd, cmnd, cmnd_size);
496
493 if (copy_sense) { 497 if (copy_sense) {
494 int gfp_mask = GFP_ATOMIC; 498 int gfp_mask = GFP_ATOMIC;
495 499
@@ -610,8 +614,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
610 static unsigned char generic_sense[6] = 614 static unsigned char generic_sense[6] =
611 {REQUEST_SENSE, 0, 0, 0, 252, 0}; 615 {REQUEST_SENSE, 0, 0, 0, 252, 0};
612 616
613 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); 617 return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1);
614 return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1);
615} 618}
616 619
617/** 620/**
@@ -736,10 +739,7 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
736 int retry_cnt = 1, rtn; 739 int retry_cnt = 1, rtn;
737 740
738retry_tur: 741retry_tur:
739 memcpy(scmd->cmnd, tur_command, sizeof(tur_command)); 742 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
740
741
742 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0);
743 743
744 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 744 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
745 __FUNCTION__, scmd, rtn)); 745 __FUNCTION__, scmd, rtn));
@@ -839,8 +839,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
839 if (scmd->device->allow_restart) { 839 if (scmd->device->allow_restart) {
840 int rtn; 840 int rtn;
841 841
842 memcpy(scmd->cmnd, stu_command, sizeof(stu_command)); 842 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
843 rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0); 843 START_UNIT_TIMEOUT, 0);
844 if (rtn == SUCCESS) 844 if (rtn == SUCCESS)
845 return 0; 845 return 0;
846 } 846 }
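
With the refactor above, scsi_send_eh_cmnd() takes the CDB and its length and performs the memset/memcpy into scmd->cmnd itself, so every error-handling caller shrinks to a one-liner. A sketch of the calling side, assuming the SENSE_TIMEOUT value local to scsi_error.c; the wrapper function is illustrative.

    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    /* Sketch: callers now hand the CDB straight to the EH helper instead of
     * copying it into scmd->cmnd first (compare the old scsi_eh_tur()). */
    static int eh_test_unit_ready(struct scsi_cmnd *scmd)
    {
            static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};

            /* scsi_send_eh_cmnd(scmd, cdb, cdb_len, timeout, copy_sense) */
            return scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
    }
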
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 7b9e8fa1a4e0..2ecd14188574 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,6 +34,7 @@
34#define ISCSI_SESSION_ATTRS 11 34#define ISCSI_SESSION_ATTRS 11
35#define ISCSI_CONN_ATTRS 11 35#define ISCSI_CONN_ATTRS 11
36#define ISCSI_HOST_ATTRS 0 36#define ISCSI_HOST_ATTRS 0
37#define ISCSI_TRANSPORT_VERSION "1.1-646"
37 38
38struct iscsi_internal { 39struct iscsi_internal {
39 int daemon_pid; 40 int daemon_pid;
@@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone)
634} 635}
635 636
636static int 637static int
637iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb) 638iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp)
638{ 639{
639 unsigned long flags; 640 unsigned long flags;
640 int rc; 641 int rc;
641 642
642 skb_get(skb); 643 skb_get(skb);
643 rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL); 644 rc = netlink_broadcast(nls, skb, 0, 1, gfp);
644 if (rc < 0) { 645 if (rc < 0) {
645 mempool_free(skb, zone->pool); 646 mempool_free(skb, zone->pool);
646 printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc); 647 printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc);
@@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
749 ev->r.connerror.cid = conn->cid; 750 ev->r.connerror.cid = conn->cid;
750 ev->r.connerror.sid = iscsi_conn_get_sid(conn); 751 ev->r.connerror.sid = iscsi_conn_get_sid(conn);
751 752
752 iscsi_broadcast_skb(conn->z_error, skb); 753 iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC);
753 754
754 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", 755 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
755 error); 756 error);
@@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
895 * this will occur if the daemon is not up, so we just warn 896 * this will occur if the daemon is not up, so we just warn
896 * the user and when the daemon is restarted it will handle it 897 * the user and when the daemon is restarted it will handle it
897 */ 898 */
898 rc = iscsi_broadcast_skb(conn->z_pdu, skb); 899 rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
899 if (rc < 0) 900 if (rc < 0)
900 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " 901 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
901 "session destruction event. Check iscsi daemon\n"); 902 "session destruction event. Check iscsi daemon\n");
@@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
958 * this will occur if the daemon is not up, so we just warn 959 * this will occur if the daemon is not up, so we just warn
959 * the user and when the daemon is restarted it will handle it 960 * the user and when the daemon is restarted it will handle it
960 */ 961 */
961 rc = iscsi_broadcast_skb(conn->z_pdu, skb); 962 rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
962 if (rc < 0) 963 if (rc < 0)
963 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of " 964 dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
964 "session creation event. Check iscsi daemon\n"); 965 "session creation event. Check iscsi daemon\n");
@@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void)
1613{ 1614{
1614 int err; 1615 int err;
1615 1616
1617 printk(KERN_INFO "Loading iSCSI transport class v%s.",
1618 ISCSI_TRANSPORT_VERSION);
1619
1616 err = class_register(&iscsi_transport_class); 1620 err = class_register(&iscsi_transport_class);
1617 if (err) 1621 if (err)
1618 return err; 1622 return err;
@@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
1678 "Alex Aizman <itn780@yahoo.com>"); 1682 "Alex Aizman <itn780@yahoo.com>");
1679MODULE_DESCRIPTION("iSCSI Transport Interface"); 1683MODULE_DESCRIPTION("iSCSI Transport Interface");
1680MODULE_LICENSE("GPL"); 1684MODULE_LICENSE("GPL");
1685MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
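
The gfp_t parameter threaded through above lets each caller state its own allocation context: the connection-error path can run under locks or in interrupt context and passes GFP_ATOMIC, while the session create/destroy notifications stay with GFP_KERNEL. A minimal sketch of the idiom, assuming only the transport's netlink socket pointer; names are illustrative.

    #include <linux/skbuff.h>
    #include <linux/netlink.h>
    #include <net/sock.h>

    /* Sketch: pass the caller's allocation context down to netlink so
     * atomic-context events never sleep inside netlink_broadcast(). */
    static int broadcast_iscsi_event(struct sock *nls, struct sk_buff *skb,
                                     gfp_t gfp)
    {
            /* group 1, no unicast pid, allocation mode chosen by the caller */
            return netlink_broadcast(nls, skb, 0, 1, gfp);
    }
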
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 65eef33846bb..34f9343ed0af 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -18,8 +18,8 @@
18 * 18 *
19 */ 19 */
20 20
21static int sg_version_num = 30533; /* 2 digits for each component */ 21static int sg_version_num = 30534; /* 2 digits for each component */
22#define SG_VERSION_STR "3.5.33" 22#define SG_VERSION_STR "3.5.34"
23 23
24/* 24/*
25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: 25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
@@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
60 60
61#ifdef CONFIG_SCSI_PROC_FS 61#ifdef CONFIG_SCSI_PROC_FS
62#include <linux/proc_fs.h> 62#include <linux/proc_fs.h>
63static char *sg_version_date = "20050908"; 63static char *sg_version_date = "20060818";
64 64
65static int sg_proc_init(void); 65static int sg_proc_init(void);
66static void sg_proc_cleanup(void); 66static void sg_proc_cleanup(void);
@@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
1164 len = vma->vm_end - sa; 1164 len = vma->vm_end - sa;
1165 len = (len < sg->length) ? len : sg->length; 1165 len = (len < sg->length) ? len : sg->length;
1166 if (offset < len) { 1166 if (offset < len) {
1167 page = sg->page; 1167 page = virt_to_page(page_address(sg->page) + offset);
1168 get_page(page); /* increment page count */ 1168 get_page(page); /* increment page count */
1169 break; 1169 break;
1170 } 1170 }
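
The one-line sg change above matters when a scatterlist entry is backed by a multi-page (higher-order) allocation: the fault handler has to return the page that actually contains the faulting offset, not unconditionally the first page of the entry. A hedged sketch of that lookup, using the 2.6.18 scatterlist layout where the entry still carries a struct page pointer.

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* Sketch: resolve 'offset' bytes into a (possibly multi-page)
     * scatterlist entry to the page that really contains it. */
    static struct page *sg_page_at_offset(struct scatterlist *sg,
                                          unsigned long offset)
    {
            struct page *page;

            page = virt_to_page(page_address(sg->page) + offset);
            get_page(page);         /* reference handed to the fault handler */

            return page;
    }
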
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 8c505076c0eb..739d3ef46a40 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = {
2084 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860, 2084 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
2085 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2085 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2086 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510, 2086 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
2087 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2087 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
2088 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896, 2088 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
2089 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 2089 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
2090 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895, 2090 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index dc673e1b6fd9..cfe20f730436 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -886,6 +886,15 @@ static int sunsab_console_setup(struct console *con, char *options)
886 unsigned long flags; 886 unsigned long flags;
887 unsigned int baud, quot; 887 unsigned int baud, quot;
888 888
889 /*
890 * The console framework calls us for each and every port
891 * registered. Defer the console setup until the requested
892 * port has been properly discovered. A bit of a hack,
893 * though...
894 */
895 if (up->port.type != PORT_SUNSAB)
896 return -1;
897
889 printk("Console: ttyS%d (SAB82532)\n", 898 printk("Console: ttyS%d (SAB82532)\n",
890 (sunsab_reg.minor - 64) + con->index); 899 (sunsab_reg.minor - 64) + con->index);
891 900
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 47bc3d57e019..d34f336d53d8 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1146,6 +1146,9 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
1146 unsigned long flags; 1146 unsigned long flags;
1147 int baud, brg; 1147 int baud, brg;
1148 1148
1149 if (up->port.type != PORT_SUNZILOG)
1150 return -1;
1151
1149 printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n", 1152 printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n",
1150 (sunzilog_reg.minor - 64) + con->index, con->index); 1153 (sunzilog_reg.minor - 64) + con->index, con->index);
1151 1154
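
Both serial hunks above follow the same rule: the console core calls the setup hook for every registered console/port combination, so a driver whose port has not been probed yet should simply decline for now rather than touch unmapped hardware. A sketch of that guard, with PORT_MYUART and the port array as purely hypothetical placeholders.

    #include <linux/console.h>
    #include <linux/serial_core.h>

    /* Hypothetical driver state, for illustration only. */
    #define PORT_MYUART     123
    static struct uart_port myuart_ports[4];

    /* Sketch: bail out of console setup until the requested port has been
     * discovered; the console core retries as more ports register. */
    static int myuart_console_setup(struct console *con, char *options)
    {
            struct uart_port *port = &myuart_ports[con->index];

            if (port->type != PORT_MYUART)
                    return -1;              /* not probed yet, defer */

            /* normal baud/parity parsing and uart_set_options() would follow */
            return 0;
    }
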
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index a4062a6adbb8..9c46746d5d00 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -208,7 +208,7 @@ static int cypress_probe(struct usb_interface *interface,
208 /* allocate memory for our device state and initialize it */ 208 /* allocate memory for our device state and initialize it */
209 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 209 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
210 if (dev == NULL) { 210 if (dev == NULL) {
211 dev_err(&dev->udev->dev, "Out of memory!\n"); 211 dev_err(&interface->dev, "Out of memory!\n");
212 goto error; 212 goto error;
213 } 213 }
214 214
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index efbbc0adb89a..65e4d046951a 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -79,7 +79,6 @@ static struct usb_device_id id_table [] = {
79 { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) }, 79 { USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) },
80 { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) }, 80 { USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) },
81 { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) }, 81 { USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) },
82 { USB_DEVICE(OTI_VENDOR_ID, OTI_PRODUCT_ID) },
83 { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) }, 82 { USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) },
84 { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) }, 83 { USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) },
85 { } /* Terminating entry */ 84 { } /* Terminating entry */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a692ac66ca6c..55195e76eb6f 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -82,10 +82,6 @@
82#define SPEEDDRAGON_VENDOR_ID 0x0e55 82#define SPEEDDRAGON_VENDOR_ID 0x0e55
83#define SPEEDDRAGON_PRODUCT_ID 0x110b 83#define SPEEDDRAGON_PRODUCT_ID 0x110b
84 84
85/* Ours Technology Inc DKU-5 clone, chipset: Prolific Technology Inc */
86#define OTI_VENDOR_ID 0x0ea0
87#define OTI_PRODUCT_ID 0x6858
88
89/* DATAPILOT Universal-2 Phone Cable */ 85/* DATAPILOT Universal-2 Phone Cable */
90#define DATAPILOT_U2_VENDOR_ID 0x0731 86#define DATAPILOT_U2_VENDOR_ID 0x0731
91#define DATAPILOT_U2_PRODUCT_ID 0x2003 87#define DATAPILOT_U2_PRODUCT_ID 0x2003
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fd158e063c06..4a803d69fa36 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1261,7 +1261,7 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
1261 * Tested on hardware version 1.10. 1261 * Tested on hardware version 1.10.
1262 * Entry is needed only for the initializer function override. 1262 * Entry is needed only for the initializer function override.
1263 */ 1263 */
1264UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x9999, 1264UNUSUAL_DEV( 0x1019, 0x0c55, 0x0110, 0x0110,
1265 "Desknote", 1265 "Desknote",
1266 "UCR-61S2B", 1266 "UCR-61S2B",
1267 US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, 1267 US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init,
diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c
index b485bece5fc9..18ea4a549105 100644
--- a/drivers/video/imacfb.c
+++ b/drivers/video/imacfb.c
@@ -71,10 +71,10 @@ static int set_system(struct dmi_system_id *id)
71static struct dmi_system_id __initdata dmi_system_table[] = { 71static struct dmi_system_id __initdata dmi_system_table[] = {
72 { set_system, "iMac4,1", { 72 { set_system, "iMac4,1", {
73 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), 73 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
74 DMI_MATCH(DMI_BIOS_VERSION,"iMac4,1") }, (void*)M_I17}, 74 DMI_MATCH(DMI_PRODUCT_NAME,"iMac4,1") }, (void*)M_I17},
75 { set_system, "MacBookPro1,1", { 75 { set_system, "MacBookPro1,1", {
76 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), 76 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
77 DMI_MATCH(DMI_BIOS_VERSION,"MacBookPro1,1") }, (void*)M_I17}, 77 DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro1,1") }, (void*)M_I17},
78 { set_system, "MacBook1,1", { 78 { set_system, "MacBook1,1", {
79 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."), 79 DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
80 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook1,1")}, (void *)M_MACBOOK}, 80 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook1,1")}, (void *)M_MACBOOK},
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
index 440272ad10e7..7c76e079ca7d 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/matrox/g450_pll.c
@@ -331,7 +331,15 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
331 tmp |= M1064_XPIXCLKCTRL_PLL_UP; 331 tmp |= M1064_XPIXCLKCTRL_PLL_UP;
332 } 332 }
333 matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp); 333 matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp);
334#ifdef __powerpc__
335 /* This is necessary to avoid jitter on PowerPC
336 * (OpenFirmware) systems, but apparently
337 * introduces jitter, at least on a x86-64
338 * using DVI.
339 * A simple workaround is disable for non-PPC.
340 */
334 matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0); 341 matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0);
342#endif /* __powerpc__ */
335 matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl); 343 matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl);
336 344
337 matroxfb_DAC_unlock_irqrestore(flags); 345 matroxfb_DAC_unlock_irqrestore(flags);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 37534573960b..045f98854f14 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -884,6 +884,61 @@ void bd_set_size(struct block_device *bdev, loff_t size)
884} 884}
885EXPORT_SYMBOL(bd_set_size); 885EXPORT_SYMBOL(bd_set_size);
886 886
887static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
888{
889 int ret = 0;
890 struct inode *bd_inode = bdev->bd_inode;
891 struct gendisk *disk = bdev->bd_disk;
892
893 mutex_lock_nested(&bdev->bd_mutex, subclass);
894 lock_kernel();
895 if (!--bdev->bd_openers) {
896 sync_blockdev(bdev);
897 kill_bdev(bdev);
898 }
899 if (bdev->bd_contains == bdev) {
900 if (disk->fops->release)
901 ret = disk->fops->release(bd_inode, NULL);
902 } else {
903 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
904 subclass + 1);
905 bdev->bd_contains->bd_part_count--;
906 mutex_unlock(&bdev->bd_contains->bd_mutex);
907 }
908 if (!bdev->bd_openers) {
909 struct module *owner = disk->fops->owner;
910
911 put_disk(disk);
912 module_put(owner);
913
914 if (bdev->bd_contains != bdev) {
915 kobject_put(&bdev->bd_part->kobj);
916 bdev->bd_part = NULL;
917 }
918 bdev->bd_disk = NULL;
919 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
920 if (bdev != bdev->bd_contains)
921 __blkdev_put(bdev->bd_contains, subclass + 1);
922 bdev->bd_contains = NULL;
923 }
924 unlock_kernel();
925 mutex_unlock(&bdev->bd_mutex);
926 bdput(bdev);
927 return ret;
928}
929
930int blkdev_put(struct block_device *bdev)
931{
932 return __blkdev_put(bdev, BD_MUTEX_NORMAL);
933}
934EXPORT_SYMBOL(blkdev_put);
935
936int blkdev_put_partition(struct block_device *bdev)
937{
938 return __blkdev_put(bdev, BD_MUTEX_PARTITION);
939}
940EXPORT_SYMBOL(blkdev_put_partition);
941
887static int 942static int
888blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags); 943blkdev_get_whole(struct block_device *bdev, mode_t mode, unsigned flags);
889 944
@@ -980,7 +1035,7 @@ out_first:
980 bdev->bd_disk = NULL; 1035 bdev->bd_disk = NULL;
981 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info; 1036 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
982 if (bdev != bdev->bd_contains) 1037 if (bdev != bdev->bd_contains)
983 blkdev_put(bdev->bd_contains); 1038 __blkdev_put(bdev->bd_contains, BD_MUTEX_WHOLE);
984 bdev->bd_contains = NULL; 1039 bdev->bd_contains = NULL;
985 put_disk(disk); 1040 put_disk(disk);
986 module_put(owner); 1041 module_put(owner);
@@ -1079,63 +1134,6 @@ static int blkdev_open(struct inode * inode, struct file * filp)
1079 return res; 1134 return res;
1080} 1135}
1081 1136
1082static int __blkdev_put(struct block_device *bdev, unsigned int subclass)
1083{
1084 int ret = 0;
1085 struct inode *bd_inode = bdev->bd_inode;
1086 struct gendisk *disk = bdev->bd_disk;
1087
1088 mutex_lock_nested(&bdev->bd_mutex, subclass);
1089 lock_kernel();
1090 if (!--bdev->bd_openers) {
1091 sync_blockdev(bdev);
1092 kill_bdev(bdev);
1093 }
1094 if (bdev->bd_contains == bdev) {
1095 if (disk->fops->release)
1096 ret = disk->fops->release(bd_inode, NULL);
1097 } else {
1098 mutex_lock_nested(&bdev->bd_contains->bd_mutex,
1099 subclass + 1);
1100 bdev->bd_contains->bd_part_count--;
1101 mutex_unlock(&bdev->bd_contains->bd_mutex);
1102 }
1103 if (!bdev->bd_openers) {
1104 struct module *owner = disk->fops->owner;
1105
1106 put_disk(disk);
1107 module_put(owner);
1108
1109 if (bdev->bd_contains != bdev) {
1110 kobject_put(&bdev->bd_part->kobj);
1111 bdev->bd_part = NULL;
1112 }
1113 bdev->bd_disk = NULL;
1114 bdev->bd_inode->i_data.backing_dev_info = &default_backing_dev_info;
1115 if (bdev != bdev->bd_contains)
1116 __blkdev_put(bdev->bd_contains, subclass + 1);
1117 bdev->bd_contains = NULL;
1118 }
1119 unlock_kernel();
1120 mutex_unlock(&bdev->bd_mutex);
1121 bdput(bdev);
1122 return ret;
1123}
1124
1125int blkdev_put(struct block_device *bdev)
1126{
1127 return __blkdev_put(bdev, BD_MUTEX_NORMAL);
1128}
1129
1130EXPORT_SYMBOL(blkdev_put);
1131
1132int blkdev_put_partition(struct block_device *bdev)
1133{
1134 return __blkdev_put(bdev, BD_MUTEX_PARTITION);
1135}
1136
1137EXPORT_SYMBOL(blkdev_put_partition);
1138
1139static int blkdev_close(struct inode * inode, struct file * filp) 1137static int blkdev_close(struct inode * inode, struct file * filp)
1140{ 1138{
1141 struct block_device *bdev = I_BDEV(filp->f_mapping->host); 1139 struct block_device *bdev = I_BDEV(filp->f_mapping->host);
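
Moving __blkdev_put() up is mechanical, but the locking idiom it carries is worth spelling out: the partition's bd_mutex and the containing whole-disk's bd_mutex belong to the same lock class, so they are taken with mutex_lock_nested() and consecutive subclasses to tell lockdep the nesting is intentional. A small sketch of the pattern, with BD_MUTEX_*-style subclass values assumed from the patch.

    #include <linux/fs.h>
    #include <linux/mutex.h>

    /* Sketch: same lock class, two objects, fixed ordering. The subclass
     * tells lockdep that partition -> whole-disk nesting is legitimate. */
    static void drop_openers(struct block_device *part,
                             struct block_device *whole,
                             unsigned int subclass)
    {
            mutex_lock_nested(&part->bd_mutex, subclass);
            part->bd_openers--;                     /* partition side */
            mutex_unlock(&part->bd_mutex);

            mutex_lock_nested(&whole->bd_mutex, subclass + 1);
            whole->bd_part_count--;                 /* whole-disk side */
            mutex_unlock(&whole->bd_mutex);
    }
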
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 19ffb043abbc..3a3567433b92 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1168,7 +1168,7 @@ static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
1168eexit_1: 1168eexit_1:
1169 1169
1170 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n", 1170 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
1171 current, ep, epi->file, error)); 1171 current, ep, epi->ffd.file, error));
1172 1172
1173 return error; 1173 return error;
1174} 1174}
@@ -1236,7 +1236,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
1236 struct eventpoll *ep = epi->ep; 1236 struct eventpoll *ep = epi->ep;
1237 1237
1238 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n", 1238 DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
1239 current, epi->file, epi, ep)); 1239 current, epi->ffd.file, epi, ep));
1240 1240
1241 write_lock_irqsave(&ep->lock, flags); 1241 write_lock_irqsave(&ep->lock, flags);
1242 1242
diff --git a/fs/exec.c b/fs/exec.c
index 8344ba73a2a6..54135df2a966 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -486,8 +486,6 @@ struct file *open_exec(const char *name)
486 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) && 486 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
487 S_ISREG(inode->i_mode)) { 487 S_ISREG(inode->i_mode)) {
488 int err = vfs_permission(&nd, MAY_EXEC); 488 int err = vfs_permission(&nd, MAY_EXEC);
489 if (!err && !(inode->i_mode & 0111))
490 err = -EACCES;
491 file = ERR_PTR(err); 489 file = ERR_PTR(err);
492 if (!err) { 490 if (!err) {
493 file = nameidata_to_filp(&nd, O_RDONLY); 491 file = nameidata_to_filp(&nd, O_RDONLY);
@@ -753,7 +751,7 @@ no_thread_group:
753 751
754 write_lock_irq(&tasklist_lock); 752 write_lock_irq(&tasklist_lock);
755 spin_lock(&oldsighand->siglock); 753 spin_lock(&oldsighand->siglock);
756 spin_lock(&newsighand->siglock); 754 spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
757 755
758 rcu_assign_pointer(current->sighand, newsighand); 756 rcu_assign_pointer(current->sighand, newsighand);
759 recalc_sigpending(); 757 recalc_sigpending();
@@ -922,12 +920,6 @@ int prepare_binprm(struct linux_binprm *bprm)
922 int retval; 920 int retval;
923 921
924 mode = inode->i_mode; 922 mode = inode->i_mode;
925 /*
926 * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
927 * generic_permission lets a non-executable through
928 */
929 if (!(mode & 0111)) /* with at least _one_ execute bit set */
930 return -EACCES;
931 if (bprm->file->f_op == NULL) 923 if (bprm->file->f_op == NULL)
932 return -EACCES; 924 return -EACCES;
933 925
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index f2702cda9779..681dea8f9532 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -775,7 +775,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
775 if (EXT2_INODE_SIZE(sb) == 0) 775 if (EXT2_INODE_SIZE(sb) == 0)
776 goto cantfind_ext2; 776 goto cantfind_ext2;
777 sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); 777 sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
778 if (sbi->s_inodes_per_block == 0) 778 if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
779 goto cantfind_ext2; 779 goto cantfind_ext2;
780 sbi->s_itb_per_group = sbi->s_inodes_per_group / 780 sbi->s_itb_per_group = sbi->s_inodes_per_group /
781 sbi->s_inodes_per_block; 781 sbi->s_inodes_per_block;
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a504a40d6d29..063d994bda0b 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1269,12 +1269,12 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1269 goal = le32_to_cpu(es->s_first_data_block); 1269 goal = le32_to_cpu(es->s_first_data_block);
1270 group_no = (goal - le32_to_cpu(es->s_first_data_block)) / 1270 group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
1271 EXT3_BLOCKS_PER_GROUP(sb); 1271 EXT3_BLOCKS_PER_GROUP(sb);
1272 goal_group = group_no;
1273retry_alloc:
1272 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh); 1274 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1273 if (!gdp) 1275 if (!gdp)
1274 goto io_error; 1276 goto io_error;
1275 1277
1276 goal_group = group_no;
1277retry:
1278 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); 1278 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1279 /* 1279 /*
1280 * if there is not enough free blocks to make a new resevation 1280 * if there is not enough free blocks to make a new resevation
@@ -1349,7 +1349,7 @@ retry:
1349 if (my_rsv) { 1349 if (my_rsv) {
1350 my_rsv = NULL; 1350 my_rsv = NULL;
1351 group_no = goal_group; 1351 group_no = goal_group;
1352 goto retry; 1352 goto retry_alloc;
1353 } 1353 }
1354 /* No space left on the device */ 1354 /* No space left on the device */
1355 *errp = -ENOSPC; 1355 *errp = -ENOSPC;
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 93aa5715f224..78b1deae3fa2 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -44,6 +44,9 @@ static int set_task_ioprio(struct task_struct *task, int ioprio)
44 task->ioprio = ioprio; 44 task->ioprio = ioprio;
45 45
46 ioc = task->io_context; 46 ioc = task->io_context;
47 /* see wmb() in current_io_context() */
48 smp_read_barrier_depends();
49
47 if (ioc && ioc->set_ioprio) 50 if (ioc && ioc->set_ioprio)
48 ioc->set_ioprio(ioc, ioprio); 51 ioc->set_ioprio(ioc, ioprio);
49 52
@@ -111,9 +114,9 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
111 continue; 114 continue;
112 ret = set_task_ioprio(p, ioprio); 115 ret = set_task_ioprio(p, ioprio);
113 if (ret) 116 if (ret)
114 break; 117 goto free_uid;
115 } while_each_thread(g, p); 118 } while_each_thread(g, p);
116 119free_uid:
117 if (who) 120 if (who)
118 free_uid(user); 121 free_uid(user);
119 break; 122 break;
@@ -137,6 +140,29 @@ out:
137 return ret; 140 return ret;
138} 141}
139 142
143int ioprio_best(unsigned short aprio, unsigned short bprio)
144{
145 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
146 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
147
148 if (!ioprio_valid(aprio))
149 return bprio;
150 if (!ioprio_valid(bprio))
151 return aprio;
152
153 if (aclass == IOPRIO_CLASS_NONE)
154 aclass = IOPRIO_CLASS_BE;
155 if (bclass == IOPRIO_CLASS_NONE)
156 bclass = IOPRIO_CLASS_BE;
157
158 if (aclass == bclass)
159 return min(aprio, bprio);
160 if (aclass > bclass)
161 return bprio;
162 else
163 return aprio;
164}
165
140asmlinkage long sys_ioprio_get(int which, int who) 166asmlinkage long sys_ioprio_get(int which, int who)
141{ 167{
142 struct task_struct *g, *p; 168 struct task_struct *g, *p;
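
The new ioprio_best() picks the effectively stronger of two priorities, treating IOPRIO_CLASS_NONE as best-effort and, within a class, preferring the numerically lower (higher-priority) value. A small worked example of what that means, assuming the helper is exposed via linux/ioprio.h as in this series.

    #include <linux/kernel.h>
    #include <linux/ioprio.h>

    /* Sketch: realtime beats best-effort; equal classes pick the lower value. */
    static void ioprio_best_example(void)
    {
            unsigned short rt4 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4);
            unsigned short be0 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0);
            unsigned short be6 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 6);

            BUG_ON(ioprio_best(rt4, be0) != rt4);   /* RT class wins */
            BUG_ON(ioprio_best(be0, be6) != be0);   /* same class: lower value */
    }
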
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 0971814c38b8..42da60784311 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -261,7 +261,7 @@ void journal_commit_transaction(journal_t *journal)
261 struct buffer_head *bh = jh2bh(jh); 261 struct buffer_head *bh = jh2bh(jh);
262 262
263 jbd_lock_bh_state(bh); 263 jbd_lock_bh_state(bh);
264 kfree(jh->b_committed_data); 264 jbd_slab_free(jh->b_committed_data, bh->b_size);
265 jh->b_committed_data = NULL; 265 jh->b_committed_data = NULL;
266 jbd_unlock_bh_state(bh); 266 jbd_unlock_bh_state(bh);
267 } 267 }
@@ -745,14 +745,14 @@ restart_loop:
745 * Otherwise, we can just throw away the frozen data now. 745 * Otherwise, we can just throw away the frozen data now.
746 */ 746 */
747 if (jh->b_committed_data) { 747 if (jh->b_committed_data) {
748 kfree(jh->b_committed_data); 748 jbd_slab_free(jh->b_committed_data, bh->b_size);
749 jh->b_committed_data = NULL; 749 jh->b_committed_data = NULL;
750 if (jh->b_frozen_data) { 750 if (jh->b_frozen_data) {
751 jh->b_committed_data = jh->b_frozen_data; 751 jh->b_committed_data = jh->b_frozen_data;
752 jh->b_frozen_data = NULL; 752 jh->b_frozen_data = NULL;
753 } 753 }
754 } else if (jh->b_frozen_data) { 754 } else if (jh->b_frozen_data) {
755 kfree(jh->b_frozen_data); 755 jbd_slab_free(jh->b_frozen_data, bh->b_size);
756 jh->b_frozen_data = NULL; 756 jh->b_frozen_data = NULL;
757 } 757 }
758 758
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 8c9b28dff119..f66724ce443a 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(journal_force_commit);
84 84
85static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); 85static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
86static void __journal_abort_soft (journal_t *journal, int errno); 86static void __journal_abort_soft (journal_t *journal, int errno);
87static int journal_create_jbd_slab(size_t slab_size);
87 88
88/* 89/*
89 * Helper function used to manage commit timeouts 90 * Helper function used to manage commit timeouts
@@ -328,10 +329,10 @@ repeat:
328 char *tmp; 329 char *tmp;
329 330
330 jbd_unlock_bh_state(bh_in); 331 jbd_unlock_bh_state(bh_in);
331 tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS); 332 tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
332 jbd_lock_bh_state(bh_in); 333 jbd_lock_bh_state(bh_in);
333 if (jh_in->b_frozen_data) { 334 if (jh_in->b_frozen_data) {
334 kfree(tmp); 335 jbd_slab_free(tmp, bh_in->b_size);
335 goto repeat; 336 goto repeat;
336 } 337 }
337 338
@@ -1069,17 +1070,17 @@ static int load_superblock(journal_t *journal)
1069int journal_load(journal_t *journal) 1070int journal_load(journal_t *journal)
1070{ 1071{
1071 int err; 1072 int err;
1073 journal_superblock_t *sb;
1072 1074
1073 err = load_superblock(journal); 1075 err = load_superblock(journal);
1074 if (err) 1076 if (err)
1075 return err; 1077 return err;
1076 1078
1079 sb = journal->j_superblock;
1077 /* If this is a V2 superblock, then we have to check the 1080 /* If this is a V2 superblock, then we have to check the
1078 * features flags on it. */ 1081 * features flags on it. */
1079 1082
1080 if (journal->j_format_version >= 2) { 1083 if (journal->j_format_version >= 2) {
1081 journal_superblock_t *sb = journal->j_superblock;
1082
1083 if ((sb->s_feature_ro_compat & 1084 if ((sb->s_feature_ro_compat &
1084 ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) || 1085 ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
1085 (sb->s_feature_incompat & 1086 (sb->s_feature_incompat &
@@ -1090,6 +1091,13 @@ int journal_load(journal_t *journal)
1090 } 1091 }
1091 } 1092 }
1092 1093
1094 /*
1095 * Create a slab for this blocksize
1096 */
1097 err = journal_create_jbd_slab(cpu_to_be32(sb->s_blocksize));
1098 if (err)
1099 return err;
1100
1093 /* Let the recovery code check whether it needs to recover any 1101 /* Let the recovery code check whether it needs to recover any
1094 * data from the journal. */ 1102 * data from the journal. */
1095 if (journal_recover(journal)) 1103 if (journal_recover(journal))
@@ -1612,6 +1620,77 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
1612} 1620}
1613 1621
1614/* 1622/*
1623 * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
1624 * and allocate frozen and commit buffers from these slabs.
1625 *
1626 * Reason for doing this is to avoid, SLAB_DEBUG - since it could
1627 * cause bh to cross page boundary.
1628 */
1629
1630#define JBD_MAX_SLABS 5
1631#define JBD_SLAB_INDEX(size) (size >> 11)
1632
1633static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
1634static const char *jbd_slab_names[JBD_MAX_SLABS] = {
1635 "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
1636};
1637
1638static void journal_destroy_jbd_slabs(void)
1639{
1640 int i;
1641
1642 for (i = 0; i < JBD_MAX_SLABS; i++) {
1643 if (jbd_slab[i])
1644 kmem_cache_destroy(jbd_slab[i]);
1645 jbd_slab[i] = NULL;
1646 }
1647}
1648
1649static int journal_create_jbd_slab(size_t slab_size)
1650{
1651 int i = JBD_SLAB_INDEX(slab_size);
1652
1653 BUG_ON(i >= JBD_MAX_SLABS);
1654
1655 /*
1656 * Check if we already have a slab created for this size
1657 */
1658 if (jbd_slab[i])
1659 return 0;
1660
1661 /*
1662 * Create a slab and force alignment to be same as slabsize -
1663 * this will make sure that allocations won't cross the page
1664 * boundary.
1665 */
1666 jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
1667 slab_size, slab_size, 0, NULL, NULL);
1668 if (!jbd_slab[i]) {
1669 printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
1670 return -ENOMEM;
1671 }
1672 return 0;
1673}
1674
1675void * jbd_slab_alloc(size_t size, gfp_t flags)
1676{
1677 int idx;
1678
1679 idx = JBD_SLAB_INDEX(size);
1680 BUG_ON(jbd_slab[idx] == NULL);
1681 return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
1682}
1683
1684void jbd_slab_free(void *ptr, size_t size)
1685{
1686 int idx;
1687
1688 idx = JBD_SLAB_INDEX(size);
1689 BUG_ON(jbd_slab[idx] == NULL);
1690 kmem_cache_free(jbd_slab[idx], ptr);
1691}
1692
1693/*
1615 * Journal_head storage management 1694 * Journal_head storage management
1616 */ 1695 */
1617static kmem_cache_t *journal_head_cache; 1696static kmem_cache_t *journal_head_cache;
@@ -1799,13 +1878,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
1799 printk(KERN_WARNING "%s: freeing " 1878 printk(KERN_WARNING "%s: freeing "
1800 "b_frozen_data\n", 1879 "b_frozen_data\n",
1801 __FUNCTION__); 1880 __FUNCTION__);
1802 kfree(jh->b_frozen_data); 1881 jbd_slab_free(jh->b_frozen_data, bh->b_size);
1803 } 1882 }
1804 if (jh->b_committed_data) { 1883 if (jh->b_committed_data) {
1805 printk(KERN_WARNING "%s: freeing " 1884 printk(KERN_WARNING "%s: freeing "
1806 "b_committed_data\n", 1885 "b_committed_data\n",
1807 __FUNCTION__); 1886 __FUNCTION__);
1808 kfree(jh->b_committed_data); 1887 jbd_slab_free(jh->b_committed_data, bh->b_size);
1809 } 1888 }
1810 bh->b_private = NULL; 1889 bh->b_private = NULL;
1811 jh->b_bh = NULL; /* debug, really */ 1890 jh->b_bh = NULL; /* debug, really */
@@ -1961,6 +2040,7 @@ static void journal_destroy_caches(void)
1961 journal_destroy_revoke_caches(); 2040 journal_destroy_revoke_caches();
1962 journal_destroy_journal_head_cache(); 2041 journal_destroy_journal_head_cache();
1963 journal_destroy_handle_cache(); 2042 journal_destroy_handle_cache();
2043 journal_destroy_jbd_slabs();
1964} 2044}
1965 2045
1966static int __init journal_init(void) 2046static int __init journal_init(void)
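
The point of the new jbd slabs above is the alignment argument: creating each cache with align equal to the object size forces every journal buffer onto a size-aligned boundary, so a 1k/2k/4k buffer can never straddle a page even when slab debugging adds padding (which is what broke the old jbd_kmalloc()/kfree() scheme). A sketch of that creation call with the 2.6.18 kmem_cache_create() signature; the wrapper name is illustrative.

    #include <linux/slab.h>

    /* Sketch: a per-blocksize cache whose alignment equals its object size,
     * guaranteeing page-contained buffers regardless of SLAB debug padding. */
    static kmem_cache_t *create_blocksize_slab(const char *name, size_t size)
    {
            return kmem_cache_create(name, size, size /* align == size */,
                                     0, NULL, NULL);
    }
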
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 508b2ea91f43..de2e4cbbf79a 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -666,8 +666,9 @@ repeat:
666 if (!frozen_buffer) { 666 if (!frozen_buffer) {
667 JBUFFER_TRACE(jh, "allocate memory for buffer"); 667 JBUFFER_TRACE(jh, "allocate memory for buffer");
668 jbd_unlock_bh_state(bh); 668 jbd_unlock_bh_state(bh);
669 frozen_buffer = jbd_kmalloc(jh2bh(jh)->b_size, 669 frozen_buffer =
670 GFP_NOFS); 670 jbd_slab_alloc(jh2bh(jh)->b_size,
671 GFP_NOFS);
671 if (!frozen_buffer) { 672 if (!frozen_buffer) {
672 printk(KERN_EMERG 673 printk(KERN_EMERG
673 "%s: OOM for frozen_buffer\n", 674 "%s: OOM for frozen_buffer\n",
@@ -879,7 +880,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
879 880
880repeat: 881repeat:
881 if (!jh->b_committed_data) { 882 if (!jh->b_committed_data) {
882 committed_data = jbd_kmalloc(jh2bh(jh)->b_size, GFP_NOFS); 883 committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
883 if (!committed_data) { 884 if (!committed_data) {
884 printk(KERN_EMERG "%s: No memory for committed data\n", 885 printk(KERN_EMERG "%s: No memory for committed data\n",
885 __FUNCTION__); 886 __FUNCTION__);
@@ -906,7 +907,7 @@ repeat:
906out: 907out:
907 journal_put_journal_head(jh); 908 journal_put_journal_head(jh);
908 if (unlikely(committed_data)) 909 if (unlikely(committed_data))
909 kfree(committed_data); 910 jbd_slab_free(committed_data, bh->b_size);
910 return err; 911 return err;
911} 912}
912 913
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 2a4df9b3779a..01b4db9e5466 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -237,19 +237,22 @@ static int
237nlm_traverse_files(struct nlm_host *host, int action) 237nlm_traverse_files(struct nlm_host *host, int action)
238{ 238{
239 struct nlm_file *file, **fp; 239 struct nlm_file *file, **fp;
240 int i; 240 int i, ret = 0;
241 241
242 mutex_lock(&nlm_file_mutex); 242 mutex_lock(&nlm_file_mutex);
243 for (i = 0; i < FILE_NRHASH; i++) { 243 for (i = 0; i < FILE_NRHASH; i++) {
244 fp = nlm_files + i; 244 fp = nlm_files + i;
245 while ((file = *fp) != NULL) { 245 while ((file = *fp) != NULL) {
246 file->f_count++;
247 mutex_unlock(&nlm_file_mutex);
248
246 /* Traverse locks, blocks and shares of this file 249 /* Traverse locks, blocks and shares of this file
247 * and update file->f_locks count */ 250 * and update file->f_locks count */
248 if (nlm_inspect_file(host, file, action)) { 251 if (nlm_inspect_file(host, file, action))
249 mutex_unlock(&nlm_file_mutex); 252 ret = 1;
250 return 1;
251 }
252 253
254 mutex_lock(&nlm_file_mutex);
255 file->f_count--;
253 /* No more references to this file. Let go of it. */ 256 /* No more references to this file. Let go of it. */
254 if (!file->f_blocks && !file->f_locks 257 if (!file->f_blocks && !file->f_locks
255 && !file->f_shares && !file->f_count) { 258 && !file->f_shares && !file->f_count) {
@@ -262,7 +265,7 @@ nlm_traverse_files(struct nlm_host *host, int action)
262 } 265 }
263 } 266 }
264 mutex_unlock(&nlm_file_mutex); 267 mutex_unlock(&nlm_file_mutex);
265 return 0; 268 return ret;
266} 269}
267 270
268/* 271/*
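
The lockd change above is the classic pin/unlock/work/relock/unpin pattern: bump file->f_count so the entry cannot be freed while nlm_file_mutex is dropped for the (possibly sleeping) traversal, then retake the mutex and drop the count before deciding whether the entry is garbage. A sketch of one iteration of that loop body; the wrapper is illustrative rather than lockd code, and nlm_inspect_file() stands in for the existing helper.

    #include <linux/mutex.h>
    #include <linux/lockd/lockd.h>

    /* Sketch: keep 'file' alive across a mutex drop while its locks,
     * blocks and shares are inspected. */
    static int inspect_file_unlocked(struct nlm_host *host, struct nlm_file *file,
                                     int action, struct mutex *table_mutex)
    {
            int ret;

            file->f_count++;                /* pin across the unlock */
            mutex_unlock(table_mutex);

            ret = nlm_inspect_file(host, file, action);

            mutex_lock(table_mutex);
            file->f_count--;                /* caller may now reap it */

            return ret;
    }
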
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 9ea91c5eeb7b..330ff9fc7cf0 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -204,6 +204,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
204 /* 204 /*
205 * Allocate the buffer map to keep the superblock small. 205 * Allocate the buffer map to keep the superblock small.
206 */ 206 */
207 if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
208 goto out_illegal_sb;
207 i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); 209 i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
208 map = kmalloc(i, GFP_KERNEL); 210 map = kmalloc(i, GFP_KERNEL);
209 if (!map) 211 if (!map)
@@ -263,7 +265,7 @@ out_no_root:
263 265
264out_no_bitmap: 266out_no_bitmap:
265 printk("MINIX-fs: bad superblock or unable to read bitmaps\n"); 267 printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
266 out_freemap: 268out_freemap:
267 for (i = 0; i < sbi->s_imap_blocks; i++) 269 for (i = 0; i < sbi->s_imap_blocks; i++)
268 brelse(sbi->s_imap[i]); 270 brelse(sbi->s_imap[i]);
269 for (i = 0; i < sbi->s_zmap_blocks; i++) 271 for (i = 0; i < sbi->s_zmap_blocks; i++)
@@ -276,11 +278,16 @@ out_no_map:
276 printk("MINIX-fs: can't allocate map\n"); 278 printk("MINIX-fs: can't allocate map\n");
277 goto out_release; 279 goto out_release;
278 280
281out_illegal_sb:
282 if (!silent)
283 printk("MINIX-fs: bad superblock\n");
284 goto out_release;
285
279out_no_fs: 286out_no_fs:
280 if (!silent) 287 if (!silent)
281 printk("VFS: Can't find a Minix or Minix V2 filesystem " 288 printk("VFS: Can't find a Minix or Minix V2 filesystem "
282 "on device %s\n", s->s_id); 289 "on device %s\n", s->s_id);
283 out_release: 290out_release:
284 brelse(bh); 291 brelse(bh);
285 goto out; 292 goto out;
286 293
@@ -290,7 +297,7 @@ out_bad_hblock:
290 297
291out_bad_sb: 298out_bad_sb:
292 printk("MINIX-fs: unable to read superblock\n"); 299 printk("MINIX-fs: unable to read superblock\n");
293 out: 300out:
294 s->s_fs_info = NULL; 301 s->s_fs_info = NULL;
295 kfree(sbi); 302 kfree(sbi);
296 return -EINVAL; 303 return -EINVAL;
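The minix hunk above rejects a superblock whose s_imap_blocks or s_zmap_blocks field is zero before those on-disk values are used to size a kmalloc(), so a corrupted image cannot request a useless zero-length map. A hedged sketch of the same validate-before-allocate idea, with hypothetical names (disk_super, alloc_maps), not the minix code itself:

#include <errno.h>
#include <stdlib.h>

struct disk_super {                     /* counts read straight from the medium */
        unsigned short imap_blocks;
        unsigned short zmap_blocks;
};

/* Returns a zeroed map array, or NULL with errno set on a bad superblock. */
static void **alloc_maps(const struct disk_super *ds)
{
        size_t n;
        void **map;

        if (ds->imap_blocks == 0 || ds->zmap_blocks == 0) {
                errno = EINVAL;                 /* corrupt image: refuse early */
                return NULL;
        }
        n = (size_t)ds->imap_blocks + ds->zmap_blocks;
        map = calloc(n, sizeof(*map));
        if (map == NULL)
                errno = ENOMEM;
        return map;
}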
diff --git a/fs/namei.c b/fs/namei.c
index 55a131230f94..432d6bc6fab0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -227,10 +227,10 @@ int generic_permission(struct inode *inode, int mask,
227 227
228int permission(struct inode *inode, int mask, struct nameidata *nd) 228int permission(struct inode *inode, int mask, struct nameidata *nd)
229{ 229{
230 umode_t mode = inode->i_mode;
230 int retval, submask; 231 int retval, submask;
231 232
232 if (mask & MAY_WRITE) { 233 if (mask & MAY_WRITE) {
233 umode_t mode = inode->i_mode;
234 234
235 /* 235 /*
236 * Nobody gets write access to a read-only fs. 236 * Nobody gets write access to a read-only fs.
@@ -247,6 +247,13 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
247 } 247 }
248 248
249 249
250 /*
251 * MAY_EXEC on regular files requires special handling: We override
252 * filesystem execute permissions if the mode bits aren't set.
253 */
254 if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
255 return -EACCES;
256
250 /* Ordinary permission routines do not understand MAY_APPEND. */ 257 /* Ordinary permission routines do not understand MAY_APPEND. */
251 submask = mask & ~MAY_APPEND; 258 submask = mask & ~MAY_APPEND;
252 if (inode->i_op && inode->i_op->permission) 259 if (inode->i_op && inode->i_op->permission)
@@ -1767,6 +1774,8 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
1767 if (nd->last_type != LAST_NORM) 1774 if (nd->last_type != LAST_NORM)
1768 goto fail; 1775 goto fail;
1769 nd->flags &= ~LOOKUP_PARENT; 1776 nd->flags &= ~LOOKUP_PARENT;
1777 nd->flags |= LOOKUP_CREATE;
1778 nd->intent.open.flags = O_EXCL;
1770 1779
1771 /* 1780 /*
1772 * Do the final lookup. 1781 * Do the final lookup.
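The permission() hunk in the fs/namei.c diff above reads i_mode once at the top of the function and adds an early rule: MAY_EXEC on a regular file is refused outright when no execute bit is set, before any filesystem ->permission() hook gets a say. A small userspace sketch of the mode test; MAY_EXEC's value here is assumed for illustration and check_exec() is a hypothetical helper:

#include <errno.h>
#include <sys/stat.h>

#define MAY_EXEC 0x01                   /* assumed flag value, sketch only */

/* Deny exec on a regular file with no execute bit set, regardless of what
 * an ACL or filesystem-specific permission hook might later decide. */
static int check_exec(mode_t mode, int mask)
{
        if ((mask & MAY_EXEC) && S_ISREG(mode) &&
            !(mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
                return -EACCES;
        return 0;
}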
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cc2b874ad5a4..48e892880d5b 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -312,7 +312,13 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
312 312
313static int nfs_release_page(struct page *page, gfp_t gfp) 313static int nfs_release_page(struct page *page, gfp_t gfp)
314{ 314{
315 return !nfs_wb_page(page->mapping->host, page); 315 if (gfp & __GFP_FS)
316 return !nfs_wb_page(page->mapping->host, page);
317 else
318 /*
319 * Avoid deadlock on nfs_wait_on_request().
320 */
321 return 0;
316} 322}
317 323
318const struct address_space_operations nfs_file_aops = { 324const struct address_space_operations nfs_file_aops = {
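The nfs_release_page() change only performs the blocking writeback when the allocation that triggered the release carries __GFP_FS, i.e. is allowed to re-enter filesystem code; GFP_NOFS/GFP_NOIO reclaim simply gets "not releasable", which avoids deadlocking on nfs_wait_on_request(). A rough sketch of the gate with stand-in names (MY_GFP_FS, writeback_and_wait), not the NFS code itself:

#define MY_GFP_FS 0x80u                 /* stand-in for __GFP_FS, sketch only */

struct page;                                    /* opaque for the sketch */
int writeback_and_wait(struct page *page);      /* 0 on success, may block */

/* Return nonzero if the page may be handed back to the allocator. */
static int my_release_page(struct page *page, unsigned int gfp_mask)
{
        if (gfp_mask & MY_GFP_FS)               /* caller may re-enter the fs */
                return writeback_and_wait(page) == 0;
        return 0;       /* reclaim that must not block on fs I/O keeps the page */
}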
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b81e7ed3c902..07a5dd57646e 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -130,9 +130,7 @@ nfs_idmap_delete(struct nfs4_client *clp)
130 130
131 if (!idmap) 131 if (!idmap)
132 return; 132 return;
133 dput(idmap->idmap_dentry); 133 rpc_unlink(idmap->idmap_dentry);
134 idmap->idmap_dentry = NULL;
135 rpc_unlink(idmap->idmap_path);
136 clp->cl_idmap = NULL; 134 clp->cl_idmap = NULL;
137 kfree(idmap); 135 kfree(idmap);
138} 136}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e6ee97f19d81..153898e1331f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2668,7 +2668,7 @@ out:
2668 nfs4_set_cached_acl(inode, acl); 2668 nfs4_set_cached_acl(inode, acl);
2669} 2669}
2670 2670
2671static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 2671static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
2672{ 2672{
2673 struct page *pages[NFS4ACL_MAXPAGES]; 2673 struct page *pages[NFS4ACL_MAXPAGES];
2674 struct nfs_getaclargs args = { 2674 struct nfs_getaclargs args = {
@@ -2721,6 +2721,19 @@ out_free:
2721 return ret; 2721 return ret;
2722} 2722}
2723 2723
2724static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
2725{
2726 struct nfs4_exception exception = { };
2727 ssize_t ret;
2728 do {
2729 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
2730 if (ret >= 0)
2731 break;
2732 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
2733 } while (exception.retry);
2734 return ret;
2735}
2736
2724static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 2737static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
2725{ 2738{
2726 struct nfs_server *server = NFS_SERVER(inode); 2739 struct nfs_server *server = NFS_SERVER(inode);
@@ -2737,7 +2750,7 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
2737 return nfs4_get_acl_uncached(inode, buf, buflen); 2750 return nfs4_get_acl_uncached(inode, buf, buflen);
2738} 2751}
2739 2752
2740static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 2753static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
2741{ 2754{
2742 struct nfs_server *server = NFS_SERVER(inode); 2755 struct nfs_server *server = NFS_SERVER(inode);
2743 struct page *pages[NFS4ACL_MAXPAGES]; 2756 struct page *pages[NFS4ACL_MAXPAGES];
@@ -2763,6 +2776,18 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
2763 return ret; 2776 return ret;
2764} 2777}
2765 2778
2779static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
2780{
2781 struct nfs4_exception exception = { };
2782 int err;
2783 do {
2784 err = nfs4_handle_exception(NFS_SERVER(inode),
2785 __nfs4_proc_set_acl(inode, buf, buflen),
2786 &exception);
2787 } while (exception.retry);
2788 return err;
2789}
2790
2766static int 2791static int
2767nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) 2792nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
2768{ 2793{
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1750d996f49f..730ec8fb31c6 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3355,7 +3355,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3355 struct kvec *iov = rcvbuf->head; 3355 struct kvec *iov = rcvbuf->head;
3356 unsigned int nr, pglen = rcvbuf->page_len; 3356 unsigned int nr, pglen = rcvbuf->page_len;
3357 uint32_t *end, *entry, *p, *kaddr; 3357 uint32_t *end, *entry, *p, *kaddr;
3358 uint32_t len, attrlen; 3358 uint32_t len, attrlen, xlen;
3359 int hdrlen, recvd, status; 3359 int hdrlen, recvd, status;
3360 3360
3361 status = decode_op_hdr(xdr, OP_READDIR); 3361 status = decode_op_hdr(xdr, OP_READDIR);
@@ -3377,10 +3377,10 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3377 3377
3378 BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE); 3378 BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE);
3379 kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0); 3379 kaddr = p = (uint32_t *) kmap_atomic(page, KM_USER0);
3380 end = (uint32_t *) ((char *)p + pglen + readdir->pgbase); 3380 end = p + ((pglen + readdir->pgbase) >> 2);
3381 entry = p; 3381 entry = p;
3382 for (nr = 0; *p++; nr++) { 3382 for (nr = 0; *p++; nr++) {
3383 if (p + 3 > end) 3383 if (end - p < 3)
3384 goto short_pkt; 3384 goto short_pkt;
3385 dprintk("cookie = %Lu, ", *((unsigned long long *)p)); 3385 dprintk("cookie = %Lu, ", *((unsigned long long *)p));
3386 p += 2; /* cookie */ 3386 p += 2; /* cookie */
@@ -3389,18 +3389,19 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3389 printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len); 3389 printk(KERN_WARNING "NFS: giant filename in readdir (len 0x%x)\n", len);
3390 goto err_unmap; 3390 goto err_unmap;
3391 } 3391 }
3392 dprintk("filename = %*s\n", len, (char *)p); 3392 xlen = XDR_QUADLEN(len);
3393 p += XDR_QUADLEN(len); 3393 if (end - p < xlen + 1)
3394 if (p + 1 > end)
3395 goto short_pkt; 3394 goto short_pkt;
3395 dprintk("filename = %*s\n", len, (char *)p);
3396 p += xlen;
3396 len = ntohl(*p++); /* bitmap length */ 3397 len = ntohl(*p++); /* bitmap length */
3397 p += len; 3398 if (end - p < len + 1)
3398 if (p + 1 > end)
3399 goto short_pkt; 3399 goto short_pkt;
3400 p += len;
3400 attrlen = XDR_QUADLEN(ntohl(*p++)); 3401 attrlen = XDR_QUADLEN(ntohl(*p++));
3401 p += attrlen; /* attributes */ 3402 if (end - p < attrlen + 2)
3402 if (p + 2 > end)
3403 goto short_pkt; 3403 goto short_pkt;
3404 p += attrlen; /* attributes */
3404 entry = p; 3405 entry = p;
3405 } 3406 }
3406 if (!nr && (entry[0] != 0 || entry[1] == 0)) 3407 if (!nr && (entry[0] != 0 || entry[1] == 0))
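The decode_readdir() hunk replaces checks of the form p + len > end with end - p < len and validates each length before advancing p. With an untrusted 32-bit count, adding it to a pointer can wrap past the end of the address space and defeat the check, whereas comparing against the room actually left cannot. A self-contained sketch of the idiom; skip_opaque() is a hypothetical helper, not the nfs4xdr code:

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

/*
 * Skip an XDR opaque<> at *pp without running past end.  The length word is
 * untrusted: "p + words > end" can wrap around for a huge value and pass the
 * test, while comparing against the remaining room cannot.  The check is
 * also done before p is advanced, as in the hunk above.
 */
static int skip_opaque(const uint32_t **pp, const uint32_t *end)
{
        const uint32_t *p = *pp;
        uint64_t words;

        if (end - p < 1)
                return -1;                      /* no room for the length word */
        words = ((uint64_t)ntohl(*p++) + 3) >> 2;       /* XDR pads to 4 bytes */
        if (words > (uint64_t)(end - p))
                return -1;                      /* short packet: reject */
        *pp = p + words;
        return 0;
}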
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 65c0c5b32351..da9cf11c326f 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -116,10 +116,17 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
116 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; 116 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
117 base &= ~PAGE_CACHE_MASK; 117 base &= ~PAGE_CACHE_MASK;
118 pglen = PAGE_CACHE_SIZE - base; 118 pglen = PAGE_CACHE_SIZE - base;
119 if (pglen < remainder) 119 for (;;) {
120 if (remainder <= pglen) {
121 memclear_highpage_flush(*pages, base, remainder);
122 break;
123 }
120 memclear_highpage_flush(*pages, base, pglen); 124 memclear_highpage_flush(*pages, base, pglen);
121 else 125 pages++;
122 memclear_highpage_flush(*pages, base, remainder); 126 remainder -= pglen;
127 pglen = PAGE_CACHE_SIZE;
128 base = 0;
129 }
123} 130}
124 131
125/* 132/*
@@ -476,6 +483,8 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
476 unsigned int base = data->args.pgbase; 483 unsigned int base = data->args.pgbase;
477 struct page **pages; 484 struct page **pages;
478 485
486 if (data->res.eof)
487 count = data->args.count;
479 if (unlikely(count == 0)) 488 if (unlikely(count == 0))
480 return; 489 return;
481 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT]; 490 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
@@ -483,11 +492,7 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
483 count += base; 492 count += base;
484 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) 493 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
485 SetPageUptodate(*pages); 494 SetPageUptodate(*pages);
486 /* 495 if (count != 0)
487 * Was this an eof or a short read? If the latter, don't mark the page
488 * as uptodate yet.
489 */
490 if (count > 0 && (data->res.eof || data->args.count == data->res.count))
491 SetPageUptodate(*pages); 496 SetPageUptodate(*pages);
492} 497}
493 498
@@ -502,6 +507,8 @@ static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
502 count += base; 507 count += base;
503 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++) 508 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
504 SetPageError(*pages); 509 SetPageError(*pages);
510 if (count != 0)
511 SetPageError(*pages);
505} 512}
506 513
507/* 514/*
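The nfs_readpage_truncate_uninitialised_page() hunk turns a single clear into a loop so that a remainder larger than the first page is zeroed across successive pages: finish the current page, move to the next, and end with a final partial clear. A plain memset()-based sketch of the same loop; clear_span() and the fixed PAGE_SIZE are assumptions of the sketch:

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096u                 /* assumed page size, sketch only */

/* Zero 'remainder' bytes starting at offset 'base' of pages[0], spilling
 * into the following pages exactly as the loop in the hunk above does. */
static void clear_span(unsigned char **pages, size_t base, size_t remainder)
{
        size_t pglen = PAGE_SIZE - base;

        for (;;) {
                if (remainder <= pglen) {
                        memset(*pages + base, 0, remainder);
                        break;
                }
                memset(*pages + base, 0, pglen);
                pages++;
                remainder -= pglen;
                pglen = PAGE_SIZE;
                base = 0;
        }
}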
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c
index abe91ca03edf..0a5927c806ca 100644
--- a/fs/partitions/sun.c
+++ b/fs/partitions/sun.c
@@ -74,7 +74,7 @@ int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
74 spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); 74 spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
75 for (i = 0; i < 8; i++, p++) { 75 for (i = 0; i < 8; i++, p++) {
76 unsigned long st_sector; 76 unsigned long st_sector;
77 int num_sectors; 77 unsigned int num_sectors;
78 78
79 st_sector = be32_to_cpu(p->start_cylinder) * spc; 79 st_sector = be32_to_cpu(p->start_cylinder) * spc;
80 num_sectors = be32_to_cpu(p->num_sectors); 80 num_sectors = be32_to_cpu(p->num_sectors);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 9f2cfc30f9cf..942156225447 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -169,7 +169,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
169 "Mapped: %8lu kB\n" 169 "Mapped: %8lu kB\n"
170 "Slab: %8lu kB\n" 170 "Slab: %8lu kB\n"
171 "PageTables: %8lu kB\n" 171 "PageTables: %8lu kB\n"
172 "NFS Unstable: %8lu kB\n" 172 "NFS_Unstable: %8lu kB\n"
173 "Bounce: %8lu kB\n" 173 "Bounce: %8lu kB\n"
174 "CommitLimit: %8lu kB\n" 174 "CommitLimit: %8lu kB\n"
175 "Committed_AS: %8lu kB\n" 175 "Committed_AS: %8lu kB\n"
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 39fedaa88a0c..d935fb9394e3 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -424,7 +424,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
424 int res = -ENOTDIR; 424 int res = -ENOTDIR;
425 if (!file->f_op || !file->f_op->readdir) 425 if (!file->f_op || !file->f_op->readdir)
426 goto out; 426 goto out;
427 mutex_lock(&inode->i_mutex); 427 mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR);
428// down(&inode->i_zombie); 428// down(&inode->i_zombie);
429 res = -ENOENT; 429 res = -ENOENT;
430 if (!IS_DEADDIR(inode)) { 430 if (!IS_DEADDIR(inode)) {
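The reiserfs hunk takes the xattr directory's i_mutex with mutex_lock_nested() and the new I_MUTEX_XATTR subclass (added to include/linux/fs.h later in this diff), telling lockdep that holding a normal inode mutex and then the hidden xattr directory's mutex is an intended ordering rather than recursion. A kernel-style sketch of annotating such a nested acquisition; my_obj, lock_pair() and the enum are hypothetical:

#include <linux/mutex.h>

enum my_lock_class { MY_MUTEX_NORMAL, MY_MUTEX_XATTR };

struct my_obj {
        struct mutex lock;
};

/* A parent object and its private xattr child are always locked in this
 * order, so give the inner mutex its own lockdep subclass instead of
 * letting the second mutex_lock() look like recursion. */
static void lock_pair(struct my_obj *parent, struct my_obj *xattr_child)
{
        mutex_lock(&parent->lock);
        mutex_lock_nested(&xattr_child->lock, MY_MUTEX_XATTR);
}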
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 7de172efa084..fcce1a21a51b 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1659,7 +1659,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1659 iput(inode); 1659 iput(inode);
1660 goto error_out; 1660 goto error_out;
1661 } 1661 }
1662 sb->s_maxbytes = MAX_LFS_FILESIZE; 1662 sb->s_maxbytes = 1<<30;
1663 return 0; 1663 return 0;
1664 1664
1665error_out: 1665error_out:
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index e1b0e8cfecb4..0abd66ce36ea 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -239,37 +239,51 @@ void udf_truncate_extents(struct inode * inode)
239 { 239 {
240 if (offset) 240 if (offset)
241 { 241 {
242 extoffset -= adsize; 242 /*
243 etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1); 243 * OK, there is no extent covering inode->i_size and
244 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) 244 * no extent above inode->i_size => truncate is
245 { 245 * extending the file by 'offset'.
246 extoffset -= adsize; 246 */
247 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset); 247 if ((!bh && extoffset == udf_file_entry_alloc_offset(inode)) ||
248 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0); 248 (bh && extoffset == sizeof(struct allocExtDesc))) {
249 /* File has no extents at all! */
250 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
251 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
252 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
249 } 253 }
250 else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) 254 else {
251 {
252 kernel_lb_addr neloc = { 0, 0 };
253 extoffset -= adsize; 255 extoffset -= adsize;
254 nelen = EXT_NOT_RECORDED_NOT_ALLOCATED | 256 etype = udf_next_aext(inode, &bloc, &extoffset, &eloc, &elen, &bh, 1);
255 ((elen + offset + inode->i_sb->s_blocksize - 1) & 257 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
256 ~(inode->i_sb->s_blocksize - 1)); 258 {
257 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1); 259 extoffset -= adsize;
258 udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1); 260 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | (elen + offset);
259 } 261 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 0);
260 else 262 }
261 { 263 else if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
262 if (elen & (inode->i_sb->s_blocksize - 1))
263 { 264 {
265 kernel_lb_addr neloc = { 0, 0 };
264 extoffset -= adsize; 266 extoffset -= adsize;
265 elen = EXT_RECORDED_ALLOCATED | 267 nelen = EXT_NOT_RECORDED_NOT_ALLOCATED |
266 ((elen + inode->i_sb->s_blocksize - 1) & 268 ((elen + offset + inode->i_sb->s_blocksize - 1) &
267 ~(inode->i_sb->s_blocksize - 1)); 269 ~(inode->i_sb->s_blocksize - 1));
268 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1); 270 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
271 udf_add_aext(inode, &bloc, &extoffset, eloc, (etype << 30) | elen, &bh, 1);
272 }
273 else
274 {
275 if (elen & (inode->i_sb->s_blocksize - 1))
276 {
277 extoffset -= adsize;
278 elen = EXT_RECORDED_ALLOCATED |
279 ((elen + inode->i_sb->s_blocksize - 1) &
280 ~(inode->i_sb->s_blocksize - 1));
281 udf_write_aext(inode, bloc, &extoffset, eloc, elen, bh, 1);
282 }
283 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
284 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
285 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
269 } 286 }
270 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
271 elen = EXT_NOT_RECORDED_NOT_ALLOCATED | offset;
272 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &bh, 1);
273 } 287 }
274 } 288 }
275 } 289 }
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index e7c8615beb65..30c6e8a9446c 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -169,18 +169,20 @@ static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh)
169 169
170static struct buffer_head * 170static struct buffer_head *
171ufs_clear_frags(struct inode *inode, sector_t beg, 171ufs_clear_frags(struct inode *inode, sector_t beg,
172 unsigned int n) 172 unsigned int n, sector_t want)
173{ 173{
174 struct buffer_head *res, *bh; 174 struct buffer_head *res = NULL, *bh;
175 sector_t end = beg + n; 175 sector_t end = beg + n;
176 176
177 res = sb_getblk(inode->i_sb, beg); 177 for (; beg < end; ++beg) {
178 ufs_clear_frag(inode, res);
179 for (++beg; beg < end; ++beg) {
180 bh = sb_getblk(inode->i_sb, beg); 178 bh = sb_getblk(inode->i_sb, beg);
181 ufs_clear_frag(inode, bh); 179 ufs_clear_frag(inode, bh);
182 brelse(bh); 180 if (want != beg)
181 brelse(bh);
182 else
183 res = bh;
183 } 184 }
185 BUG_ON(!res);
184 return res; 186 return res;
185} 187}
186 188
@@ -265,7 +267,9 @@ repeat:
265 lastfrag = ufsi->i_lastfrag; 267 lastfrag = ufsi->i_lastfrag;
266 268
267 } 269 }
268 goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; 270 tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]);
271 if (tmp)
272 goal = tmp + uspi->s_fpb;
269 tmp = ufs_new_fragments (inode, p, fragment - blockoff, 273 tmp = ufs_new_fragments (inode, p, fragment - blockoff,
270 goal, required + blockoff, 274 goal, required + blockoff,
271 err, locked_page); 275 err, locked_page);
@@ -277,13 +281,15 @@ repeat:
277 tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), 281 tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff),
278 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), 282 fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff),
279 err, locked_page); 283 err, locked_page);
280 } 284 } else /* (lastblock > block) */ {
281 /* 285 /*
282 * We will allocate new block before last allocated block 286 * We will allocate new block before last allocated block
283 */ 287 */
284 else /* (lastblock > block) */ { 288 if (block) {
285 if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) 289 tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[block-1]);
286 goal = tmp + uspi->s_fpb; 290 if (tmp)
291 goal = tmp + uspi->s_fpb;
292 }
287 tmp = ufs_new_fragments(inode, p, fragment - blockoff, 293 tmp = ufs_new_fragments(inode, p, fragment - blockoff,
288 goal, uspi->s_fpb, err, locked_page); 294 goal, uspi->s_fpb, err, locked_page);
289 } 295 }
@@ -296,7 +302,7 @@ repeat:
296 } 302 }
297 303
298 if (!phys) { 304 if (!phys) {
299 result = ufs_clear_frags(inode, tmp + blockoff, required); 305 result = ufs_clear_frags(inode, tmp, required, tmp + blockoff);
300 } else { 306 } else {
301 *phys = tmp + blockoff; 307 *phys = tmp + blockoff;
302 result = NULL; 308 result = NULL;
@@ -383,7 +389,7 @@ repeat:
383 } 389 }
384 } 390 }
385 391
386 if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1]) + uspi->s_fpb)) 392 if (block && (tmp = fs32_to_cpu(sb, ((__fs32*)bh->b_data)[block-1])))
387 goal = tmp + uspi->s_fpb; 393 goal = tmp + uspi->s_fpb;
388 else 394 else
389 goal = bh->b_blocknr + uspi->s_fpb; 395 goal = bh->b_blocknr + uspi->s_fpb;
@@ -397,7 +403,8 @@ repeat:
397 403
398 404
399 if (!phys) { 405 if (!phys) {
400 result = ufs_clear_frags(inode, tmp + blockoff, uspi->s_fpb); 406 result = ufs_clear_frags(inode, tmp, uspi->s_fpb,
407 tmp + blockoff);
401 } else { 408 } else {
402 *phys = tmp + blockoff; 409 *phys = tmp + blockoff;
403 *new = 1; 410 *new = 1;
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index c9b55872079b..ea11d04c41a0 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -375,17 +375,15 @@ static int ufs_alloc_lastblock(struct inode *inode)
375 int err = 0; 375 int err = 0;
376 struct address_space *mapping = inode->i_mapping; 376 struct address_space *mapping = inode->i_mapping;
377 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi; 377 struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
378 struct ufs_inode_info *ufsi = UFS_I(inode);
379 unsigned lastfrag, i, end; 378 unsigned lastfrag, i, end;
380 struct page *lastpage; 379 struct page *lastpage;
381 struct buffer_head *bh; 380 struct buffer_head *bh;
382 381
383 lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift; 382 lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;
384 383
385 if (!lastfrag) { 384 if (!lastfrag)
386 ufsi->i_lastfrag = 0;
387 goto out; 385 goto out;
388 } 386
389 lastfrag--; 387 lastfrag--;
390 388
391 lastpage = ufs_get_locked_page(mapping, lastfrag >> 389 lastpage = ufs_get_locked_page(mapping, lastfrag >>
@@ -400,25 +398,25 @@ static int ufs_alloc_lastblock(struct inode *inode)
400 for (i = 0; i < end; ++i) 398 for (i = 0; i < end; ++i)
401 bh = bh->b_this_page; 399 bh = bh->b_this_page;
402 400
403 if (!buffer_mapped(bh)) { 401
404 err = ufs_getfrag_block(inode, lastfrag, bh, 1); 402 err = ufs_getfrag_block(inode, lastfrag, bh, 1);
405 403
406 if (unlikely(err)) 404 if (unlikely(err))
407 goto out_unlock; 405 goto out_unlock;
408 406
409 if (buffer_new(bh)) { 407 if (buffer_new(bh)) {
410 clear_buffer_new(bh); 408 clear_buffer_new(bh);
411 unmap_underlying_metadata(bh->b_bdev, 409 unmap_underlying_metadata(bh->b_bdev,
412 bh->b_blocknr); 410 bh->b_blocknr);
413 /* 411 /*
414 * we do not zeroize the fragment because, 412 * we do not zeroize the fragment because,
415 * if it is mapped to a hole, it already contains zeroes 413 * if it is mapped to a hole, it already contains zeroes
416 */ 414 */
417 set_buffer_uptodate(bh); 415 set_buffer_uptodate(bh);
418 mark_buffer_dirty(bh); 416 mark_buffer_dirty(bh);
419 set_page_dirty(lastpage); 417 set_page_dirty(lastpage);
420 }
421 } 418 }
419
422out_unlock: 420out_unlock:
423 ufs_put_locked_page(lastpage); 421 ufs_put_locked_page(lastpage);
424out: 422out:
@@ -440,23 +438,11 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
440 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 438 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
441 return -EPERM; 439 return -EPERM;
442 440
443 if (inode->i_size > old_i_size) { 441 err = ufs_alloc_lastblock(inode);
444 /*
445 * if we expand file we should care about
446 * allocation of block for last byte first of all
447 */
448 err = ufs_alloc_lastblock(inode);
449 442
450 if (err) { 443 if (err) {
451 i_size_write(inode, old_i_size); 444 i_size_write(inode, old_i_size);
452 goto out; 445 goto out;
453 }
454 /*
455 * go away, because of we expand file, and we do not
456 * need free blocks, and zeroizes page
457 */
458 lock_kernel();
459 goto almost_end;
460 } 446 }
461 447
462 block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block); 448 block_truncate_page(inode->i_mapping, inode->i_size, ufs_getfrag_block);
@@ -477,21 +463,8 @@ int ufs_truncate(struct inode *inode, loff_t old_i_size)
477 yield(); 463 yield();
478 } 464 }
479 465
480 if (inode->i_size < old_i_size) {
481 /*
482 * now we should have enough space
483 * to allocate block for last byte
484 */
485 err = ufs_alloc_lastblock(inode);
486 if (err)
487 /*
488 * looks like all the same - we have no space,
489 * but we truncate file already
490 */
491 inode->i_size = (ufsi->i_lastfrag - 1) * uspi->s_fsize;
492 }
493almost_end:
494 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 466 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
467 ufsi->i_lastfrag = DIRECT_FRAGMENT;
495 unlock_kernel(); 468 unlock_kernel();
496 mark_inode_dirty(inode); 469 mark_inode_dirty(inode);
497out: 470out:
diff --git a/include/asm-arm/arch-s3c2410/dma.h b/include/asm-arm/arch-s3c2410/dma.h
index 72964f9b8414..7463fd5252ce 100644
--- a/include/asm-arm/arch-s3c2410/dma.h
+++ b/include/asm-arm/arch-s3c2410/dma.h
@@ -104,6 +104,7 @@ enum s3c2410_chan_op_e {
104 S3C2410_DMAOP_RESUME, 104 S3C2410_DMAOP_RESUME,
105 S3C2410_DMAOP_FLUSH, 105 S3C2410_DMAOP_FLUSH,
106 S3C2410_DMAOP_TIMEOUT, /* internal signal to handler */ 106 S3C2410_DMAOP_TIMEOUT, /* internal signal to handler */
107 S3C2410_DMAOP_STARTED, /* indicate channel started */
107}; 108};
108 109
109typedef enum s3c2410_chan_op_e s3c2410_chan_op_t; 110typedef enum s3c2410_chan_op_e s3c2410_chan_op_t;
diff --git a/include/asm-arm/arch-s3c2410/regs-rtc.h b/include/asm-arm/arch-s3c2410/regs-rtc.h
index 228983f89bc8..0fbec07bb6b8 100644
--- a/include/asm-arm/arch-s3c2410/regs-rtc.h
+++ b/include/asm-arm/arch-s3c2410/regs-rtc.h
@@ -18,7 +18,7 @@
18#ifndef __ASM_ARCH_REGS_RTC_H 18#ifndef __ASM_ARCH_REGS_RTC_H
19#define __ASM_ARCH_REGS_RTC_H __FILE__ 19#define __ASM_ARCH_REGS_RTC_H __FILE__
20 20
21#define S3C2410_RTCREG(x) ((x) + S3C24XX_VA_RTC) 21#define S3C2410_RTCREG(x) (x)
22 22
23#define S3C2410_RTCCON S3C2410_RTCREG(0x40) 23#define S3C2410_RTCCON S3C2410_RTCREG(0x40)
24#define S3C2410_RTCCON_RTCEN (1<<0) 24#define S3C2410_RTCCON_RTCEN (1<<0)
diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h
index edb7b6502fcf..91a31adfa8a8 100644
--- a/include/asm-arm/procinfo.h
+++ b/include/asm-arm/procinfo.h
@@ -55,5 +55,6 @@ extern unsigned int elf_hwcap;
55#define HWCAP_VFP 64 55#define HWCAP_VFP 64
56#define HWCAP_EDSP 128 56#define HWCAP_EDSP 128
57#define HWCAP_JAVA 256 57#define HWCAP_JAVA 256
58#define HWCAP_IWMMXT 512
58 59
59#endif 60#endif
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index e33e9f9e4c66..22cb07cc8f32 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -14,7 +14,7 @@ extern struct pglist_data *node_data[];
14 14
15#ifdef CONFIG_X86_NUMAQ 15#ifdef CONFIG_X86_NUMAQ
16 #include <asm/numaq.h> 16 #include <asm/numaq.h>
17#else /* summit or generic arch */ 17#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */
18 #include <asm/srat.h> 18 #include <asm/srat.h>
19#endif 19#endif
20 20
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index 9f0917c68659..ae63db7b3e7d 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -117,7 +117,7 @@ static inline void pte_free(struct page *ptepage)
117 pte_free_kernel(page_address(ptepage)); 117 pte_free_kernel(page_address(ptepage));
118} 118}
119 119
120#define PGF_CACHENUM_MASK 0xf 120#define PGF_CACHENUM_MASK 0x3
121 121
122typedef struct pgtable_free { 122typedef struct pgtable_free {
123 unsigned long val; 123 unsigned long val;
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 7307aa775671..4c9f5229e833 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -53,6 +53,15 @@
53#define smp_read_barrier_depends() do { } while(0) 53#define smp_read_barrier_depends() do { } while(0)
54#endif /* CONFIG_SMP */ 54#endif /* CONFIG_SMP */
55 55
56/*
57 * This is a barrier which prevents following instructions from being
58 * started until the value of the argument x is known. For example, if
59 * x is a variable loaded from memory, this prevents following
60 * instructions from being executed until the load has been performed.
61 */
62#define data_barrier(x) \
63 asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
64
56struct task_struct; 65struct task_struct;
57struct pt_regs; 66struct pt_regs;
58 67
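The comment added with data_barrier() explains the trick: a never-taken trap on the value plus isync keeps later instructions from starting until that value is known, which orders a dependent load after the load that produced it. A hedged PowerPC-only usage sketch built on the macro above; ready, payload and read_when_ready() are invented names, and the producer side is assumed to publish payload before setting ready, with a write barrier in between.

/* PowerPC only; relies on the data_barrier() macro added above. */
extern volatile int ready;              /* producer sets this last, after a wmb */
extern int payload;                     /* published before 'ready' */

static int read_when_ready(void)
{
        int r;

        while ((r = ready) == 0)
                ;                       /* spin until the flag is observed */
        data_barrier(r);                /* no later load starts before r is known */
        return payload;                 /* so this load sees the published data */
}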
diff --git a/include/asm-powerpc/tsi108.h b/include/asm-powerpc/tsi108.h
index c4c278d72f71..2c702d35a7cf 100644
--- a/include/asm-powerpc/tsi108.h
+++ b/include/asm-powerpc/tsi108.h
@@ -1,16 +1,18 @@
1/* 1/*
2 * include/asm-ppc/tsi108.h
3 *
4 * common routine and memory layout for Tundra TSI108(Grendel) host bridge 2 * common routine and memory layout for Tundra TSI108(Grendel) host bridge
5 * memory controller. 3 * memory controller.
6 * 4 *
7 * Author: Jacob Pan (jacob.pan@freescale.com) 5 * Author: Jacob Pan (jacob.pan@freescale.com)
8 * Alex Bounine (alexandreb@tundra.com) 6 * Alex Bounine (alexandreb@tundra.com)
9 * 2004 (c) Freescale Semiconductor Inc. This file is licensed under 7 *
10 * the terms of the GNU General Public License version 2. This program 8 * Copyright 2004-2006 Freescale Semiconductor, Inc.
11 * is licensed "as is" without any warranty of any kind, whether express 9 *
12 * or implied. 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
13 */ 14 */
15
14#ifndef __PPC_KERNEL_TSI108_H 16#ifndef __PPC_KERNEL_TSI108_H
15#define __PPC_KERNEL_TSI108_H 17#define __PPC_KERNEL_TSI108_H
16 18
diff --git a/include/asm-powerpc/tsi108_irq.h b/include/asm-powerpc/tsi108_irq.h
new file mode 100644
index 000000000000..3e4d04effa57
--- /dev/null
+++ b/include/asm-powerpc/tsi108_irq.h
@@ -0,0 +1,124 @@
1/*
2 * (C) Copyright 2005 Tundra Semiconductor Corp.
3 * Alex Bounine, <alexandreb at tundra.com>.
4 *
5 * See file CREDITS for list of people who contributed to this
6 * project.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24/*
25 * definitions for interrupt controller initialization and external interrupt
26 * demultiplexing on TSI108EMU/SVB boards.
27 */
28
29#ifndef _ASM_PPC_TSI108_IRQ_H
30#define _ASM_PPC_TSI108_IRQ_H
31
32/*
33 * Tsi108 interrupts
34 */
35#ifndef TSI108_IRQ_REG_BASE
36#define TSI108_IRQ_REG_BASE 0
37#endif
38
39#define TSI108_IRQ(x) (TSI108_IRQ_REG_BASE + (x))
40
41#define TSI108_MAX_VECTORS (36 + 4) /* 36 sources + PCI INT demux */
42#define MAX_TASK_PRIO 0xF
43
44#define TSI108_IRQ_SPURIOUS (TSI108_MAX_VECTORS)
45
46#define DEFAULT_PRIO_LVL 10 /* initial priority level */
47
48/* Interrupt vectors assignment to external and internal
49 * sources of requests. */
50
51/* EXTERNAL INTERRUPT SOURCES */
52
53#define IRQ_TSI108_EXT_INT0 TSI108_IRQ(0) /* External Source at INT[0] */
54#define IRQ_TSI108_EXT_INT1 TSI108_IRQ(1) /* External Source at INT[1] */
55#define IRQ_TSI108_EXT_INT2 TSI108_IRQ(2) /* External Source at INT[2] */
56#define IRQ_TSI108_EXT_INT3 TSI108_IRQ(3) /* External Source at INT[3] */
57
58/* INTERNAL INTERRUPT SOURCES */
59
60#define IRQ_TSI108_RESERVED0 TSI108_IRQ(4) /* Reserved IRQ */
61#define IRQ_TSI108_RESERVED1 TSI108_IRQ(5) /* Reserved IRQ */
62#define IRQ_TSI108_RESERVED2 TSI108_IRQ(6) /* Reserved IRQ */
63#define IRQ_TSI108_RESERVED3 TSI108_IRQ(7) /* Reserved IRQ */
64#define IRQ_TSI108_DMA0 TSI108_IRQ(8) /* DMA0 */
65#define IRQ_TSI108_DMA1 TSI108_IRQ(9) /* DMA1 */
66#define IRQ_TSI108_DMA2 TSI108_IRQ(10) /* DMA2 */
67#define IRQ_TSI108_DMA3 TSI108_IRQ(11) /* DMA3 */
68#define IRQ_TSI108_UART0 TSI108_IRQ(12) /* UART0 */
69#define IRQ_TSI108_UART1 TSI108_IRQ(13) /* UART1 */
70#define IRQ_TSI108_I2C TSI108_IRQ(14) /* I2C */
71#define IRQ_TSI108_GPIO TSI108_IRQ(15) /* GPIO */
72#define IRQ_TSI108_GIGE0 TSI108_IRQ(16) /* GIGE0 */
73#define IRQ_TSI108_GIGE1 TSI108_IRQ(17) /* GIGE1 */
74#define IRQ_TSI108_RESERVED4 TSI108_IRQ(18) /* Reserved IRQ */
75#define IRQ_TSI108_HLP TSI108_IRQ(19) /* HLP */
76#define IRQ_TSI108_SDRAM TSI108_IRQ(20) /* SDC */
77#define IRQ_TSI108_PROC_IF TSI108_IRQ(21) /* Processor IF */
78#define IRQ_TSI108_RESERVED5 TSI108_IRQ(22) /* Reserved IRQ */
79#define IRQ_TSI108_PCI TSI108_IRQ(23) /* PCI/X block */
80
81#define IRQ_TSI108_MBOX0 TSI108_IRQ(24) /* Mailbox 0 register */
82#define IRQ_TSI108_MBOX1 TSI108_IRQ(25) /* Mailbox 1 register */
83#define IRQ_TSI108_MBOX2 TSI108_IRQ(26) /* Mailbox 2 register */
84#define IRQ_TSI108_MBOX3 TSI108_IRQ(27) /* Mailbox 3 register */
85
86#define IRQ_TSI108_DBELL0 TSI108_IRQ(28) /* Doorbell 0 */
87#define IRQ_TSI108_DBELL1 TSI108_IRQ(29) /* Doorbell 1 */
88#define IRQ_TSI108_DBELL2 TSI108_IRQ(30) /* Doorbell 2 */
89#define IRQ_TSI108_DBELL3 TSI108_IRQ(31) /* Doorbell 3 */
90
91#define IRQ_TSI108_TIMER0 TSI108_IRQ(32) /* Global Timer 0 */
92#define IRQ_TSI108_TIMER1 TSI108_IRQ(33) /* Global Timer 1 */
93#define IRQ_TSI108_TIMER2 TSI108_IRQ(34) /* Global Timer 2 */
94#define IRQ_TSI108_TIMER3 TSI108_IRQ(35) /* Global Timer 3 */
95
96/*
97 * PCI bus INTA# - INTD# lines demultiplexor
98 */
99#define IRQ_PCI_INTAD_BASE TSI108_IRQ(36)
100#define IRQ_PCI_INTA (IRQ_PCI_INTAD_BASE + 0)
101#define IRQ_PCI_INTB (IRQ_PCI_INTAD_BASE + 1)
102#define IRQ_PCI_INTC (IRQ_PCI_INTAD_BASE + 2)
103#define IRQ_PCI_INTD (IRQ_PCI_INTAD_BASE + 3)
104#define NUM_PCI_IRQS (4)
105
106/* number of entries in vector dispatch table */
107#define IRQ_TSI108_TAB_SIZE (TSI108_MAX_VECTORS + 1)
108
109/* Mapping of MPIC outputs to processors' interrupt pins */
110
111#define IDIR_INT_OUT0 0x1
112#define IDIR_INT_OUT1 0x2
113#define IDIR_INT_OUT2 0x4
114#define IDIR_INT_OUT3 0x8
115
116/*---------------------------------------------------------------
117 * IRQ line configuration parameters */
118
119/* Interrupt delivery modes */
120typedef enum {
121 TSI108_IRQ_DIRECTED,
122 TSI108_IRQ_DISTRIBUTED,
123} TSI108_IRQ_MODE;
124#endif /* _ASM_PPC_TSI108_IRQ_H */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 1ba19eb34ce3..ebfe395cfb87 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -234,7 +234,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
234 sz_bits = 0UL; 234 sz_bits = 0UL;
235 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) { 235 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
236 __asm__ __volatile__( 236 __asm__ __volatile__(
237 "\n661: sethi %uhi(%1), %0\n" 237 "\n661: sethi %%uhi(%1), %0\n"
238 " sllx %0, 32, %0\n" 238 " sllx %0, 32, %0\n"
239 " .section .sun4v_2insn_patch, \"ax\"\n" 239 " .section .sun4v_2insn_patch, \"ax\"\n"
240 " .word 661b\n" 240 " .word 661b\n"
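The sparc64 fix writes %%uhi instead of %uhi: once an extended asm statement has operands, gcc treats a single % as an operand escape, so a literal % destined for the assembler must be doubled. A tiny self-contained illustration of the same rule on x86-64 (unrelated to the sparc code itself):

/* x86-64, gcc extended asm: with operands present, '%' introduces an operand
 * reference, so a literal register name must be written with '%%'. */
static inline unsigned long read_sp(void)
{
        unsigned long sp;

        __asm__ __volatile__("mov %%rsp, %0" : "=r" (sp));
        return sp;
}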
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 269d000bb2a3..bea0255196c4 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -216,6 +216,7 @@ COMPATIBLE_IOCTL(VT_RESIZE)
216COMPATIBLE_IOCTL(VT_RESIZEX) 216COMPATIBLE_IOCTL(VT_RESIZEX)
217COMPATIBLE_IOCTL(VT_LOCKSWITCH) 217COMPATIBLE_IOCTL(VT_LOCKSWITCH)
218COMPATIBLE_IOCTL(VT_UNLOCKSWITCH) 218COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
219COMPATIBLE_IOCTL(VT_GETHIFONTMASK)
219/* Little p (/dev/rtc, /dev/envctrl, etc.) */ 220/* Little p (/dev/rtc, /dev/envctrl, etc.) */
220COMPATIBLE_IOCTL(RTC_AIE_ON) 221COMPATIBLE_IOCTL(RTC_AIE_ON)
221COMPATIBLE_IOCTL(RTC_AIE_OFF) 222COMPATIBLE_IOCTL(RTC_AIE_OFF)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 25610205c90d..555bc195c420 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -570,13 +570,14 @@ struct inode {
570 * 3: quota file 570 * 3: quota file
571 * 571 *
572 * The locking order between these classes is 572 * The locking order between these classes is
573 * parent -> child -> normal -> quota 573 * parent -> child -> normal -> xattr -> quota
574 */ 574 */
575enum inode_i_mutex_lock_class 575enum inode_i_mutex_lock_class
576{ 576{
577 I_MUTEX_NORMAL, 577 I_MUTEX_NORMAL,
578 I_MUTEX_PARENT, 578 I_MUTEX_PARENT,
579 I_MUTEX_CHILD, 579 I_MUTEX_CHILD,
580 I_MUTEX_XATTR,
580 I_MUTEX_QUOTA 581 I_MUTEX_QUOTA
581}; 582};
582 583
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 88d5961f7a3f..8e2042b9d471 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -59,27 +59,6 @@ static inline int task_nice_ioprio(struct task_struct *task)
59/* 59/*
60 * For inheritance, return the highest of the two given priorities 60 * For inheritance, return the highest of the two given priorities
61 */ 61 */
62static inline int ioprio_best(unsigned short aprio, unsigned short bprio) 62extern int ioprio_best(unsigned short aprio, unsigned short bprio);
63{
64 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
65 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
66
67 if (!ioprio_valid(aprio))
68 return bprio;
69 if (!ioprio_valid(bprio))
70 return aprio;
71
72 if (aclass == IOPRIO_CLASS_NONE)
73 aclass = IOPRIO_CLASS_BE;
74 if (bclass == IOPRIO_CLASS_NONE)
75 bclass = IOPRIO_CLASS_BE;
76
77 if (aclass == bclass)
78 return min(aprio, bprio);
79 if (aclass > bclass)
80 return bprio;
81 else
82 return aprio;
83}
84 63
85#endif 64#endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 20eb34403d0c..a04c154c5207 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -72,6 +72,9 @@ extern int journal_enable_debug;
72#endif 72#endif
73 73
74extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry); 74extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
75extern void * jbd_slab_alloc(size_t size, gfp_t flags);
76extern void jbd_slab_free(void *ptr, size_t size);
77
75#define jbd_kmalloc(size, flags) \ 78#define jbd_kmalloc(size, flags) \
76 __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) 79 __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
77#define jbd_rep_kmalloc(size, flags) \ 80#define jbd_rep_kmalloc(size, flags) \
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 10c13dc4665b..427c67ff89e9 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -48,15 +48,25 @@ enum nf_br_hook_priorities {
48 48
49/* Only used in br_forward.c */ 49/* Only used in br_forward.c */
50static inline 50static inline
51void nf_bridge_maybe_copy_header(struct sk_buff *skb) 51int nf_bridge_maybe_copy_header(struct sk_buff *skb)
52{ 52{
53 int err;
54
53 if (skb->nf_bridge) { 55 if (skb->nf_bridge) {
54 if (skb->protocol == __constant_htons(ETH_P_8021Q)) { 56 if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
57 err = skb_cow(skb, 18);
58 if (err)
59 return err;
55 memcpy(skb->data - 18, skb->nf_bridge->data, 18); 60 memcpy(skb->data - 18, skb->nf_bridge->data, 18);
56 skb_push(skb, 4); 61 skb_push(skb, 4);
57 } else 62 } else {
63 err = skb_cow(skb, 16);
64 if (err)
65 return err;
58 memcpy(skb->data - 16, skb->nf_bridge->data, 16); 66 memcpy(skb->data - 16, skb->nf_bridge->data, 16);
67 }
59 } 68 }
69 return 0;
60} 70}
61 71
62/* This is called by the IP fragmenting code and it ensures there is 72/* This is called by the IP fragmenting code and it ensures there is
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 2d3fb6416d91..db9cbf68e12b 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -659,7 +659,7 @@ struct nfs4_rename_res {
659struct nfs4_setclientid { 659struct nfs4_setclientid {
660 const nfs4_verifier * sc_verifier; /* request */ 660 const nfs4_verifier * sc_verifier; /* request */
661 unsigned int sc_name_len; 661 unsigned int sc_name_len;
662 char sc_name[32]; /* request */ 662 char sc_name[48]; /* request */
663 u32 sc_prog; /* request */ 663 u32 sc_prog; /* request */
664 unsigned int sc_netid_len; 664 unsigned int sc_netid_len;
665 char sc_netid[4]; /* request */ 665 char sc_netid[4]; /* request */
diff --git a/include/linux/node.h b/include/linux/node.h
index 81dcec84cd8f..bc001bc225c3 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -30,12 +30,20 @@ extern struct node node_devices[];
30 30
31extern int register_node(struct node *, int, struct node *); 31extern int register_node(struct node *, int, struct node *);
32extern void unregister_node(struct node *node); 32extern void unregister_node(struct node *node);
33#ifdef CONFIG_NUMA
33extern int register_one_node(int nid); 34extern int register_one_node(int nid);
34extern void unregister_one_node(int nid); 35extern void unregister_one_node(int nid);
35#ifdef CONFIG_NUMA
36extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); 36extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
37extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); 37extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
38#else 38#else
39static inline int register_one_node(int nid)
40{
41 return 0;
42}
43static inline int unregister_one_node(int nid)
44{
45 return 0;
46}
39static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid) 47static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
40{ 48{
41 return 0; 49 return 0;
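The node.h hunk moves register_one_node()/unregister_one_node() under CONFIG_NUMA and supplies do-nothing static inline stubs for the non-NUMA build, so callers compile unchanged in either configuration. A generic sketch of the idiom with a made-up CONFIG_MYFEATURE:

#ifdef CONFIG_MYFEATURE
extern int myfeature_register(int id);
extern void myfeature_unregister(int id);
#else
/* Compiled-out configuration: callers build unchanged and the calls vanish. */
static inline int myfeature_register(int id) { return 0; }
static inline void myfeature_unregister(int id) { }
#endif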
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index 2c2189cb30aa..a481472c9484 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -42,9 +42,9 @@ RPC_I(struct inode *inode)
42extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); 42extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
43 43
44extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *); 44extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *);
45extern int rpc_rmdir(char *); 45extern int rpc_rmdir(struct dentry *);
46extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags); 46extern struct dentry *rpc_mkpipe(char *, void *, struct rpc_pipe_ops *, int flags);
47extern int rpc_unlink(char *); 47extern int rpc_unlink(struct dentry *);
48extern struct vfsmount *rpc_get_mount(void); 48extern struct vfsmount *rpc_get_mount(void);
49extern void rpc_put_mount(void); 49extern void rpc_put_mount(void);
50 50
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 840e47a4ccc5..3a0cca255b76 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -37,7 +37,7 @@ extern unsigned int xprt_max_resvport;
37 37
38#define RPC_MIN_RESVPORT (1U) 38#define RPC_MIN_RESVPORT (1U)
39#define RPC_MAX_RESVPORT (65535U) 39#define RPC_MAX_RESVPORT (65535U)
40#define RPC_DEF_MIN_RESVPORT (650U) 40#define RPC_DEF_MIN_RESVPORT (665U)
41#define RPC_DEF_MAX_RESVPORT (1023U) 41#define RPC_DEF_MAX_RESVPORT (1023U)
42 42
43/* 43/*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e421d5e34818..04827ca65781 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -59,6 +59,7 @@ struct tty_bufhead {
59 struct tty_buffer *head; /* Queue head */ 59 struct tty_buffer *head; /* Queue head */
60 struct tty_buffer *tail; /* Active buffer */ 60 struct tty_buffer *tail; /* Active buffer */
61 struct tty_buffer *free; /* Free queue head */ 61 struct tty_buffer *free; /* Free queue head */
62 int memory_used; /* Buffer space used excluding free queue */
62}; 63};
63/* 64/*
64 * The pty uses char_buf and flag_buf as a contiguous buffer 65 * The pty uses char_buf and flag_buf as a contiguous buffer
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 8ab334a48222..ba806e8711be 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -60,5 +60,6 @@ struct vt_consize {
60#define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */ 60#define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */
61#define VT_LOCKSWITCH 0x560B /* disallow vt switching */ 61#define VT_LOCKSWITCH 0x560B /* disallow vt switching */
62#define VT_UNLOCKSWITCH 0x560C /* allow vt switching */ 62#define VT_UNLOCKSWITCH 0x560C /* allow vt switching */
63#define VT_GETHIFONTMASK 0x560D /* return hi font mask */
63 64
64#endif /* _LINUX_VT_H */ 65#endif /* _LINUX_VT_H */
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a9663b49ea54..92eae0e0f3f1 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -404,19 +404,6 @@ static inline int sctp_list_single_entry(struct list_head *head)
404 return ((head->next != head) && (head->next == head->prev)); 404 return ((head->next != head) && (head->next == head->prev));
405} 405}
406 406
407/* Calculate the size (in bytes) occupied by the data of an iovec. */
408static inline size_t get_user_iov_size(struct iovec *iov, int iovlen)
409{
410 size_t retval = 0;
411
412 for (; iovlen > 0; --iovlen) {
413 retval += iov->iov_len;
414 iov++;
415 }
416
417 return retval;
418}
419
420/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */ 407/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
421static inline __s32 sctp_jitter(__u32 rto) 408static inline __s32 sctp_jitter(__u32 rto)
422{ 409{
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 1eac3d0eb7a9..de313de4fefe 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -221,8 +221,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *,
221 const struct sctp_chunk *, 221 const struct sctp_chunk *,
222 __u32 tsn); 222 __u32 tsn);
223struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, 223struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *,
224 const struct sctp_chunk *, 224 const struct msghdr *, size_t msg_len);
225 const struct msghdr *);
226struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, 225struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *,
227 const struct sctp_chunk *, 226 const struct sctp_chunk *,
228 const __u8 *, 227 const __u8 *,
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index ba2760802ded..41904f611d12 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -60,6 +60,7 @@ struct iscsi_nopin;
60#define TMABORT_SUCCESS 0x1 60#define TMABORT_SUCCESS 0x1
61#define TMABORT_FAILED 0x2 61#define TMABORT_FAILED 0x2
62#define TMABORT_TIMEDOUT 0x3 62#define TMABORT_TIMEDOUT 0x3
63#define TMABORT_NOT_FOUND 0x4
63 64
64/* Connection suspend "bit" */ 65/* Connection suspend "bit" */
65#define ISCSI_SUSPEND_BIT 1 66#define ISCSI_SUSPEND_BIT 1
@@ -83,6 +84,12 @@ struct iscsi_mgmt_task {
83 struct list_head running; 84 struct list_head running;
84}; 85};
85 86
87enum {
88 ISCSI_TASK_COMPLETED,
89 ISCSI_TASK_PENDING,
90 ISCSI_TASK_RUNNING,
91};
92
86struct iscsi_cmd_task { 93struct iscsi_cmd_task {
87 /* 94 /*
88 * Because LLDs allocate their hdr differently, this is a pointer to 95
@@ -101,6 +108,8 @@ struct iscsi_cmd_task {
101 struct iscsi_conn *conn; /* used connection */ 108 struct iscsi_conn *conn; /* used connection */
102 struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */ 109 struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */
103 110
111 /* state set/tested under session->lock */
112 int state;
104 struct list_head running; /* running cmd list */ 113 struct list_head running; /* running cmd list */
105 void *dd_data; /* driver/transport data */ 114 void *dd_data; /* driver/transport data */
106}; 115};
@@ -126,6 +135,14 @@ struct iscsi_conn {
126 int id; /* CID */ 135 int id; /* CID */
127 struct list_head item; /* maintains list of conns */ 136 struct list_head item; /* maintains list of conns */
128 int c_stage; /* connection state */ 137 int c_stage; /* connection state */
138 /*
139 * Preallocated buffer for pdus that have data but do not
140 * originate from scsi-ml. We never have two pdus using the
141 * buffer at the same time. It is only allocated to
142 * the default max recv size because the pdus we support
143 * should always fit in this buffer
144 */
145 char *data;
129 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */ 146 struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
130 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */ 147 struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
131 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */ 148 struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
@@ -134,7 +151,7 @@ struct iscsi_conn {
134 struct kfifo *immqueue; /* immediate xmit queue */ 151 struct kfifo *immqueue; /* immediate xmit queue */
135 struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */ 152 struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */
136 struct list_head mgmt_run_list; /* list of control tasks */ 153 struct list_head mgmt_run_list; /* list of control tasks */
137 struct kfifo *xmitqueue; /* data-path cmd queue */ 154 struct list_head xmitqueue; /* data-path cmd queue */
138 struct list_head run_list; /* list of cmds in progress */ 155 struct list_head run_list; /* list of cmds in progress */
139 struct work_struct xmitwork; /* per-conn. xmit workqueue */ 156 struct work_struct xmitwork; /* per-conn. xmit workqueue */
140 /* 157 /*
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 5a3df1d7085f..39e833260bd0 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -57,8 +57,6 @@ struct sockaddr;
57 * @stop_conn: suspend/recover/terminate connection 57 * @stop_conn: suspend/recover/terminate connection
58 * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text. 58 * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
59 * @session_recovery_timedout: notify LLD a block during recovery timed out 59 * @session_recovery_timedout: notify LLD a block during recovery timed out
60 * @suspend_conn_recv: susepend the recv side of the connection
61 * @termincate_conn: destroy socket connection. Called with mutex lock.
62 * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs. 60 * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
63 * Called from queuecommand with session lock held. 61 * Called from queuecommand with session lock held.
64 * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs. 62 * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
@@ -112,8 +110,6 @@ struct iscsi_transport {
112 char *data, uint32_t data_size); 110 char *data, uint32_t data_size);
113 void (*get_stats) (struct iscsi_cls_conn *conn, 111 void (*get_stats) (struct iscsi_cls_conn *conn,
114 struct iscsi_stats *stats); 112 struct iscsi_stats *stats);
115 void (*suspend_conn_recv) (struct iscsi_conn *conn);
116 void (*terminate_conn) (struct iscsi_conn *conn);
117 void (*init_cmd_task) (struct iscsi_cmd_task *ctask); 113 void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
118 void (*init_mgmt_task) (struct iscsi_conn *conn, 114 void (*init_mgmt_task) (struct iscsi_conn *conn,
119 struct iscsi_mgmt_task *mtask, 115 struct iscsi_mgmt_task *mtask,
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1a649f2bb9bb..4ea6f0dc2fc5 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -816,6 +816,10 @@ static int update_cpumask(struct cpuset *cs, char *buf)
816 struct cpuset trialcs; 816 struct cpuset trialcs;
817 int retval, cpus_unchanged; 817 int retval, cpus_unchanged;
818 818
819 /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
820 if (cs == &top_cpuset)
821 return -EACCES;
822
819 trialcs = *cs; 823 trialcs = *cs;
820 retval = cpulist_parse(buf, trialcs.cpus_allowed); 824 retval = cpulist_parse(buf, trialcs.cpus_allowed);
821 if (retval < 0) 825 if (retval < 0)
@@ -2033,6 +2037,33 @@ out:
2033 return err; 2037 return err;
2034} 2038}
2035 2039
2040/*
2041 * The top_cpuset tracks what CPUs and Memory Nodes are online,
2042 * period. This is necessary in order to make cpusets transparent
2043 * (of no effect) on systems that are actively using CPU hotplug
2044 * but making no active use of cpusets.
2045 *
2046 * This handles CPU hotplug (cpuhp) events. If someday Memory
2047 * Nodes can be hotplugged (dynamically changing node_online_map)
2048 * then we should handle that too, perhaps in a similar way.
2049 */
2050
2051#ifdef CONFIG_HOTPLUG_CPU
2052static int cpuset_handle_cpuhp(struct notifier_block *nb,
2053 unsigned long phase, void *cpu)
2054{
2055 mutex_lock(&manage_mutex);
2056 mutex_lock(&callback_mutex);
2057
2058 top_cpuset.cpus_allowed = cpu_online_map;
2059
2060 mutex_unlock(&callback_mutex);
2061 mutex_unlock(&manage_mutex);
2062
2063 return 0;
2064}
2065#endif
2066
2036/** 2067/**
2037 * cpuset_init_smp - initialize cpus_allowed 2068 * cpuset_init_smp - initialize cpus_allowed
2038 * 2069 *
@@ -2043,6 +2074,8 @@ void __init cpuset_init_smp(void)
2043{ 2074{
2044 top_cpuset.cpus_allowed = cpu_online_map; 2075 top_cpuset.cpus_allowed = cpu_online_map;
2045 top_cpuset.mems_allowed = node_online_map; 2076 top_cpuset.mems_allowed = node_online_map;
2077
2078 hotcpu_notifier(cpuset_handle_cpuhp, 0);
2046} 2079}
2047 2080
2048/** 2081/**
@@ -2387,7 +2420,7 @@ EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2387int cpuset_excl_nodes_overlap(const struct task_struct *p) 2420int cpuset_excl_nodes_overlap(const struct task_struct *p)
2388{ 2421{
2389 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */ 2422 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
2390 int overlap = 0; /* do cpusets overlap? */ 2423 int overlap = 1; /* do cpusets overlap? */
2391 2424
2392 task_lock(current); 2425 task_lock(current);
2393 if (current->flags & PF_EXITING) { 2426 if (current->flags & PF_EXITING) {
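The cpuset hunks make top_cpuset.cpus_allowed a read-only mirror of cpu_online_map: direct writes now fail with -EACCES and a CPU-hotplug notifier re-copies the map whenever a CPU comes or goes. A kernel-style sketch of registering such a callback with the hotcpu_notifier() API of this era; my_cpuhp_callback(), refresh_online_mask() and my_init_smp() are hypothetical names, and the real code additionally guards the callback with CONFIG_HOTPLUG_CPU:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Assumed helper that re-copies cpu_online_map under the right locks. */
extern void refresh_online_mask(void);

static int my_cpuhp_callback(struct notifier_block *nb,
                             unsigned long action, void *hcpu)
{
        refresh_online_mask();
        return NOTIFY_DONE;
}

static void __init my_init_smp(void)
{
        hotcpu_notifier(my_cpuhp_callback, 0);  /* priority 0, as in the hunk */
}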
diff --git a/kernel/futex.c b/kernel/futex.c
index d4633c588f33..b9b8aea5389e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -397,7 +397,7 @@ static struct task_struct * futex_find_get_task(pid_t pid)
397 p = NULL; 397 p = NULL;
398 goto out_unlock; 398 goto out_unlock;
399 } 399 }
400 if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) { 400 if (p->exit_state != 0) {
401 p = NULL; 401 p = NULL;
402 goto out_unlock; 402 goto out_unlock;
403 } 403 }
diff --git a/kernel/sched.c b/kernel/sched.c
index a2be2d055299..a234fbee1238 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4162,10 +4162,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4162 read_unlock_irq(&tasklist_lock); 4162 read_unlock_irq(&tasklist_lock);
4163 return -ESRCH; 4163 return -ESRCH;
4164 } 4164 }
4165 get_task_struct(p);
4166 read_unlock_irq(&tasklist_lock);
4167 retval = sched_setscheduler(p, policy, &lparam); 4165 retval = sched_setscheduler(p, policy, &lparam);
4168 put_task_struct(p); 4166 read_unlock_irq(&tasklist_lock);
4169 4167
4170 return retval; 4168 return retval;
4171} 4169}
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index dcfb5d731466..51cacd111dbd 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -111,7 +111,6 @@ static int stop_machine(void)
111 /* If some failed, kill them all. */ 111 /* If some failed, kill them all. */
112 if (ret < 0) { 112 if (ret < 0) {
113 stopmachine_set_state(STOPMACHINE_EXIT); 113 stopmachine_set_state(STOPMACHINE_EXIT);
114 up(&stopmachine_mutex);
115 return ret; 114 return ret;
116 } 115 }
117 116
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 0110e4414805..d90822c378a4 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -111,15 +111,14 @@ static int subpattern(u8 *pattern, int i, int j, int g)
111 return ret; 111 return ret;
112} 112}
113 113
114static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, 114static void compute_prefix_tbl(struct ts_bm *bm)
115 unsigned int len)
116{ 115{
117 int i, j, g; 116 int i, j, g;
118 117
119 for (i = 0; i < ASIZE; i++) 118 for (i = 0; i < ASIZE; i++)
120 bm->bad_shift[i] = len; 119 bm->bad_shift[i] = bm->patlen;
121 for (i = 0; i < len - 1; i++) 120 for (i = 0; i < bm->patlen - 1; i++)
122 bm->bad_shift[pattern[i]] = len - 1 - i; 121 bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
123 122
124 /* Compute the good shift array, used to match reoccurrences 123
125 * of a subpattern */ 124 * of a subpattern */
@@ -150,8 +149,8 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
150 bm = ts_config_priv(conf); 149 bm = ts_config_priv(conf);
151 bm->patlen = len; 150 bm->patlen = len;
152 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len; 151 bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
153 compute_prefix_tbl(bm, pattern, len);
154 memcpy(bm->pattern, pattern, len); 152 memcpy(bm->pattern, pattern, len);
153 compute_prefix_tbl(bm);
155 154
156 return conf; 155 return conf;
157} 156}
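
The lib/ts_bm.c change only reorders things so that compute_prefix_tbl() reads the pattern out of the ts_bm state after it has been copied in, but the table it fills is the classic Boyer-Moore bad-character shift table. A standalone userspace sketch of that construction, following the same loop structure as the kernel function (illustrative names, not the kernel API):

#include <stdio.h>
#include <string.h>

#define ASIZE 256       /* one shift entry per possible byte value */

static void compute_bad_shift(unsigned int bad_shift[ASIZE],
                              const unsigned char *pattern, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < ASIZE; i++)
                bad_shift[i] = len;             /* default: skip the whole pattern */
        for (i = 0; i < len - 1; i++)
                bad_shift[pattern[i]] = len - 1 - i;
}

int main(void)
{
        const unsigned char pat[] = "example";
        unsigned int shift[ASIZE];

        compute_bad_shift(shift, pat, strlen((const char *)pat));
        printf("shift['e'] = %u, shift['x'] = %u, shift['z'] = %u\n",
               shift['e'], shift['x'], shift['z']);
        return 0;
}
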
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e70d6c6d6fee..f1f5ec783781 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -442,11 +442,12 @@ int swap_type_of(dev_t device)
442 442
443 if (!(swap_info[i].flags & SWP_WRITEOK)) 443 if (!(swap_info[i].flags & SWP_WRITEOK))
444 continue; 444 continue;
445
445 if (!device) { 446 if (!device) {
446 spin_unlock(&swap_lock); 447 spin_unlock(&swap_lock);
447 return i; 448 return i;
448 } 449 }
449 inode = swap_info->swap_file->f_dentry->d_inode; 450 inode = swap_info[i].swap_file->f_dentry->d_inode;
450 if (S_ISBLK(inode->i_mode) && 451 if (S_ISBLK(inode->i_mode) &&
451 device == MKDEV(imajor(inode), iminor(inode))) { 452 device == MKDEV(imajor(inode), iminor(inode))) {
452 spin_unlock(&swap_lock); 453 spin_unlock(&swap_lock);
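
The one-character fix above matters because, on an array, swap_info->swap_file is equivalent to swap_info[0].swap_file, so the old code always inspected the first swap area instead of the one the loop index points at. A tiny illustration with a made-up struct:

#include <stdio.h>

struct area { int id; };

int main(void)
{
        struct area areas[3] = { { 10 }, { 20 }, { 30 } };
        int i = 2;

        /* areas->id dereferences element 0; areas[i].id picks the i-th one. */
        printf("%d vs %d\n", areas->id, areas[i].id);   /* prints "10 vs 30" */
        return 0;
}
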
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 6ccd32b30809..864fbbc7b24d 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -40,11 +40,15 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
40 else { 40 else {
41#ifdef CONFIG_BRIDGE_NETFILTER 41#ifdef CONFIG_BRIDGE_NETFILTER
42 /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */ 42 /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
43 nf_bridge_maybe_copy_header(skb); 43 if (nf_bridge_maybe_copy_header(skb))
44 kfree_skb(skb);
45 else
44#endif 46#endif
45 skb_push(skb, ETH_HLEN); 47 {
48 skb_push(skb, ETH_HLEN);
46 49
47 dev_queue_xmit(skb); 50 dev_queue_xmit(skb);
51 }
48 } 52 }
49 53
50 return 0; 54 return 0;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index c39bff706cfc..090bc39e8199 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -2,7 +2,7 @@
2 * net/dccp/ccids/ccid3.c 2 * net/dccp/ccids/ccid3.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005-6 Ian McDonald <imcdnzl@gmail.com> 5 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * 6 *
7 * An implementation of the DCCP protocol 7 * An implementation of the DCCP protocol
8 * 8 *
@@ -342,6 +342,8 @@ static int ccid3_hc_tx_send_packet(struct sock *sk,
342 new_packet->dccphtx_ccval = 342 new_packet->dccphtx_ccval =
343 DCCP_SKB_CB(skb)->dccpd_ccval = 343 DCCP_SKB_CB(skb)->dccpd_ccval =
344 hctx->ccid3hctx_last_win_count; 344 hctx->ccid3hctx_last_win_count;
345 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
346 hctx->ccid3hctx_t_ipi);
345 } 347 }
346out: 348out:
347 return rc; 349 return rc;
@@ -413,7 +415,8 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
413 case TFRC_SSTATE_NO_FBACK: 415 case TFRC_SSTATE_NO_FBACK:
414 case TFRC_SSTATE_FBACK: 416 case TFRC_SSTATE_FBACK:
415 if (len > 0) { 417 if (len > 0) {
416 hctx->ccid3hctx_t_nom = now; 418 timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
419 hctx->ccid3hctx_t_ipi);
417 ccid3_calc_new_t_ipi(hctx); 420 ccid3_calc_new_t_ipi(hctx);
418 ccid3_calc_new_delta(hctx); 421 ccid3_calc_new_delta(hctx);
419 timeval_add_usecs(&hctx->ccid3hctx_t_nom, 422 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
@@ -757,8 +760,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk)
757 } 760 }
758 761
759 hcrx->ccid3hcrx_tstamp_last_feedback = now; 762 hcrx->ccid3hcrx_tstamp_last_feedback = now;
760 hcrx->ccid3hcrx_last_counter = packet->dccphrx_ccval; 763 hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval;
761 hcrx->ccid3hcrx_seqno_last_counter = packet->dccphrx_seqno;
762 hcrx->ccid3hcrx_bytes_recv = 0; 764 hcrx->ccid3hcrx_bytes_recv = 0;
763 765
764 /* Convert to multiples of 10us */ 766 /* Convert to multiples of 10us */
@@ -782,7 +784,7 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
782 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) 784 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
783 return 0; 785 return 0;
784 786
785 DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_last_counter; 787 DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter;
786 788
787 if (dccp_packet_without_ack(skb)) 789 if (dccp_packet_without_ack(skb))
788 return 0; 790 return 0;
@@ -854,6 +856,11 @@ static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
854 interval = 1; 856 interval = 1;
855 } 857 }
856found: 858found:
859 if (!tail) {
860 LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n",
861 __FUNCTION__);
862 return ~0;
863 }
857 rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval; 864 rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
858 ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n", 865 ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
859 dccp_role(sk), sk, rtt); 866 dccp_role(sk), sk, rtt);
@@ -864,9 +871,20 @@ found:
864 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback); 871 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
865 x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta); 872 x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
866 873
874 if (x_recv == 0)
875 x_recv = hcrx->ccid3hcrx_x_recv;
876
867 tmp1 = (u64)x_recv * (u64)rtt; 877 tmp1 = (u64)x_recv * (u64)rtt;
868 do_div(tmp1,10000000); 878 do_div(tmp1,10000000);
869 tmp2 = (u32)tmp1; 879 tmp2 = (u32)tmp1;
880
881 if (!tmp2) {
882 LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 "
883 "%s: x_recv = %u, rtt =%u\n",
884 __FUNCTION__, x_recv, rtt);
885 return ~0;
886 }
887
870 fval = (hcrx->ccid3hcrx_s * 100000) / tmp2; 888 fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
871 /* do not alter order above or you will get overflow on 32 bit */ 889 /* do not alter order above or you will get overflow on 32 bit */
872 p = tfrc_calc_x_reverse_lookup(fval); 890 p = tfrc_calc_x_reverse_lookup(fval);
@@ -882,31 +900,101 @@ found:
882static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss) 900static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
883{ 901{
884 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 902 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
903 struct dccp_li_hist_entry *next, *head;
904 u64 seq_temp;
885 905
886 if (seq_loss != DCCP_MAX_SEQNO + 1 && 906 if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
887 list_empty(&hcrx->ccid3hcrx_li_hist)) { 907 if (!dccp_li_hist_interval_new(ccid3_li_hist,
888 struct dccp_li_hist_entry *li_tail; 908 &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
909 return;
889 910
890 li_tail = dccp_li_hist_interval_new(ccid3_li_hist, 911 next = (struct dccp_li_hist_entry *)
891 &hcrx->ccid3hcrx_li_hist, 912 hcrx->ccid3hcrx_li_hist.next;
892 seq_loss, win_loss); 913 next->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
893 if (li_tail == NULL) 914 } else {
915 struct dccp_li_hist_entry *entry;
916 struct list_head *tail;
917
918 head = (struct dccp_li_hist_entry *)
919 hcrx->ccid3hcrx_li_hist.next;
920 /* FIXME win count check removed as was wrong */
921 /* should make this check with receive history */
922 /* and compare there as per section 10.2 of RFC4342 */
923
924 /* new loss event detected */
925 /* calculate last interval length */
926 seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
927 entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
928
929 if (entry == NULL) {
930 printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__);
931 dump_stack();
894 return; 932 return;
895 li_tail->dccplih_interval = ccid3_hc_rx_calc_first_li(sk); 933 }
896 } else 934
897 LIMIT_NETDEBUG(KERN_WARNING "%s: FIXME: find end of " 935 list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);
898 "interval\n", __FUNCTION__); 936
937 tail = hcrx->ccid3hcrx_li_hist.prev;
938 list_del(tail);
939 kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);
940
941 /* Create the newest interval */
942 entry->dccplih_seqno = seq_loss;
943 entry->dccplih_interval = seq_temp;
944 entry->dccplih_win_count = win_loss;
945 }
899} 946}
900 947
901static void ccid3_hc_rx_detect_loss(struct sock *sk) 948static int ccid3_hc_rx_detect_loss(struct sock *sk,
949 struct dccp_rx_hist_entry *packet)
902{ 950{
903 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); 951 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
904 u8 win_loss; 952 struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
905 const u64 seq_loss = dccp_rx_hist_detect_loss(&hcrx->ccid3hcrx_hist, 953 u64 seqno = packet->dccphrx_seqno;
906 &hcrx->ccid3hcrx_li_hist, 954 u64 tmp_seqno;
907 &win_loss); 955 int loss = 0;
956 u8 ccval;
957
958
959 tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
960
961 if (!rx_hist ||
962 follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
963 hcrx->ccid3hcrx_seqno_nonloss = seqno;
964 hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
965 goto detect_out;
966 }
967
908 968
909 ccid3_hc_rx_update_li(sk, seq_loss, win_loss); 969 while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
970 > TFRC_RECV_NUM_LATE_LOSS) {
971 loss = 1;
972 ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss,
973 hcrx->ccid3hcrx_ccval_nonloss);
974 tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
975 dccp_inc_seqno(&tmp_seqno);
976 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
977 dccp_inc_seqno(&tmp_seqno);
978 while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
979 tmp_seqno, &ccval)) {
980 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
981 hcrx->ccid3hcrx_ccval_nonloss = ccval;
982 dccp_inc_seqno(&tmp_seqno);
983 }
984 }
985
986 /* FIXME - this code could be simplified with above while */
987 /* but works at moment */
988 if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
989 hcrx->ccid3hcrx_seqno_nonloss = seqno;
990 hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
991 }
992
993detect_out:
994 dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
995 &hcrx->ccid3hcrx_li_hist, packet,
996 hcrx->ccid3hcrx_seqno_nonloss);
997 return loss;
910} 998}
911 999
912static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) 1000static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
@@ -916,8 +1004,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
916 struct dccp_rx_hist_entry *packet; 1004 struct dccp_rx_hist_entry *packet;
917 struct timeval now; 1005 struct timeval now;
918 u8 win_count; 1006 u8 win_count;
919 u32 p_prev, r_sample, t_elapsed; 1007 u32 p_prev, rtt_prev, r_sample, t_elapsed;
920 int ins; 1008 int loss;
921 1009
922 BUG_ON(hcrx == NULL || 1010 BUG_ON(hcrx == NULL ||
923 !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA || 1011 !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
@@ -932,7 +1020,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
932 case DCCP_PKT_DATAACK: 1020 case DCCP_PKT_DATAACK:
933 if (opt_recv->dccpor_timestamp_echo == 0) 1021 if (opt_recv->dccpor_timestamp_echo == 0)
934 break; 1022 break;
935 p_prev = hcrx->ccid3hcrx_rtt; 1023 rtt_prev = hcrx->ccid3hcrx_rtt;
936 dccp_timestamp(sk, &now); 1024 dccp_timestamp(sk, &now);
937 timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10); 1025 timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
938 r_sample = timeval_usecs(&now); 1026 r_sample = timeval_usecs(&now);
@@ -951,8 +1039,8 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
951 hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 + 1039 hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
952 r_sample / 10; 1040 r_sample / 10;
953 1041
954 if (p_prev != hcrx->ccid3hcrx_rtt) 1042 if (rtt_prev != hcrx->ccid3hcrx_rtt)
955 ccid3_pr_debug("%s, New RTT=%luus, elapsed time=%u\n", 1043 ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
956 dccp_role(sk), hcrx->ccid3hcrx_rtt, 1044 dccp_role(sk), hcrx->ccid3hcrx_rtt,
957 opt_recv->dccpor_elapsed_time); 1045 opt_recv->dccpor_elapsed_time);
958 break; 1046 break;
@@ -973,8 +1061,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
973 1061
974 win_count = packet->dccphrx_ccval; 1062 win_count = packet->dccphrx_ccval;
975 1063
976 ins = dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist, 1064 loss = ccid3_hc_rx_detect_loss(sk, packet);
977 &hcrx->ccid3hcrx_li_hist, packet);
978 1065
979 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK) 1066 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
980 return; 1067 return;
@@ -991,7 +1078,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
991 case TFRC_RSTATE_DATA: 1078 case TFRC_RSTATE_DATA:
992 hcrx->ccid3hcrx_bytes_recv += skb->len - 1079 hcrx->ccid3hcrx_bytes_recv += skb->len -
993 dccp_hdr(skb)->dccph_doff * 4; 1080 dccp_hdr(skb)->dccph_doff * 4;
994 if (ins != 0) 1081 if (loss)
995 break; 1082 break;
996 1083
997 dccp_timestamp(sk, &now); 1084 dccp_timestamp(sk, &now);
@@ -1012,7 +1099,6 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1012 ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n", 1099 ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
1013 dccp_role(sk), sk, dccp_state_name(sk->sk_state)); 1100 dccp_role(sk), sk, dccp_state_name(sk->sk_state));
1014 1101
1015 ccid3_hc_rx_detect_loss(sk);
1016 p_prev = hcrx->ccid3hcrx_p; 1102 p_prev = hcrx->ccid3hcrx_p;
1017 1103
1018 /* Calculate loss event rate */ 1104 /* Calculate loss event rate */
@@ -1022,6 +1108,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1022 /* Scaling up by 1000000 as fixed decimal */ 1108 /* Scaling up by 1000000 as fixed decimal */
1023 if (i_mean != 0) 1109 if (i_mean != 0)
1024 hcrx->ccid3hcrx_p = 1000000 / i_mean; 1110 hcrx->ccid3hcrx_p = 1000000 / i_mean;
1111 } else {
1112 printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__);
1113 dump_stack();
1025 } 1114 }
1026 1115
1027 if (hcrx->ccid3hcrx_p > p_prev) { 1116 if (hcrx->ccid3hcrx_p > p_prev) {
@@ -1230,7 +1319,7 @@ static __exit void ccid3_module_exit(void)
1230} 1319}
1231module_exit(ccid3_module_exit); 1320module_exit(ccid3_module_exit);
1232 1321
1233MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, " 1322MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
1234 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); 1323 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
1235MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID"); 1324MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
1236MODULE_LICENSE("GPL"); 1325MODULE_LICENSE("GPL");
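
The receiver-side hunks above keep the loss event rate as a fixed-point value scaled by 1,000,000 (hcrx->ccid3hcrx_p = 1000000 / i_mean), since the kernel avoids floating point. A small userspace sketch of that conversion, with an assumed mean loss interval:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Mean loss interval in packets, e.g. one loss event every 250 packets. */
        uint32_t i_mean = 250;

        /* p is kept scaled by 1e6, so p == 4000 here stands for a loss
         * event rate of 0.004. */
        uint32_t p = i_mean ? 1000000 / i_mean : 0;

        printf("i_mean=%u -> p=%u (%.4f)\n", i_mean, p, p / 1000000.0);
        return 0;
}
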
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 5ade4f668b22..0a2cb7536d26 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -1,13 +1,13 @@
1/* 1/*
2 * net/dccp/ccids/ccid3.h 2 * net/dccp/ccids/ccid3.h
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * 7 *
8 * This code has been developed by the University of Waikato WAND 8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/ 9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz 10 * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
11 * 11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its 12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors: 13 * authors:
@@ -120,9 +120,10 @@ struct ccid3_hc_rx_sock {
120#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv 120#define ccid3hcrx_x_recv ccid3hcrx_tfrc.tfrcrx_x_recv
121#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt 121#define ccid3hcrx_rtt ccid3hcrx_tfrc.tfrcrx_rtt
122#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p 122#define ccid3hcrx_p ccid3hcrx_tfrc.tfrcrx_p
123 u64 ccid3hcrx_seqno_last_counter:48, 123 u64 ccid3hcrx_seqno_nonloss:48,
124 ccid3hcrx_ccval_nonloss:4,
124 ccid3hcrx_state:8, 125 ccid3hcrx_state:8,
125 ccid3hcrx_last_counter:4; 126 ccid3hcrx_ccval_last_counter:4;
126 u32 ccid3hcrx_bytes_recv; 127 u32 ccid3hcrx_bytes_recv;
127 struct timeval ccid3hcrx_tstamp_last_feedback; 128 struct timeval ccid3hcrx_tstamp_last_feedback;
128 struct timeval ccid3hcrx_tstamp_last_ack; 129 struct timeval ccid3hcrx_tstamp_last_ack;
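
The struct change above repacks one 64-bit word: a 48-bit non-loss sequence number now shares it with a 4-bit non-loss ccval, the 8-bit state and the 4-bit last-counter ccval. A userspace sketch of the same packing (64-bit-wide bitfields are a compiler extension that gcc and the kernel rely on; the field names below are only illustrative):

#include <stdio.h>
#include <stdint.h>

struct rx_fields {
        uint64_t seqno_nonloss:48,      /* 48 + 4 + 8 + 4 = 64 bits total */
                 ccval_nonloss:4,
                 state:8,
                 ccval_last_counter:4;
};

int main(void)
{
        struct rx_fields f = { .seqno_nonloss = (1ULL << 47) + 5,
                               .ccval_nonloss = 9, .state = 3,
                               .ccval_last_counter = 15 };

        /* With gcc this usually occupies a single 8-byte word. */
        printf("sizeof = %zu, seqno = %llu, ccval = %u\n",
               sizeof(f), (unsigned long long)f.seqno_nonloss,
               (unsigned)f.ccval_nonloss);
        return 0;
}
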
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 5d7b7d864385..906c81ab9d4f 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -2,7 +2,7 @@
2 * net/dccp/ccids/lib/loss_interval.c 2 * net/dccp/ccids/lib/loss_interval.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 5 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <net/sock.h>
15 16
16#include "loss_interval.h" 17#include "loss_interval.h"
17 18
@@ -90,13 +91,13 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
90 u32 w_tot = 0; 91 u32 w_tot = 0;
91 92
92 list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) { 93 list_for_each_entry_safe(li_entry, li_next, list, dccplih_node) {
93 if (i < DCCP_LI_HIST_IVAL_F_LENGTH) { 94 if (li_entry->dccplih_interval != ~0) {
94 i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i]; 95 i_tot0 += li_entry->dccplih_interval * dccp_li_hist_w[i];
95 w_tot += dccp_li_hist_w[i]; 96 w_tot += dccp_li_hist_w[i];
97 if (i != 0)
98 i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
96 } 99 }
97 100
98 if (i != 0)
99 i_tot1 += li_entry->dccplih_interval * dccp_li_hist_w[i - 1];
100 101
101 if (++i > DCCP_LI_HIST_IVAL_F_LENGTH) 102 if (++i > DCCP_LI_HIST_IVAL_F_LENGTH)
102 break; 103 break;
@@ -107,37 +108,36 @@ u32 dccp_li_hist_calc_i_mean(struct list_head *list)
107 108
108 i_tot = max(i_tot0, i_tot1); 109 i_tot = max(i_tot0, i_tot1);
109 110
110 /* FIXME: Why do we do this? -Ian McDonald */ 111 if (!w_tot) {
111 if (i_tot * 4 < w_tot) 112 LIMIT_NETDEBUG(KERN_WARNING "%s: w_tot = 0\n", __FUNCTION__);
112 i_tot = w_tot * 4; 113 return 1;
114 }
113 115
114 return i_tot * 4 / w_tot; 116 return i_tot / w_tot;
115} 117}
116 118
117EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean); 119EXPORT_SYMBOL_GPL(dccp_li_hist_calc_i_mean);
118 120
119struct dccp_li_hist_entry *dccp_li_hist_interval_new(struct dccp_li_hist *hist, 121int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
120 struct list_head *list, 122 struct list_head *list, const u64 seq_loss, const u8 win_loss)
121 const u64 seq_loss,
122 const u8 win_loss)
123{ 123{
124 struct dccp_li_hist_entry *tail = NULL, *entry; 124 struct dccp_li_hist_entry *entry;
125 int i; 125 int i;
126 126
127 for (i = 0; i <= DCCP_LI_HIST_IVAL_F_LENGTH; ++i) { 127 for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
128 entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC); 128 entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
129 if (entry == NULL) { 129 if (entry == NULL) {
130 dccp_li_hist_purge(hist, list); 130 dccp_li_hist_purge(hist, list);
131 return NULL; 131 dump_stack();
132 return 0;
132 } 133 }
133 if (tail == NULL) 134 entry->dccplih_interval = ~0;
134 tail = entry;
135 list_add(&entry->dccplih_node, list); 135 list_add(&entry->dccplih_node, list);
136 } 136 }
137 137
138 entry->dccplih_seqno = seq_loss; 138 entry->dccplih_seqno = seq_loss;
139 entry->dccplih_win_count = win_loss; 139 entry->dccplih_win_count = win_loss;
140 return tail; 140 return 1;
141} 141}
142 142
143EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new); 143EXPORT_SYMBOL_GPL(dccp_li_hist_interval_new);
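
dccp_li_hist_calc_i_mean() computes the TFRC average loss interval: recent intervals are combined with decaying weights, the weighted sum is taken both with and without the interval that is still open, and the larger of the two averages wins so the estimate does not collapse just because the current interval is still short (RFC 3448, section 5.4). A userspace sketch with the same weights the DCCP code uses (4, 4, 4, 4, 3, 2, 1, 1); helper and variable names are illustrative:

#include <stdio.h>
#include <stdint.h>

#define NINTERVALS 8

/* TFRC weights, newest interval first, as in dccp_li_hist_w[]. */
static const uint32_t w[NINTERVALS] = { 4, 4, 4, 4, 3, 2, 1, 1 };

/* intervals[0] is the (still open) current loss interval,
 * intervals[1..NINTERVALS] are the most recent closed ones. */
static uint32_t calc_i_mean(const uint32_t intervals[NINTERVALS + 1])
{
        uint64_t i_tot0 = 0, i_tot1 = 0, w_tot = 0;
        int i;

        for (i = 0; i < NINTERVALS; i++) {
                i_tot0 += (uint64_t)intervals[i] * w[i];      /* includes the open interval */
                i_tot1 += (uint64_t)intervals[i + 1] * w[i];  /* closed intervals only */
                w_tot  += w[i];
        }
        /* Taking the larger sum keeps the average from dropping while the
         * current interval is still accumulating packets. */
        return (uint32_t)((i_tot0 > i_tot1 ? i_tot0 : i_tot1) / w_tot);
}

int main(void)
{
        uint32_t hist[NINTERVALS + 1] = { 50, 200, 250, 300, 220, 180, 260, 240, 210 };

        printf("i_mean = %u packets\n", calc_i_mean(hist));
        return 0;
}
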
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 43bf78269d1d..0ae85f0340b2 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -4,7 +4,7 @@
4 * net/dccp/ccids/lib/loss_interval.h 4 * net/dccp/ccids/lib/loss_interval.h
5 * 5 *
6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
@@ -52,9 +52,6 @@ extern void dccp_li_hist_purge(struct dccp_li_hist *hist,
52 52
53extern u32 dccp_li_hist_calc_i_mean(struct list_head *list); 53extern u32 dccp_li_hist_calc_i_mean(struct list_head *list);
54 54
55extern struct dccp_li_hist_entry * 55extern int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
56 dccp_li_hist_interval_new(struct dccp_li_hist *hist, 56 struct list_head *list, const u64 seq_loss, const u8 win_loss);
57 struct list_head *list,
58 const u64 seq_loss,
59 const u8 win_loss);
60#endif /* _DCCP_LI_HIST_ */ 57#endif /* _DCCP_LI_HIST_ */
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index ad98d6a322eb..b876c9c81c65 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -1,13 +1,13 @@
1/* 1/*
2 * net/dccp/packet_history.h 2 * net/dccp/packet_history.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * 7 *
8 * This code has been developed by the University of Waikato WAND 8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/ 9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz 10 * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
11 * 11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its 12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors: 13 * authors:
@@ -112,64 +112,27 @@ struct dccp_rx_hist_entry *
112 112
113EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet); 113EXPORT_SYMBOL_GPL(dccp_rx_hist_find_data_packet);
114 114
115int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, 115void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
116 struct list_head *rx_list, 116 struct list_head *rx_list,
117 struct list_head *li_list, 117 struct list_head *li_list,
118 struct dccp_rx_hist_entry *packet) 118 struct dccp_rx_hist_entry *packet,
119 u64 nonloss_seqno)
119{ 120{
120 struct dccp_rx_hist_entry *entry, *next, *iter; 121 struct dccp_rx_hist_entry *entry, *next;
121 u8 num_later = 0; 122 u8 num_later = 0;
122 123
123 iter = dccp_rx_hist_head(rx_list); 124 list_add(&packet->dccphrx_node, rx_list);
124 if (iter == NULL)
125 dccp_rx_hist_add_entry(rx_list, packet);
126 else {
127 const u64 seqno = packet->dccphrx_seqno;
128
129 if (after48(seqno, iter->dccphrx_seqno))
130 dccp_rx_hist_add_entry(rx_list, packet);
131 else {
132 if (dccp_rx_hist_entry_data_packet(iter))
133 num_later = 1;
134
135 list_for_each_entry_continue(iter, rx_list,
136 dccphrx_node) {
137 if (after48(seqno, iter->dccphrx_seqno)) {
138 dccp_rx_hist_add_entry(&iter->dccphrx_node,
139 packet);
140 goto trim_history;
141 }
142
143 if (dccp_rx_hist_entry_data_packet(iter))
144 num_later++;
145 125
146 if (num_later == TFRC_RECV_NUM_LATE_LOSS) {
147 dccp_rx_hist_entry_delete(hist, packet);
148 return 1;
149 }
150 }
151
152 if (num_later < TFRC_RECV_NUM_LATE_LOSS)
153 dccp_rx_hist_add_entry(rx_list, packet);
154 /*
155 * FIXME: else what? should we destroy the packet
156 * like above?
157 */
158 }
159 }
160
161trim_history:
162 /*
163 * Trim history (remove all packets after the NUM_LATE_LOSS + 1
164 * data packets)
165 */
166 num_later = TFRC_RECV_NUM_LATE_LOSS + 1; 126 num_later = TFRC_RECV_NUM_LATE_LOSS + 1;
167 127
168 if (!list_empty(li_list)) { 128 if (!list_empty(li_list)) {
169 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) { 129 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
170 if (num_later == 0) { 130 if (num_later == 0) {
171 list_del_init(&entry->dccphrx_node); 131 if (after48(nonloss_seqno,
172 dccp_rx_hist_entry_delete(hist, entry); 132 entry->dccphrx_seqno)) {
133 list_del_init(&entry->dccphrx_node);
134 dccp_rx_hist_entry_delete(hist, entry);
135 }
173 } else if (dccp_rx_hist_entry_data_packet(entry)) 136 } else if (dccp_rx_hist_entry_data_packet(entry))
174 --num_later; 137 --num_later;
175 } 138 }
@@ -217,94 +180,10 @@ trim_history:
217 --num_later; 180 --num_later;
218 } 181 }
219 } 182 }
220
221 return 0;
222} 183}
223 184
224EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet); 185EXPORT_SYMBOL_GPL(dccp_rx_hist_add_packet);
225 186
226u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
227 struct list_head *li_list, u8 *win_loss)
228{
229 struct dccp_rx_hist_entry *entry, *next, *packet;
230 struct dccp_rx_hist_entry *a_loss = NULL;
231 struct dccp_rx_hist_entry *b_loss = NULL;
232 u64 seq_loss = DCCP_MAX_SEQNO + 1;
233 u8 num_later = TFRC_RECV_NUM_LATE_LOSS;
234
235 list_for_each_entry_safe(entry, next, rx_list, dccphrx_node) {
236 if (num_later == 0) {
237 b_loss = entry;
238 break;
239 } else if (dccp_rx_hist_entry_data_packet(entry))
240 --num_later;
241 }
242
243 if (b_loss == NULL)
244 goto out;
245
246 num_later = 1;
247 list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
248 if (num_later == 0) {
249 a_loss = entry;
250 break;
251 } else if (dccp_rx_hist_entry_data_packet(entry))
252 --num_later;
253 }
254
255 if (a_loss == NULL) {
256 if (list_empty(li_list)) {
257 /* no loss event have occured yet */
258 LIMIT_NETDEBUG("%s: TODO: find a lost data packet by "
259 "comparing to initial seqno\n",
260 __FUNCTION__);
261 goto out;
262 } else {
263 LIMIT_NETDEBUG("%s: Less than 4 data pkts in history!",
264 __FUNCTION__);
265 goto out;
266 }
267 }
268
269 /* Locate a lost data packet */
270 entry = packet = b_loss;
271 list_for_each_entry_safe_continue(entry, next, rx_list, dccphrx_node) {
272 u64 delta = dccp_delta_seqno(entry->dccphrx_seqno,
273 packet->dccphrx_seqno);
274
275 if (delta != 0) {
276 if (dccp_rx_hist_entry_data_packet(packet))
277 --delta;
278 /*
279 * FIXME: check this, probably this % usage is because
280 * in earlier drafts the ndp count was just 8 bits
281 * long, but now it cam be up to 24 bits long.
282 */
283#if 0
284 if (delta % DCCP_NDP_LIMIT !=
285 (packet->dccphrx_ndp -
286 entry->dccphrx_ndp) % DCCP_NDP_LIMIT)
287#endif
288 if (delta != packet->dccphrx_ndp - entry->dccphrx_ndp) {
289 seq_loss = entry->dccphrx_seqno;
290 dccp_inc_seqno(&seq_loss);
291 }
292 }
293 packet = entry;
294 if (packet == a_loss)
295 break;
296 }
297out:
298 if (seq_loss != DCCP_MAX_SEQNO + 1)
299 *win_loss = a_loss->dccphrx_ccval;
300 else
301 *win_loss = 0; /* Paranoia */
302
303 return seq_loss;
304}
305
306EXPORT_SYMBOL_GPL(dccp_rx_hist_detect_loss);
307
308struct dccp_tx_hist *dccp_tx_hist_new(const char *name) 187struct dccp_tx_hist *dccp_tx_hist_new(const char *name)
309{ 188{
310 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC); 189 struct dccp_tx_hist *hist = kmalloc(sizeof(*hist), GFP_ATOMIC);
@@ -365,6 +244,25 @@ struct dccp_tx_hist_entry *
365 244
366EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry); 245EXPORT_SYMBOL_GPL(dccp_tx_hist_find_entry);
367 246
247int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
248 u8 *ccval)
249{
250 struct dccp_rx_hist_entry *packet = NULL, *entry;
251
252 list_for_each_entry(entry, list, dccphrx_node)
253 if (entry->dccphrx_seqno == seq) {
254 packet = entry;
255 break;
256 }
257
258 if (packet)
259 *ccval = packet->dccphrx_ccval;
260
261 return packet != NULL;
262}
263
264EXPORT_SYMBOL_GPL(dccp_rx_hist_find_entry);
265
368void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist, 266void dccp_tx_hist_purge_older(struct dccp_tx_hist *hist,
369 struct list_head *list, 267 struct list_head *list,
370 struct dccp_tx_hist_entry *packet) 268 struct dccp_tx_hist_entry *packet)
@@ -391,7 +289,7 @@ void dccp_tx_hist_purge(struct dccp_tx_hist *hist, struct list_head *list)
391 289
392EXPORT_SYMBOL_GPL(dccp_tx_hist_purge); 290EXPORT_SYMBOL_GPL(dccp_tx_hist_purge);
393 291
394MODULE_AUTHOR("Ian McDonald <iam4@cs.waikato.ac.nz>, " 292MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
395 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>"); 293 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
396MODULE_DESCRIPTION("DCCP TFRC library"); 294MODULE_DESCRIPTION("DCCP TFRC library");
397MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 673c209e4e85..067cf1c85a37 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -1,13 +1,13 @@
1/* 1/*
2 * net/dccp/packet_history.h 2 * net/dccp/packet_history.h
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * 7 *
8 * This code has been developed by the University of Waikato WAND 8 * This code has been developed by the University of Waikato WAND
9 * research group. For further information please see http://www.wand.net.nz/ 9 * research group. For further information please see http://www.wand.net.nz/
10 * or e-mail Ian McDonald - iam4@cs.waikato.ac.nz 10 * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
11 * 11 *
12 * This code also uses code from Lulea University, rereleased as GPL by its 12 * This code also uses code from Lulea University, rereleased as GPL by its
13 * authors: 13 * authors:
@@ -106,6 +106,8 @@ static inline void dccp_tx_hist_entry_delete(struct dccp_tx_hist *hist,
106extern struct dccp_tx_hist_entry * 106extern struct dccp_tx_hist_entry *
107 dccp_tx_hist_find_entry(const struct list_head *list, 107 dccp_tx_hist_find_entry(const struct list_head *list,
108 const u64 seq); 108 const u64 seq);
109extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
110 u8 *ccval);
109 111
110static inline void dccp_tx_hist_add_entry(struct list_head *list, 112static inline void dccp_tx_hist_add_entry(struct list_head *list,
111 struct dccp_tx_hist_entry *entry) 113 struct dccp_tx_hist_entry *entry)
@@ -164,12 +166,6 @@ static inline void dccp_rx_hist_entry_delete(struct dccp_rx_hist *hist,
164extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist, 166extern void dccp_rx_hist_purge(struct dccp_rx_hist *hist,
165 struct list_head *list); 167 struct list_head *list);
166 168
167static inline void dccp_rx_hist_add_entry(struct list_head *list,
168 struct dccp_rx_hist_entry *entry)
169{
170 list_add(&entry->dccphrx_node, list);
171}
172
173static inline struct dccp_rx_hist_entry * 169static inline struct dccp_rx_hist_entry *
174 dccp_rx_hist_head(struct list_head *list) 170 dccp_rx_hist_head(struct list_head *list)
175{ 171{
@@ -188,10 +184,11 @@ static inline int
188 entry->dccphrx_type == DCCP_PKT_DATAACK; 184 entry->dccphrx_type == DCCP_PKT_DATAACK;
189} 185}
190 186
191extern int dccp_rx_hist_add_packet(struct dccp_rx_hist *hist, 187extern void dccp_rx_hist_add_packet(struct dccp_rx_hist *hist,
192 struct list_head *rx_list, 188 struct list_head *rx_list,
193 struct list_head *li_list, 189 struct list_head *li_list,
194 struct dccp_rx_hist_entry *packet); 190 struct dccp_rx_hist_entry *packet,
191 u64 nonloss_seqno);
195 192
196extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list, 193extern u64 dccp_rx_hist_detect_loss(struct list_head *rx_list,
197 struct list_head *li_list, u8 *win_loss); 194 struct list_head *li_list, u8 *win_loss);
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 130c4c40cfe3..45f30f59ea2a 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -4,7 +4,7 @@
4 * net/dccp/ccids/lib/tfrc.h 4 * net/dccp/ccids/lib/tfrc.h
5 * 5 *
6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 6 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 8 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon 9 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
10 * 10 *
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 4fd2ebebf5a0..44076e0c6591 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -2,7 +2,7 @@
2 * net/dccp/ccids/lib/tfrc_equation.c 2 * net/dccp/ccids/lib/tfrc_equation.c
3 * 3 *
4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. 4 * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
5 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 5 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon 7 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
8 * 8 *
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index d00a2f4ee5dd..a5c5475724c0 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * An implementation of the DCCP protocol 6 * An implementation of the DCCP protocol
7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> 7 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 8 * Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as 11 * under the terms of the GNU General Public License version 2 as
@@ -81,6 +81,14 @@ static inline u64 max48(const u64 seq1, const u64 seq2)
81 return after48(seq1, seq2) ? seq1 : seq2; 81 return after48(seq1, seq2) ? seq1 : seq2;
82} 82}
83 83
84/* is seq1 next seqno after seq2 */
85static inline int follows48(const u64 seq1, const u64 seq2)
86{
87 int diff = (seq1 & 0xFFFF) - (seq2 & 0xFFFF);
88
89 return diff==1;
90}
91
84enum { 92enum {
85 DCCP_MIB_NUM = 0, 93 DCCP_MIB_NUM = 0,
86 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */ 94 DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */
diff --git a/net/dccp/options.c b/net/dccp/options.c
index daf72bb671f0..07a34696ac97 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -4,7 +4,7 @@
4 * An implementation of the DCCP protocol 4 * An implementation of the DCCP protocol
5 * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> 5 * Copyright (c) 2005 Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 6 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
7 * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz> 7 * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index df4854cf598b..8d1d7a6e72a5 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -236,7 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
236 struct arpt_entry *e, *back; 236 struct arpt_entry *e, *back;
237 const char *indev, *outdev; 237 const char *indev, *outdev;
238 void *table_base; 238 void *table_base;
239 struct xt_table_info *private = table->private; 239 struct xt_table_info *private;
240 240
241 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 241 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
242 if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) + 242 if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) +
@@ -248,6 +248,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
248 outdev = out ? out->name : nulldevname; 248 outdev = out ? out->name : nulldevname;
249 249
250 read_lock_bh(&table->lock); 250 read_lock_bh(&table->lock);
251 private = table->private;
251 table_base = (void *)private->entries[smp_processor_id()]; 252 table_base = (void *)private->entries[smp_processor_id()];
252 e = get_entry(table_base, private->hook_entry[hook]); 253 e = get_entry(table_base, private->hook_entry[hook]);
253 back = get_entry(table_base, private->underflow[hook]); 254 back = get_entry(table_base, private->underflow[hook]);
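
The arp_tables hunk defers reading table->private until the table lock is held, so a concurrent ruleset replacement cannot leave arpt_do_table() working on a stale pointer. A minimal sketch of that pattern, with a pthread mutex standing in for the bottom-half read lock (names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct ruleset { int generation; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ruleset rs = { 1 };
static struct ruleset *table_private = &rs;     /* may be swapped by a writer */

static int walk_rules(void)
{
        struct ruleset *priv;
        int gen;

        pthread_mutex_lock(&table_lock);
        priv = table_private;           /* snapshot the pointer under the lock */
        gen = priv->generation;
        pthread_mutex_unlock(&table_lock);
        return gen;
}

int main(void)
{
        printf("generation %d\n", walk_rules());
        return 0;
}
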
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 507adefbc17c..b4f3ffe1b3b4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -201,6 +201,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
201 * See RFC1323 for an explanation of the limit to 14 201 * See RFC1323 for an explanation of the limit to 14
202 */ 202 */
203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 203 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
204 space = min_t(u32, space, *window_clamp);
204 while (space > 65535 && (*rcv_wscale) < 14) { 205 while (space > 65535 && (*rcv_wscale) < 14) {
205 space >>= 1; 206 space >>= 1;
206 (*rcv_wscale)++; 207 (*rcv_wscale)++;
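
The added min_t() caps the space used to size the receive window scale at the window clamp, so a large tcp_rmem/rmem_max setting no longer forces a bigger rcv_wscale than the connection is actually allowed to use. A userspace sketch of the scale-selection loop (a simplification of tcp_select_initial_window(), not the full function):

#include <stdio.h>
#include <stdint.h>

/* Shift until the advertised space fits the 16-bit window field, but never
 * scale for more space than the window clamp allows. */
static unsigned int pick_rcv_wscale(uint32_t space, uint32_t window_clamp)
{
        unsigned int wscale = 0;

        if (space > window_clamp)
                space = window_clamp;           /* the new min_t() clamp */
        while (space > 65535 && wscale < 14) {
                space >>= 1;
                wscale++;
        }
        return wscale;
}

int main(void)
{
        /* Without the clamp a 4 MB rmem limit would still pick wscale 7 even
         * when the connection is clamped to 64 KB; with it, no scaling. */
        printf("wscale = %u\n", pick_rcv_wscale(4 * 1024 * 1024, 65535));
        return 0;
}
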
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b843a650be71..802a1a6b1037 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -944,7 +944,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
944 * comment in that function for the gory details. -acme 944 * comment in that function for the gory details. -acme
945 */ 945 */
946 946
947 sk->sk_gso_type = SKB_GSO_TCPV6; 947 newsk->sk_gso_type = SKB_GSO_TCPV6;
948 __ip6_dst_store(newsk, dst, NULL); 948 __ip6_dst_store(newsk, dst, NULL);
949 949
950 newtcp6sk = (struct tcp6_sock *)newsk; 950 newtcp6sk = (struct tcp6_sock *)newsk;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 4f11f5858209..17b509282cf2 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -806,38 +806,26 @@ no_mem:
806 806
807/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ 807/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
808struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, 808struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
809 const struct sctp_chunk *chunk, 809 const struct msghdr *msg,
810 const struct msghdr *msg) 810 size_t paylen)
811{ 811{
812 struct sctp_chunk *retval; 812 struct sctp_chunk *retval;
813 void *payload = NULL, *payoff; 813 void *payload = NULL;
814 size_t paylen = 0; 814 int err;
815 struct iovec *iov = NULL;
816 int iovlen = 0;
817
818 if (msg) {
819 iov = msg->msg_iov;
820 iovlen = msg->msg_iovlen;
821 paylen = get_user_iov_size(iov, iovlen);
822 }
823 815
824 retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen); 816 retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen);
825 if (!retval) 817 if (!retval)
826 goto err_chunk; 818 goto err_chunk;
827 819
828 if (paylen) { 820 if (paylen) {
829 /* Put the msg_iov together into payload. */ 821 /* Put the msg_iov together into payload. */
830 payload = kmalloc(paylen, GFP_ATOMIC); 822 payload = kmalloc(paylen, GFP_KERNEL);
831 if (!payload) 823 if (!payload)
832 goto err_payload; 824 goto err_payload;
833 payoff = payload;
834 825
835 for (; iovlen > 0; --iovlen) { 826 err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
836 if (copy_from_user(payoff, iov->iov_base,iov->iov_len)) 827 if (err < 0)
837 goto err_copy; 828 goto err_copy;
838 payoff += iov->iov_len;
839 iov++;
840 }
841 } 829 }
842 830
843 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); 831 sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
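
sctp_make_abort_user() now takes the already-computed payload length and gathers the user iovec with a single memcpy_fromiovec() call instead of an open-coded copy_from_user() loop. A userspace analogue of that gather step: plain memcpy instead of a user-space copy, and it assumes the iovec really covers len bytes (illustrative names):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Gather 'len' bytes from the iovec array into 'kdata'. */
static void gather_from_iovec(unsigned char *kdata, const struct iovec *iov,
                              size_t len)
{
        while (len > 0) {
                size_t chunk = iov->iov_len < len ? iov->iov_len : len;

                memcpy(kdata, iov->iov_base, chunk);
                kdata += chunk;
                len -= chunk;
                iov++;
        }
}

int main(void)
{
        char a[] = "abort ", b[] = "reason";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = 6 },
                { .iov_base = b, .iov_len = 6 },
        };
        unsigned char payload[13] = { 0 };

        gather_from_iovec(payload, iov, 12);
        printf("%s\n", (char *)payload);        /* prints "abort reason" */
        return 0;
}
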
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index ead3f1b0ea3d..5b5ae7958322 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4031,18 +4031,12 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
4031 * from its upper layer, but retransmits data to the far end 4031 * from its upper layer, but retransmits data to the far end
4032 * if necessary to fill gaps. 4032 * if necessary to fill gaps.
4033 */ 4033 */
4034 struct msghdr *msg = arg; 4034 struct sctp_chunk *abort = arg;
4035 struct sctp_chunk *abort;
4036 sctp_disposition_t retval; 4035 sctp_disposition_t retval;
4037 4036
4038 retval = SCTP_DISPOSITION_CONSUME; 4037 retval = SCTP_DISPOSITION_CONSUME;
4039 4038
4040 /* Generate ABORT chunk to send the peer. */ 4039 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4041 abort = sctp_make_abort_user(asoc, NULL, msg);
4042 if (!abort)
4043 retval = SCTP_DISPOSITION_NOMEM;
4044 else
4045 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4046 4040
4047 /* Even if we can't send the ABORT due to low memory delete the 4041 /* Even if we can't send the ABORT due to low memory delete the
4048 * TCB. This is a departure from our typical NOMEM handling. 4042 * TCB. This is a departure from our typical NOMEM handling.
@@ -4166,8 +4160,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4166 void *arg, 4160 void *arg,
4167 sctp_cmd_seq_t *commands) 4161 sctp_cmd_seq_t *commands)
4168{ 4162{
4169 struct msghdr *msg = arg; 4163 struct sctp_chunk *abort = arg;
4170 struct sctp_chunk *abort;
4171 sctp_disposition_t retval; 4164 sctp_disposition_t retval;
4172 4165
4173 /* Stop T1-init timer */ 4166 /* Stop T1-init timer */
@@ -4175,12 +4168,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
4175 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); 4168 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
4176 retval = SCTP_DISPOSITION_CONSUME; 4169 retval = SCTP_DISPOSITION_CONSUME;
4177 4170
4178 /* Generate ABORT chunk to send the peer */ 4171 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4179 abort = sctp_make_abort_user(asoc, NULL, msg);
4180 if (!abort)
4181 retval = SCTP_DISPOSITION_NOMEM;
4182 else
4183 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4184 4172
4185 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, 4173 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
4186 SCTP_STATE(SCTP_STATE_CLOSED)); 4174 SCTP_STATE(SCTP_STATE_CLOSED));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 54722e622e6d..fde3f55bfd4b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1520,8 +1520,16 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1520 goto out_unlock; 1520 goto out_unlock;
1521 } 1521 }
1522 if (sinfo_flags & SCTP_ABORT) { 1522 if (sinfo_flags & SCTP_ABORT) {
1523 struct sctp_chunk *chunk;
1524
1525 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1526 if (!chunk) {
1527 err = -ENOMEM;
1528 goto out_unlock;
1529 }
1530
1523 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); 1531 SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc);
1524 sctp_primitive_ABORT(asoc, msg); 1532 sctp_primitive_ABORT(asoc, chunk);
1525 err = 0; 1533 err = 0;
1526 goto out_unlock; 1534 goto out_unlock;
1527 } 1535 }
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 4a9aa9393b97..ef1cf5b476c8 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -718,8 +718,7 @@ gss_destroy(struct rpc_auth *auth)
718 auth, auth->au_flavor); 718 auth, auth->au_flavor);
719 719
720 gss_auth = container_of(auth, struct gss_auth, rpc_auth); 720 gss_auth = container_of(auth, struct gss_auth, rpc_auth);
721 rpc_unlink(gss_auth->path); 721 rpc_unlink(gss_auth->dentry);
722 dput(gss_auth->dentry);
723 gss_auth->dentry = NULL; 722 gss_auth->dentry = NULL;
724 gss_mech_put(gss_auth->mech); 723 gss_mech_put(gss_auth->mech);
725 724
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d6409e757219..3e19d321067a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -183,8 +183,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
183 183
184out_no_auth: 184out_no_auth:
185 if (!IS_ERR(clnt->cl_dentry)) { 185 if (!IS_ERR(clnt->cl_dentry)) {
186 rpc_rmdir(clnt->cl_pathname); 186 rpc_rmdir(clnt->cl_dentry);
187 dput(clnt->cl_dentry);
188 rpc_put_mount(); 187 rpc_put_mount();
189 } 188 }
190out_no_path: 189out_no_path:
@@ -251,10 +250,8 @@ rpc_clone_client(struct rpc_clnt *clnt)
251 new->cl_autobind = 0; 250 new->cl_autobind = 0;
252 new->cl_oneshot = 0; 251 new->cl_oneshot = 0;
253 new->cl_dead = 0; 252 new->cl_dead = 0;
254 if (!IS_ERR(new->cl_dentry)) { 253 if (!IS_ERR(new->cl_dentry))
255 dget(new->cl_dentry); 254 dget(new->cl_dentry);
256 rpc_get_mount();
257 }
258 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 255 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
259 if (new->cl_auth) 256 if (new->cl_auth)
260 atomic_inc(&new->cl_auth->au_count); 257 atomic_inc(&new->cl_auth->au_count);
@@ -317,11 +314,15 @@ rpc_destroy_client(struct rpc_clnt *clnt)
317 clnt->cl_auth = NULL; 314 clnt->cl_auth = NULL;
318 } 315 }
319 if (clnt->cl_parent != clnt) { 316 if (clnt->cl_parent != clnt) {
317 if (!IS_ERR(clnt->cl_dentry))
318 dput(clnt->cl_dentry);
320 rpc_destroy_client(clnt->cl_parent); 319 rpc_destroy_client(clnt->cl_parent);
321 goto out_free; 320 goto out_free;
322 } 321 }
323 if (clnt->cl_pathname[0]) 322 if (!IS_ERR(clnt->cl_dentry)) {
324 rpc_rmdir(clnt->cl_pathname); 323 rpc_rmdir(clnt->cl_dentry);
324 rpc_put_mount();
325 }
325 if (clnt->cl_xprt) { 326 if (clnt->cl_xprt) {
326 xprt_destroy(clnt->cl_xprt); 327 xprt_destroy(clnt->cl_xprt);
327 clnt->cl_xprt = NULL; 328 clnt->cl_xprt = NULL;
@@ -331,10 +332,6 @@ rpc_destroy_client(struct rpc_clnt *clnt)
331out_free: 332out_free:
332 rpc_free_iostats(clnt->cl_metrics); 333 rpc_free_iostats(clnt->cl_metrics);
333 clnt->cl_metrics = NULL; 334 clnt->cl_metrics = NULL;
334 if (!IS_ERR(clnt->cl_dentry)) {
335 dput(clnt->cl_dentry);
336 rpc_put_mount();
337 }
338 kfree(clnt); 335 kfree(clnt);
339 return 0; 336 return 0;
340} 337}
@@ -1184,6 +1181,17 @@ call_verify(struct rpc_task *task)
1184 u32 *p = iov->iov_base, n; 1181 u32 *p = iov->iov_base, n;
1185 int error = -EACCES; 1182 int error = -EACCES;
1186 1183
1184 if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
1185 /* RFC-1014 says that the representation of XDR data must be a
1186 * multiple of four bytes
1187 * - if it isn't pointer subtraction in the NFS client may give
1188 * undefined results
1189 */
1190 printk(KERN_WARNING
1191 "call_verify: XDR representation not a multiple of"
1192 " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len);
1193 goto out_eio;
1194 }
1187 if ((len -= 3) < 0) 1195 if ((len -= 3) < 0)
1188 goto out_overflow; 1196 goto out_overflow;
1189 p += 1; /* skip XID */ 1197 p += 1; /* skip XID */
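
The call_verify() addition rejects any RPC reply whose length is not a multiple of four bytes, because XDR (RFC 1014) encodes everything in 4-byte units and the pointer arithmetic further down assumes that alignment. The check itself is just a mask test, as in this small sketch:

#include <stdio.h>
#include <stdint.h>

/* XDR data is always a whole number of 4-byte units. */
static int xdr_len_ok(uint32_t len)
{
        return (len & 3) == 0;
}

int main(void)
{
        printf("%d %d\n", xdr_len_ok(32), xdr_len_ok(33));      /* prints "1 0" */
        return 0;
}
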
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index a3bd2db2e024..0b1a1ac8a4bc 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -539,6 +539,7 @@ repeat:
539 rpc_close_pipes(dentry->d_inode); 539 rpc_close_pipes(dentry->d_inode);
540 simple_unlink(dir, dentry); 540 simple_unlink(dir, dentry);
541 } 541 }
542 inode_dir_notify(dir, DN_DELETE);
542 dput(dentry); 543 dput(dentry);
543 } while (n); 544 } while (n);
544 goto repeat; 545 goto repeat;
@@ -610,8 +611,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
610 int error; 611 int error;
611 612
612 shrink_dcache_parent(dentry); 613 shrink_dcache_parent(dentry);
613 if (dentry->d_inode) 614 if (d_unhashed(dentry))
614 rpc_close_pipes(dentry->d_inode); 615 return 0;
615 if ((error = simple_rmdir(dir, dentry)) != 0) 616 if ((error = simple_rmdir(dir, dentry)) != 0)
616 return error; 617 return error;
617 if (!error) { 618 if (!error) {
@@ -684,28 +685,20 @@ err_dput:
684} 685}
685 686
686int 687int
687rpc_rmdir(char *path) 688rpc_rmdir(struct dentry *dentry)
688{ 689{
689 struct nameidata nd; 690 struct dentry *parent;
690 struct dentry *dentry;
691 struct inode *dir; 691 struct inode *dir;
692 int error; 692 int error;
693 693
694 if ((error = rpc_lookup_parent(path, &nd)) != 0) 694 parent = dget_parent(dentry);
695 return error; 695 dir = parent->d_inode;
696 dir = nd.dentry->d_inode;
697 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 696 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
698 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len);
699 if (IS_ERR(dentry)) {
700 error = PTR_ERR(dentry);
701 goto out_release;
702 }
703 rpc_depopulate(dentry); 697 rpc_depopulate(dentry);
704 error = __rpc_rmdir(dir, dentry); 698 error = __rpc_rmdir(dir, dentry);
705 dput(dentry); 699 dput(dentry);
706out_release:
707 mutex_unlock(&dir->i_mutex); 700 mutex_unlock(&dir->i_mutex);
708 rpc_release_path(&nd); 701 dput(parent);
709 return error; 702 return error;
710} 703}
711 704
@@ -746,32 +739,26 @@ err_dput:
746} 739}
747 740
748int 741int
749rpc_unlink(char *path) 742rpc_unlink(struct dentry *dentry)
750{ 743{
751 struct nameidata nd; 744 struct dentry *parent;
752 struct dentry *dentry;
753 struct inode *dir; 745 struct inode *dir;
754 int error; 746 int error = 0;
755 747
756 if ((error = rpc_lookup_parent(path, &nd)) != 0) 748 parent = dget_parent(dentry);
757 return error; 749 dir = parent->d_inode;
758 dir = nd.dentry->d_inode;
759 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 750 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
760 dentry = lookup_one_len(nd.last.name, nd.dentry, nd.last.len); 751 if (!d_unhashed(dentry)) {
761 if (IS_ERR(dentry)) { 752 d_drop(dentry);
762 error = PTR_ERR(dentry); 753 if (dentry->d_inode) {
763 goto out_release; 754 rpc_close_pipes(dentry->d_inode);
764 } 755 error = simple_unlink(dir, dentry);
765 d_drop(dentry); 756 }
766 if (dentry->d_inode) { 757 inode_dir_notify(dir, DN_DELETE);
767 rpc_close_pipes(dentry->d_inode);
768 error = simple_unlink(dir, dentry);
769 } 758 }
770 dput(dentry); 759 dput(dentry);
771 inode_dir_notify(dir, DN_DELETE);
772out_release:
773 mutex_unlock(&dir->i_mutex); 760 mutex_unlock(&dir->i_mutex);
774 rpc_release_path(&nd); 761 dput(parent);
775 return error; 762 return error;
776} 763}
777 764