-rw-r--r--  Documentation/DocBook/scsi.tmpl | 2
-rw-r--r--  Documentation/input/input-programming.txt | 2
-rw-r--r--  Documentation/scsi/ChangeLog.arcmsr | 41
-rw-r--r--  Documentation/scsi/scsi_mid_low_api.txt | 2
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  arch/arm/mach-pxa/tosa.c | 43
-rw-r--r--  arch/m68k/kernel/process.c | 2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 2
-rw-r--r--  drivers/base/firmware_class.c | 3
-rw-r--r--  drivers/char/drm/i915_drv.h | 2
-rw-r--r--  drivers/char/keyboard.c | 5
-rw-r--r--  drivers/char/pcmcia/Kconfig | 9
-rw-r--r--  drivers/char/pcmcia/Makefile | 2
-rw-r--r--  drivers/char/pcmcia/ipwireless/Makefile | 10
-rw-r--r--  drivers/char/pcmcia/ipwireless/hardware.c | 1787
-rw-r--r--  drivers/char/pcmcia/ipwireless/hardware.h | 64
-rw-r--r--  drivers/char/pcmcia/ipwireless/main.c | 501
-rw-r--r--  drivers/char/pcmcia/ipwireless/main.h | 70
-rw-r--r--  drivers/char/pcmcia/ipwireless/network.c | 512
-rw-r--r--  drivers/char/pcmcia/ipwireless/network.h | 55
-rw-r--r--  drivers/char/pcmcia/ipwireless/setup_protocol.h | 108
-rw-r--r--  drivers/char/pcmcia/ipwireless/tty.c | 688
-rw-r--r--  drivers/char/pcmcia/ipwireless/tty.h | 48
-rw-r--r--  drivers/input/Kconfig | 12
-rw-r--r--  drivers/input/Makefile | 1
-rw-r--r--  drivers/input/apm-power.c | 131
-rw-r--r--  drivers/input/evdev.c | 6
-rw-r--r--  drivers/input/input-polldev.c | 18
-rw-r--r--  drivers/input/input.c | 85
-rw-r--r--  drivers/input/joystick/amijoy.c | 1
-rw-r--r--  drivers/input/joystick/analog.c | 1
-rw-r--r--  drivers/input/joystick/db9.c | 1
-rw-r--r--  drivers/input/joystick/gamecon.c | 1
-rw-r--r--  drivers/input/joystick/iforce/iforce-main.c | 17
-rw-r--r--  drivers/input/joystick/turbografx.c | 1
-rw-r--r--  drivers/input/joystick/xpad.c | 1
-rw-r--r--  drivers/input/keyboard/Kconfig | 29
-rw-r--r--  drivers/input/keyboard/Makefile | 3
-rw-r--r--  drivers/input/keyboard/atkbd.c | 91
-rw-r--r--  drivers/input/keyboard/lkkbd.c | 1
-rw-r--r--  drivers/input/keyboard/pxa27x_keyboard.c | 274
-rw-r--r--  drivers/input/keyboard/pxa27x_keypad.c | 572
-rw-r--r--  drivers/input/keyboard/tosakbd.c | 415
-rw-r--r--  drivers/input/misc/Kconfig | 14
-rw-r--r--  drivers/input/misc/Makefile | 1
-rw-r--r--  drivers/input/misc/apanel.c | 378
-rw-r--r--  drivers/input/misc/ati_remote.c | 1
-rw-r--r--  drivers/input/misc/atlas_btns.c | 39
-rw-r--r--  drivers/input/misc/cobalt_btns.c | 73
-rw-r--r--  drivers/input/misc/keyspan_remote.c | 119
-rw-r--r--  drivers/input/mouse/inport.c | 1
-rw-r--r--  drivers/input/mouse/logibm.c | 1
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 1
-rw-r--r--  drivers/input/mouse/trackpoint.c | 1
-rw-r--r--  drivers/input/mousedev.c | 3
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 67
-rw-r--r--  drivers/input/serio/i8042.c | 26
-rw-r--r--  drivers/input/serio/libps2.c | 1
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 8
-rw-r--r--  drivers/input/touchscreen/mk712.c | 1
-rw-r--r--  drivers/input/touchscreen/ucb1400_ts.c | 1
-rw-r--r--  drivers/media/video/usbvideo/konicawc.c | 2
-rw-r--r--  drivers/media/video/usbvideo/quickcam_messenger.c | 2
-rw-r--r--  drivers/misc/Kconfig | 9
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/enclosure.c | 484
-rw-r--r--  drivers/scsi/Kconfig | 93
-rw-r--r--  drivers/scsi/Makefile | 12
-rw-r--r--  drivers/scsi/NCR53C9x.c | 3654
-rw-r--r--  drivers/scsi/NCR53C9x.h | 668
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 81
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 26
-rw-r--r--  drivers/scsi/aacraid/linit.c | 28
-rw-r--r--  drivers/scsi/advansys.c | 6
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 4
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 87
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 14
-rw-r--r--  drivers/scsi/arm/scsi.h | 87
-rw-r--r--  drivers/scsi/blz1230.c | 353
-rw-r--r--  drivers/scsi/blz2060.c | 306
-rw-r--r--  drivers/scsi/cyberstorm.c | 377
-rw-r--r--  drivers/scsi/cyberstormII.c | 314
-rw-r--r--  drivers/scsi/dc395x.c | 2
-rw-r--r--  drivers/scsi/dec_esp.c | 687
-rw-r--r--  drivers/scsi/fastlane.c | 421
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 57
-rw-r--r--  drivers/scsi/libiscsi.c | 137
-rw-r--r--  drivers/scsi/mac_esp.c | 751
-rw-r--r--  drivers/scsi/mca_53c9x.c | 520
-rw-r--r--  drivers/scsi/oktagon_esp.c | 606
-rw-r--r--  drivers/scsi/oktagon_io.S | 194
-rw-r--r--  drivers/scsi/ps3rom.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 24
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 87
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 27
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 404
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 36
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 75
-rw-r--r--  drivers/scsi/scsi.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 238
-rw-r--r--  drivers/scsi/sd.c | 34
-rw-r--r--  drivers/scsi/ses.c | 689
-rw-r--r--  drivers/scsi/sr.c | 49
-rw-r--r--  drivers/scsi/sr.h | 1
-rw-r--r--  drivers/scsi/sr_ioctl.c | 3
-rw-r--r--  drivers/scsi/sun3x_esp.c | 546
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r--  drivers/scsi/u14-34f.c | 2
-rw-r--r--  fs/Kconfig | 7
-rw-r--r--  fs/jfs/file.c | 5
-rw-r--r--  fs/jfs/ioctl.c | 31
-rw-r--r--  fs/jfs/jfs_dinode.h | 2
-rw-r--r--  fs/jfs/jfs_inode.h | 4
-rw-r--r--  fs/jfs/namei.c | 5
-rw-r--r--  fs/nfs/write.c | 20
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 11
-rw-r--r--  fs/ocfs2/dlm/dlmapi.h | 7
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 24
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 195
-rw-r--r--  fs/ocfs2/dlm/dlmfs.c | 15
-rw-r--r--  fs/ocfs2/dlm/userdlm.c | 5
-rw-r--r--  fs/ocfs2/dlm/userdlm.h | 3
-rw-r--r--  fs/ocfs2/dlmglue.c | 29
-rw-r--r--  fs/ocfs2/dlmglue.h | 1
-rw-r--r--  fs/ocfs2/ocfs2.h | 1
-rw-r--r--  fs/ocfs2/ocfs2_lockingver.h | 30
-rw-r--r--  fs/ocfs2/super.c | 1
-rw-r--r--  fs/partitions/check.c | 17
-rw-r--r--  fs/sysfs/group.c | 7
-rw-r--r--  include/asm-arm/arch-pxa/pxa27x_keyboard.h | 13
-rw-r--r--  include/asm-arm/arch-pxa/pxa27x_keypad.h | 56
-rw-r--r--  include/asm-arm/arch-pxa/tosa.h | 30
-rw-r--r--  include/linux/enclosure.h | 129
-rw-r--r--  include/linux/hrtimer.h | 5
-rw-r--r--  include/linux/input.h | 7
-rw-r--r--  include/linux/ktime.h | 3
-rw-r--r--  include/scsi/iscsi_proto.h | 4
-rw-r--r--  include/scsi/libiscsi.h | 30
-rw-r--r--  include/scsi/scsi.h | 14
-rw-r--r--  include/scsi/scsi_host.h | 44
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 43
148 files changed, 8803 insertions, 10581 deletions
diff --git a/Documentation/DocBook/scsi.tmpl b/Documentation/DocBook/scsi.tmpl
index f299ab182bbe..10a150ae2a7e 100644
--- a/Documentation/DocBook/scsi.tmpl
+++ b/Documentation/DocBook/scsi.tmpl
@@ -12,7 +12,7 @@
12 <surname>Bottomley</surname> 12 <surname>Bottomley</surname>
13 <affiliation> 13 <affiliation>
14 <address> 14 <address>
15 <email>James.Bottomley@steeleye.com</email> 15 <email>James.Bottomley@hansenpartnership.com</email>
16 </address> 16 </address>
17 </affiliation> 17 </affiliation>
18 </author> 18 </author>
diff --git a/Documentation/input/input-programming.txt b/Documentation/input/input-programming.txt
index 47fc86830cd7..81905e81585e 100644
--- a/Documentation/input/input-programming.txt
+++ b/Documentation/input/input-programming.txt
@@ -22,7 +22,7 @@ static struct input_dev *button_dev;
22 22
23static void button_interrupt(int irq, void *dummy, struct pt_regs *fp) 23static void button_interrupt(int irq, void *dummy, struct pt_regs *fp)
24{ 24{
25 input_report_key(button_dev, BTN_1, inb(BUTTON_PORT) & 1); 25 input_report_key(button_dev, BTN_0, inb(BUTTON_PORT) & 1);
26 input_sync(button_dev); 26 input_sync(button_dev);
27} 27}
28 28
diff --git a/Documentation/scsi/ChangeLog.arcmsr b/Documentation/scsi/ChangeLog.arcmsr
index cd8403a33ee6..de2bcacfa870 100644
--- a/Documentation/scsi/ChangeLog.arcmsr
+++ b/Documentation/scsi/ChangeLog.arcmsr
@@ -68,4 +68,45 @@
68** 2. modify the arcmsr_pci_slot_reset function 68** 2. modify the arcmsr_pci_slot_reset function
69** 3. modify the arcmsr_pci_ers_disconnect_forepart function 69** 3. modify the arcmsr_pci_ers_disconnect_forepart function
70** 4. modify the arcmsr_pci_ers_need_reset_forepart function 70** 4. modify the arcmsr_pci_ers_need_reset_forepart function
71** 1.20.00.15 09/27/2007 Erich Chen & Nick Cheng
72** 1. add arcmsr_enable_eoi_mode() on adapter Type B
73** 2. add readl(reg->iop2drv_doorbell_reg) in arcmsr_handle_hbb_isr()
74** in case the doorbell interrupt clearance is cached
75** 1.20.00.15 10/01/2007 Erich Chen & Nick Cheng
76** 1. modify acb->devstate[i][j]
77** as ARECA_RAID_GOOD instead of
78** ARECA_RAID_GONE in arcmsr_alloc_ccb_pool
79** 1.20.00.15 11/06/2007 Erich Chen & Nick Cheng
80** 1. add conditional declaration for
81** arcmsr_pci_error_detected() and
82** arcmsr_pci_slot_reset
83** 1.20.00.15 11/23/2007 Erich Chen & Nick Cheng
84** 1.check if the sg list member number
85** exceeds arcmsr default limit in arcmsr_build_ccb()
86** 2.change the returned value type of arcmsr_build_ccb()
87** from "void" to "int"
88** 3.add the conditional check if arcmsr_build_ccb()
89** returns FAILED
90** 1.20.00.15 12/04/2007 Erich Chen & Nick Cheng
91** 1. modify arcmsr_drain_donequeue() to ignore unknown
92** command and let kernel process command timeout.
93** This could handle IO request violating max. segments
94** while Linux XFS over DM-CRYPT.
95** Thanks to Milan Broz's comments <mbroz@redhat.com>
96** 1.20.00.15 12/24/2007 Erich Chen & Nick Cheng
97** 1.fix the portability problems
98** 2.fix type B where we should _not_ iounmap() acb->pmu;
99** it's not ioremapped.
100** 3.add return -ENOMEM if ioremap() fails
101** 4.transfer IS_SG64_ADDR w/ cpu_to_le32()
102** in arcmsr_build_ccb
103** 5. modify acb->devstate[i][j] as ARECA_RAID_GONE instead of
104** ARECA_RAID_GOOD in arcmsr_alloc_ccb_pool()
105** 6.fix arcmsr_cdb->Context as (unsigned long)arcmsr_cdb
106** 7.add the checking state of
107** (outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT) == 0
108** in arcmsr_handle_hba_isr
109** 8.replace pci_alloc_consistent()/pci_free_consistent() with kmalloc()/kfree() in arcmsr_iop_message_xfer()
110** 9. fix the release of dma memory for type B in arcmsr_free_ccb_pool()
111** 10.fix the arcmsr_polling_hbb_ccbdone()
71************************************************************************** 112**************************************************************************
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 6f70f2b9327e..a6d5354639b2 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1407,7 +1407,7 @@ Credits
1407======= 1407=======
1408The following people have contributed to this document: 1408The following people have contributed to this document:
1409 Mike Anderson <andmike at us dot ibm dot com> 1409 Mike Anderson <andmike at us dot ibm dot com>
1410 James Bottomley <James dot Bottomley at steeleye dot com> 1410 James Bottomley <James dot Bottomley at hansenpartnership dot com>
1411 Patrick Mansfield <patmans at us dot ibm dot com> 1411 Patrick Mansfield <patmans at us dot ibm dot com>
1412 Christoph Hellwig <hch at infradead dot org> 1412 Christoph Hellwig <hch at infradead dot org>
1413 Doug Ledford <dledford at redhat dot com> 1413 Doug Ledford <dledford at redhat dot com>
diff --git a/MAINTAINERS b/MAINTAINERS
index aefd23f892ba..2cdb591ac080 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2150,6 +2150,14 @@ M: acme@ghostprotocols.net
2150L: netdev@vger.kernel.org 2150L: netdev@vger.kernel.org
2151S: Maintained 2151S: Maintained
2152 2152
2153IPWIRELESS DRIVER
2154P: Jiri Kosina
2155M: jkosina@suse.cz
2156P: David Sterba
2157M: dsterba@suse.cz
2158S: Maintained
2159T: git://git.kernel.org/pub/scm/linux/kernel/git/jikos/ipwireless_cs.git
2160
2153IRDA SUBSYSTEM 2161IRDA SUBSYSTEM
2154P: Samuel Ortiz 2162P: Samuel Ortiz
2155M: samuel@sortiz.org 2163M: samuel@sortiz.org
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 9b26fa5edad6..f99112d50b41 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -21,6 +21,8 @@
21#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
22#include <linux/pm.h> 22#include <linux/pm.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/gpio_keys.h>
25#include <linux/input.h>
24 26
25#include <asm/setup.h> 27#include <asm/setup.h>
26#include <asm/memory.h> 28#include <asm/memory.h>
@@ -246,6 +248,46 @@ static struct platform_device tosakbd_device = {
246 .id = -1, 248 .id = -1,
247}; 249};
248 250
251static struct gpio_keys_button tosa_gpio_keys[] = {
252 {
253 .type = EV_PWR,
254 .code = KEY_SUSPEND,
255 .gpio = TOSA_GPIO_ON_KEY,
256 .desc = "On key",
257 .wakeup = 1,
258 .active_low = 1,
259 },
260 {
261 .type = EV_KEY,
262 .code = TOSA_KEY_RECORD,
263 .gpio = TOSA_GPIO_RECORD_BTN,
264 .desc = "Record Button",
265 .wakeup = 1,
266 .active_low = 1,
267 },
268 {
269 .type = EV_KEY,
270 .code = TOSA_KEY_SYNC,
271 .gpio = TOSA_GPIO_SYNC,
272 .desc = "Sync Button",
273 .wakeup = 1,
274 .active_low = 1,
275 },
276};
277
278static struct gpio_keys_platform_data tosa_gpio_keys_platform_data = {
279 .buttons = tosa_gpio_keys,
280 .nbuttons = ARRAY_SIZE(tosa_gpio_keys),
281};
282
283static struct platform_device tosa_gpio_keys_device = {
284 .name = "gpio-keys",
285 .id = -1,
286 .dev = {
287 .platform_data = &tosa_gpio_keys_platform_data,
288 },
289};
290
249/* 291/*
250 * Tosa LEDs 292 * Tosa LEDs
251 */ 293 */
@@ -258,6 +300,7 @@ static struct platform_device *devices[] __initdata = {
258 &tosascoop_device, 300 &tosascoop_device,
259 &tosascoop_jc_device, 301 &tosascoop_jc_device,
260 &tosakbd_device, 302 &tosakbd_device,
303 &tosa_gpio_keys_device,
261 &tosaled_device, 304 &tosaled_device,
262}; 305};
263 306
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 3ee918695215..f85b928ffac4 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -335,7 +335,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
335 if (dump->start_stack < TASK_SIZE) 335 if (dump->start_stack < TASK_SIZE)
336 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; 336 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
337 337
338 dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump); 338 dump->u_ar0 = offsetof(struct user, regs);
339 sw = ((struct switch_stack *)regs) - 1; 339 sw = ((struct switch_stack *)regs) - 1;
340 dump->regs.d1 = regs->d1; 340 dump->regs.d1 = regs->d1;
341 dump->regs.d2 = regs->d2; 341 dump->regs.d2 = regs->d2;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e6e49289f788..4b749c416464 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -313,7 +313,7 @@ int main(void)
313 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); 313 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
314 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); 314 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
315 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC); 315 DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
316 DEFINE(CLOCK_REALTIME_RES, (KTIME_MONOTONIC_RES).tv64); 316 DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
317 317
318#ifdef CONFIG_BUG 318#ifdef CONFIG_BUG
319 DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); 319 DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 0295855a3eef..4a1b9bfc5471 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -292,7 +292,8 @@ firmware_class_timeout(u_long data)
292 292
293static inline void fw_setup_device_id(struct device *f_dev, struct device *dev) 293static inline void fw_setup_device_id(struct device *f_dev, struct device *dev)
294{ 294{
295 snprintf(f_dev->bus_id, BUS_ID_SIZE, "firmware-%s", dev->bus_id); 295 /* XXX warning we should watch out for name collisions */
296 strlcpy(f_dev->bus_id, dev->bus_id, BUS_ID_SIZE);
296} 297}
297 298
298static int fw_register_device(struct device **dev_p, const char *fw_name, 299static int fw_register_device(struct device **dev_p, const char *fw_name,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 37bbf6729b4e..f8308bfb2613 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -187,7 +187,7 @@ typedef struct drm_i915_private {
187 u32 saveSWF2[3]; 187 u32 saveSWF2[3];
188 u8 saveMSR; 188 u8 saveMSR;
189 u8 saveSR[8]; 189 u8 saveSR[8];
190 u8 saveGR[24]; 190 u8 saveGR[25];
191 u8 saveAR_INDEX; 191 u8 saveAR_INDEX;
192 u8 saveAR[20]; 192 u8 saveAR[20];
193 u8 saveDACMASK; 193 u8 saveDACMASK;
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index fc54d234507a..4dbd3425e928 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -38,7 +38,6 @@
38#include <linux/kbd_kern.h> 38#include <linux/kbd_kern.h>
39#include <linux/kbd_diacr.h> 39#include <linux/kbd_diacr.h>
40#include <linux/vt_kern.h> 40#include <linux/vt_kern.h>
41#include <linux/consolemap.h>
42#include <linux/sysrq.h> 41#include <linux/sysrq.h>
43#include <linux/input.h> 42#include <linux/input.h>
44#include <linux/reboot.h> 43#include <linux/reboot.h>
@@ -194,7 +193,7 @@ int getkeycode(unsigned int scancode)
194 int error = -ENODEV; 193 int error = -ENODEV;
195 194
196 list_for_each_entry(handle, &kbd_handler.h_list, h_node) { 195 list_for_each_entry(handle, &kbd_handler.h_list, h_node) {
197 error = handle->dev->getkeycode(handle->dev, scancode, &keycode); 196 error = input_get_keycode(handle->dev, scancode, &keycode);
198 if (!error) 197 if (!error)
199 return keycode; 198 return keycode;
200 } 199 }
@@ -208,7 +207,7 @@ int setkeycode(unsigned int scancode, unsigned int keycode)
208 int error = -ENODEV; 207 int error = -ENODEV;
209 208
210 list_for_each_entry(handle, &kbd_handler.h_list, h_node) { 209 list_for_each_entry(handle, &kbd_handler.h_list, h_node) {
211 error = handle->dev->setkeycode(handle->dev, scancode, keycode); 210 error = input_set_keycode(handle->dev, scancode, keycode);
212 if (!error) 211 if (!error)
213 break; 212 break;
214 } 213 }
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index f25facd97bb4..00b8a84b0319 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -43,5 +43,14 @@ config CARDMAN_4040
43 (http://www.omnikey.com/), or a current development version of OpenCT 43 (http://www.omnikey.com/), or a current development version of OpenCT
44 (http://www.opensc.org/). 44 (http://www.opensc.org/).
45 45
46config IPWIRELESS
47 tristate "IPWireless 3G UMTS PCMCIA card support"
48 depends on PCMCIA
49 select PPP
50 help
51 This is a driver for the 3G UMTS PCMCIA card from the IPWireless company. In
52 some countries (for example Czech Republic, T-Mobile ISP) this card
53 is shipped for a service called UMTS 4G.
54
46endmenu 55endmenu
47 56
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
index 0aae20985d57..be8f287aa398 100644
--- a/drivers/char/pcmcia/Makefile
+++ b/drivers/char/pcmcia/Makefile
@@ -4,6 +4,8 @@
4# Makefile for the Linux PCMCIA char device drivers. 4# Makefile for the Linux PCMCIA char device drivers.
5# 5#
6 6
7obj-y += ipwireless/
8
7obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o 9obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
8obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o 10obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
9obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o 11obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
diff --git a/drivers/char/pcmcia/ipwireless/Makefile b/drivers/char/pcmcia/ipwireless/Makefile
new file mode 100644
index 000000000000..b71eb593643d
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/Makefile
@@ -0,0 +1,10 @@
1#
2# drivers/char/pcmcia/ipwireless/Makefile
3#
4# Makefile for the IPWireless driver
5#
6
7obj-$(CONFIG_IPWIRELESS) += ipwireless.o
8
9ipwireless-objs := hardware.o main.o network.o tty.o
10
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c
new file mode 100644
index 000000000000..1f978ff87fa8
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/hardware.c
@@ -0,0 +1,1787 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/slab.h>
24
25#include "hardware.h"
26#include "setup_protocol.h"
27#include "network.h"
28#include "main.h"
29
30static void ipw_send_setup_packet(struct ipw_hardware *hw);
31static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
32 unsigned int address,
33 unsigned char *data, int len,
34 int is_last);
35static void ipwireless_setup_timer(unsigned long data);
36static void handle_received_CTRL_packet(struct ipw_hardware *hw,
37 unsigned int channel_idx, unsigned char *data, int len);
38
39/*#define TIMING_DIAGNOSTICS*/
40
41#ifdef TIMING_DIAGNOSTICS
42
43static struct timing_stats {
44 unsigned long last_report_time;
45 unsigned long read_time;
46 unsigned long write_time;
47 unsigned long read_bytes;
48 unsigned long write_bytes;
49 unsigned long start_time;
50};
51
52static void start_timing(void)
53{
54 timing_stats.start_time = jiffies;
55}
56
57static void end_read_timing(unsigned length)
58{
59 timing_stats.read_time += (jiffies - start_time);
60 timing_stats.read_bytes += length + 2;
61 report_timing();
62}
63
64static void end_write_timing(unsigned length)
65{
66 timing_stats.write_time += (jiffies - start_time);
67 timing_stats.write_bytes += length + 2;
68 report_timing();
69}
70
71static void report_timing(void)
72{
73 unsigned long since = jiffies - timing_stats.last_report_time;
74
75 /* If it's been more than one second... */
76 if (since >= HZ) {
77 int first = (timing_stats.last_report_time == 0);
78
79 timing_stats.last_report_time = jiffies;
80 if (!first)
81 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
82 ": %u us elapsed - read %lu bytes in %u us, "
83 "wrote %lu bytes in %u us\n",
84 jiffies_to_usecs(since),
85 timing_stats.read_bytes,
86 jiffies_to_usecs(timing_stats.read_time),
87 timing_stats.write_bytes,
88 jiffies_to_usecs(timing_stats.write_time));
89
90 timing_stats.read_time = 0;
91 timing_stats.write_time = 0;
92 timing_stats.read_bytes = 0;
93 timing_stats.write_bytes = 0;
94 }
95}
96#else
97static void start_timing(void) { }
98static void end_read_timing(unsigned length) { }
99static void end_write_timing(unsigned length) { }
100#endif
101
102/* Imported IPW definitions */
103
104#define LL_MTU_V1 318
105#define LL_MTU_V2 250
106#define LL_MTU_MAX (LL_MTU_V1 > LL_MTU_V2 ? LL_MTU_V1 : LL_MTU_V2)
107
108#define PRIO_DATA 2
109#define PRIO_CTRL 1
110#define PRIO_SETUP 0
111
112/* Addresses */
113#define ADDR_SETUP_PROT 0
114
115/* Protocol ids */
116enum {
117 /* Identifier for the Com Data protocol */
118 TL_PROTOCOLID_COM_DATA = 0,
119
120 /* Identifier for the Com Control protocol */
121 TL_PROTOCOLID_COM_CTRL = 1,
122
123 /* Identifier for the Setup protocol */
124 TL_PROTOCOLID_SETUP = 2
125};
126
127/* Number of bytes in NL packet header (cannot do
128 * sizeof(nl_packet_header) since it's a bitfield) */
129#define NL_FIRST_PACKET_HEADER_SIZE 3
130
131/* Number of bytes in NL packet header (cannot do
132 * sizeof(nl_packet_header) since it's a bitfield) */
133#define NL_FOLLOWING_PACKET_HEADER_SIZE 1
134
135struct nl_first_packet_header {
136#if defined(__BIG_ENDIAN_BITFIELD)
137 unsigned char packet_rank:2;
138 unsigned char address:3;
139 unsigned char protocol:3;
140#else
141 unsigned char protocol:3;
142 unsigned char address:3;
143 unsigned char packet_rank:2;
144#endif
145 unsigned char length_lsb;
146 unsigned char length_msb;
147};
148
149struct nl_packet_header {
150#if defined(__BIG_ENDIAN_BITFIELD)
151 unsigned char packet_rank:2;
152 unsigned char address:3;
153 unsigned char protocol:3;
154#else
155 unsigned char protocol:3;
156 unsigned char address:3;
157 unsigned char packet_rank:2;
158#endif
159};
160
161/* Value of 'packet_rank' above */
162#define NL_INTERMEDIATE_PACKET 0x0
163#define NL_LAST_PACKET 0x1
164#define NL_FIRST_PACKET 0x2
165
166union nl_packet {
167 /* Network packet header of the first packet (a special case) */
168 struct nl_first_packet_header hdr_first;
169 /* Network packet header of the following packets (if any) */
170 struct nl_packet_header hdr;
171 /* Complete network packet (header + data) */
172 unsigned char rawpkt[LL_MTU_MAX];
173} __attribute__ ((__packed__));
174
175#define HW_VERSION_UNKNOWN -1
176#define HW_VERSION_1 1
177#define HW_VERSION_2 2
178
179/* IPW I/O ports */
180#define IOIER 0x00 /* Interrupt Enable Register */
181#define IOIR 0x02 /* Interrupt Source/ACK register */
182#define IODCR 0x04 /* Data Control Register */
183#define IODRR 0x06 /* Data Read Register */
184#define IODWR 0x08 /* Data Write Register */
185#define IOESR 0x0A /* Embedded Driver Status Register */
186#define IORXR 0x0C /* Rx Fifo Register (Host to Embedded) */
187#define IOTXR 0x0E /* Tx Fifo Register (Embedded to Host) */
188
189/* I/O ports and bit definitions for version 1 of the hardware */
190
191/* IER bits*/
192#define IER_RXENABLED 0x1
193#define IER_TXENABLED 0x2
194
195/* ISR bits */
196#define IR_RXINTR 0x1
197#define IR_TXINTR 0x2
198
199/* DCR bits */
200#define DCR_RXDONE 0x1
201#define DCR_TXDONE 0x2
202#define DCR_RXRESET 0x4
203#define DCR_TXRESET 0x8
204
205/* I/O ports and bit definitions for version 2 of the hardware */
206
207struct MEMCCR {
208 unsigned short reg_config_option; /* PCCOR: Configuration Option Register */
209 unsigned short reg_config_and_status; /* PCCSR: Configuration and Status Register */
 210 unsigned short reg_pin_replacement; /* PCPRR: Pin Replacement Register */
211 unsigned short reg_socket_and_copy; /* PCSCR: Socket and Copy Register */
 212 unsigned short reg_ext_status; /* PCESR: Extended Status Register */
213 unsigned short reg_io_base; /* PCIOB: I/O Base Register */
214};
215
216struct MEMINFREG {
217 unsigned short memreg_tx_old; /* TX Register (R/W) */
218 unsigned short pad1;
219 unsigned short memreg_rx_done; /* RXDone Register (R/W) */
220 unsigned short pad2;
221 unsigned short memreg_rx; /* RX Register (R/W) */
222 unsigned short pad3;
223 unsigned short memreg_pc_interrupt_ack; /* PC intr Ack Register (W) */
224 unsigned short pad4;
225 unsigned long memreg_card_present;/* Mask for Host to check (R) for
226 * CARD_PRESENT_VALUE */
227 unsigned short memreg_tx_new; /* TX2 (new) Register (R/W) */
228};
229
230#define IODMADPR 0x00 /* DMA Data Port Register (R/W) */
231
232#define CARD_PRESENT_VALUE (0xBEEFCAFEUL)
233
234#define MEMTX_TX 0x0001
235#define MEMRX_RX 0x0001
236#define MEMRX_RX_DONE 0x0001
237#define MEMRX_PCINTACKK 0x0001
238#define MEMRX_MEMSPURIOUSINT 0x0001
239
240#define NL_NUM_OF_PRIORITIES 3
241#define NL_NUM_OF_PROTOCOLS 3
242#define NL_NUM_OF_ADDRESSES NO_OF_IPW_CHANNELS
243
244struct ipw_hardware {
245 unsigned int base_port;
246 short hw_version;
247 unsigned short ll_mtu;
248 spinlock_t spinlock;
249
250 int initializing;
251 int init_loops;
252 struct timer_list setup_timer;
253
254 int tx_ready;
255 struct list_head tx_queue[NL_NUM_OF_PRIORITIES];
256 /* True if any packets are queued for transmission */
257 int tx_queued;
258
259 int rx_bytes_queued;
260 struct list_head rx_queue;
261 /* Pool of rx_packet structures that are not currently used. */
262 struct list_head rx_pool;
263 int rx_pool_size;
264 /* True if reception of data is blocked while userspace processes it. */
265 int blocking_rx;
266 /* True if there is RX data ready on the hardware. */
267 int rx_ready;
268 unsigned short last_memtx_serial;
269 /*
270 * Newer versions of the V2 card firmware send serial numbers in the
271 * MemTX register. 'serial_number_detected' is set true when we detect
272 * a non-zero serial number (indicating the new firmware). Thereafter,
273 * the driver can safely ignore the Timer Recovery re-sends to avoid
274 * out-of-sync problems.
275 */
276 int serial_number_detected;
277 struct work_struct work_rx;
278
279 /* True if we are to send the set-up data to the hardware. */
280 int to_setup;
281
282 /* Card has been removed */
283 int removed;
284 /* Saved irq value when we disable the interrupt. */
285 int irq;
286 /* True if this driver is shutting down. */
287 int shutting_down;
288 /* Modem control lines */
289 unsigned int control_lines[NL_NUM_OF_ADDRESSES];
290 struct ipw_rx_packet *packet_assembler[NL_NUM_OF_ADDRESSES];
291
292 struct tasklet_struct tasklet;
293
294 /* The handle for the network layer, for the sending of events to it. */
295 struct ipw_network *network;
296 struct MEMINFREG __iomem *memory_info_regs;
297 struct MEMCCR __iomem *memregs_CCR;
298 void (*reboot_callback) (void *data);
299 void *reboot_callback_data;
300
301 unsigned short __iomem *memreg_tx;
302};
303
304/*
305 * Packet info structure for tx packets.
306 * Note: not all the fields defined here are required for all protocols
307 */
308struct ipw_tx_packet {
309 struct list_head queue;
310 /* channel idx + 1 */
311 unsigned char dest_addr;
312 /* SETUP, CTRL or DATA */
313 unsigned char protocol;
314 /* Length of data block, which starts at the end of this structure */
315 unsigned short length;
316 /* Sending state */
317 /* Offset of where we've sent up to so far */
318 unsigned long offset;
319 /* Count of packet fragments, starting at 0 */
320 int fragment_count;
321
 322 /* Called after packet is sent and before it is freed */
323 void (*packet_callback) (void *cb_data, unsigned int packet_length);
324 void *callback_data;
325};
326
327/* Signals from DTE */
328#define COMCTRL_RTS 0
329#define COMCTRL_DTR 1
330
331/* Signals from DCE */
332#define COMCTRL_CTS 2
333#define COMCTRL_DCD 3
334#define COMCTRL_DSR 4
335#define COMCTRL_RI 5
336
337struct ipw_control_packet_body {
338 /* DTE signal or DCE signal */
339 unsigned char sig_no;
340 /* 0: set signal, 1: clear signal */
341 unsigned char value;
342} __attribute__ ((__packed__));
343
344struct ipw_control_packet {
345 struct ipw_tx_packet header;
346 struct ipw_control_packet_body body;
347};
348
349struct ipw_rx_packet {
350 struct list_head queue;
351 unsigned int capacity;
352 unsigned int length;
353 unsigned int protocol;
354 unsigned int channel_idx;
355};
356
357#ifdef IPWIRELESS_STATE_DEBUG
358int ipwireless_dump_hardware_state(char *p, size_t limit,
359 struct ipw_hardware *hw)
360{
361 return snprintf(p, limit,
362 "debug: initializing=%d\n"
363 "debug: tx_ready=%d\n"
364 "debug: tx_queued=%d\n"
365 "debug: rx_ready=%d\n"
366 "debug: rx_bytes_queued=%d\n"
367 "debug: blocking_rx=%d\n"
368 "debug: removed=%d\n"
369 "debug: hardware.shutting_down=%d\n"
370 "debug: to_setup=%d\n",
371 hw->initializing,
372 hw->tx_ready,
373 hw->tx_queued,
374 hw->rx_ready,
375 hw->rx_bytes_queued,
376 hw->blocking_rx,
377 hw->removed,
378 hw->shutting_down,
379 hw->to_setup);
380}
381#endif
382
383static char *data_type(const unsigned char *buf, unsigned length)
384{
385 struct nl_packet_header *hdr = (struct nl_packet_header *) buf;
386
387 if (length == 0)
388 return " ";
389
390 if (hdr->packet_rank & NL_FIRST_PACKET) {
391 switch (hdr->protocol) {
392 case TL_PROTOCOLID_COM_DATA: return "DATA ";
393 case TL_PROTOCOLID_COM_CTRL: return "CTRL ";
394 case TL_PROTOCOLID_SETUP: return "SETUP";
395 default: return "???? ";
396 }
397 } else
398 return " ";
399}
400
401#define DUMP_MAX_BYTES 64
402
403static void dump_data_bytes(const char *type, const unsigned char *data,
404 unsigned length)
405{
406 char prefix[56];
407
408 sprintf(prefix, IPWIRELESS_PCCARD_NAME ": %s %s ",
409 type, data_type(data, length));
410 print_hex_dump_bytes(prefix, 0, (void *)data,
411 length < DUMP_MAX_BYTES ? length : DUMP_MAX_BYTES);
412}
413
414static int do_send_fragment(struct ipw_hardware *hw, const unsigned char *data,
415 unsigned length)
416{
417 int i;
418 unsigned long flags;
419
420 start_timing();
421
422 if (length == 0)
423 return 0;
424
425 if (length > hw->ll_mtu)
426 return -1;
427
428 if (ipwireless_debug)
429 dump_data_bytes("send", data, length);
430
431 spin_lock_irqsave(&hw->spinlock, flags);
432
433 if (hw->hw_version == HW_VERSION_1) {
434 outw((unsigned short) length, hw->base_port + IODWR);
435
436 for (i = 0; i < length; i += 2) {
437 unsigned short d = data[i];
438 __le16 raw_data;
439
440 if (likely(i + 1 < length))
441 d |= data[i + 1] << 8;
442 raw_data = cpu_to_le16(d);
443 outw(raw_data, hw->base_port + IODWR);
444 }
445
446 outw(DCR_TXDONE, hw->base_port + IODCR);
447 } else if (hw->hw_version == HW_VERSION_2) {
448 outw((unsigned short) length, hw->base_port + IODMADPR);
449
450 for (i = 0; i < length; i += 2) {
451 unsigned short d = data[i];
452 __le16 raw_data;
453
454 if ((i + 1 < length))
455 d |= data[i + 1] << 8;
456 raw_data = cpu_to_le16(d);
457 outw(raw_data, hw->base_port + IODMADPR);
458 }
459 while ((i & 3) != 2) {
460 outw((unsigned short) 0xDEAD, hw->base_port + IODMADPR);
461 i += 2;
462 }
463 writew(MEMRX_RX, &hw->memory_info_regs->memreg_rx);
464 }
465
466 spin_unlock_irqrestore(&hw->spinlock, flags);
467
468 end_write_timing(length);
469
470 return 0;
471}
472
473static int do_send_packet(struct ipw_hardware *hw, struct ipw_tx_packet *packet)
474{
475 unsigned short fragment_data_len;
476 unsigned short data_left = packet->length - packet->offset;
477 unsigned short header_size;
478 union nl_packet pkt;
479
480 header_size =
481 (packet->fragment_count == 0)
482 ? NL_FIRST_PACKET_HEADER_SIZE
483 : NL_FOLLOWING_PACKET_HEADER_SIZE;
484 fragment_data_len = hw->ll_mtu - header_size;
485 if (data_left < fragment_data_len)
486 fragment_data_len = data_left;
487
488 pkt.hdr_first.protocol = packet->protocol;
489 pkt.hdr_first.address = packet->dest_addr;
490 pkt.hdr_first.packet_rank = 0;
491
492 /* First packet? */
493 if (packet->fragment_count == 0) {
494 pkt.hdr_first.packet_rank |= NL_FIRST_PACKET;
495 pkt.hdr_first.length_lsb = (unsigned char) packet->length;
496 pkt.hdr_first.length_msb =
497 (unsigned char) (packet->length >> 8);
498 }
499
500 memcpy(pkt.rawpkt + header_size,
501 ((unsigned char *) packet) + sizeof(struct ipw_tx_packet) +
502 packet->offset, fragment_data_len);
503 packet->offset += fragment_data_len;
504 packet->fragment_count++;
505
506 /* Last packet? (May also be first packet.) */
507 if (packet->offset == packet->length)
508 pkt.hdr_first.packet_rank |= NL_LAST_PACKET;
509 do_send_fragment(hw, pkt.rawpkt, header_size + fragment_data_len);
510
511 /* If this packet has unsent data, then re-queue it. */
512 if (packet->offset < packet->length) {
513 /*
514 * Re-queue it at the head of the highest priority queue so
515 * it goes before all other packets
516 */
517 unsigned long flags;
518
519 spin_lock_irqsave(&hw->spinlock, flags);
520 list_add(&packet->queue, &hw->tx_queue[0]);
521 spin_unlock_irqrestore(&hw->spinlock, flags);
522 } else {
523 if (packet->packet_callback)
524 packet->packet_callback(packet->callback_data,
525 packet->length);
526 kfree(packet);
527 }
528
529 return 0;
530}
531
532static void ipw_setup_hardware(struct ipw_hardware *hw)
533{
534 unsigned long flags;
535
536 spin_lock_irqsave(&hw->spinlock, flags);
537 if (hw->hw_version == HW_VERSION_1) {
538 /* Reset RX FIFO */
539 outw(DCR_RXRESET, hw->base_port + IODCR);
540 /* SB: Reset TX FIFO */
541 outw(DCR_TXRESET, hw->base_port + IODCR);
542
543 /* Enable TX and RX interrupts. */
544 outw(IER_TXENABLED | IER_RXENABLED, hw->base_port + IOIER);
545 } else {
546 /*
547 * Set INTRACK bit (bit 0), which means we must explicitly
548 * acknowledge interrupts by clearing bit 2 of reg_config_and_status.
549 */
550 unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status);
551
552 csr |= 1;
553 writew(csr, &hw->memregs_CCR->reg_config_and_status);
554 }
555 spin_unlock_irqrestore(&hw->spinlock, flags);
556}
557
558/*
559 * If 'packet' is NULL, then this function allocates a new packet, setting its
560 * length to 0 and ensuring it has the specified minimum amount of free space.
561 *
562 * If 'packet' is not NULL, then this function enlarges it if it doesn't
563 * have the specified minimum amount of free space.
564 *
565 */
566static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw,
567 struct ipw_rx_packet *packet,
568 int minimum_free_space)
569{
570
571 if (!packet) {
572 unsigned long flags;
573
574 /*
575 * If this is the first fragment, then we will need to fetch a
576 * packet to put it in.
577 */
578 spin_lock_irqsave(&hw->spinlock, flags);
579 /* If we have one in our pool, then pull it out. */
580 if (!list_empty(&hw->rx_pool)) {
581 packet = list_first_entry(&hw->rx_pool,
582 struct ipw_rx_packet, queue);
583 list_del(&packet->queue);
584 hw->rx_pool_size--;
585 spin_unlock_irqrestore(&hw->spinlock, flags);
586 } else {
587 /* Otherwise allocate a new one. */
588 static int min_capacity = 256;
589 int new_capacity;
590
591 spin_unlock_irqrestore(&hw->spinlock, flags);
592 new_capacity =
593 minimum_free_space > min_capacity
594 ? minimum_free_space
595 : min_capacity;
596 packet = kmalloc(sizeof(struct ipw_rx_packet)
597 + new_capacity, GFP_ATOMIC);
598 if (!packet)
599 return NULL;
600 packet->capacity = new_capacity;
601 }
602 packet->length = 0;
603 }
604
605 /*
606 * If this packet does not have sufficient capacity for the data we
607 * want to add, then make it bigger.
608 */
609 if (packet->length + minimum_free_space > packet->capacity) {
610 struct ipw_rx_packet *old_packet = packet;
611
612 packet = kmalloc(sizeof(struct ipw_rx_packet) +
613 old_packet->length + minimum_free_space,
614 GFP_ATOMIC);
615 if (!packet)
616 return NULL;
617 memcpy(packet, old_packet,
618 sizeof(struct ipw_rx_packet)
619 + old_packet->length);
620 packet->capacity = old_packet->length + minimum_free_space;
621 kfree(old_packet);
622 }
623
624 return packet;
625}
626
627static void pool_free(struct ipw_hardware *hw, struct ipw_rx_packet *packet)
628{
629 if (hw->rx_pool_size > 6)
630 kfree(packet);
631 else {
632 hw->rx_pool_size++;
633 list_add_tail(&packet->queue, &hw->rx_pool);
634 }
635}
636
637static void queue_received_packet(struct ipw_hardware *hw,
638 unsigned int protocol, unsigned int address,
639 unsigned char *data, int length, int is_last)
640{
641 unsigned int channel_idx = address - 1;
642 struct ipw_rx_packet *packet = NULL;
643 unsigned long flags;
644
645 /* Discard packet if channel index is out of range. */
646 if (channel_idx >= NL_NUM_OF_ADDRESSES) {
647 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
648 ": data packet has bad address %u\n", address);
649 return;
650 }
651
652 /*
653 * ->packet_assembler is safe to touch unlocked, this is the only place
654 */
655 if (protocol == TL_PROTOCOLID_COM_DATA) {
656 struct ipw_rx_packet **assem =
657 &hw->packet_assembler[channel_idx];
658
659 /*
 660 * Create a new packet, or if the assembler already contains one,
661 * enlarge it by 'length' bytes.
662 */
663 (*assem) = pool_allocate(hw, *assem, length);
664 if (!(*assem)) {
665 printk(KERN_ERR IPWIRELESS_PCCARD_NAME
666 ": no memory for incomming data packet, dropped!\n");
667 return;
668 }
669 (*assem)->protocol = protocol;
670 (*assem)->channel_idx = channel_idx;
671
672 /* Append this packet data onto existing data. */
673 memcpy((unsigned char *)(*assem) +
674 sizeof(struct ipw_rx_packet)
675 + (*assem)->length, data, length);
676 (*assem)->length += length;
677 if (is_last) {
678 packet = *assem;
679 *assem = NULL;
680 /* Count queued DATA bytes only */
681 spin_lock_irqsave(&hw->spinlock, flags);
682 hw->rx_bytes_queued += packet->length;
683 spin_unlock_irqrestore(&hw->spinlock, flags);
684 }
685 } else {
686 /* If it's a CTRL packet, don't assemble, just queue it. */
687 packet = pool_allocate(hw, NULL, length);
688 if (!packet) {
689 printk(KERN_ERR IPWIRELESS_PCCARD_NAME
690 ": no memory for incomming ctrl packet, dropped!\n");
691 return;
692 }
693 packet->protocol = protocol;
694 packet->channel_idx = channel_idx;
695 memcpy((unsigned char *)packet + sizeof(struct ipw_rx_packet),
696 data, length);
697 packet->length = length;
698 }
699
700 /*
701 * If this is the last packet, then send the assembled packet on to the
702 * network layer.
703 */
704 if (packet) {
705 spin_lock_irqsave(&hw->spinlock, flags);
706 list_add_tail(&packet->queue, &hw->rx_queue);
707 /* Block reception of incoming packets if queue is full. */
708 hw->blocking_rx =
709 hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE;
710
711 spin_unlock_irqrestore(&hw->spinlock, flags);
712 schedule_work(&hw->work_rx);
713 }
714}
715
716/*
717 * Workqueue callback
718 */
719static void ipw_receive_data_work(struct work_struct *work_rx)
720{
721 struct ipw_hardware *hw =
722 container_of(work_rx, struct ipw_hardware, work_rx);
723 unsigned long flags;
724
725 spin_lock_irqsave(&hw->spinlock, flags);
726 while (!list_empty(&hw->rx_queue)) {
727 struct ipw_rx_packet *packet =
728 list_first_entry(&hw->rx_queue,
729 struct ipw_rx_packet, queue);
730
731 if (hw->shutting_down)
732 break;
733 list_del(&packet->queue);
734
735 /*
736 * Note: ipwireless_network_packet_received must be called in a
737 * process context (i.e. via schedule_work) because the tty
738 * output code can sleep in the tty_flip_buffer_push call.
739 */
740 if (packet->protocol == TL_PROTOCOLID_COM_DATA) {
741 if (hw->network != NULL) {
742 /* If the network hasn't been disconnected. */
743 spin_unlock_irqrestore(&hw->spinlock, flags);
744 /*
745 * This must run unlocked due to tty processing
746 * and mutex locking
747 */
748 ipwireless_network_packet_received(
749 hw->network,
750 packet->channel_idx,
751 (unsigned char *)packet
752 + sizeof(struct ipw_rx_packet),
753 packet->length);
754 spin_lock_irqsave(&hw->spinlock, flags);
755 }
756 /* Count queued DATA bytes only */
757 hw->rx_bytes_queued -= packet->length;
758 } else {
759 /*
760 * This is safe to be called locked, callchain does
761 * not block
762 */
763 handle_received_CTRL_packet(hw, packet->channel_idx,
764 (unsigned char *)packet
765 + sizeof(struct ipw_rx_packet),
766 packet->length);
767 }
768 pool_free(hw, packet);
769 /*
770 * Unblock reception of incoming packets if queue is no longer
771 * full.
772 */
773 hw->blocking_rx =
774 hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE;
775 if (hw->shutting_down)
776 break;
777 }
778 spin_unlock_irqrestore(&hw->spinlock, flags);
779}
780
781static void handle_received_CTRL_packet(struct ipw_hardware *hw,
782 unsigned int channel_idx,
783 unsigned char *data, int len)
784{
785 struct ipw_control_packet_body *body =
786 (struct ipw_control_packet_body *) data;
787 unsigned int changed_mask;
788
789 if (len != sizeof(struct ipw_control_packet_body)) {
790 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
791 ": control packet was %d bytes - wrong size!\n",
792 len);
793 return;
794 }
795
796 switch (body->sig_no) {
797 case COMCTRL_CTS:
798 changed_mask = IPW_CONTROL_LINE_CTS;
799 break;
800 case COMCTRL_DCD:
801 changed_mask = IPW_CONTROL_LINE_DCD;
802 break;
803 case COMCTRL_DSR:
804 changed_mask = IPW_CONTROL_LINE_DSR;
805 break;
806 case COMCTRL_RI:
807 changed_mask = IPW_CONTROL_LINE_RI;
808 break;
809 default:
810 changed_mask = 0;
811 }
812
813 if (changed_mask != 0) {
814 if (body->value)
815 hw->control_lines[channel_idx] |= changed_mask;
816 else
817 hw->control_lines[channel_idx] &= ~changed_mask;
818 if (hw->network)
819 ipwireless_network_notify_control_line_change(
820 hw->network,
821 channel_idx,
822 hw->control_lines[channel_idx],
823 changed_mask);
824 }
825}
826
827static void handle_received_packet(struct ipw_hardware *hw,
828 union nl_packet *packet,
829 unsigned short len)
830{
831 unsigned int protocol = packet->hdr.protocol;
832 unsigned int address = packet->hdr.address;
833 unsigned int header_length;
834 unsigned char *data;
835 unsigned int data_len;
836 int is_last = packet->hdr.packet_rank & NL_LAST_PACKET;
837
838 if (packet->hdr.packet_rank & NL_FIRST_PACKET)
839 header_length = NL_FIRST_PACKET_HEADER_SIZE;
840 else
841 header_length = NL_FOLLOWING_PACKET_HEADER_SIZE;
842
843 data = packet->rawpkt + header_length;
844 data_len = len - header_length;
845 switch (protocol) {
846 case TL_PROTOCOLID_COM_DATA:
847 case TL_PROTOCOLID_COM_CTRL:
848 queue_received_packet(hw, protocol, address, data, data_len,
849 is_last);
850 break;
851 case TL_PROTOCOLID_SETUP:
852 handle_received_SETUP_packet(hw, address, data, data_len,
853 is_last);
854 break;
855 }
856}
857
858static void acknowledge_data_read(struct ipw_hardware *hw)
859{
860 if (hw->hw_version == HW_VERSION_1)
861 outw(DCR_RXDONE, hw->base_port + IODCR);
862 else
863 writew(MEMRX_PCINTACKK,
864 &hw->memory_info_regs->memreg_pc_interrupt_ack);
865}
866
867/*
868 * Retrieve a packet from the IPW hardware.
869 */
870static void do_receive_packet(struct ipw_hardware *hw)
871{
872 unsigned len;
873 unsigned int i;
874 unsigned char pkt[LL_MTU_MAX];
875
876 start_timing();
877
878 if (hw->hw_version == HW_VERSION_1) {
879 len = inw(hw->base_port + IODRR);
880 if (len > hw->ll_mtu) {
881 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
882 ": received a packet of %u bytes - "
883 "longer than the MTU!\n", len);
884 outw(DCR_RXDONE | DCR_RXRESET, hw->base_port + IODCR);
885 return;
886 }
887
888 for (i = 0; i < len; i += 2) {
889 __le16 raw_data = inw(hw->base_port + IODRR);
890 unsigned short data = le16_to_cpu(raw_data);
891
892 pkt[i] = (unsigned char) data;
893 pkt[i + 1] = (unsigned char) (data >> 8);
894 }
895 } else {
896 len = inw(hw->base_port + IODMADPR);
897 if (len > hw->ll_mtu) {
898 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
899 ": received a packet of %u bytes - "
900 "longer than the MTU!\n", len);
901 writew(MEMRX_PCINTACKK,
902 &hw->memory_info_regs->memreg_pc_interrupt_ack);
903 return;
904 }
905
906 for (i = 0; i < len; i += 2) {
907 __le16 raw_data = inw(hw->base_port + IODMADPR);
908 unsigned short data = le16_to_cpu(raw_data);
909
910 pkt[i] = (unsigned char) data;
911 pkt[i + 1] = (unsigned char) (data >> 8);
912 }
913
914 while ((i & 3) != 2) {
915 inw(hw->base_port + IODMADPR);
916 i += 2;
917 }
918 }
919
920 acknowledge_data_read(hw);
921
922 if (ipwireless_debug)
923 dump_data_bytes("recv", pkt, len);
924
925 handle_received_packet(hw, (union nl_packet *) pkt, len);
926
927 end_read_timing(len);
928}
929
930static int get_current_packet_priority(struct ipw_hardware *hw)
931{
932 /*
933 * If we're initializing, don't send anything of higher priority than
934 * PRIO_SETUP. The network layer therefore need not care about
935 * hardware initialization - any of its stuff will simply be queued
936 * until setup is complete.
937 */
938 return (hw->to_setup || hw->initializing
939 ? PRIO_SETUP + 1 :
940 NL_NUM_OF_PRIORITIES);
941}
942
943/*
944 * return 1 if something has been received from hw
945 */
946static int get_packets_from_hw(struct ipw_hardware *hw)
947{
948 int received = 0;
949 unsigned long flags;
950
951 spin_lock_irqsave(&hw->spinlock, flags);
952 while (hw->rx_ready && !hw->blocking_rx) {
953 received = 1;
954 hw->rx_ready--;
955 spin_unlock_irqrestore(&hw->spinlock, flags);
956
957 do_receive_packet(hw);
958
959 spin_lock_irqsave(&hw->spinlock, flags);
960 }
961 spin_unlock_irqrestore(&hw->spinlock, flags);
962
963 return received;
964}
965
966/*
967 * Send pending packet up to given priority, prioritize SETUP data until
968 * hardware is fully setup.
969 *
970 * return 1 if more packets can be sent
971 */
972static int send_pending_packet(struct ipw_hardware *hw, int priority_limit)
973{
974 int more_to_send = 0;
975 unsigned long flags;
976
977 spin_lock_irqsave(&hw->spinlock, flags);
978 if (hw->tx_queued && hw->tx_ready != 0) {
979 int priority;
980 struct ipw_tx_packet *packet = NULL;
981
982 hw->tx_ready--;
983
984 /* Pick a packet */
985 for (priority = 0; priority < priority_limit; priority++) {
986 if (!list_empty(&hw->tx_queue[priority])) {
987 packet = list_first_entry(
988 &hw->tx_queue[priority],
989 struct ipw_tx_packet,
990 queue);
991
992 list_del(&packet->queue);
993
994 break;
995 }
996 }
997 if (!packet) {
998 hw->tx_queued = 0;
999 spin_unlock_irqrestore(&hw->spinlock, flags);
1000 return 0;
1001 }
1002 spin_unlock_irqrestore(&hw->spinlock, flags);
1003
1004 /* Send */
1005 do_send_packet(hw, packet);
1006
1007 /* Check if more to send */
1008 spin_lock_irqsave(&hw->spinlock, flags);
1009 for (priority = 0; priority < priority_limit; priority++)
1010 if (!list_empty(&hw->tx_queue[priority])) {
1011 more_to_send = 1;
1012 break;
1013 }
1014
1015 if (!more_to_send)
1016 hw->tx_queued = 0;
1017 }
1018 spin_unlock_irqrestore(&hw->spinlock, flags);
1019
1020 return more_to_send;
1021}
1022
1023/*
1024 * Send and receive all queued packets.
1025 */
1026static void ipwireless_do_tasklet(unsigned long hw_)
1027{
1028 struct ipw_hardware *hw = (struct ipw_hardware *) hw_;
1029 unsigned long flags;
1030
1031 spin_lock_irqsave(&hw->spinlock, flags);
1032 if (hw->shutting_down) {
1033 spin_unlock_irqrestore(&hw->spinlock, flags);
1034 return;
1035 }
1036
1037 if (hw->to_setup == 1) {
1038 /*
1039 * Initial setup data sent to hardware
1040 */
1041 hw->to_setup = 2;
1042 spin_unlock_irqrestore(&hw->spinlock, flags);
1043
1044 ipw_setup_hardware(hw);
1045 ipw_send_setup_packet(hw);
1046
1047 send_pending_packet(hw, PRIO_SETUP + 1);
1048 get_packets_from_hw(hw);
1049 } else {
1050 int priority_limit = get_current_packet_priority(hw);
1051 int again;
1052
1053 spin_unlock_irqrestore(&hw->spinlock, flags);
1054
1055 do {
1056 again = send_pending_packet(hw, priority_limit);
1057 again |= get_packets_from_hw(hw);
1058 } while (again);
1059 }
1060}
1061
1062/*
1063 * return true if the card is physically present.
1064 */
1065static int is_card_present(struct ipw_hardware *hw)
1066{
1067 if (hw->hw_version == HW_VERSION_1)
1068 return inw(hw->base_port + IOIR) != 0xFFFF;
1069 else
1070 return readl(&hw->memory_info_regs->memreg_card_present) ==
1071 CARD_PRESENT_VALUE;
1072}
1073
1074static irqreturn_t ipwireless_handle_v1_interrupt(int irq,
1075 struct ipw_hardware *hw)
1076{
1077 unsigned short irqn;
1078
1079 irqn = inw(hw->base_port + IOIR);
1080
1081 /* Check if card is present */
1082 if (irqn == 0xFFFF)
1083 return IRQ_NONE;
1084 else if (irqn != 0) {
1085 unsigned short ack = 0;
1086 unsigned long flags;
1087
1088 /* Transmit complete. */
1089 if (irqn & IR_TXINTR) {
1090 ack |= IR_TXINTR;
1091 spin_lock_irqsave(&hw->spinlock, flags);
1092 hw->tx_ready++;
1093 spin_unlock_irqrestore(&hw->spinlock, flags);
1094 }
1095 /* Received data */
1096 if (irqn & IR_RXINTR) {
1097 ack |= IR_RXINTR;
1098 spin_lock_irqsave(&hw->spinlock, flags);
1099 hw->rx_ready++;
1100 spin_unlock_irqrestore(&hw->spinlock, flags);
1101 }
1102 if (ack != 0) {
1103 outw(ack, hw->base_port + IOIR);
1104 tasklet_schedule(&hw->tasklet);
1105 }
1106 return IRQ_HANDLED;
1107 }
1108 return IRQ_NONE;
1109}
1110
1111static void acknowledge_pcmcia_interrupt(struct ipw_hardware *hw)
1112{
1113 unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status);
1114
1115 csr &= 0xfffd;
1116 writew(csr, &hw->memregs_CCR->reg_config_and_status);
1117}
1118
1119static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq,
1120 struct ipw_hardware *hw)
1121{
1122 int tx = 0;
1123 int rx = 0;
1124 int rx_repeat = 0;
1125 int try_mem_tx_old;
1126 unsigned long flags;
1127
1128 do {
1129
1130 unsigned short memtx = readw(hw->memreg_tx);
1131 unsigned short memtx_serial;
1132 unsigned short memrxdone =
1133 readw(&hw->memory_info_regs->memreg_rx_done);
1134
1135 try_mem_tx_old = 0;
1136
1137 /* check whether the interrupt was generated by ipwireless card */
1138 if (!(memtx & MEMTX_TX) && !(memrxdone & MEMRX_RX_DONE)) {
1139
1140 /* check if the card uses memreg_tx_old register */
1141 if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
1142 memtx = readw(&hw->memory_info_regs->memreg_tx_old);
1143 if (memtx & MEMTX_TX) {
1144 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
1145 ": Using memreg_tx_old\n");
1146 hw->memreg_tx =
1147 &hw->memory_info_regs->memreg_tx_old;
1148 } else {
1149 return IRQ_NONE;
1150 }
1151 } else {
1152 return IRQ_NONE;
1153 }
1154 }
1155
1156 /*
1157 * See if the card is physically present. Note that while it is
1158 * powering up, it appears not to be present.
1159 */
1160 if (!is_card_present(hw)) {
1161 acknowledge_pcmcia_interrupt(hw);
1162 return IRQ_HANDLED;
1163 }
1164
1165 memtx_serial = memtx & (unsigned short) 0xff00;
1166 if (memtx & MEMTX_TX) {
1167 writew(memtx_serial, hw->memreg_tx);
1168
1169 if (hw->serial_number_detected) {
1170 if (memtx_serial != hw->last_memtx_serial) {
1171 hw->last_memtx_serial = memtx_serial;
1172 spin_lock_irqsave(&hw->spinlock, flags);
1173 hw->rx_ready++;
1174 spin_unlock_irqrestore(&hw->spinlock, flags);
1175 rx = 1;
1176 } else
1177 /* Ignore 'Timer Recovery' duplicates. */
1178 rx_repeat = 1;
1179 } else {
1180 /*
1181 * If a non-zero serial number is seen, then enable
1182 * serial number checking.
1183 */
1184 if (memtx_serial != 0) {
1185 hw->serial_number_detected = 1;
1186 printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
1187 ": memreg_tx serial num detected\n");
1188
1189 spin_lock_irqsave(&hw->spinlock, flags);
1190 hw->rx_ready++;
1191 spin_unlock_irqrestore(&hw->spinlock, flags);
1192 }
1193 rx = 1;
1194 }
1195 }
1196 if (memrxdone & MEMRX_RX_DONE) {
1197 writew(0, &hw->memory_info_regs->memreg_rx_done);
1198 spin_lock_irqsave(&hw->spinlock, flags);
1199 hw->tx_ready++;
1200 spin_unlock_irqrestore(&hw->spinlock, flags);
1201 tx = 1;
1202 }
1203 if (tx)
1204 writew(MEMRX_PCINTACKK,
1205 &hw->memory_info_regs->memreg_pc_interrupt_ack);
1206
1207 acknowledge_pcmcia_interrupt(hw);
1208
1209 if (tx || rx)
1210 tasklet_schedule(&hw->tasklet);
1211 else if (!rx_repeat) {
1212 if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
1213 if (hw->serial_number_detected)
1214 printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
1215 ": spurious interrupt - new_tx mode\n");
1216 else {
1217 printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
1218 ": no valid memreg_tx value - "
1219 "switching to the old memreg_tx\n");
1220 hw->memreg_tx =
1221 &hw->memory_info_regs->memreg_tx_old;
1222 try_mem_tx_old = 1;
1223 }
1224 } else
1225 printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
1226 ": spurious interrupt - old_tx mode\n");
1227 }
1228
1229 } while (try_mem_tx_old == 1);
1230
1231 return IRQ_HANDLED;
1232}
1233
1234irqreturn_t ipwireless_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1235{
1236 struct ipw_hardware *hw = dev_id;
1237
1238 if (hw->hw_version == HW_VERSION_1)
1239 return ipwireless_handle_v1_interrupt(irq, hw);
1240 else
1241 return ipwireless_handle_v2_v3_interrupt(irq, hw);
1242}
1243
1244static void flush_packets_to_hw(struct ipw_hardware *hw)
1245{
1246 int priority_limit;
1247 unsigned long flags;
1248
1249 spin_lock_irqsave(&hw->spinlock, flags);
1250 priority_limit = get_current_packet_priority(hw);
1251 spin_unlock_irqrestore(&hw->spinlock, flags);
1252
1253 while (send_pending_packet(hw, priority_limit));
1254}
1255
1256static void send_packet(struct ipw_hardware *hw, int priority,
1257 struct ipw_tx_packet *packet)
1258{
1259 unsigned long flags;
1260
1261 spin_lock_irqsave(&hw->spinlock, flags);
1262 list_add_tail(&packet->queue, &hw->tx_queue[priority]);
1263 hw->tx_queued = 1;
1264 spin_unlock_irqrestore(&hw->spinlock, flags);
1265
1266 flush_packets_to_hw(hw);
1267}
1268
1269/* Create data packet, non-atomic allocation */
1270static void *alloc_data_packet(int data_size,
1271 unsigned char dest_addr,
1272 unsigned char protocol)
1273{
1274 struct ipw_tx_packet *packet = kzalloc(
1275 sizeof(struct ipw_tx_packet) + data_size,
1276 GFP_ATOMIC);
1277
1278 if (!packet)
1279 return NULL;
1280
1281 INIT_LIST_HEAD(&packet->queue);
1282 packet->dest_addr = dest_addr;
1283 packet->protocol = protocol;
1284 packet->length = data_size;
1285
1286 return packet;
1287}
1288
1289static void *alloc_ctrl_packet(int header_size,
1290 unsigned char dest_addr,
1291 unsigned char protocol,
1292 unsigned char sig_no)
1293{
1294 /*
1295 * sig_no is located right after ipw_tx_packet struct in every
 1296 * CTRL or SETUP packet, so we can use ipw_control_packet as a
1297 * common struct
1298 */
1299 struct ipw_control_packet *packet = kzalloc(header_size, GFP_ATOMIC);
1300
1301 if (!packet)
1302 return NULL;
1303
1304 INIT_LIST_HEAD(&packet->header.queue);
1305 packet->header.dest_addr = dest_addr;
1306 packet->header.protocol = protocol;
1307 packet->header.length = header_size - sizeof(struct ipw_tx_packet);
1308 packet->body.sig_no = sig_no;
1309
1310 return packet;
1311}
1312
1313int ipwireless_send_packet(struct ipw_hardware *hw, unsigned int channel_idx,
1314 unsigned char *data, unsigned int length,
1315 void (*callback) (void *cb, unsigned int length),
1316 void *callback_data)
1317{
1318 struct ipw_tx_packet *packet;
1319
1320 packet = alloc_data_packet(length,
1321 (unsigned char) (channel_idx + 1),
1322 TL_PROTOCOLID_COM_DATA);
1323 if (!packet)
1324 return -ENOMEM;
1325 packet->packet_callback = callback;
1326 packet->callback_data = callback_data;
1327 memcpy((unsigned char *) packet +
1328 sizeof(struct ipw_tx_packet), data, length);
1329
1330 send_packet(hw, PRIO_DATA, packet);
1331 return 0;
1332}
1333
1334static int set_control_line(struct ipw_hardware *hw, int prio,
1335 unsigned int channel_idx, int line, int state)
1336{
1337 struct ipw_control_packet *packet;
1338 int protocolid = TL_PROTOCOLID_COM_CTRL;
1339
1340 if (prio == PRIO_SETUP)
1341 protocolid = TL_PROTOCOLID_SETUP;
1342
1343 packet = alloc_ctrl_packet(sizeof(struct ipw_control_packet),
1344 (unsigned char) (channel_idx + 1),
1345 protocolid, line);
1346 if (!packet)
1347 return -ENOMEM;
1348 packet->header.length = sizeof(struct ipw_control_packet_body);
1349 packet->body.value = (unsigned char) (state == 0 ? 0 : 1);
1350 send_packet(hw, prio, &packet->header);
1351 return 0;
1352}
1353
1354
1355static int set_DTR(struct ipw_hardware *hw, int priority,
1356 unsigned int channel_idx, int state)
1357{
1358 if (state != 0)
1359 hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_DTR;
1360 else
1361 hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_DTR;
1362
1363 return set_control_line(hw, priority, channel_idx, COMCTRL_DTR, state);
1364}
1365
1366static int set_RTS(struct ipw_hardware *hw, int priority,
1367 unsigned int channel_idx, int state)
1368{
1369 if (state != 0)
1370 hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_RTS;
1371 else
1372 hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_RTS;
1373
1374 return set_control_line(hw, priority, channel_idx, COMCTRL_RTS, state);
1375}
1376
1377int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx,
1378 int state)
1379{
1380 return set_DTR(hw, PRIO_CTRL, channel_idx, state);
1381}
1382
1383int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx,
1384 int state)
1385{
1386 return set_RTS(hw, PRIO_CTRL, channel_idx, state);
1387}
1388
1389struct ipw_setup_get_version_query_packet {
1390 struct ipw_tx_packet header;
1391 struct tl_setup_get_version_qry body;
1392};
1393
1394struct ipw_setup_config_packet {
1395 struct ipw_tx_packet header;
1396 struct tl_setup_config_msg body;
1397};
1398
1399struct ipw_setup_config_done_packet {
1400 struct ipw_tx_packet header;
1401 struct tl_setup_config_done_msg body;
1402};
1403
1404struct ipw_setup_open_packet {
1405 struct ipw_tx_packet header;
1406 struct tl_setup_open_msg body;
1407};
1408
1409struct ipw_setup_info_packet {
1410 struct ipw_tx_packet header;
1411 struct tl_setup_info_msg body;
1412};
1413
1414struct ipw_setup_reboot_msg_ack {
1415 struct ipw_tx_packet header;
1416 struct TlSetupRebootMsgAck body;
1417};
1418
1419/* This handles the actual initialization of the card */
1420static void __handle_setup_get_version_rsp(struct ipw_hardware *hw)
1421{
1422 struct ipw_setup_config_packet *config_packet;
1423 struct ipw_setup_config_done_packet *config_done_packet;
1424 struct ipw_setup_open_packet *open_packet;
1425 struct ipw_setup_info_packet *info_packet;
1426 int port;
1427 unsigned int channel_idx;
1428
1429 /* generate config packet */
1430 for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) {
1431 config_packet = alloc_ctrl_packet(
1432 sizeof(struct ipw_setup_config_packet),
1433 ADDR_SETUP_PROT,
1434 TL_PROTOCOLID_SETUP,
1435 TL_SETUP_SIGNO_CONFIG_MSG);
1436 if (!config_packet)
1437 goto exit_nomem;
1438 config_packet->header.length = sizeof(struct tl_setup_config_msg);
1439 config_packet->body.port_no = port;
1440 config_packet->body.prio_data = PRIO_DATA;
1441 config_packet->body.prio_ctrl = PRIO_CTRL;
1442 send_packet(hw, PRIO_SETUP, &config_packet->header);
1443 }
1444 config_done_packet = alloc_ctrl_packet(
1445 sizeof(struct ipw_setup_config_done_packet),
1446 ADDR_SETUP_PROT,
1447 TL_PROTOCOLID_SETUP,
1448 TL_SETUP_SIGNO_CONFIG_DONE_MSG);
1449 if (!config_done_packet)
1450 goto exit_nomem;
1451 config_done_packet->header.length = sizeof(struct tl_setup_config_done_msg);
1452 send_packet(hw, PRIO_SETUP, &config_done_packet->header);
1453
1454 /* generate open packet */
1455 for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) {
1456 open_packet = alloc_ctrl_packet(
1457 sizeof(struct ipw_setup_open_packet),
1458 ADDR_SETUP_PROT,
1459 TL_PROTOCOLID_SETUP,
1460 TL_SETUP_SIGNO_OPEN_MSG);
1461 if (!open_packet)
1462 goto exit_nomem;
1463 open_packet->header.length = sizeof(struct tl_setup_open_msg);
1464 open_packet->body.port_no = port;
1465 send_packet(hw, PRIO_SETUP, &open_packet->header);
1466 }
1467 for (channel_idx = 0;
1468 channel_idx < NL_NUM_OF_ADDRESSES; channel_idx++) {
1469 int ret;
1470
1471 ret = set_DTR(hw, PRIO_SETUP, channel_idx,
1472 (hw->control_lines[channel_idx] &
1473 IPW_CONTROL_LINE_DTR) != 0);
1474 if (ret) {
1475 printk(KERN_ERR IPWIRELESS_PCCARD_NAME
1476 ": error setting DTR (%d)\n", ret);
1477 return;
1478 }
1479
1480		ret = set_RTS(hw, PRIO_SETUP, channel_idx,
1481			(hw->control_lines[channel_idx] &
1482 IPW_CONTROL_LINE_RTS) != 0);
1483 if (ret) {
1484 printk(KERN_ERR IPWIRELESS_PCCARD_NAME
1485 ": error setting RTS (%d)\n", ret);
1486 return;
1487 }
1488 }
1489 /*
1490 * For NDIS we assume that we are using sync PPP frames, for COM async.
1491 * This driver uses NDIS mode too. We don't bother with translation
1492 * from async -> sync PPP.
1493 */
1494 info_packet = alloc_ctrl_packet(sizeof(struct ipw_setup_info_packet),
1495 ADDR_SETUP_PROT,
1496 TL_PROTOCOLID_SETUP,
1497 TL_SETUP_SIGNO_INFO_MSG);
1498 if (!info_packet)
1499 goto exit_nomem;
1500 info_packet->header.length = sizeof(struct tl_setup_info_msg);
1501 info_packet->body.driver_type = NDISWAN_DRIVER;
1502 info_packet->body.major_version = NDISWAN_DRIVER_MAJOR_VERSION;
1503 info_packet->body.minor_version = NDISWAN_DRIVER_MINOR_VERSION;
1504 send_packet(hw, PRIO_SETUP, &info_packet->header);
1505
1506 /* Initialization is now complete, so we clear the 'to_setup' flag */
1507 hw->to_setup = 0;
1508
1509 return;
1510
1511exit_nomem:
1512 printk(KERN_ERR IPWIRELESS_PCCARD_NAME
1513 ": not enough memory to alloc control packet\n");
1514 hw->to_setup = -1;
1515}
1516
1517static void handle_setup_get_version_rsp(struct ipw_hardware *hw,
1518 unsigned char vers_no)
1519{
1520 del_timer(&hw->setup_timer);
1521 hw->initializing = 0;
1522 printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n");
1523
1524 if (vers_no == TL_SETUP_VERSION)
1525 __handle_setup_get_version_rsp(hw);
1526 else
1527 printk(KERN_ERR
1528 IPWIRELESS_PCCARD_NAME
1529 ": invalid hardware version no %u\n",
1530 (unsigned int) vers_no);
1531}
1532
1533static void ipw_send_setup_packet(struct ipw_hardware *hw)
1534{
1535 struct ipw_setup_get_version_query_packet *ver_packet;
1536
1537 ver_packet = alloc_ctrl_packet(
1538 sizeof(struct ipw_setup_get_version_query_packet),
1539 ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
1540		TL_SETUP_SIGNO_GET_VERSION_QRY);
	if (!ver_packet)
		return;
1541	ver_packet->header.length = sizeof(struct tl_setup_get_version_qry);
1542
1543 /*
1544 * Response is handled in handle_received_SETUP_packet
1545 */
1546 send_packet(hw, PRIO_SETUP, &ver_packet->header);
1547}
1548
1549static void handle_received_SETUP_packet(struct ipw_hardware *hw,
1550 unsigned int address,
1551 unsigned char *data, int len,
1552 int is_last)
1553{
1554 union ipw_setup_rx_msg *rx_msg = (union ipw_setup_rx_msg *) data;
1555
1556 if (address != ADDR_SETUP_PROT) {
1557 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
1558 ": setup packet has bad address %d\n", address);
1559 return;
1560 }
1561
1562 switch (rx_msg->sig_no) {
1563 case TL_SETUP_SIGNO_GET_VERSION_RSP:
1564 if (hw->to_setup)
1565 handle_setup_get_version_rsp(hw,
1566 rx_msg->version_rsp_msg.version);
1567 break;
1568
1569 case TL_SETUP_SIGNO_OPEN_MSG:
1570 if (ipwireless_debug) {
1571 unsigned int channel_idx = rx_msg->open_msg.port_no - 1;
1572
1573 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
1574 ": OPEN_MSG [channel %u] reply received\n",
1575 channel_idx);
1576 }
1577 break;
1578
1579 case TL_SETUP_SIGNO_INFO_MSG_ACK:
1580 if (ipwireless_debug)
1581 printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
1582 ": card successfully configured as NDISWAN\n");
1583 break;
1584
1585 case TL_SETUP_SIGNO_REBOOT_MSG:
1586 if (hw->to_setup)
1587 printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
1588 ": Setup not completed - ignoring reboot msg\n");
1589 else {
1590 struct ipw_setup_reboot_msg_ack *packet;
1591
1592 printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
1593 ": Acknowledging REBOOT message\n");
1594 packet = alloc_ctrl_packet(
1595 sizeof(struct ipw_setup_reboot_msg_ack),
1596 ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
1597				TL_SETUP_SIGNO_REBOOT_MSG_ACK);
			if (!packet)
				break;
1598			packet->header.length =
1599 sizeof(struct TlSetupRebootMsgAck);
1600 send_packet(hw, PRIO_SETUP, &packet->header);
1601 if (hw->reboot_callback)
1602 hw->reboot_callback(hw->reboot_callback_data);
1603 }
1604 break;
1605
1606 default:
1607 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
1608 ": unknown setup message %u received\n",
1609 (unsigned int) rx_msg->sig_no);
1610 }
1611}
1612
1613static void do_close_hardware(struct ipw_hardware *hw)
1614{
1615 unsigned int irqn;
1616
1617 if (hw->hw_version == HW_VERSION_1) {
1618 /* Disable TX and RX interrupts. */
1619 outw(0, hw->base_port + IOIER);
1620
1621 /* Acknowledge any outstanding interrupt requests */
1622 irqn = inw(hw->base_port + IOIR);
1623 if (irqn & IR_TXINTR)
1624 outw(IR_TXINTR, hw->base_port + IOIR);
1625 if (irqn & IR_RXINTR)
1626 outw(IR_RXINTR, hw->base_port + IOIR);
1627
1628 synchronize_irq(hw->irq);
1629 }
1630}
1631
1632struct ipw_hardware *ipwireless_hardware_create(void)
1633{
1634 int i;
1635 struct ipw_hardware *hw =
1636 kzalloc(sizeof(struct ipw_hardware), GFP_KERNEL);
1637
1638 if (!hw)
1639 return NULL;
1640
1641 hw->irq = -1;
1642 hw->initializing = 1;
1643 hw->tx_ready = 1;
1644 hw->rx_bytes_queued = 0;
1645 hw->rx_pool_size = 0;
1646 hw->last_memtx_serial = (unsigned short) 0xffff;
1647 for (i = 0; i < NL_NUM_OF_PRIORITIES; i++)
1648 INIT_LIST_HEAD(&hw->tx_queue[i]);
1649
1650 INIT_LIST_HEAD(&hw->rx_queue);
1651 INIT_LIST_HEAD(&hw->rx_pool);
1652 spin_lock_init(&hw->spinlock);
1653 tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
1654 INIT_WORK(&hw->work_rx, ipw_receive_data_work);
1655 setup_timer(&hw->setup_timer, ipwireless_setup_timer,
1656 (unsigned long) hw);
1657
1658 return hw;
1659}
1660
1661void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
1662 unsigned int base_port,
1663 void __iomem *attr_memory,
1664 void __iomem *common_memory,
1665 int is_v2_card,
1666 void (*reboot_callback) (void *data),
1667 void *reboot_callback_data)
1668{
1669 if (hw->removed) {
1670 hw->removed = 0;
1671 enable_irq(hw->irq);
1672 }
1673 hw->base_port = base_port;
1674 hw->hw_version = is_v2_card ? HW_VERSION_2 : HW_VERSION_1;
1675 hw->ll_mtu = hw->hw_version == HW_VERSION_1 ? LL_MTU_V1 : LL_MTU_V2;
1676 hw->memregs_CCR = (struct MEMCCR __iomem *)
1677 ((unsigned short __iomem *) attr_memory + 0x200);
1678 hw->memory_info_regs = (struct MEMINFREG __iomem *) common_memory;
1679 hw->memreg_tx = &hw->memory_info_regs->memreg_tx_new;
1680 hw->reboot_callback = reboot_callback;
1681 hw->reboot_callback_data = reboot_callback_data;
1682}
1683
1684void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
1685{
1686 hw->initializing = 1;
1687 hw->init_loops = 0;
1688 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
1689 ": waiting for card to start up...\n");
1690 ipwireless_setup_timer((unsigned long) hw);
1691}
1692
1693static void ipwireless_setup_timer(unsigned long data)
1694{
1695 struct ipw_hardware *hw = (struct ipw_hardware *) data;
1696
1697 hw->init_loops++;
1698
1699 if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY &&
1700 hw->hw_version == HW_VERSION_2 &&
1701 hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
1702 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
1703 ": failed to startup using TX2, trying TX\n");
1704
1705 hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old;
1706 hw->init_loops = 0;
1707 }
1708 /* Give up after a certain number of retries */
1709 if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY) {
1710 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
1711 ": card failed to start up!\n");
1712 hw->initializing = 0;
1713 } else {
1714 /* Do not attempt to write to the board if it is not present. */
1715 if (is_card_present(hw)) {
1716 unsigned long flags;
1717
1718 spin_lock_irqsave(&hw->spinlock, flags);
1719 hw->to_setup = 1;
1720 hw->tx_ready = 1;
1721 spin_unlock_irqrestore(&hw->spinlock, flags);
1722 tasklet_schedule(&hw->tasklet);
1723 }
1724
1725 mod_timer(&hw->setup_timer,
1726 jiffies + msecs_to_jiffies(TL_SETUP_VERSION_QRY_TMO));
1727 }
1728}
1729
1730/*
1731 * Stop any interrupts from executing so that, once this function returns,
1732 * other layers of the driver can be sure they won't get any more callbacks.
1733 * It must therefore be called from process context.
1734 */
1735void ipwireless_stop_interrupts(struct ipw_hardware *hw)
1736{
1737 if (!hw->shutting_down) {
1738 /* Tell everyone we are going down. */
1739 hw->shutting_down = 1;
1740 del_timer(&hw->setup_timer);
1741
1742 /* Prevent the hardware from sending any more interrupts */
1743 do_close_hardware(hw);
1744 }
1745}
1746
1747void ipwireless_hardware_free(struct ipw_hardware *hw)
1748{
1749 int i;
1750 struct ipw_rx_packet *rp, *rq;
1751 struct ipw_tx_packet *tp, *tq;
1752
1753 ipwireless_stop_interrupts(hw);
1754
1755 flush_scheduled_work();
1756
1757 for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
1758 if (hw->packet_assembler[i] != NULL)
1759 kfree(hw->packet_assembler[i]);
1760
1761 for (i = 0; i < NL_NUM_OF_PRIORITIES; i++)
1762 list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) {
1763 list_del(&tp->queue);
1764 kfree(tp);
1765 }
1766
1767 list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) {
1768 list_del(&rp->queue);
1769 kfree(rp);
1770 }
1771
1772 list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
1773 list_del(&rp->queue);
1774 kfree(rp);
1775 }
1776 kfree(hw);
1777}
1778
1779/*
1780 * Associate the specified network with this hardware, so that the network
1781 * will receive events from the hardware.
1782 */
1783void ipwireless_associate_network(struct ipw_hardware *hw,
1784 struct ipw_network *network)
1785{
1786 hw->network = network;
1787}
diff --git a/drivers/char/pcmcia/ipwireless/hardware.h b/drivers/char/pcmcia/ipwireless/hardware.h
new file mode 100644
index 000000000000..c83190ffb0e7
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/hardware.h
@@ -0,0 +1,64 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#ifndef _IPWIRELESS_CS_HARDWARE_H_
19#define _IPWIRELESS_CS_HARDWARE_H_
20
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/interrupt.h>
24
25#define IPW_CONTROL_LINE_CTS 0x0001
26#define IPW_CONTROL_LINE_DCD 0x0002
27#define IPW_CONTROL_LINE_DSR 0x0004
28#define IPW_CONTROL_LINE_RI 0x0008
29#define IPW_CONTROL_LINE_DTR 0x0010
30#define IPW_CONTROL_LINE_RTS 0x0020
31
32struct ipw_hardware;
33struct ipw_network;
34
35struct ipw_hardware *ipwireless_hardware_create(void);
36void ipwireless_hardware_free(struct ipw_hardware *hw);
37irqreturn_t ipwireless_interrupt(int irq, void *dev_id, struct pt_regs *regs);
38int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx,
39 int state);
40int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx,
41 int state);
42int ipwireless_send_packet(struct ipw_hardware *hw,
43 unsigned int channel_idx,
44 unsigned char *data,
45 unsigned int length,
46 void (*packet_sent_callback) (void *cb,
47 unsigned int length),
48 void *sent_cb_data);
49void ipwireless_associate_network(struct ipw_hardware *hw,
50 struct ipw_network *net);
51void ipwireless_stop_interrupts(struct ipw_hardware *hw);
52void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
53 unsigned int base_port,
54 void __iomem *attr_memory,
55 void __iomem *common_memory,
56 int is_v2_card,
57 void (*reboot_cb) (void *data),
58 void *reboot_cb_data);
59void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw);
60void ipwireless_sleep(unsigned int tenths);
61int ipwireless_dump_hardware_state(char *p, size_t limit,
62 struct ipw_hardware *hw);
63
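/*
 * Rough lifecycle of this interface, as used by main.c below (a sketch for
 * orientation, not a formal contract; io_base, attr_mem, common_mem,
 * reboot_cb and cb_data are placeholder names):
 *
 *	hw = ipwireless_hardware_create();
 *	ipwireless_init_hardware_v1(hw, io_base, attr_mem, common_mem,
 *				    is_v2_card, reboot_cb, cb_data);
 *	ipwireless_init_hardware_v2_v3(hw);	(arms the setup timer)
 *	...
 *	ipwireless_stop_interrupts(hw);		(process context only)
 *	ipwireless_hardware_free(hw);
 */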
64#endif
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
new file mode 100644
index 000000000000..00c7f8407e3e
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -0,0 +1,501 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#include "hardware.h"
19#include "network.h"
20#include "main.h"
21#include "tty.h"
22
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/io.h>
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30
31#include <pcmcia/version.h>
32#include <pcmcia/cisreg.h>
33#include <pcmcia/device_id.h>
34#include <pcmcia/ss.h>
35#include <pcmcia/ds.h>
36#include <pcmcia/cs.h>
37
38static struct pcmcia_device_id ipw_ids[] = {
39 PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100),
40 PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200),
41 PCMCIA_DEVICE_NULL
42};
43MODULE_DEVICE_TABLE(pcmcia, ipw_ids);
44
45static void ipwireless_detach(struct pcmcia_device *link);
46
47/*
48 * Module params
49 */
50/* Debug mode: more verbose, print sent/recv bytes */
51int ipwireless_debug;
52int ipwireless_loopback;
53int ipwireless_out_queue = 1;
54
55module_param_named(debug, ipwireless_debug, int, 0);
56module_param_named(loopback, ipwireless_loopback, int, 0);
57module_param_named(out_queue, ipwireless_out_queue, int, 0);
58MODULE_PARM_DESC(debug, "switch on debug messages [0]");
59MODULE_PARM_DESC(loopback,
60 "debug: enable ras_raw channel [0]");
61MODULE_PARM_DESC(out_queue, "debug: set size of outgoing queue [1]");
62
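/*
 * Example usage (assuming the module is built as ipwireless.ko; the values
 * are illustrative only):
 *
 *	modprobe ipwireless debug=1 loopback=1 out_queue=4
 *
 * switches on verbose logging, exposes the otherwise hidden ras_raw ttys
 * and allows up to four packets to be queued towards the hardware at once.
 */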
63/* Executes in process context. */
64static void signalled_reboot_work(struct work_struct *work_reboot)
65{
66 struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev,
67 work_reboot);
68 struct pcmcia_device *link = ipw->link;
69 int ret = pccard_reset_card(link->socket);
70
71 if (ret != CS_SUCCESS)
72 cs_error(link, ResetCard, ret);
73}
74
75static void signalled_reboot_callback(void *callback_data)
76{
77 struct ipw_dev *ipw = (struct ipw_dev *) callback_data;
78
79 /* Delegate to process context. */
80 schedule_work(&ipw->work_reboot);
81}
82
83static int config_ipwireless(struct ipw_dev *ipw)
84{
85 struct pcmcia_device *link = ipw->link;
86 int ret;
87 config_info_t conf;
88 tuple_t tuple;
89 unsigned short buf[64];
90 cisparse_t parse;
91 unsigned short cor_value;
92 win_req_t request_attr_memory;
93 win_req_t request_common_memory;
94 memreq_t memreq_attr_memory;
95 memreq_t memreq_common_memory;
96
97 ipw->is_v2_card = 0;
98
99 tuple.Attributes = 0;
100 tuple.TupleData = (cisdata_t *) buf;
101 tuple.TupleDataMax = sizeof(buf);
102 tuple.TupleOffset = 0;
103
104 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
105
106 ret = pcmcia_get_first_tuple(link, &tuple);
107
108 while (ret == 0) {
109 ret = pcmcia_get_tuple_data(link, &tuple);
110
111 if (ret != CS_SUCCESS) {
112 cs_error(link, GetTupleData, ret);
113 goto exit0;
114 }
115 ret = pcmcia_get_next_tuple(link, &tuple);
116 }
117
118 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
119
120 ret = pcmcia_get_first_tuple(link, &tuple);
121
122 if (ret != CS_SUCCESS) {
123 cs_error(link, GetFirstTuple, ret);
124 goto exit0;
125 }
126
127 ret = pcmcia_get_tuple_data(link, &tuple);
128
129 if (ret != CS_SUCCESS) {
130 cs_error(link, GetTupleData, ret);
131 goto exit0;
132 }
133
134 ret = pcmcia_parse_tuple(link, &tuple, &parse);
135
136 if (ret != CS_SUCCESS) {
137 cs_error(link, ParseTuple, ret);
138 goto exit0;
139 }
140
141 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
142 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
143 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
144 link->io.IOAddrLines = 16;
145
146 link->irq.IRQInfo1 = parse.cftable_entry.irq.IRQInfo1;
147
148 /* 0x40 causes it to generate level mode interrupts. */
149 /* 0x04 enables IREQ pin. */
150 cor_value = parse.cftable_entry.index | 0x44;
151 link->conf.ConfigIndex = cor_value;
152
153 /* IRQ and I/O settings */
154 tuple.DesiredTuple = CISTPL_CONFIG;
155
156 ret = pcmcia_get_first_tuple(link, &tuple);
157
158 if (ret != CS_SUCCESS) {
159 cs_error(link, GetFirstTuple, ret);
160 goto exit0;
161 }
162
163 ret = pcmcia_get_tuple_data(link, &tuple);
164
165 if (ret != CS_SUCCESS) {
166 cs_error(link, GetTupleData, ret);
167 goto exit0;
168 }
169
170 ret = pcmcia_parse_tuple(link, &tuple, &parse);
171
172 if (ret != CS_SUCCESS) {
173 cs_error(link, GetTupleData, ret);
174 goto exit0;
175 }
176 link->conf.Attributes = CONF_ENABLE_IRQ;
177 link->conf.ConfigBase = parse.config.base;
178 link->conf.Present = parse.config.rmask[0];
179 link->conf.IntType = INT_MEMORY_AND_IO;
180
181 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
182 link->irq.Handler = ipwireless_interrupt;
183 link->irq.Instance = ipw->hardware;
184
185 ret = pcmcia_request_io(link, &link->io);
186
187 if (ret != CS_SUCCESS) {
188 cs_error(link, RequestIO, ret);
189 goto exit0;
190 }
191
192 /* memory settings */
193
194 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
195
196 ret = pcmcia_get_first_tuple(link, &tuple);
197
198 if (ret != CS_SUCCESS) {
199 cs_error(link, GetFirstTuple, ret);
200 goto exit1;
201 }
202
203 ret = pcmcia_get_tuple_data(link, &tuple);
204
205 if (ret != CS_SUCCESS) {
206 cs_error(link, GetTupleData, ret);
207 goto exit1;
208 }
209
210 ret = pcmcia_parse_tuple(link, &tuple, &parse);
211
212 if (ret != CS_SUCCESS) {
213 cs_error(link, ParseTuple, ret);
214 goto exit1;
215 }
216
217 if (parse.cftable_entry.mem.nwin > 0) {
218 request_common_memory.Attributes =
219 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
220 request_common_memory.Base =
221 parse.cftable_entry.mem.win[0].host_addr;
222 request_common_memory.Size = parse.cftable_entry.mem.win[0].len;
223 if (request_common_memory.Size < 0x1000)
224 request_common_memory.Size = 0x1000;
225 request_common_memory.AccessSpeed = 0;
226
227 ret = pcmcia_request_window(&link, &request_common_memory,
228 &ipw->handle_common_memory);
229
230 if (ret != CS_SUCCESS) {
231 cs_error(link, RequestWindow, ret);
232 goto exit1;
233 }
234
235 memreq_common_memory.CardOffset =
236 parse.cftable_entry.mem.win[0].card_addr;
237 memreq_common_memory.Page = 0;
238
239 ret = pcmcia_map_mem_page(ipw->handle_common_memory,
240 &memreq_common_memory);
241
242 if (ret != CS_SUCCESS) {
243 cs_error(link, MapMemPage, ret);
244 goto exit1;
245 }
246
247 ipw->is_v2_card =
248 parse.cftable_entry.mem.win[0].len == 0x100;
249
250 ipw->common_memory = ioremap(request_common_memory.Base,
251 request_common_memory.Size);
252
253 request_attr_memory.Attributes =
254 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
255 request_attr_memory.Base = 0;
256 request_attr_memory.Size = 0; /* this used to be 0x1000 */
257 request_attr_memory.AccessSpeed = 0;
258
259 ret = pcmcia_request_window(&link, &request_attr_memory,
260 &ipw->handle_attr_memory);
261
262 if (ret != CS_SUCCESS) {
263 cs_error(link, RequestWindow, ret);
264 goto exit2;
265 }
266
267 memreq_attr_memory.CardOffset = 0;
268 memreq_attr_memory.Page = 0;
269
270 ret = pcmcia_map_mem_page(ipw->handle_attr_memory,
271 &memreq_attr_memory);
272
273 if (ret != CS_SUCCESS) {
274 cs_error(link, MapMemPage, ret);
275 goto exit2;
276 }
277
278 ipw->attr_memory = ioremap(request_attr_memory.Base,
279 request_attr_memory.Size);
280 }
281
282 INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
283
284 ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1,
285 ipw->attr_memory, ipw->common_memory,
286 ipw->is_v2_card, signalled_reboot_callback,
287 ipw);
288
289 ret = pcmcia_request_irq(link, &link->irq);
290
291 if (ret != CS_SUCCESS) {
292 cs_error(link, RequestIRQ, ret);
293 goto exit3;
294 }
295
296 /* Look up current Vcc */
297
298 ret = pcmcia_get_configuration_info(link, &conf);
299
300 if (ret != CS_SUCCESS) {
301 cs_error(link, GetConfigurationInfo, ret);
302 goto exit4;
303 }
304
305 printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n",
306 ipw->is_v2_card ? "V2/V3" : "V1");
307 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
308 ": I/O ports 0x%04x-0x%04x, irq %d\n",
309 (unsigned int) link->io.BasePort1,
310 (unsigned int) (link->io.BasePort1 +
311 link->io.NumPorts1 - 1),
312 (unsigned int) link->irq.AssignedIRQ);
313 if (ipw->attr_memory && ipw->common_memory)
314 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
315 ": attr memory 0x%08lx-0x%08lx, "
316 "common memory 0x%08lx-0x%08lx\n",
317 request_attr_memory.Base,
318 request_attr_memory.Base
319 + request_attr_memory.Size - 1,
320 request_common_memory.Base,
321 request_common_memory.Base
322 + request_common_memory.Size - 1);
323
324 ipw->network = ipwireless_network_create(ipw->hardware);
325 if (!ipw->network)
326 goto exit3;
327
328 ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network,
329 ipw->nodes);
330 if (!ipw->tty)
331 goto exit3;
332
333 ipwireless_init_hardware_v2_v3(ipw->hardware);
334
335 /*
336 * Do the RequestConfiguration last, because it enables interrupts.
337 * Then we don't get any interrupts before we're ready for them.
338 */
339 ret = pcmcia_request_configuration(link, &link->conf);
340
341 if (ret != CS_SUCCESS) {
342 cs_error(link, RequestConfiguration, ret);
343 goto exit4;
344 }
345
346 link->dev_node = &ipw->nodes[0];
347
348 return 0;
349
350exit4:
351 pcmcia_disable_device(link);
352exit3:
353 if (ipw->attr_memory) {
354 iounmap(ipw->attr_memory);
355 pcmcia_release_window(ipw->handle_attr_memory);
356 pcmcia_disable_device(link);
357 }
358exit2:
359 if (ipw->common_memory) {
360 iounmap(ipw->common_memory);
361 pcmcia_release_window(ipw->handle_common_memory);
362 }
363exit1:
364 pcmcia_disable_device(link);
365exit0:
366 return -1;
367}
368
369static void release_ipwireless(struct ipw_dev *ipw)
370{
371 struct pcmcia_device *link = ipw->link;
372
373 pcmcia_disable_device(link);
374
375 if (ipw->common_memory)
376 iounmap(ipw->common_memory);
377 if (ipw->attr_memory)
378 iounmap(ipw->attr_memory);
379 if (ipw->common_memory)
380 pcmcia_release_window(ipw->handle_common_memory);
381 if (ipw->attr_memory)
382 pcmcia_release_window(ipw->handle_attr_memory);
383 pcmcia_disable_device(link);
384}
385
386/*
387 * ipwireless_attach() creates an "instance" of the driver, allocating
388 * local data structures for one device (one interface). The device
389 * is registered with Card Services.
390 *
391 * The pcmcia_device structure is initialized and the card is configured
392 * immediately via config_ipwireless(); with the current probe model there
393 * is no separate card-insertion event to wait for.
394 */
395static int ipwireless_attach(struct pcmcia_device *link)
396{
397 struct ipw_dev *ipw;
398 int ret;
399
400 ipw = kzalloc(sizeof(struct ipw_dev), GFP_KERNEL);
401 if (!ipw)
402 return -ENOMEM;
403
404 ipw->link = link;
405 link->priv = ipw;
406 link->irq.Instance = ipw;
407
408 /* Link this device into our device list. */
409 link->dev_node = &ipw->nodes[0];
410
411 ipw->hardware = ipwireless_hardware_create();
412 if (!ipw->hardware) {
413 kfree(ipw);
414 return -ENOMEM;
415 }
416	/* Configure the card straight away */
417
418 ret = config_ipwireless(ipw);
419
420 if (ret != 0) {
421 cs_error(link, RegisterClient, ret);
422 ipwireless_detach(link);
423 return ret;
424 }
425
426 return 0;
427}
428
429/*
430 * This deletes a driver "instance". The device is de-registered with
431 * Card Services. If it has been released, all local data structures
432 * are freed. Otherwise, the structures will be freed when the device
433 * is released.
434 */
435static void ipwireless_detach(struct pcmcia_device *link)
436{
437 struct ipw_dev *ipw = link->priv;
438
439 release_ipwireless(ipw);
440
441 /* Break the link with Card Services */
442 if (link)
443 pcmcia_disable_device(link);
444
445 if (ipw->tty != NULL)
446 ipwireless_tty_free(ipw->tty);
447 if (ipw->network != NULL)
448 ipwireless_network_free(ipw->network);
449 if (ipw->hardware != NULL)
450 ipwireless_hardware_free(ipw->hardware);
451 kfree(ipw);
452}
453
454static struct pcmcia_driver me = {
455 .owner = THIS_MODULE,
456 .probe = ipwireless_attach,
457 .remove = ipwireless_detach,
458 .drv = { .name = IPWIRELESS_PCCARD_NAME },
459 .id_table = ipw_ids
460};
461
462/*
463 * Module insertion: initialisation of the module.
464 * Register the card with cardmgr...
465 */
466static int __init init_ipwireless(void)
467{
468 int ret;
469
470 printk(KERN_INFO IPWIRELESS_PCCARD_NAME " "
471 IPWIRELESS_PCMCIA_VERSION " by " IPWIRELESS_PCMCIA_AUTHOR "\n");
472
473 ret = ipwireless_tty_init();
474 if (ret != 0)
475 return ret;
476
477 ret = pcmcia_register_driver(&me);
478 if (ret != 0)
479 ipwireless_tty_release();
480
481 return ret;
482}
483
484/*
485 * Module removal
486 */
487static void __exit exit_ipwireless(void)
488{
489 printk(KERN_INFO IPWIRELESS_PCCARD_NAME " "
490 IPWIRELESS_PCMCIA_VERSION " removed\n");
491
492 pcmcia_unregister_driver(&me);
493 ipwireless_tty_release();
494}
495
496module_init(init_ipwireless);
497module_exit(exit_ipwireless);
498
499MODULE_AUTHOR(IPWIRELESS_PCMCIA_AUTHOR);
500MODULE_DESCRIPTION(IPWIRELESS_PCCARD_NAME " " IPWIRELESS_PCMCIA_VERSION);
501MODULE_LICENSE("GPL");
diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h
new file mode 100644
index 000000000000..1bfdcc8d47d6
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/main.h
@@ -0,0 +1,70 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#ifndef _IPWIRELESS_CS_H_
19#define _IPWIRELESS_CS_H_
20
21#include <linux/sched.h>
22#include <linux/types.h>
23
24#include <pcmcia/cs_types.h>
25#include <pcmcia/cs.h>
26#include <pcmcia/cistpl.h>
27#include <pcmcia/ds.h>
28
29#include "hardware.h"
30
31#define IPWIRELESS_PCCARD_NAME "ipwireless"
32#define IPWIRELESS_PCMCIA_VERSION "1.1"
33#define IPWIRELESS_PCMCIA_AUTHOR \
34 "Stephen Blackheath, Ben Martel, Jiri Kosina and David Sterba"
35
36#define IPWIRELESS_TX_QUEUE_SIZE 262144
37#define IPWIRELESS_RX_QUEUE_SIZE 262144
38
39#define IPWIRELESS_STATE_DEBUG
40
41struct ipw_hardware;
42struct ipw_network;
43struct ipw_tty;
44
45struct ipw_dev {
46 struct pcmcia_device *link;
47 int is_v2_card;
48 window_handle_t handle_attr_memory;
49 void __iomem *attr_memory;
50 window_handle_t handle_common_memory;
51 void __iomem *common_memory;
52 dev_node_t nodes[2];
53 /* Reference to attribute memory, containing CIS data */
54 void *attribute_memory;
55
56 /* Hardware context */
57 struct ipw_hardware *hardware;
58 /* Network layer context */
59 struct ipw_network *network;
60 /* TTY device context */
61 struct ipw_tty *tty;
62 struct work_struct work_reboot;
63};
64
65/* Module parameters */
66extern int ipwireless_debug;
67extern int ipwireless_loopback;
68extern int ipwireless_out_queue;
69
70#endif
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c
new file mode 100644
index 000000000000..ff35230058d3
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/network.c
@@ -0,0 +1,512 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#include <linux/interrupt.h>
19#include <linux/kernel.h>
20#include <linux/mutex.h>
21#include <linux/netdevice.h>
22#include <linux/ppp_channel.h>
23#include <linux/ppp_defs.h>
24#include <linux/if_ppp.h>
25#include <linux/skbuff.h>
26
27#include "network.h"
28#include "hardware.h"
29#include "main.h"
30#include "tty.h"
31
32#define MAX_OUTGOING_PACKETS_QUEUED ipwireless_out_queue
33#define MAX_ASSOCIATED_TTYS 2
34
35#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
36
37struct ipw_network {
38 /* Hardware context, used for calls to hardware layer. */
39 struct ipw_hardware *hardware;
40	/* Context for the kernel's ppp_generic functionality */
41 struct ppp_channel *ppp_channel;
42 /* tty context connected with IPW console */
43 struct ipw_tty *associated_ttys[NO_OF_IPW_CHANNELS][MAX_ASSOCIATED_TTYS];
44 /* True if ppp needs waking up once we're ready to xmit */
45 int ppp_blocked;
46 /* Number of packets queued up in hardware module. */
47 int outgoing_packets_queued;
48 /* Spinlock to avoid interrupts during shutdown */
49 spinlock_t spinlock;
50 struct mutex close_lock;
51
52	/* PPP ioctl data, not actually used anywhere */
53 unsigned int flags;
54 unsigned int rbits;
55 u32 xaccm[8];
56 u32 raccm;
57 int mru;
58
59 int shutting_down;
60 unsigned int ras_control_lines;
61
62 struct work_struct work_go_online;
63 struct work_struct work_go_offline;
64};
65
66
67#ifdef IPWIRELESS_STATE_DEBUG
68int ipwireless_dump_network_state(char *p, size_t limit,
69 struct ipw_network *network)
70{
71 return snprintf(p, limit,
72 "debug: ppp_blocked=%d\n"
73 "debug: outgoing_packets_queued=%d\n"
74 "debug: network.shutting_down=%d\n",
75 network->ppp_blocked,
76 network->outgoing_packets_queued,
77 network->shutting_down);
78}
79#endif
80
81static void notify_packet_sent(void *callback_data, unsigned int packet_length)
82{
83 struct ipw_network *network = callback_data;
84 unsigned long flags;
85
86 spin_lock_irqsave(&network->spinlock, flags);
87 network->outgoing_packets_queued--;
88 if (network->ppp_channel != NULL) {
89 if (network->ppp_blocked) {
90 network->ppp_blocked = 0;
91 spin_unlock_irqrestore(&network->spinlock, flags);
92 ppp_output_wakeup(network->ppp_channel);
93 if (ipwireless_debug)
94 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
95 ": ppp unblocked\n");
96 } else
97 spin_unlock_irqrestore(&network->spinlock, flags);
98 } else
99 spin_unlock_irqrestore(&network->spinlock, flags);
100}
101
102/*
103 * Called by the ppp system when it has a packet to send to the hardware.
104 */
105static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
106 struct sk_buff *skb)
107{
108 struct ipw_network *network = ppp_channel->private;
109 unsigned long flags;
110
111 spin_lock_irqsave(&network->spinlock, flags);
112 if (network->outgoing_packets_queued < MAX_OUTGOING_PACKETS_QUEUED) {
113 unsigned char *buf;
114 static unsigned char header[] = {
115 PPP_ALLSTATIONS, /* 0xff */
116 PPP_UI, /* 0x03 */
117 };
118 int ret;
119
120 network->outgoing_packets_queued++;
121 spin_unlock_irqrestore(&network->spinlock, flags);
122
123 /*
124 * If we have the requested amount of headroom in the skb we
125 * were handed, then we can add the header efficiently.
126 */
127 if (skb_headroom(skb) >= 2) {
128 memcpy(skb_push(skb, 2), header, 2);
129 ret = ipwireless_send_packet(network->hardware,
130 IPW_CHANNEL_RAS, skb->data,
131 skb->len,
132 notify_packet_sent,
133 network);
134			if (ret < 0) {
135 skb_pull(skb, 2);
136 return 0;
137 }
138 } else {
139 /* Otherwise (rarely) we do it inefficiently. */
140 buf = kmalloc(skb->len + 2, GFP_ATOMIC);
141 if (!buf)
142 return 0;
143 memcpy(buf + 2, skb->data, skb->len);
144 memcpy(buf, header, 2);
145 ret = ipwireless_send_packet(network->hardware,
146 IPW_CHANNEL_RAS, buf,
147 skb->len + 2,
148 notify_packet_sent,
149 network);
150 kfree(buf);
151			if (ret < 0)
152 return 0;
153 }
154 kfree_skb(skb);
155 return 1;
156 } else {
157 /*
158 * Otherwise reject the packet, and flag that the ppp system
159 * needs to be unblocked once we are ready to send.
160 */
161 network->ppp_blocked = 1;
162 spin_unlock_irqrestore(&network->spinlock, flags);
163 return 0;
164 }
165}
166
167/* Handle an ioctl call that has come in via ppp (copy of ppp_async_ioctl()). */
168static int ipwireless_ppp_ioctl(struct ppp_channel *ppp_channel,
169 unsigned int cmd, unsigned long arg)
170{
171 struct ipw_network *network = ppp_channel->private;
172 int err, val;
173 u32 accm[8];
174 int __user *user_arg = (int __user *) arg;
175
176 err = -EFAULT;
177 switch (cmd) {
178 case PPPIOCGFLAGS:
179 val = network->flags | network->rbits;
180 if (put_user(val, user_arg))
181 break;
182 err = 0;
183 break;
184
185 case PPPIOCSFLAGS:
186 if (get_user(val, user_arg))
187 break;
188 network->flags = val & ~SC_RCV_BITS;
189 network->rbits = val & SC_RCV_BITS;
190 err = 0;
191 break;
192
193 case PPPIOCGASYNCMAP:
194 if (put_user(network->xaccm[0], user_arg))
195 break;
196 err = 0;
197 break;
198
199 case PPPIOCSASYNCMAP:
200 if (get_user(network->xaccm[0], user_arg))
201 break;
202 err = 0;
203 break;
204
205 case PPPIOCGRASYNCMAP:
206 if (put_user(network->raccm, user_arg))
207 break;
208 err = 0;
209 break;
210
211 case PPPIOCSRASYNCMAP:
212 if (get_user(network->raccm, user_arg))
213 break;
214 err = 0;
215 break;
216
217 case PPPIOCGXASYNCMAP:
218 if (copy_to_user((void __user *) arg, network->xaccm,
219 sizeof(network->xaccm)))
220 break;
221 err = 0;
222 break;
223
224 case PPPIOCSXASYNCMAP:
225 if (copy_from_user(accm, (void __user *) arg, sizeof(accm)))
226 break;
227 accm[2] &= ~0x40000000U; /* can't escape 0x5e */
228 accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
229 memcpy(network->xaccm, accm, sizeof(network->xaccm));
230 err = 0;
231 break;
232
233 case PPPIOCGMRU:
234 if (put_user(network->mru, user_arg))
235 break;
236 err = 0;
237 break;
238
239 case PPPIOCSMRU:
240 if (get_user(val, user_arg))
241 break;
242 if (val < PPP_MRU)
243 val = PPP_MRU;
244 network->mru = val;
245 err = 0;
246 break;
247
248 default:
249 err = -ENOTTY;
250 }
251
252 return err;
253}
254
255static struct ppp_channel_ops ipwireless_ppp_channel_ops = {
256 .start_xmit = ipwireless_ppp_start_xmit,
257 .ioctl = ipwireless_ppp_ioctl
258};
259
260static void do_go_online(struct work_struct *work_go_online)
261{
262 struct ipw_network *network =
263 container_of(work_go_online, struct ipw_network,
264 work_go_online);
265 unsigned long flags;
266
267 spin_lock_irqsave(&network->spinlock, flags);
268 if (!network->ppp_channel) {
269 struct ppp_channel *channel;
270
271 spin_unlock_irqrestore(&network->spinlock, flags);
272 channel = kzalloc(sizeof(struct ppp_channel), GFP_KERNEL);
273 if (!channel) {
274 printk(KERN_ERR IPWIRELESS_PCCARD_NAME
275 ": unable to allocate PPP channel\n");
276 return;
277 }
278 channel->private = network;
279 channel->mtu = 16384; /* Wild guess */
280 channel->hdrlen = 2;
281 channel->ops = &ipwireless_ppp_channel_ops;
282
283 network->flags = 0;
284 network->rbits = 0;
285 network->mru = PPP_MRU;
286 memset(network->xaccm, 0, sizeof(network->xaccm));
287 network->xaccm[0] = ~0U;
288 network->xaccm[3] = 0x60000000U;
289 network->raccm = ~0U;
290 ppp_register_channel(channel);
291 spin_lock_irqsave(&network->spinlock, flags);
292 network->ppp_channel = channel;
293 }
294 spin_unlock_irqrestore(&network->spinlock, flags);
295}
296
297static void do_go_offline(struct work_struct *work_go_offline)
298{
299 struct ipw_network *network =
300 container_of(work_go_offline, struct ipw_network,
301 work_go_offline);
302 unsigned long flags;
303
304 mutex_lock(&network->close_lock);
305 spin_lock_irqsave(&network->spinlock, flags);
306 if (network->ppp_channel != NULL) {
307 struct ppp_channel *channel = network->ppp_channel;
308
309 network->ppp_channel = NULL;
310 spin_unlock_irqrestore(&network->spinlock, flags);
311 mutex_unlock(&network->close_lock);
312 ppp_unregister_channel(channel);
313 } else {
314 spin_unlock_irqrestore(&network->spinlock, flags);
315 mutex_unlock(&network->close_lock);
316 }
317}
318
319void ipwireless_network_notify_control_line_change(struct ipw_network *network,
320 unsigned int channel_idx,
321 unsigned int control_lines,
322 unsigned int changed_mask)
323{
324 int i;
325
326 if (channel_idx == IPW_CHANNEL_RAS)
327 network->ras_control_lines = control_lines;
328
329 for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
330 struct ipw_tty *tty =
331 network->associated_ttys[channel_idx][i];
332
333 /*
334		 * Pass the control line change on to any tty that is
335		 * currently associated with this channel.
338 */
339 if (tty)
340 ipwireless_tty_notify_control_line_change(tty,
341 channel_idx,
342 control_lines,
343 changed_mask);
344 }
345}
346
347/*
348 * Some versions of firmware stuff packets with 0xff 0x03 (PPP: ALLSTATIONS, UI)
349 * bytes, which are required on sent packets but not always present on
350 * received packets.
351 */
352static struct sk_buff *ipw_packet_received_skb(unsigned char *data,
353 unsigned int length)
354{
355 struct sk_buff *skb;
356
357 if (length > 2 && data[0] == PPP_ALLSTATIONS && data[1] == PPP_UI) {
358 length -= 2;
359 data += 2;
360 }
361
362 skb = dev_alloc_skb(length + 4);
363 skb_reserve(skb, 2);
364 memcpy(skb_put(skb, length), data, length);
365
366 return skb;
367}
368
369void ipwireless_network_packet_received(struct ipw_network *network,
370 unsigned int channel_idx,
371 unsigned char *data,
372 unsigned int length)
373{
374 int i;
375 unsigned long flags;
376
377 for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
378 struct ipw_tty *tty = network->associated_ttys[channel_idx][i];
379
380 /*
381 * If it's associated with a tty (other than the RAS channel
382 * when we're online), then send the data to that tty. The RAS
383 * channel's data is handled above - it always goes through
384 * ppp_generic.
385 */
386 if (tty && channel_idx == IPW_CHANNEL_RAS
387 && (network->ras_control_lines &
388 IPW_CONTROL_LINE_DCD) != 0
389 && ipwireless_tty_is_modem(tty)) {
390 /*
391 * If data came in on the RAS channel and this tty is
392 * the modem tty, and we are online, then we send it to
393 * the PPP layer.
394 */
395 mutex_lock(&network->close_lock);
396 spin_lock_irqsave(&network->spinlock, flags);
397 if (network->ppp_channel != NULL) {
398 struct sk_buff *skb;
399
400 spin_unlock_irqrestore(&network->spinlock,
401 flags);
402
403 /* Send the data to the ppp_generic module. */
404 skb = ipw_packet_received_skb(data, length);
405 ppp_input(network->ppp_channel, skb);
406 } else
407 spin_unlock_irqrestore(&network->spinlock,
408 flags);
409 mutex_unlock(&network->close_lock);
410 }
411 /* Otherwise we send it out the tty. */
412		else if (tty)
413 ipwireless_tty_received(tty, data, length);
414 }
415}
416
417struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw)
418{
419 struct ipw_network *network =
420 kzalloc(sizeof(struct ipw_network), GFP_ATOMIC);
421
422 if (!network)
423 return NULL;
424
425 spin_lock_init(&network->spinlock);
426 mutex_init(&network->close_lock);
427
428 network->hardware = hw;
429
430 INIT_WORK(&network->work_go_online, do_go_online);
431 INIT_WORK(&network->work_go_offline, do_go_offline);
432
433 ipwireless_associate_network(hw, network);
434
435 return network;
436}
437
438void ipwireless_network_free(struct ipw_network *network)
439{
440 network->shutting_down = 1;
441
442 ipwireless_ppp_close(network);
443 flush_scheduled_work();
444
445 ipwireless_stop_interrupts(network->hardware);
446 ipwireless_associate_network(network->hardware, NULL);
447
448 kfree(network);
449}
450
451void ipwireless_associate_network_tty(struct ipw_network *network,
452 unsigned int channel_idx,
453 struct ipw_tty *tty)
454{
455 int i;
456
457 for (i = 0; i < MAX_ASSOCIATED_TTYS; i++)
458 if (network->associated_ttys[channel_idx][i] == NULL) {
459 network->associated_ttys[channel_idx][i] = tty;
460 break;
461 }
462}
463
464void ipwireless_disassociate_network_ttys(struct ipw_network *network,
465 unsigned int channel_idx)
466{
467 int i;
468
469 for (i = 0; i < MAX_ASSOCIATED_TTYS; i++)
470 network->associated_ttys[channel_idx][i] = NULL;
471}
472
473void ipwireless_ppp_open(struct ipw_network *network)
474{
475 if (ipwireless_debug)
476 printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": online\n");
477 schedule_work(&network->work_go_online);
478}
479
480void ipwireless_ppp_close(struct ipw_network *network)
481{
482 /* Disconnect from the wireless network. */
483 if (ipwireless_debug)
484 printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": offline\n");
485 schedule_work(&network->work_go_offline);
486}
487
488int ipwireless_ppp_channel_index(struct ipw_network *network)
489{
490 int ret = -1;
491 unsigned long flags;
492
493 spin_lock_irqsave(&network->spinlock, flags);
494 if (network->ppp_channel != NULL)
495 ret = ppp_channel_index(network->ppp_channel);
496 spin_unlock_irqrestore(&network->spinlock, flags);
497
498 return ret;
499}
500
501int ipwireless_ppp_unit_number(struct ipw_network *network)
502{
503 int ret = -1;
504 unsigned long flags;
505
506 spin_lock_irqsave(&network->spinlock, flags);
507 if (network->ppp_channel != NULL)
508 ret = ppp_unit_number(network->ppp_channel);
509 spin_unlock_irqrestore(&network->spinlock, flags);
510
511 return ret;
512}
diff --git a/drivers/char/pcmcia/ipwireless/network.h b/drivers/char/pcmcia/ipwireless/network.h
new file mode 100644
index 000000000000..b0e1e952fd14
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/network.h
@@ -0,0 +1,55 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#ifndef _IPWIRELESS_CS_NETWORK_H_
19#define _IPWIRELESS_CS_NETWORK_H_
20
21#include <linux/types.h>
22
23struct ipw_network;
24struct ipw_tty;
25struct ipw_hardware;
26
27/* Definitions of the different channels on the PCMCIA UE */
28#define IPW_CHANNEL_RAS 0
29#define IPW_CHANNEL_DIALLER 1
30#define IPW_CHANNEL_CONSOLE 2
31#define NO_OF_IPW_CHANNELS 5
32
33void ipwireless_network_notify_control_line_change(struct ipw_network *net,
34 unsigned int channel_idx, unsigned int control_lines,
35 unsigned int control_mask);
36void ipwireless_network_packet_received(struct ipw_network *net,
37 unsigned int channel_idx, unsigned char *data,
38 unsigned int length);
39struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw);
40void ipwireless_network_free(struct ipw_network *net);
41void ipwireless_associate_network_tty(struct ipw_network *net,
42 unsigned int channel_idx, struct ipw_tty *tty);
43void ipwireless_disassociate_network_ttys(struct ipw_network *net,
44 unsigned int channel_idx);
45
46void ipwireless_ppp_open(struct ipw_network *net);
47
48void ipwireless_ppp_close(struct ipw_network *net);
49int ipwireless_ppp_channel_index(struct ipw_network *net);
50int ipwireless_ppp_unit_number(struct ipw_network *net);
51
52int ipwireless_dump_network_state(char *p, size_t limit,
53 struct ipw_network *net);
54
55#endif
diff --git a/drivers/char/pcmcia/ipwireless/setup_protocol.h b/drivers/char/pcmcia/ipwireless/setup_protocol.h
new file mode 100644
index 000000000000..9d6bcc77c73c
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/setup_protocol.h
@@ -0,0 +1,108 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#ifndef _IPWIRELESS_CS_SETUP_PROTOCOL_H_
19#define _IPWIRELESS_CS_SETUP_PROTOCOL_H_
20
21/* Version of the setup protocol and transport protocols */
22#define TL_SETUP_VERSION 1
23
24#define TL_SETUP_VERSION_QRY_TMO 1000
25#define TL_SETUP_MAX_VERSION_QRY 30
26
27/* Message numbers 0-9 are obsoleted and must not be reused! */
28#define TL_SETUP_SIGNO_GET_VERSION_QRY 10
29#define TL_SETUP_SIGNO_GET_VERSION_RSP 11
30#define TL_SETUP_SIGNO_CONFIG_MSG 12
31#define TL_SETUP_SIGNO_CONFIG_DONE_MSG 13
32#define TL_SETUP_SIGNO_OPEN_MSG 14
33#define TL_SETUP_SIGNO_CLOSE_MSG 15
34
35#define TL_SETUP_SIGNO_INFO_MSG 20
36#define TL_SETUP_SIGNO_INFO_MSG_ACK 21
37
38#define TL_SETUP_SIGNO_REBOOT_MSG 22
39#define TL_SETUP_SIGNO_REBOOT_MSG_ACK 23
40
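/*
 * Setup handshake as driven by hardware.c (sketch reconstructed from
 * __handle_setup_get_version_rsp() and handle_received_SETUP_packet()):
 *
 *	driver -> card:  GET_VERSION_QRY    (re-sent from a timer until
 *	card -> driver:  GET_VERSION_RSP     the card answers)
 *	driver -> card:  CONFIG_MSG          (one per port)
 *	driver -> card:  CONFIG_DONE_MSG
 *	driver -> card:  OPEN_MSG            (one per port)
 *	driver -> card:  INFO_MSG            (declares NDISWAN driver type)
 *	card -> driver:  INFO_MSG_ACK
 *
 * REBOOT_MSG may arrive at any time after setup and is acknowledged with
 * REBOOT_MSG_ACK.
 */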
41/* Synchronous start-messages */
42struct tl_setup_get_version_qry {
43 unsigned char sig_no; /* TL_SETUP_SIGNO_GET_VERSION_QRY */
44} __attribute__ ((__packed__));
45
46struct tl_setup_get_version_rsp {
47 unsigned char sig_no; /* TL_SETUP_SIGNO_GET_VERSION_RSP */
48 unsigned char version; /* TL_SETUP_VERSION */
49} __attribute__ ((__packed__));
50
51struct tl_setup_config_msg {
52 unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_MSG */
53 unsigned char port_no;
54 unsigned char prio_data;
55 unsigned char prio_ctrl;
56} __attribute__ ((__packed__));
57
58struct tl_setup_config_done_msg {
59 unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_DONE_MSG */
60} __attribute__ ((__packed__));
61
62/* Asynchronous messages */
63struct tl_setup_open_msg {
64 unsigned char sig_no; /* TL_SETUP_SIGNO_OPEN_MSG */
65 unsigned char port_no;
66} __attribute__ ((__packed__));
67
68struct tl_setup_close_msg {
69 unsigned char sig_no; /* TL_SETUP_SIGNO_CLOSE_MSG */
70 unsigned char port_no;
71} __attribute__ ((__packed__));
72
73/* Driver type - for use in tl_setup_info_msg.driver_type */
74#define COMM_DRIVER 0
75#define NDISWAN_DRIVER 1
76#define NDISWAN_DRIVER_MAJOR_VERSION 2
77#define NDISWAN_DRIVER_MINOR_VERSION 0
78
79/*
80 * It should not matter when this message comes over as we just store the
81 * results and send the ACK.
82 */
83struct tl_setup_info_msg {
84 unsigned char sig_no; /* TL_SETUP_SIGNO_INFO_MSG */
85 unsigned char driver_type;
86 unsigned char major_version;
87 unsigned char minor_version;
88} __attribute__ ((__packed__));
89
90struct tl_setup_info_msgAck {
91 unsigned char sig_no; /* TL_SETUP_SIGNO_INFO_MSG_ACK */
92} __attribute__ ((__packed__));
93
94struct TlSetupRebootMsgAck {
95 unsigned char sig_no; /* TL_SETUP_SIGNO_REBOOT_MSG_ACK */
96} __attribute__ ((__packed__));
97
98/* Define a union of all the msgs that the driver can receive from the card.*/
99union ipw_setup_rx_msg {
100 unsigned char sig_no;
101 struct tl_setup_get_version_rsp version_rsp_msg;
102 struct tl_setup_open_msg open_msg;
103 struct tl_setup_close_msg close_msg;
104 struct tl_setup_info_msg InfoMsg;
105 struct tl_setup_info_msgAck info_msg_ack;
106} __attribute__ ((__packed__));
107
108#endif /* _IPWIRELESS_CS_SETUP_PROTOCOL_H_ */
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
new file mode 100644
index 000000000000..42f3815c5ce3
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -0,0 +1,688 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/mutex.h>
22#include <linux/ppp_defs.h>
23#include <linux/if.h>
24#include <linux/if_ppp.h>
25#include <linux/sched.h>
26#include <linux/serial.h>
27#include <linux/slab.h>
28#include <linux/tty.h>
29#include <linux/tty_driver.h>
30#include <linux/tty_flip.h>
31#include <linux/uaccess.h>
32#include <linux/version.h>
33
34#include "tty.h"
35#include "network.h"
36#include "hardware.h"
37#include "main.h"
38
39#define IPWIRELESS_PCMCIA_START (0)
40#define IPWIRELESS_PCMCIA_MINORS (24)
41#define IPWIRELESS_PCMCIA_MINOR_RANGE (8)
42
43#define TTYTYPE_MODEM (0)
44#define TTYTYPE_MONITOR (1)
45#define TTYTYPE_RAS_RAW (2)
46
47struct ipw_tty {
48 int index;
49 struct ipw_hardware *hardware;
50 unsigned int channel_idx;
51 unsigned int secondary_channel_idx;
52 int tty_type;
53 struct ipw_network *network;
54 struct tty_struct *linux_tty;
55 int open_count;
56 unsigned int control_lines;
57 struct mutex ipw_tty_mutex;
58 int tx_bytes_queued;
59 int closing;
60};
61
62static struct ipw_tty *ttys[IPWIRELESS_PCMCIA_MINORS];
63
64static struct tty_driver *ipw_tty_driver;
65
66static char *tty_type_name(int tty_type)
67{
68 static char *channel_names[] = {
69 "modem",
70 "monitor",
71 "RAS-raw"
72 };
73
74 return channel_names[tty_type];
75}
76
77static void report_registering(struct ipw_tty *tty)
78{
79 char *iftype = tty_type_name(tty->tty_type);
80
81 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
82 ": registering %s device ttyIPWp%d\n", iftype, tty->index);
83}
84
85static void report_deregistering(struct ipw_tty *tty)
86{
87 char *iftype = tty_type_name(tty->tty_type);
88
89 printk(KERN_INFO IPWIRELESS_PCCARD_NAME
90 ": deregistering %s device ttyIPWp%d\n", iftype,
91 tty->index);
92}
93
94static struct ipw_tty *get_tty(int minor)
95{
96 if (minor < ipw_tty_driver->minor_start
97 || minor >= ipw_tty_driver->minor_start +
98 IPWIRELESS_PCMCIA_MINORS)
99 return NULL;
100 else {
101 int minor_offset = minor - ipw_tty_driver->minor_start;
102
103 /*
104 * The 'ras_raw' channel is only available when 'loopback' mode
105 * is enabled.
106		 * Its minor numbers start at 16 (_RANGE * _RAS_RAW).
107 */
108 if (!ipwireless_loopback &&
109 minor_offset >=
110 IPWIRELESS_PCMCIA_MINOR_RANGE * TTYTYPE_RAS_RAW)
111 return NULL;
112
113 return ttys[minor_offset];
114 }
115}
116
117static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
118{
119 int minor = linux_tty->index;
120 struct ipw_tty *tty = get_tty(minor);
121
122 if (!tty)
123 return -ENODEV;
124
125 mutex_lock(&tty->ipw_tty_mutex);
126
127 if (tty->closing) {
128 mutex_unlock(&tty->ipw_tty_mutex);
129 return -ENODEV;
130 }
131 if (tty->open_count == 0)
132 tty->tx_bytes_queued = 0;
133
134 tty->open_count++;
135
136 tty->linux_tty = linux_tty;
137 linux_tty->driver_data = tty;
138 linux_tty->low_latency = 1;
139
140 if (tty->tty_type == TTYTYPE_MODEM)
141 ipwireless_ppp_open(tty->network);
142
143 mutex_unlock(&tty->ipw_tty_mutex);
144
145 return 0;
146}
147
148static void do_ipw_close(struct ipw_tty *tty)
149{
150 tty->open_count--;
151
152 if (tty->open_count == 0) {
153 struct tty_struct *linux_tty = tty->linux_tty;
154
155 if (linux_tty != NULL) {
156 tty->linux_tty = NULL;
157 linux_tty->driver_data = NULL;
158
159 if (tty->tty_type == TTYTYPE_MODEM)
160 ipwireless_ppp_close(tty->network);
161 }
162 }
163}
164
165static void ipw_hangup(struct tty_struct *linux_tty)
166{
167 struct ipw_tty *tty = linux_tty->driver_data;
168
169 if (!tty)
170 return;
171
172 mutex_lock(&tty->ipw_tty_mutex);
173 if (tty->open_count == 0) {
174 mutex_unlock(&tty->ipw_tty_mutex);
175 return;
176 }
177
178 do_ipw_close(tty);
179
180 mutex_unlock(&tty->ipw_tty_mutex);
181}
182
183static void ipw_close(struct tty_struct *linux_tty, struct file *filp)
184{
185 ipw_hangup(linux_tty);
186}
187
188/* Take data received from hardware, and send it out the tty */
189void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
190 unsigned int length)
191{
192 struct tty_struct *linux_tty;
193 int work = 0;
194
195 mutex_lock(&tty->ipw_tty_mutex);
196 linux_tty = tty->linux_tty;
197 if (linux_tty == NULL) {
198 mutex_unlock(&tty->ipw_tty_mutex);
199 return;
200 }
201
202 if (!tty->open_count) {
203 mutex_unlock(&tty->ipw_tty_mutex);
204 return;
205 }
206 mutex_unlock(&tty->ipw_tty_mutex);
207
208 work = tty_insert_flip_string(linux_tty, data, length);
209
210 if (work != length)
211 printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
212 ": %d chars not inserted to flip buffer!\n",
213 length - work);
214
215 /*
216 * This may sleep if ->low_latency is set
217 */
218 if (work)
219 tty_flip_buffer_push(linux_tty);
220}
221
222static void ipw_write_packet_sent_callback(void *callback_data,
223 unsigned int packet_length)
224{
225 struct ipw_tty *tty = callback_data;
226
227 /*
228 * Packet has been sent, so we subtract the number of bytes from our
229 * tally of outstanding TX bytes.
230 */
231 tty->tx_bytes_queued -= packet_length;
232}
233
234static int ipw_write(struct tty_struct *linux_tty,
235 const unsigned char *buf, int count)
236{
237 struct ipw_tty *tty = linux_tty->driver_data;
238 int room, ret;
239
240 if (!tty)
241 return -ENODEV;
242
243 mutex_lock(&tty->ipw_tty_mutex);
244 if (!tty->open_count) {
245 mutex_unlock(&tty->ipw_tty_mutex);
246 return -EINVAL;
247 }
248
249 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
250 if (room < 0)
251 room = 0;
252 /* Don't allow caller to write any more than we have room for */
253 if (count > room)
254 count = room;
255
256 if (count == 0) {
257 mutex_unlock(&tty->ipw_tty_mutex);
258 return 0;
259 }
260
261 ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
262 (unsigned char *) buf, count,
263 ipw_write_packet_sent_callback, tty);
264 if (ret == -1) {
265 mutex_unlock(&tty->ipw_tty_mutex);
266 return 0;
267 }
268
269 tty->tx_bytes_queued += count;
270 mutex_unlock(&tty->ipw_tty_mutex);
271
272 return count;
273}
274
275static int ipw_write_room(struct tty_struct *linux_tty)
276{
277 struct ipw_tty *tty = linux_tty->driver_data;
278 int room;
279
280 if (!tty)
281 return -ENODEV;
282
283 if (!tty->open_count)
284 return -EINVAL;
285
286 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
287 if (room < 0)
288 room = 0;
289
290 return room;
291}
292
293static int ipwireless_get_serial_info(struct ipw_tty *tty,
294 struct serial_struct __user *retinfo)
295{
296 struct serial_struct tmp;
297
298 if (!retinfo)
 299		return -EFAULT;
300
301 memset(&tmp, 0, sizeof(tmp));
302 tmp.type = PORT_UNKNOWN;
303 tmp.line = tty->index;
304 tmp.port = 0;
305 tmp.irq = 0;
306 tmp.flags = 0;
307 tmp.baud_base = 115200;
308 tmp.close_delay = 0;
309 tmp.closing_wait = 0;
310 tmp.custom_divisor = 0;
311 tmp.hub6 = 0;
312 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
313 return -EFAULT;
314
315 return 0;
316}
317
318static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
319{
320 struct ipw_tty *tty = linux_tty->driver_data;
321
322 if (!tty)
323 return -ENODEV;
324
325 if (!tty->open_count)
326 return -EINVAL;
327
328 return tty->tx_bytes_queued;
329}
330
331static int get_control_lines(struct ipw_tty *tty)
332{
333 unsigned int my = tty->control_lines;
334 unsigned int out = 0;
335
336 if (my & IPW_CONTROL_LINE_RTS)
337 out |= TIOCM_RTS;
338 if (my & IPW_CONTROL_LINE_DTR)
339 out |= TIOCM_DTR;
340 if (my & IPW_CONTROL_LINE_CTS)
341 out |= TIOCM_CTS;
342 if (my & IPW_CONTROL_LINE_DSR)
343 out |= TIOCM_DSR;
344 if (my & IPW_CONTROL_LINE_DCD)
345 out |= TIOCM_CD;
346
347 return out;
348}
349
350static int set_control_lines(struct ipw_tty *tty, unsigned int set,
351 unsigned int clear)
352{
353 int ret;
354
355 if (set & TIOCM_RTS) {
356 ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 1);
357 if (ret)
358 return ret;
359 if (tty->secondary_channel_idx != -1) {
360 ret = ipwireless_set_RTS(tty->hardware,
361 tty->secondary_channel_idx, 1);
362 if (ret)
363 return ret;
364 }
365 }
366 if (set & TIOCM_DTR) {
367 ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 1);
368 if (ret)
369 return ret;
370 if (tty->secondary_channel_idx != -1) {
371 ret = ipwireless_set_DTR(tty->hardware,
372 tty->secondary_channel_idx, 1);
373 if (ret)
374 return ret;
375 }
376 }
377 if (clear & TIOCM_RTS) {
378 ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 0);
379 if (tty->secondary_channel_idx != -1) {
380 ret = ipwireless_set_RTS(tty->hardware,
381 tty->secondary_channel_idx, 0);
382 if (ret)
383 return ret;
384 }
385 }
386 if (clear & TIOCM_DTR) {
387 ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 0);
388 if (tty->secondary_channel_idx != -1) {
389 ret = ipwireless_set_DTR(tty->hardware,
390 tty->secondary_channel_idx, 0);
391 if (ret)
392 return ret;
393 }
394 }
395 return 0;
396}
397
398static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
399{
400 struct ipw_tty *tty = linux_tty->driver_data;
401
402 if (!tty)
403 return -ENODEV;
404
405 if (!tty->open_count)
406 return -EINVAL;
407
408 return get_control_lines(tty);
409}
410
411static int
412ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
413 unsigned int set, unsigned int clear)
414{
415 struct ipw_tty *tty = linux_tty->driver_data;
416
417 if (!tty)
418 return -ENODEV;
419
420 if (!tty->open_count)
421 return -EINVAL;
422
423 return set_control_lines(tty, set, clear);
424}
425
426static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
427 unsigned int cmd, unsigned long arg)
428{
429 struct ipw_tty *tty = linux_tty->driver_data;
430
431 if (!tty)
432 return -ENODEV;
433
434 if (!tty->open_count)
435 return -EINVAL;
436
437 switch (cmd) {
438 case TIOCGSERIAL:
439 return ipwireless_get_serial_info(tty, (void __user *) arg);
440
441 case TIOCSSERIAL:
442 return 0; /* Keeps the PCMCIA scripts happy. */
443 }
444
445 if (tty->tty_type == TTYTYPE_MODEM) {
446 switch (cmd) {
447 case PPPIOCGCHAN:
448 {
449 int chan = ipwireless_ppp_channel_index(
450 tty->network);
451
452 if (chan < 0)
453 return -ENODEV;
454 if (put_user(chan, (int __user *) arg))
455 return -EFAULT;
456 }
457 return 0;
458
459 case PPPIOCGUNIT:
460 {
461 int unit = ipwireless_ppp_unit_number(
462 tty->network);
463
464 if (unit < 0)
465 return -ENODEV;
466 if (put_user(unit, (int __user *) arg))
467 return -EFAULT;
468 }
469 return 0;
470
471 case TCGETS:
472 case TCGETA:
473 return n_tty_ioctl(linux_tty, file, cmd, arg);
474
475 case TCFLSH:
476 return n_tty_ioctl(linux_tty, file, cmd, arg);
477
478 case FIONREAD:
479 {
480 int val = 0;
481
482 if (put_user(val, (int __user *) arg))
483 return -EFAULT;
484 }
485 return 0;
486 }
487 }
488
489 return -ENOIOCTLCMD;
490}
491
492static int add_tty(dev_node_t *nodesp, int j,
493 struct ipw_hardware *hardware,
494 struct ipw_network *network, int channel_idx,
495 int secondary_channel_idx, int tty_type)
496{
497 ttys[j] = kzalloc(sizeof(struct ipw_tty), GFP_KERNEL);
498 if (!ttys[j])
499 return -ENOMEM;
500 ttys[j]->index = j;
501 ttys[j]->hardware = hardware;
502 ttys[j]->channel_idx = channel_idx;
503 ttys[j]->secondary_channel_idx = secondary_channel_idx;
504 ttys[j]->network = network;
505 ttys[j]->tty_type = tty_type;
506 mutex_init(&ttys[j]->ipw_tty_mutex);
507
508 tty_register_device(ipw_tty_driver, j, NULL);
509 ipwireless_associate_network_tty(network, channel_idx, ttys[j]);
510
511 if (secondary_channel_idx != -1)
512 ipwireless_associate_network_tty(network,
513 secondary_channel_idx,
514 ttys[j]);
515 if (nodesp != NULL) {
516 sprintf(nodesp->dev_name, "ttyIPWp%d", j);
517 nodesp->major = ipw_tty_driver->major;
518 nodesp->minor = j + ipw_tty_driver->minor_start;
519 }
520 if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j])
521 report_registering(ttys[j]);
522 return 0;
523}
524
525struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
526 struct ipw_network *network,
527 dev_node_t *nodes)
528{
529 int i, j;
530
531 for (i = 0; i < IPWIRELESS_PCMCIA_MINOR_RANGE; i++) {
532 int allfree = 1;
533
534 for (j = i; j < IPWIRELESS_PCMCIA_MINORS;
535 j += IPWIRELESS_PCMCIA_MINOR_RANGE)
536 if (ttys[j] != NULL) {
537 allfree = 0;
538 break;
539 }
540
541 if (allfree) {
542 j = i;
543
544 if (add_tty(&nodes[0], j, hardware, network,
545 IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS,
546 TTYTYPE_MODEM))
547 return NULL;
548
549 j += IPWIRELESS_PCMCIA_MINOR_RANGE;
550 if (add_tty(&nodes[1], j, hardware, network,
551 IPW_CHANNEL_DIALLER, -1,
552 TTYTYPE_MONITOR))
553 return NULL;
554
555 j += IPWIRELESS_PCMCIA_MINOR_RANGE;
556 if (add_tty(NULL, j, hardware, network,
557 IPW_CHANNEL_RAS, -1,
558 TTYTYPE_RAS_RAW))
559 return NULL;
560
561 nodes[0].next = &nodes[1];
562 nodes[1].next = NULL;
563
564 return ttys[i];
565 }
566 }
567 return NULL;
568}
569
570/*
571 * Must be called before ipwireless_network_free().
572 */
573void ipwireless_tty_free(struct ipw_tty *tty)
574{
575 int j;
576 struct ipw_network *network = ttys[tty->index]->network;
577
578 for (j = tty->index; j < IPWIRELESS_PCMCIA_MINORS;
579 j += IPWIRELESS_PCMCIA_MINOR_RANGE) {
580 struct ipw_tty *ttyj = ttys[j];
581
582 if (ttyj) {
583 mutex_lock(&ttyj->ipw_tty_mutex);
584 if (get_tty(j + ipw_tty_driver->minor_start) == ttyj)
585 report_deregistering(ttyj);
586 ttyj->closing = 1;
587 if (ttyj->linux_tty != NULL) {
588 mutex_unlock(&ttyj->ipw_tty_mutex);
589 tty_hangup(ttyj->linux_tty);
590 /* Wait till the tty_hangup has completed */
591 flush_scheduled_work();
592 mutex_lock(&ttyj->ipw_tty_mutex);
593 }
594 while (ttyj->open_count)
595 do_ipw_close(ttyj);
596 ipwireless_disassociate_network_ttys(network,
597 ttyj->channel_idx);
598 tty_unregister_device(ipw_tty_driver, j);
599 ttys[j] = NULL;
600 mutex_unlock(&ttyj->ipw_tty_mutex);
601 kfree(ttyj);
602 }
603 }
604}
605
606static struct tty_operations tty_ops = {
607 .open = ipw_open,
608 .close = ipw_close,
609 .hangup = ipw_hangup,
610 .write = ipw_write,
611 .write_room = ipw_write_room,
612 .ioctl = ipw_ioctl,
613 .chars_in_buffer = ipw_chars_in_buffer,
614 .tiocmget = ipw_tiocmget,
615 .tiocmset = ipw_tiocmset,
616};
617
618int ipwireless_tty_init(void)
619{
620 int result;
621
622 ipw_tty_driver = alloc_tty_driver(IPWIRELESS_PCMCIA_MINORS);
623 if (!ipw_tty_driver)
624 return -ENOMEM;
625
626 ipw_tty_driver->owner = THIS_MODULE;
627 ipw_tty_driver->driver_name = IPWIRELESS_PCCARD_NAME;
628 ipw_tty_driver->name = "ttyIPWp";
629 ipw_tty_driver->major = 0;
630 ipw_tty_driver->minor_start = IPWIRELESS_PCMCIA_START;
631 ipw_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
632 ipw_tty_driver->subtype = SERIAL_TYPE_NORMAL;
633 ipw_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
634 ipw_tty_driver->init_termios = tty_std_termios;
635 ipw_tty_driver->init_termios.c_cflag =
636 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
637 ipw_tty_driver->init_termios.c_ispeed = 9600;
638 ipw_tty_driver->init_termios.c_ospeed = 9600;
639 tty_set_operations(ipw_tty_driver, &tty_ops);
640 result = tty_register_driver(ipw_tty_driver);
641 if (result) {
642 printk(KERN_ERR IPWIRELESS_PCCARD_NAME
643 ": failed to register tty driver\n");
644 put_tty_driver(ipw_tty_driver);
645 return result;
646 }
647
648 return 0;
649}
650
651void ipwireless_tty_release(void)
652{
653 int ret;
654
655 ret = tty_unregister_driver(ipw_tty_driver);
656 put_tty_driver(ipw_tty_driver);
657 if (ret != 0)
658 printk(KERN_ERR IPWIRELESS_PCCARD_NAME
659 ": tty_unregister_driver failed with code %d\n", ret);
660}
661
662int ipwireless_tty_is_modem(struct ipw_tty *tty)
663{
664 return tty->tty_type == TTYTYPE_MODEM;
665}
666
667void
668ipwireless_tty_notify_control_line_change(struct ipw_tty *tty,
669 unsigned int channel_idx,
670 unsigned int control_lines,
671 unsigned int changed_mask)
672{
673 unsigned int old_control_lines = tty->control_lines;
674
675 tty->control_lines = (tty->control_lines & ~changed_mask)
676 | (control_lines & changed_mask);
677
678 /*
679 * If DCD is de-asserted, we close the tty so pppd can tell that we
680 * have gone offline.
681 */
682 if ((old_control_lines & IPW_CONTROL_LINE_DCD)
683 && !(tty->control_lines & IPW_CONTROL_LINE_DCD)
684 && tty->linux_tty) {
685 tty_hangup(tty->linux_tty);
686 }
687}
688
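For reference, the tiocmget/tiocmset and ioctl entry points above are reached from user space through the standard termios ioctls. Below is a minimal, hedged user-space sketch; the device node name /dev/ttyIPWp0 is assumed from the driver's "ttyIPWp" tty name and is not spelled out in this patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	/* Assumed node name, derived from the "ttyIPWp" tty name above. */
	int fd = open("/dev/ttyIPWp0", O_RDWR | O_NOCTTY);
	int bits;

	if (fd < 0)
		return 1;

	/* Serviced by ipw_tiocmget() -> get_control_lines() above. */
	if (ioctl(fd, TIOCMGET, &bits) == 0)
		printf("DCD=%d DSR=%d CTS=%d\n",
		       !!(bits & TIOCM_CD), !!(bits & TIOCM_DSR),
		       !!(bits & TIOCM_CTS));

	close(fd);
	return 0;
}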
diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h
new file mode 100644
index 000000000000..b0deb9168b6b
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/tty.h
@@ -0,0 +1,48 @@
1/*
2 * IPWireless 3G PCMCIA Network Driver
3 *
4 * Original code
5 * by Stephen Blackheath <stephen@blacksapphire.com>,
6 * Ben Martel <benm@symmetric.co.nz>
7 *
8 * Copyrighted as follows:
9 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
10 *
11 * Various driver changes and rewrites, port to new kernels
12 * Copyright (C) 2006-2007 Jiri Kosina
13 *
14 * Misc code cleanups and updates
15 * Copyright (C) 2007 David Sterba
16 */
17
18#ifndef _IPWIRELESS_CS_TTY_H_
19#define _IPWIRELESS_CS_TTY_H_
20
21#include <linux/types.h>
22#include <linux/sched.h>
23
24#include <pcmcia/cs_types.h>
25#include <pcmcia/cs.h>
26#include <pcmcia/cistpl.h>
27#include <pcmcia/ds.h>
28
29struct ipw_tty;
30struct ipw_network;
31struct ipw_hardware;
32
33int ipwireless_tty_init(void);
34void ipwireless_tty_release(void);
35
36struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw,
37 struct ipw_network *net,
38 dev_node_t *nodes);
39void ipwireless_tty_free(struct ipw_tty *tty);
40void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
41 unsigned int length);
42int ipwireless_tty_is_modem(struct ipw_tty *tty);
43void ipwireless_tty_notify_control_line_change(struct ipw_tty *tty,
44 unsigned int channel_idx,
45 unsigned int control_lines,
46 unsigned int changed_mask);
47
48#endif
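The header above is the entire surface the rest of the driver sees. A hedged sketch of how a caller might exercise it follows; the real caller is main.c, added elsewhere in this patch, and the function and variable names below are illustrative only.

#include <linux/errno.h>
#include "tty.h"

static struct ipw_tty *example_tty;
static dev_node_t example_nodes[2];	/* ipwireless_tty_create() fills nodes[0] and nodes[1] */

static int example_bring_up(struct ipw_hardware *hw, struct ipw_network *net)
{
	int err = ipwireless_tty_init();	/* register the tty driver once */

	if (err)
		return err;

	example_tty = ipwireless_tty_create(hw, net, example_nodes);
	return example_tty ? 0 : -ENODEV;
}

static void example_tear_down(void)
{
	ipwireless_tty_free(example_tty);	/* must precede ipwireless_network_free() */
	ipwireless_tty_release();
}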
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 63512d906f02..9dea14db724c 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -137,6 +137,18 @@ config INPUT_EVBUG
137 To compile this driver as a module, choose M here: the 137 To compile this driver as a module, choose M here: the
138 module will be called evbug. 138 module will be called evbug.
139 139
140config INPUT_APMPOWER
141 tristate "Input Power Event -> APM Bridge" if EMBEDDED
142 depends on INPUT && APM_EMULATION
143 ---help---
144 Say Y here if you want suspend key events to trigger a user
145 requested suspend through APM. This is useful on embedded
 146	  systems where such behaviour is desired without userspace
147 interaction. If unsure, say N.
148
149 To compile this driver as a module, choose M here: the
150 module will be called apm-power.
151
140comment "Input Device Drivers" 152comment "Input Device Drivers"
141 153
142source "drivers/input/keyboard/Kconfig" 154source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 99af903bd3ce..2ae87b19caa8 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_INPUT_TABLET) += tablet/
22obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/ 22obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
23obj-$(CONFIG_INPUT_MISC) += misc/ 23obj-$(CONFIG_INPUT_MISC) += misc/
24 24
25obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
diff --git a/drivers/input/apm-power.c b/drivers/input/apm-power.c
new file mode 100644
index 000000000000..c36d110b349a
--- /dev/null
+++ b/drivers/input/apm-power.c
@@ -0,0 +1,131 @@
1/*
2 * Input Power Event -> APM Bridge
3 *
4 * Copyright (c) 2007 Richard Purdie
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/input.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/tty.h>
17#include <linux/delay.h>
18#include <linux/pm.h>
19#include <linux/apm-emulation.h>
20
21static void system_power_event(unsigned int keycode)
22{
23 switch (keycode) {
24 case KEY_SUSPEND:
25 apm_queue_event(APM_USER_SUSPEND);
26
27 printk(KERN_INFO "apm-power: Requesting system suspend...\n");
28 break;
29 default:
30 break;
31 }
32}
33
34static void apmpower_event(struct input_handle *handle, unsigned int type,
35 unsigned int code, int value)
36{
37 /* only react on key down events */
38 if (value != 1)
39 return;
40
41 switch (type) {
42 case EV_PWR:
43 system_power_event(code);
44 break;
45
46 default:
47 break;
48 }
49}
50
51static int apmpower_connect(struct input_handler *handler,
52 struct input_dev *dev,
53 const struct input_device_id *id)
54{
55 struct input_handle *handle;
56 int error;
57
58 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
59 if (!handle)
60 return -ENOMEM;
61
62 handle->dev = dev;
63 handle->handler = handler;
64 handle->name = "apm-power";
65
66 handler->private = handle;
67
68 error = input_register_handle(handle);
69 if (error) {
70 printk(KERN_ERR
71 "apm-power: Failed to register input power handler, "
72 "error %d\n", error);
73 kfree(handle);
74 return error;
75 }
76
77 error = input_open_device(handle);
78 if (error) {
79 printk(KERN_ERR
80 "apm-power: Failed to open input power device, "
81 "error %d\n", error);
82 input_unregister_handle(handle);
83 kfree(handle);
84 return error;
85 }
86
87 return 0;
88}
89
90static void apmpower_disconnect(struct input_handle *handler)
91{
92 struct input_handle *handle = handler->private;
93
94 input_close_device(handle);
95 kfree(handle);
96}
97
98static const struct input_device_id apmpower_ids[] = {
99 {
100 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
101 .evbit = { BIT_MASK(EV_PWR) },
102 },
103 { },
104};
105
106MODULE_DEVICE_TABLE(input, apmpower_ids);
107
108static struct input_handler apmpower_handler = {
109 .event = apmpower_event,
110 .connect = apmpower_connect,
111 .disconnect = apmpower_disconnect,
112 .name = "apm-power",
113 .id_table = apmpower_ids,
114};
115
116static int __init apmpower_init(void)
117{
118 return input_register_handler(&apmpower_handler);
119}
120
121static void __exit apmpower_exit(void)
122{
123 input_unregister_handler(&apmpower_handler);
124}
125
126module_init(apmpower_init);
127module_exit(apmpower_exit);
128
129MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
130MODULE_DESCRIPTION("Input Power Event -> APM Bridge");
131MODULE_LICENSE("GPL");
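For the bridge above to attach, the originating input device has to advertise EV_PWR and then report the suspend key through it. A hedged sketch of the producing side; the device setup note and function name are illustrative and not part of this patch.

#include <linux/input.h>

/* During device setup: set_bit(EV_PWR, dev->evbit) so apmpower_ids matches. */

static void example_report_suspend_key(struct input_dev *dev)
{
	input_event(dev, EV_PWR, KEY_SUSPEND, 1);	/* apmpower_event() acts on value == 1 */
	input_sync(dev);
	input_event(dev, EV_PWR, KEY_SUSPEND, 0);
	input_sync(dev);
}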
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index e5b4e9bfbdc5..0727b0a12557 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -617,7 +617,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
617 if (get_user(t, ip)) 617 if (get_user(t, ip))
618 return -EFAULT; 618 return -EFAULT;
619 619
620 error = dev->getkeycode(dev, t, &v); 620 error = input_get_keycode(dev, t, &v);
621 if (error) 621 if (error)
622 return error; 622 return error;
623 623
@@ -630,7 +630,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
630 if (get_user(t, ip) || get_user(v, ip + 1)) 630 if (get_user(t, ip) || get_user(v, ip + 1))
631 return -EFAULT; 631 return -EFAULT;
632 632
633 return dev->setkeycode(dev, t, v); 633 return input_set_keycode(dev, t, v);
634 634
635 case EVIOCSFF: 635 case EVIOCSFF:
636 if (copy_from_user(&effect, p, sizeof(effect))) 636 if (copy_from_user(&effect, p, sizeof(effect)))
@@ -683,7 +683,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
683 case EV_FF: bits = dev->ffbit; len = FF_MAX; break; 683 case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
684 case EV_SW: bits = dev->swbit; len = SW_MAX; break; 684 case EV_SW: bits = dev->swbit; len = SW_MAX; break;
685 default: return -EINVAL; 685 default: return -EINVAL;
686 } 686 }
687 return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode); 687 return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
688 } 688 }
689 689
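The two hunks above only change which helper backs EVIOCGKEYCODE/EVIOCSKEYCODE; the user-space interface is unchanged. A small sketch of that interface, with the event node path chosen purely as an example:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int fd = open("/dev/input/event0", O_RDONLY);
	int map[2] = { 30, 0 };		/* map[0]: scancode in, map[1]: keycode out */

	if (fd < 0)
		return 1;

	if (ioctl(fd, EVIOCGKEYCODE, map) == 0)	/* now served by input_get_keycode() */
		printf("scancode %d -> keycode %d\n", map[0], map[1]);

	map[1] = KEY_A;
	ioctl(fd, EVIOCSKEYCODE, map);		/* now served by input_set_keycode() */

	close(fd);
	return 0;
}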
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 92b359894e81..490918a5d192 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -60,17 +60,21 @@ static void input_polled_device_work(struct work_struct *work)
60{ 60{
61 struct input_polled_dev *dev = 61 struct input_polled_dev *dev =
62 container_of(work, struct input_polled_dev, work.work); 62 container_of(work, struct input_polled_dev, work.work);
63 unsigned long delay;
63 64
64 dev->poll(dev); 65 dev->poll(dev);
65 queue_delayed_work(polldev_wq, &dev->work, 66
66 msecs_to_jiffies(dev->poll_interval)); 67 delay = msecs_to_jiffies(dev->poll_interval);
68 if (delay >= HZ)
69 delay = round_jiffies_relative(delay);
70
71 queue_delayed_work(polldev_wq, &dev->work, delay);
67} 72}
68 73
69static int input_open_polled_device(struct input_dev *input) 74static int input_open_polled_device(struct input_dev *input)
70{ 75{
71 struct input_polled_dev *dev = input->private; 76 struct input_polled_dev *dev = input->private;
72 int error; 77 int error;
73 unsigned long ticks;
74 78
75 error = input_polldev_start_workqueue(); 79 error = input_polldev_start_workqueue();
76 if (error) 80 if (error)
@@ -79,10 +83,8 @@ static int input_open_polled_device(struct input_dev *input)
79 if (dev->flush) 83 if (dev->flush)
80 dev->flush(dev); 84 dev->flush(dev);
81 85
82 ticks = msecs_to_jiffies(dev->poll_interval); 86 queue_delayed_work(polldev_wq, &dev->work,
83 if (ticks >= HZ) 87 msecs_to_jiffies(dev->poll_interval));
84 ticks = round_jiffies(ticks);
85 queue_delayed_work(polldev_wq, &dev->work, ticks);
86 88
87 return 0; 89 return 0;
88} 90}
@@ -91,7 +93,7 @@ static void input_close_polled_device(struct input_dev *input)
91{ 93{
92 struct input_polled_dev *dev = input->private; 94 struct input_polled_dev *dev = input->private;
93 95
94 cancel_rearming_delayed_workqueue(polldev_wq, &dev->work); 96 cancel_delayed_work_sync(&dev->work);
95 input_polldev_stop_workqueue(); 97 input_polldev_stop_workqueue();
96} 98}
97 99
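The hunk above rounds long poll intervals so that timers from different polled devices can be batched; round_jiffies_relative() is used because the delay is relative to now rather than an absolute jiffies value. A hedged sketch of the same idiom in isolation, with illustrative names:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static void example_requeue(struct workqueue_struct *wq,
			    struct delayed_work *work, unsigned int poll_ms)
{
	unsigned long delay = msecs_to_jiffies(poll_ms);

	/* Round only slow (>= 1s) polls; fast polls keep their exact period. */
	if (delay >= HZ)
		delay = round_jiffies_relative(delay);

	queue_delayed_work(wq, work, delay);
}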
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a0be978501ff..f02c242c3114 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -493,7 +493,7 @@ static void input_disconnect_device(struct input_dev *dev)
493 if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) { 493 if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
494 for (code = 0; code <= KEY_MAX; code++) { 494 for (code = 0; code <= KEY_MAX; code++) {
495 if (is_event_supported(code, dev->keybit, KEY_MAX) && 495 if (is_event_supported(code, dev->keybit, KEY_MAX) &&
496 test_bit(code, dev->key)) { 496 __test_and_clear_bit(code, dev->key)) {
497 input_pass_event(dev, EV_KEY, code, 0); 497 input_pass_event(dev, EV_KEY, code, 0);
498 } 498 }
499 } 499 }
@@ -526,7 +526,7 @@ static int input_default_getkeycode(struct input_dev *dev,
526 if (!dev->keycodesize) 526 if (!dev->keycodesize)
527 return -EINVAL; 527 return -EINVAL;
528 528
529 if (scancode < 0 || scancode >= dev->keycodemax) 529 if (scancode >= dev->keycodemax)
530 return -EINVAL; 530 return -EINVAL;
531 531
532 *keycode = input_fetch_keycode(dev, scancode); 532 *keycode = input_fetch_keycode(dev, scancode);
@@ -540,10 +540,7 @@ static int input_default_setkeycode(struct input_dev *dev,
540 int old_keycode; 540 int old_keycode;
541 int i; 541 int i;
542 542
543 if (scancode < 0 || scancode >= dev->keycodemax) 543 if (scancode >= dev->keycodemax)
544 return -EINVAL;
545
546 if (keycode < 0 || keycode > KEY_MAX)
547 return -EINVAL; 544 return -EINVAL;
548 545
549 if (!dev->keycodesize) 546 if (!dev->keycodesize)
@@ -586,6 +583,75 @@ static int input_default_setkeycode(struct input_dev *dev,
586 return 0; 583 return 0;
587} 584}
588 585
586/**
587 * input_get_keycode - retrieve keycode currently mapped to a given scancode
588 * @dev: input device which keymap is being queried
589 * @scancode: scancode (or its equivalent for device in question) for which
590 * keycode is needed
591 * @keycode: result
592 *
593 * This function should be called by anyone interested in retrieving current
594 * keymap. Presently keyboard and evdev handlers use it.
595 */
596int input_get_keycode(struct input_dev *dev, int scancode, int *keycode)
597{
598 if (scancode < 0)
599 return -EINVAL;
600
601 return dev->getkeycode(dev, scancode, keycode);
602}
603EXPORT_SYMBOL(input_get_keycode);
604
605/**
 606 * input_set_keycode - assign new keycode to a given scancode
607 * @dev: input device which keymap is being updated
608 * @scancode: scancode (or its equivalent for device in question)
609 * @keycode: new keycode to be assigned to the scancode
610 *
611 * This function should be called by anyone needing to update current
612 * keymap. Presently keyboard and evdev handlers use it.
613 */
614int input_set_keycode(struct input_dev *dev, int scancode, int keycode)
615{
616 unsigned long flags;
617 int old_keycode;
618 int retval;
619
620 if (scancode < 0)
621 return -EINVAL;
622
623 if (keycode < 0 || keycode > KEY_MAX)
624 return -EINVAL;
625
626 spin_lock_irqsave(&dev->event_lock, flags);
627
628 retval = dev->getkeycode(dev, scancode, &old_keycode);
629 if (retval)
630 goto out;
631
632 retval = dev->setkeycode(dev, scancode, keycode);
633 if (retval)
634 goto out;
635
636 /*
637 * Simulate keyup event if keycode is not present
638 * in the keymap anymore
639 */
640 if (test_bit(EV_KEY, dev->evbit) &&
641 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
642 __test_and_clear_bit(old_keycode, dev->key)) {
643
644 input_pass_event(dev, EV_KEY, old_keycode, 0);
645 if (dev->sync)
646 input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
647 }
648
649 out:
650 spin_unlock_irqrestore(&dev->event_lock, flags);
651
652 return retval;
653}
654EXPORT_SYMBOL(input_set_keycode);
589 655
590#define MATCH_BIT(bit, max) \ 656#define MATCH_BIT(bit, max) \
591 for (i = 0; i < BITS_TO_LONGS(max); i++) \ 657 for (i = 0; i < BITS_TO_LONGS(max); i++) \
@@ -755,7 +821,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v)
755 return 0; 821 return 0;
756} 822}
757 823
758static struct seq_operations input_devices_seq_ops = { 824static const struct seq_operations input_devices_seq_ops = {
759 .start = input_devices_seq_start, 825 .start = input_devices_seq_start,
760 .next = input_devices_seq_next, 826 .next = input_devices_seq_next,
761 .stop = input_devices_seq_stop, 827 .stop = input_devices_seq_stop,
@@ -808,7 +874,7 @@ static int input_handlers_seq_show(struct seq_file *seq, void *v)
808 874
809 return 0; 875 return 0;
810} 876}
811static struct seq_operations input_handlers_seq_ops = { 877static const struct seq_operations input_handlers_seq_ops = {
812 .start = input_handlers_seq_start, 878 .start = input_handlers_seq_start,
813 .next = input_handlers_seq_next, 879 .next = input_handlers_seq_next,
814 .stop = input_handlers_seq_stop, 880 .stop = input_handlers_seq_stop,
@@ -1329,9 +1395,6 @@ int input_register_device(struct input_dev *dev)
1329 snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id), 1395 snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id),
1330 "input%ld", (unsigned long) atomic_inc_return(&input_no) - 1); 1396 "input%ld", (unsigned long) atomic_inc_return(&input_no) - 1);
1331 1397
1332 if (dev->cdev.dev)
1333 dev->dev.parent = dev->cdev.dev;
1334
1335 error = device_add(&dev->dev); 1398 error = device_add(&dev->dev);
1336 if (error) 1399 if (error)
1337 return error; 1400 return error;
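In-kernel callers are expected to go through the two helpers exported above rather than poking dev->getkeycode/dev->setkeycode directly. A hedged in-kernel sketch; the function name is illustrative:

#include <linux/input.h>
#include <linux/kernel.h>

static int example_remap_scancode(struct input_dev *dev, int scancode, int new_keycode)
{
	int old_keycode;
	int error;

	error = input_get_keycode(dev, scancode, &old_keycode);
	if (error)
		return error;

	printk(KERN_INFO "remap: scancode %d: %d -> %d\n",
	       scancode, old_keycode, new_keycode);

	/* Takes dev->event_lock and emits a simulated key-up if the old
	 * keycode disappears from the keymap. */
	return input_set_keycode(dev, scancode, new_keycode);
}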
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 5cf9f3610e67..deb9f825f92c 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -32,7 +32,6 @@
32#include <linux/errno.h> 32#include <linux/errno.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/init.h> 35#include <linux/init.h>
37#include <linux/input.h> 36#include <linux/input.h>
38#include <linux/interrupt.h> 37#include <linux/interrupt.h>
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 15739880afc6..f32e031dcb27 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -31,7 +31,6 @@
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/moduleparam.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <linux/bitops.h> 35#include <linux/bitops.h>
37#include <linux/init.h> 36#include <linux/init.h>
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index a6ca9d5e252f..960e501c60c8 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -33,7 +33,6 @@
33 33
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/delay.h> 36#include <linux/delay.h>
38#include <linux/init.h> 37#include <linux/init.h>
39#include <linux/parport.h> 38#include <linux/parport.h>
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index df2a9d02ca6c..07a32aff5a31 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -33,7 +33,6 @@
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/init.h> 36#include <linux/init.h>
38#include <linux/parport.h> 37#include <linux/parport.h>
39#include <linux/input.h> 38#include <linux/input.h>
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index 6f826b37d9aa..a2517fa72eb8 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -85,7 +85,7 @@ static struct iforce_device iforce_device[] = {
85 85
86static int iforce_playback(struct input_dev *dev, int effect_id, int value) 86static int iforce_playback(struct input_dev *dev, int effect_id, int value)
87{ 87{
88 struct iforce* iforce = dev->private; 88 struct iforce *iforce = input_get_drvdata(dev);
89 struct iforce_core_effect *core_effect = &iforce->core_effects[effect_id]; 89 struct iforce_core_effect *core_effect = &iforce->core_effects[effect_id];
90 90
91 if (value > 0) 91 if (value > 0)
@@ -99,7 +99,7 @@ static int iforce_playback(struct input_dev *dev, int effect_id, int value)
99 99
100static void iforce_set_gain(struct input_dev *dev, u16 gain) 100static void iforce_set_gain(struct input_dev *dev, u16 gain)
101{ 101{
102 struct iforce* iforce = dev->private; 102 struct iforce *iforce = input_get_drvdata(dev);
103 unsigned char data[3]; 103 unsigned char data[3];
104 104
105 data[0] = gain >> 9; 105 data[0] = gain >> 9;
@@ -108,7 +108,7 @@ static void iforce_set_gain(struct input_dev *dev, u16 gain)
108 108
109static void iforce_set_autocenter(struct input_dev *dev, u16 magnitude) 109static void iforce_set_autocenter(struct input_dev *dev, u16 magnitude)
110{ 110{
111 struct iforce* iforce = dev->private; 111 struct iforce *iforce = input_get_drvdata(dev);
112 unsigned char data[3]; 112 unsigned char data[3];
113 113
114 data[0] = 0x03; 114 data[0] = 0x03;
@@ -126,7 +126,7 @@ static void iforce_set_autocenter(struct input_dev *dev, u16 magnitude)
126 */ 126 */
127static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) 127static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
128{ 128{
129 struct iforce* iforce = dev->private; 129 struct iforce *iforce = input_get_drvdata(dev);
130 struct iforce_core_effect *core_effect = &iforce->core_effects[effect->id]; 130 struct iforce_core_effect *core_effect = &iforce->core_effects[effect->id];
131 int ret; 131 int ret;
132 132
@@ -173,7 +173,7 @@ static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect,
173 */ 173 */
174static int iforce_erase_effect(struct input_dev *dev, int effect_id) 174static int iforce_erase_effect(struct input_dev *dev, int effect_id)
175{ 175{
176 struct iforce *iforce = dev->private; 176 struct iforce *iforce = input_get_drvdata(dev);
177 struct iforce_core_effect *core_effect = &iforce->core_effects[effect_id]; 177 struct iforce_core_effect *core_effect = &iforce->core_effects[effect_id];
178 int err = 0; 178 int err = 0;
179 179
@@ -191,7 +191,7 @@ static int iforce_erase_effect(struct input_dev *dev, int effect_id)
191 191
192static int iforce_open(struct input_dev *dev) 192static int iforce_open(struct input_dev *dev)
193{ 193{
194 struct iforce *iforce = dev->private; 194 struct iforce *iforce = input_get_drvdata(dev);
195 195
196 switch (iforce->bus) { 196 switch (iforce->bus) {
197#ifdef CONFIG_JOYSTICK_IFORCE_USB 197#ifdef CONFIG_JOYSTICK_IFORCE_USB
@@ -213,7 +213,7 @@ static int iforce_open(struct input_dev *dev)
213 213
214static void iforce_release(struct input_dev *dev) 214static void iforce_release(struct input_dev *dev)
215{ 215{
216 struct iforce *iforce = dev->private; 216 struct iforce *iforce = input_get_drvdata(dev);
217 int i; 217 int i;
218 218
219 if (test_bit(EV_FF, dev->evbit)) { 219 if (test_bit(EV_FF, dev->evbit)) {
@@ -298,7 +298,8 @@ int iforce_init_device(struct iforce *iforce)
298#endif 298#endif
299 } 299 }
300 300
301 input_dev->private = iforce; 301 input_set_drvdata(input_dev, iforce);
302
302 input_dev->name = "Unknown I-Force device"; 303 input_dev->name = "Unknown I-Force device";
303 input_dev->open = iforce_open; 304 input_dev->open = iforce_open;
304 input_dev->close = iforce_release; 305 input_dev->close = iforce_release;
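All of the hunks above are the same mechanical conversion: private driver data now travels through the input core's accessors instead of dev->private. A minimal sketch of the pattern, with invented struct and function names:

#include <linux/errno.h>
#include <linux/input.h>

struct example_priv {
	int id;
};

static void example_bind(struct input_dev *dev, struct example_priv *priv)
{
	input_set_drvdata(dev, priv);			/* was: dev->private = priv */
}

static int example_open(struct input_dev *dev)
{
	struct example_priv *priv = input_get_drvdata(dev);	/* was: dev->private */

	return priv ? 0 : -ENODEV;
}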
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index bbebd4e2ad7f..989483f53160 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -35,7 +35,6 @@
35#include <linux/parport.h> 35#include <linux/parport.h>
36#include <linux/input.h> 36#include <linux/input.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/moduleparam.h>
39#include <linux/init.h> 38#include <linux/init.h>
40#include <linux/mutex.h> 39#include <linux/mutex.h>
41 40
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6e9d75bd2b15..0380597249bb 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -75,7 +75,6 @@
75#include <linux/slab.h> 75#include <linux/slab.h>
76#include <linux/stat.h> 76#include <linux/stat.h>
77#include <linux/module.h> 77#include <linux/module.h>
78#include <linux/moduleparam.h>
79#include <linux/usb/input.h> 78#include <linux/usb/input.h>
80 79
81#define DRIVER_VERSION "v0.0.6" 80#define DRIVER_VERSION "v0.0.6"
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 086d58c0ccbe..8ea709be3306 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -154,6 +154,27 @@ config KEYBOARD_SPITZ
154 To compile this driver as a module, choose M here: the 154 To compile this driver as a module, choose M here: the
155 module will be called spitzkbd. 155 module will be called spitzkbd.
156 156
157config KEYBOARD_TOSA
158 tristate "Tosa keyboard"
159 depends on MACH_TOSA
160 default y
161 help
162 Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa)
163
164 To compile this driver as a module, choose M here: the
165 module will be called tosakbd.
166
167config KEYBOARD_TOSA_USE_EXT_KEYCODES
168 bool "Tosa keyboard: use extended keycodes"
169 depends on KEYBOARD_TOSA
170 default n
171 help
172 Say Y here to enable the tosa keyboard driver to generate extended
 173	  (>= 127) keycodes. Be aware that they can't be correctly interpreted
 174	  by either the console keyboard driver or the Kdrive keybd driver.
 175
 176	  Say Y only if you know what you are doing!
177
157config KEYBOARD_AMIGA 178config KEYBOARD_AMIGA
158 tristate "Amiga keyboard" 179 tristate "Amiga keyboard"
159 depends on AMIGA 180 depends on AMIGA
@@ -239,13 +260,13 @@ config KEYBOARD_OMAP
239 module will be called omap-keypad. 260 module will be called omap-keypad.
240 261
241config KEYBOARD_PXA27x 262config KEYBOARD_PXA27x
242 tristate "PXA27x keyboard support" 263 tristate "PXA27x/PXA3xx keypad support"
243 depends on PXA27x 264 depends on PXA27x || PXA3xx
244 help 265 help
245 Enable support for PXA27x matrix keyboard controller 266 Enable support for PXA27x/PXA3xx keypad controller
246 267
247 To compile this driver as a module, choose M here: the 268 To compile this driver as a module, choose M here: the
248 module will be called pxa27x_keyboard. 269 module will be called pxa27x_keypad.
249 270
250config KEYBOARD_AAED2000 271config KEYBOARD_AAED2000
251 tristate "AAED-2000 keyboard" 272 tristate "AAED-2000 keyboard"
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index e97455fdcc83..e741f4031012 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -15,10 +15,11 @@ obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
15obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o 15obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
16obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o 16obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
17obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o 17obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
18obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
18obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o 19obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
19obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o 20obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
20obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o 21obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
21obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keyboard.o 22obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
22obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o 23obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
23obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o 24obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
24obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o 25obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index b39c5b31e620..4a95adc4cc78 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -19,7 +19,6 @@
19 19
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/moduleparam.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/interrupt.h> 23#include <linux/interrupt.h>
25#include <linux/init.h> 24#include <linux/init.h>
@@ -28,6 +27,7 @@
28#include <linux/workqueue.h> 27#include <linux/workqueue.h>
29#include <linux/libps2.h> 28#include <linux/libps2.h>
30#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/dmi.h>
31 31
32#define DRIVER_DESC "AT and PS/2 keyboard driver" 32#define DRIVER_DESC "AT and PS/2 keyboard driver"
33 33
@@ -201,6 +201,7 @@ struct atkbd {
201 201
202 unsigned short id; 202 unsigned short id;
203 unsigned char keycode[512]; 203 unsigned char keycode[512];
204 DECLARE_BITMAP(force_release_mask, 512);
204 unsigned char set; 205 unsigned char set;
205 unsigned char translated; 206 unsigned char translated;
206 unsigned char extra; 207 unsigned char extra;
@@ -225,6 +226,11 @@ struct atkbd {
225 unsigned long event_mask; 226 unsigned long event_mask;
226}; 227};
227 228
229/*
 230 * System-specific keymap fixup routine
231 */
232static void (*atkbd_platform_fixup)(struct atkbd *);
233
228static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, 234static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
229 ssize_t (*handler)(struct atkbd *, char *)); 235 ssize_t (*handler)(struct atkbd *, char *));
230static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, 236static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
@@ -349,7 +355,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
349 struct atkbd *atkbd = serio_get_drvdata(serio); 355 struct atkbd *atkbd = serio_get_drvdata(serio);
350 struct input_dev *dev = atkbd->dev; 356 struct input_dev *dev = atkbd->dev;
351 unsigned int code = data; 357 unsigned int code = data;
352 int scroll = 0, hscroll = 0, click = -1, add_release_event = 0; 358 int scroll = 0, hscroll = 0, click = -1;
353 int value; 359 int value;
354 unsigned char keycode; 360 unsigned char keycode;
355 361
@@ -414,14 +420,6 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
414 "Some program might be trying access hardware directly.\n", 420 "Some program might be trying access hardware directly.\n",
415 data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys); 421 data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
416 goto out; 422 goto out;
417 case ATKBD_RET_HANGEUL:
418 case ATKBD_RET_HANJA:
419 /*
420 * These keys do not report release and thus need to be
421 * flagged properly
422 */
423 add_release_event = 1;
424 break;
425 case ATKBD_RET_ERR: 423 case ATKBD_RET_ERR:
426 atkbd->err_count++; 424 atkbd->err_count++;
427#ifdef ATKBD_DEBUG 425#ifdef ATKBD_DEBUG
@@ -491,7 +489,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
491 input_event(dev, EV_KEY, keycode, value); 489 input_event(dev, EV_KEY, keycode, value);
492 input_sync(dev); 490 input_sync(dev);
493 491
494 if (value && add_release_event) { 492 if (value && test_bit(code, atkbd->force_release_mask)) {
495 input_report_key(dev, keycode, 0); 493 input_report_key(dev, keycode, 0);
496 input_sync(dev); 494 input_sync(dev);
497 } 495 }
@@ -824,7 +822,6 @@ static void atkbd_disconnect(struct serio *serio)
824 atkbd_disable(atkbd); 822 atkbd_disable(atkbd);
825 823
826 /* make sure we don't have a command in flight */ 824 /* make sure we don't have a command in flight */
827 synchronize_sched(); /* Allow atkbd_interrupt()s to complete. */
828 flush_scheduled_work(); 825 flush_scheduled_work();
829 826
830 sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group); 827 sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
@@ -834,6 +831,22 @@ static void atkbd_disconnect(struct serio *serio)
834 kfree(atkbd); 831 kfree(atkbd);
835} 832}
836 833
834/*
835 * Most special keys (Fn+F?) on Dell Latitudes do not generate release
836 * events so we have to do it ourselves.
837 */
838static void atkbd_latitude_keymap_fixup(struct atkbd *atkbd)
839{
840 const unsigned int forced_release_keys[] = {
841 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93,
842 };
843 int i;
844
845 if (atkbd->set == 2)
846 for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
847 __set_bit(forced_release_keys[i],
848 atkbd->force_release_mask);
849}
837 850
838/* 851/*
839 * atkbd_set_keycode_table() initializes keyboard's keycode table 852 * atkbd_set_keycode_table() initializes keyboard's keycode table
@@ -842,17 +855,20 @@ static void atkbd_disconnect(struct serio *serio)
842 855
843static void atkbd_set_keycode_table(struct atkbd *atkbd) 856static void atkbd_set_keycode_table(struct atkbd *atkbd)
844{ 857{
858 unsigned int scancode;
845 int i, j; 859 int i, j;
846 860
847 memset(atkbd->keycode, 0, sizeof(atkbd->keycode)); 861 memset(atkbd->keycode, 0, sizeof(atkbd->keycode));
862 bitmap_zero(atkbd->force_release_mask, 512);
848 863
849 if (atkbd->translated) { 864 if (atkbd->translated) {
850 for (i = 0; i < 128; i++) { 865 for (i = 0; i < 128; i++) {
851 atkbd->keycode[i] = atkbd_set2_keycode[atkbd_unxlate_table[i]]; 866 scancode = atkbd_unxlate_table[i];
852 atkbd->keycode[i | 0x80] = atkbd_set2_keycode[atkbd_unxlate_table[i] | 0x80]; 867 atkbd->keycode[i] = atkbd_set2_keycode[scancode];
868 atkbd->keycode[i | 0x80] = atkbd_set2_keycode[scancode | 0x80];
853 if (atkbd->scroll) 869 if (atkbd->scroll)
854 for (j = 0; j < ARRAY_SIZE(atkbd_scroll_keys); j++) 870 for (j = 0; j < ARRAY_SIZE(atkbd_scroll_keys); j++)
855 if ((atkbd_unxlate_table[i] | 0x80) == atkbd_scroll_keys[j].set2) 871 if ((scancode | 0x80) == atkbd_scroll_keys[j].set2)
856 atkbd->keycode[i | 0x80] = atkbd_scroll_keys[j].keycode; 872 atkbd->keycode[i | 0x80] = atkbd_scroll_keys[j].keycode;
857 } 873 }
858 } else if (atkbd->set == 3) { 874 } else if (atkbd->set == 3) {
@@ -861,12 +877,29 @@ static void atkbd_set_keycode_table(struct atkbd *atkbd)
861 memcpy(atkbd->keycode, atkbd_set2_keycode, sizeof(atkbd->keycode)); 877 memcpy(atkbd->keycode, atkbd_set2_keycode, sizeof(atkbd->keycode));
862 878
863 if (atkbd->scroll) 879 if (atkbd->scroll)
864 for (i = 0; i < ARRAY_SIZE(atkbd_scroll_keys); i++) 880 for (i = 0; i < ARRAY_SIZE(atkbd_scroll_keys); i++) {
865 atkbd->keycode[atkbd_scroll_keys[i].set2] = atkbd_scroll_keys[i].keycode; 881 scancode = atkbd_scroll_keys[i].set2;
882 atkbd->keycode[scancode] = atkbd_scroll_keys[i].keycode;
883 }
866 } 884 }
867 885
868 atkbd->keycode[atkbd_compat_scancode(atkbd, ATKBD_RET_HANGEUL)] = KEY_HANGUEL; 886/*
869 atkbd->keycode[atkbd_compat_scancode(atkbd, ATKBD_RET_HANJA)] = KEY_HANJA; 887 * HANGEUL and HANJA keys do not send release events so we need to
888 * generate such events ourselves
889 */
890 scancode = atkbd_compat_scancode(atkbd, ATKBD_RET_HANGEUL);
891 atkbd->keycode[scancode] = KEY_HANGEUL;
892 __set_bit(scancode, atkbd->force_release_mask);
893
894 scancode = atkbd_compat_scancode(atkbd, ATKBD_RET_HANJA);
895 atkbd->keycode[scancode] = KEY_HANJA;
896 __set_bit(scancode, atkbd->force_release_mask);
897
898/*
899 * Perform additional fixups
900 */
901 if (atkbd_platform_fixup)
902 atkbd_platform_fixup(atkbd);
870} 903}
871 904
872/* 905/*
@@ -1401,9 +1434,29 @@ static ssize_t atkbd_show_err_count(struct atkbd *atkbd, char *buf)
1401 return sprintf(buf, "%lu\n", atkbd->err_count); 1434 return sprintf(buf, "%lu\n", atkbd->err_count);
1402} 1435}
1403 1436
1437static int __init atkbd_setup_fixup(const struct dmi_system_id *id)
1438{
1439 atkbd_platform_fixup = id->driver_data;
1440 return 0;
1441}
1442
1443static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1444 {
1445 .ident = "Dell Latitude series",
1446 .matches = {
1447 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1448 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude"),
1449 },
1450 .callback = atkbd_setup_fixup,
1451 .driver_data = atkbd_latitude_keymap_fixup,
1452 },
1453 { }
1454};
1404 1455
1405static int __init atkbd_init(void) 1456static int __init atkbd_init(void)
1406{ 1457{
1458 dmi_check_system(atkbd_dmi_quirk_table);
1459
1407 return serio_register_driver(&atkbd_drv); 1460 return serio_register_driver(&atkbd_drv);
1408} 1461}
1409 1462
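The DMI quirk table above is meant to grow by one entry per affected machine; each entry only has to point .driver_data at a fixup that sets bits in force_release_mask. A hedged sketch of what an additional quirk would look like; the vendor strings and scancodes below are invented for illustration:

static void atkbd_example_keymap_fixup(struct atkbd *atkbd)
{
	const unsigned int forced_release_keys[] = { 0x94, 0x95 };	/* invented scancodes */
	int i;

	if (atkbd->set == 2)
		for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
			__set_bit(forced_release_keys[i],
				  atkbd->force_release_mask);
}

/* ...and one more atkbd_dmi_quirk_table[] entry:
 *	{
 *		.ident		= "Example laptop",
 *		.matches	= {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
 *		},
 *		.callback	= atkbd_setup_fixup,
 *		.driver_data	= atkbd_example_keymap_fixup,
 *	},
 */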
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 1b08f4e79dd2..32e2c2605d95 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -64,7 +64,6 @@
64#include <linux/delay.h> 64#include <linux/delay.h>
65#include <linux/slab.h> 65#include <linux/slab.h>
66#include <linux/module.h> 66#include <linux/module.h>
67#include <linux/moduleparam.h>
68#include <linux/interrupt.h> 67#include <linux/interrupt.h>
69#include <linux/init.h> 68#include <linux/init.h>
70#include <linux/input.h> 69#include <linux/input.h>
diff --git a/drivers/input/keyboard/pxa27x_keyboard.c b/drivers/input/keyboard/pxa27x_keyboard.c
deleted file mode 100644
index bdd64ee4c5c8..000000000000
--- a/drivers/input/keyboard/pxa27x_keyboard.c
+++ /dev/null
@@ -1,274 +0,0 @@
1/*
2 * linux/drivers/input/keyboard/pxa27x_keyboard.c
3 *
4 * Driver for the pxa27x matrix keyboard controller.
5 *
6 * Created: Feb 22, 2007
7 * Author: Rodolfo Giometti <giometti@linux.it>
8 *
9 * Based on a previous implementations by Kevin O'Connor
10 * <kevin_at_koconnor.net> and Alex Osborne <bobofdoom@gmail.com> and
11 * on some suggestions by Nicolas Pitre <nico@cam.org>.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/input.h>
24#include <linux/device.h>
25#include <linux/platform_device.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28
29#include <asm/mach-types.h>
30#include <asm/mach/arch.h>
31#include <asm/mach/map.h>
32
33#include <asm/arch/hardware.h>
34#include <asm/arch/pxa-regs.h>
35#include <asm/arch/irqs.h>
36#include <asm/arch/pxa27x_keyboard.h>
37
38#define DRIVER_NAME "pxa27x-keyboard"
39
40#define KPASMKP(col) (col/2 == 0 ? KPASMKP0 : \
41 col/2 == 1 ? KPASMKP1 : \
42 col/2 == 2 ? KPASMKP2 : KPASMKP3)
43#define KPASMKPx_MKC(row, col) (1 << (row + 16 * (col % 2)))
44
45static struct clk *pxakbd_clk;
46
47static irqreturn_t pxakbd_irq_handler(int irq, void *dev_id)
48{
49 struct platform_device *pdev = dev_id;
50 struct pxa27x_keyboard_platform_data *pdata = pdev->dev.platform_data;
51 struct input_dev *input_dev = platform_get_drvdata(pdev);
52 unsigned long kpc = KPC;
53 int p, row, col, rel;
54
55 if (kpc & KPC_DI) {
56 unsigned long kpdk = KPDK;
57
58 if (!(kpdk & KPDK_DKP)) {
59 /* better luck next time */
60 } else if (kpc & KPC_REE0) {
61 unsigned long kprec = KPREC;
62 KPREC = 0x7f;
63
64 if (kprec & KPREC_OF0)
65 rel = (kprec & 0xff) + 0x7f;
66 else if (kprec & KPREC_UF0)
67 rel = (kprec & 0xff) - 0x7f - 0xff;
68 else
69 rel = (kprec & 0xff) - 0x7f;
70
71 if (rel) {
72 input_report_rel(input_dev, REL_WHEEL, rel);
73 input_sync(input_dev);
74 }
75 }
76 }
77
78 if (kpc & KPC_MI) {
79 /* report the status of every button */
80 for (row = 0; row < pdata->nr_rows; row++) {
81 for (col = 0; col < pdata->nr_cols; col++) {
82 p = KPASMKP(col) & KPASMKPx_MKC(row, col) ?
83 1 : 0;
84 pr_debug("keycode %x - pressed %x\n",
85 pdata->keycodes[row][col], p);
86 input_report_key(input_dev,
87 pdata->keycodes[row][col], p);
88 }
89 }
90 input_sync(input_dev);
91 }
92
93 return IRQ_HANDLED;
94}
95
96static int pxakbd_open(struct input_dev *dev)
97{
98 /* Set keypad control register */
99 KPC |= (KPC_ASACT |
100 KPC_MS_ALL |
101 (2 << 6) | KPC_REE0 | KPC_DK_DEB_SEL |
102 KPC_ME | KPC_MIE | KPC_DE | KPC_DIE);
103
104 KPC &= ~KPC_AS; /* disable automatic scan */
105 KPC &= ~KPC_IMKP; /* do not ignore multiple keypresses */
106
107 /* Set rotary count to mid-point value */
108 KPREC = 0x7F;
109
110 /* Enable unit clock */
111 clk_enable(pxakbd_clk);
112
113 return 0;
114}
115
116static void pxakbd_close(struct input_dev *dev)
117{
118 /* Disable clock unit */
119 clk_disable(pxakbd_clk);
120}
121
122#ifdef CONFIG_PM
123static int pxakbd_suspend(struct platform_device *pdev, pm_message_t state)
124{
125 struct pxa27x_keyboard_platform_data *pdata = pdev->dev.platform_data;
126
127 /* Save controller status */
128 pdata->reg_kpc = KPC;
129 pdata->reg_kprec = KPREC;
130
131 return 0;
132}
133
134static int pxakbd_resume(struct platform_device *pdev)
135{
136 struct pxa27x_keyboard_platform_data *pdata = pdev->dev.platform_data;
137 struct input_dev *input_dev = platform_get_drvdata(pdev);
138
139 mutex_lock(&input_dev->mutex);
140
141 if (input_dev->users) {
142 /* Restore controller status */
143 KPC = pdata->reg_kpc;
144 KPREC = pdata->reg_kprec;
145
146 /* Enable unit clock */
147 clk_disable(pxakbd_clk);
148 clk_enable(pxakbd_clk);
149 }
150
151 mutex_unlock(&input_dev->mutex);
152
153 return 0;
154}
155#else
156#define pxakbd_suspend NULL
157#define pxakbd_resume NULL
158#endif
159
160static int __devinit pxakbd_probe(struct platform_device *pdev)
161{
162 struct pxa27x_keyboard_platform_data *pdata = pdev->dev.platform_data;
163 struct input_dev *input_dev;
164 int i, row, col, error;
165
166 pxakbd_clk = clk_get(&pdev->dev, "KBDCLK");
167 if (IS_ERR(pxakbd_clk)) {
168 error = PTR_ERR(pxakbd_clk);
169 goto err_clk;
170 }
171
172 /* Create and register the input driver. */
173 input_dev = input_allocate_device();
174 if (!input_dev) {
175 printk(KERN_ERR "Cannot request keypad device\n");
176 error = -ENOMEM;
177 goto err_alloc;
178 }
179
180 input_dev->name = DRIVER_NAME;
181 input_dev->id.bustype = BUS_HOST;
182 input_dev->open = pxakbd_open;
183 input_dev->close = pxakbd_close;
184 input_dev->dev.parent = &pdev->dev;
185
186 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
187 BIT_MASK(EV_REL);
188 input_dev->relbit[BIT_WORD(REL_WHEEL)] = BIT_MASK(REL_WHEEL);
189 for (row = 0; row < pdata->nr_rows; row++) {
190 for (col = 0; col < pdata->nr_cols; col++) {
191 int code = pdata->keycodes[row][col];
192 if (code > 0)
193 set_bit(code, input_dev->keybit);
194 }
195 }
196
197 error = request_irq(IRQ_KEYPAD, pxakbd_irq_handler, IRQF_DISABLED,
198 DRIVER_NAME, pdev);
199 if (error) {
200 printk(KERN_ERR "Cannot request keypad IRQ\n");
201 goto err_free_dev;
202 }
203
204 platform_set_drvdata(pdev, input_dev);
205
206 /* Register the input device */
207 error = input_register_device(input_dev);
208 if (error)
209 goto err_free_irq;
210
211 /* Setup GPIOs. */
212 for (i = 0; i < pdata->nr_rows + pdata->nr_cols; i++)
213 pxa_gpio_mode(pdata->gpio_modes[i]);
214
215 /*
216 * Store rows/cols info into keyboard registers.
217 */
218
219 KPC |= (pdata->nr_rows - 1) << 26;
220 KPC |= (pdata->nr_cols - 1) << 23;
221
222 for (col = 0; col < pdata->nr_cols; col++)
223 KPC |= KPC_MS0 << col;
224
225 return 0;
226
227 err_free_irq:
228 platform_set_drvdata(pdev, NULL);
229 free_irq(IRQ_KEYPAD, pdev);
230 err_free_dev:
231 input_free_device(input_dev);
232 err_alloc:
233 clk_put(pxakbd_clk);
234 err_clk:
235 return error;
236}
237
238static int __devexit pxakbd_remove(struct platform_device *pdev)
239{
240 struct input_dev *input_dev = platform_get_drvdata(pdev);
241
242 input_unregister_device(input_dev);
243 free_irq(IRQ_KEYPAD, pdev);
244 clk_put(pxakbd_clk);
245 platform_set_drvdata(pdev, NULL);
246
247 return 0;
248}
249
250static struct platform_driver pxakbd_driver = {
251 .probe = pxakbd_probe,
252 .remove = __devexit_p(pxakbd_remove),
253 .suspend = pxakbd_suspend,
254 .resume = pxakbd_resume,
255 .driver = {
256 .name = DRIVER_NAME,
257 },
258};
259
260static int __init pxakbd_init(void)
261{
262 return platform_driver_register(&pxakbd_driver);
263}
264
265static void __exit pxakbd_exit(void)
266{
267 platform_driver_unregister(&pxakbd_driver);
268}
269
270module_init(pxakbd_init);
271module_exit(pxakbd_exit);
272
273MODULE_DESCRIPTION("PXA27x Matrix Keyboard Driver");
274MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
new file mode 100644
index 000000000000..6224c2fb3b65
--- /dev/null
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -0,0 +1,572 @@
1/*
2 * linux/drivers/input/keyboard/pxa27x_keypad.c
3 *
4 * Driver for the pxa27x matrix keyboard controller.
5 *
6 * Created: Feb 22, 2007
7 * Author: Rodolfo Giometti <giometti@linux.it>
8 *
9 * Based on a previous implementations by Kevin O'Connor
10 * <kevin_at_koconnor.net> and Alex Osborne <bobofdoom@gmail.com> and
11 * on some suggestions by Nicolas Pitre <nico@cam.org>.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/input.h>
24#include <linux/device.h>
25#include <linux/platform_device.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28
29#include <asm/mach-types.h>
30#include <asm/mach/arch.h>
31#include <asm/mach/map.h>
32
33#include <asm/arch/hardware.h>
34#include <asm/arch/pxa27x_keypad.h>
35
36/*
37 * Keypad Controller registers
38 */
39#define KPC 0x0000 /* Keypad Control register */
40#define KPDK 0x0008 /* Keypad Direct Key register */
41#define KPREC 0x0010 /* Keypad Rotary Encoder register */
42#define KPMK 0x0018 /* Keypad Matrix Key register */
43#define KPAS 0x0020 /* Keypad Automatic Scan register */
44
45/* Keypad Automatic Scan Multiple Key Presser register 0-3 */
46#define KPASMKP0 0x0028
47#define KPASMKP1 0x0030
48#define KPASMKP2 0x0038
49#define KPASMKP3 0x0040
50#define KPKDI 0x0048
51
52/* bit definitions */
53#define KPC_MKRN(n) ((((n) & 0x7) - 1) << 26) /* matrix key row number */
54#define KPC_MKCN(n) ((((n) & 0x7) - 1) << 23) /* matrix key column number */
55#define KPC_DKN(n) ((((n) & 0x7) - 1) << 6) /* direct key number */
56
57#define KPC_AS (0x1 << 30) /* Automatic Scan bit */
58#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */
59#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */
60#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */
61
62#define KPC_MS(n) (0x1 << (13 + (n))) /* Matrix scan line 'n' */
63#define KPC_MS_ALL (0xff << 13)
64
65#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */
66#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */
67#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */
68#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */
69#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */
70#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */
71#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */
72#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */
73#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */
74
75#define KPDK_DKP (0x1 << 31)
76#define KPDK_DK(n) ((n) & 0xff)
77
78#define KPREC_OF1 (0x1 << 31)
79#define KPREC_UF1 (0x1 << 30)
80#define KPREC_OF0 (0x1 << 15)
81#define KPREC_UF0 (0x1 << 14)
82
83#define KPREC_RECOUNT0(n) ((n) & 0xff)
84#define KPREC_RECOUNT1(n) (((n) >> 16) & 0xff)
85
86#define KPMK_MKP (0x1 << 31)
87#define KPAS_SO (0x1 << 31)
88#define KPASMKPx_SO (0x1 << 31)
89
90#define KPAS_MUKP(n) (((n) >> 26) & 0x1f)
91#define KPAS_RP(n) (((n) >> 4) & 0xf)
92#define KPAS_CP(n) ((n) & 0xf)
93
94#define KPASMKP_MKC_MASK (0xff)
95
96#define keypad_readl(off) __raw_readl(keypad->mmio_base + (off))
97#define keypad_writel(off, v) __raw_writel((v), keypad->mmio_base + (off))
98
99#define MAX_MATRIX_KEY_NUM (8 * 8)
100
101struct pxa27x_keypad {
102 struct pxa27x_keypad_platform_data *pdata;
103
104 struct clk *clk;
105 struct input_dev *input_dev;
106 void __iomem *mmio_base;
107
108 /* matrix key code map */
109 unsigned int matrix_keycodes[MAX_MATRIX_KEY_NUM];
110
111 /* state row bits of each column scan */
112 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
113 uint32_t direct_key_state;
114
115 unsigned int direct_key_mask;
116
117 int rotary_rel_code[2];
118 int rotary_up_key[2];
119 int rotary_down_key[2];
120};
121
122static void pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
123{
124 struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
125 struct input_dev *input_dev = keypad->input_dev;
126 unsigned int *key;
127 int i;
128
129 key = &pdata->matrix_key_map[0];
130 for (i = 0; i < pdata->matrix_key_map_size; i++, key++) {
131 int row = ((*key) >> 28) & 0xf;
132 int col = ((*key) >> 24) & 0xf;
133 int code = (*key) & 0xffffff;
134
135 keypad->matrix_keycodes[(row << 3) + col] = code;
136 set_bit(code, input_dev->keybit);
137 }
138
139 keypad->rotary_up_key[0] = pdata->rotary0_up_key;
140 keypad->rotary_up_key[1] = pdata->rotary1_up_key;
141 keypad->rotary_down_key[0] = pdata->rotary0_down_key;
142 keypad->rotary_down_key[1] = pdata->rotary1_down_key;
143 keypad->rotary_rel_code[0] = pdata->rotary0_rel_code;
144 keypad->rotary_rel_code[1] = pdata->rotary1_rel_code;
145
146 if (pdata->rotary0_up_key && pdata->rotary0_down_key) {
147 set_bit(pdata->rotary0_up_key, input_dev->keybit);
148 set_bit(pdata->rotary0_down_key, input_dev->keybit);
149 } else
150 set_bit(pdata->rotary0_rel_code, input_dev->relbit);
151
152 if (pdata->rotary1_up_key && pdata->rotary1_down_key) {
153 set_bit(pdata->rotary1_up_key, input_dev->keybit);
154 set_bit(pdata->rotary1_down_key, input_dev->keybit);
155 } else
156 set_bit(pdata->rotary1_rel_code, input_dev->relbit);
157}
158
159static inline unsigned int lookup_matrix_keycode(
160 struct pxa27x_keypad *keypad, int row, int col)
161{
162 return keypad->matrix_keycodes[(row << 3) + col];
163}
164
165static void pxa27x_keypad_scan_matrix(struct pxa27x_keypad *keypad)
166{
167 struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
168 int row, col, num_keys_pressed = 0;
169 uint32_t new_state[MAX_MATRIX_KEY_COLS];
170 uint32_t kpas = keypad_readl(KPAS);
171
172 num_keys_pressed = KPAS_MUKP(kpas);
173
174 memset(new_state, 0, sizeof(new_state));
175
176 if (num_keys_pressed == 0)
177 goto scan;
178
179 if (num_keys_pressed == 1) {
180 col = KPAS_CP(kpas);
181 row = KPAS_RP(kpas);
182
183 /* if invalid row/col, treat as no key pressed */
184 if (col >= pdata->matrix_key_cols ||
185 row >= pdata->matrix_key_rows)
186 goto scan;
187
188 new_state[col] = (1 << row);
189 goto scan;
190 }
191
192 if (num_keys_pressed > 1) {
193 uint32_t kpasmkp0 = keypad_readl(KPASMKP0);
194 uint32_t kpasmkp1 = keypad_readl(KPASMKP1);
195 uint32_t kpasmkp2 = keypad_readl(KPASMKP2);
196 uint32_t kpasmkp3 = keypad_readl(KPASMKP3);
197
198 new_state[0] = kpasmkp0 & KPASMKP_MKC_MASK;
199 new_state[1] = (kpasmkp0 >> 16) & KPASMKP_MKC_MASK;
200 new_state[2] = kpasmkp1 & KPASMKP_MKC_MASK;
201 new_state[3] = (kpasmkp1 >> 16) & KPASMKP_MKC_MASK;
202 new_state[4] = kpasmkp2 & KPASMKP_MKC_MASK;
203 new_state[5] = (kpasmkp2 >> 16) & KPASMKP_MKC_MASK;
204 new_state[6] = kpasmkp3 & KPASMKP_MKC_MASK;
205 new_state[7] = (kpasmkp3 >> 16) & KPASMKP_MKC_MASK;
206 }
207scan:
208 for (col = 0; col < pdata->matrix_key_cols; col++) {
209 uint32_t bits_changed;
210
211 bits_changed = keypad->matrix_key_state[col] ^ new_state[col];
212 if (bits_changed == 0)
213 continue;
214
215 for (row = 0; row < pdata->matrix_key_rows; row++) {
216 if ((bits_changed & (1 << row)) == 0)
217 continue;
218
219 input_report_key(keypad->input_dev,
220 lookup_matrix_keycode(keypad, row, col),
221 new_state[col] & (1 << row));
222 }
223 }
224 input_sync(keypad->input_dev);
225 memcpy(keypad->matrix_key_state, new_state, sizeof(new_state));
226}
227
228#define DEFAULT_KPREC (0x007f007f)
229
230static inline int rotary_delta(uint32_t kprec)
231{
232 if (kprec & KPREC_OF0)
233 return (kprec & 0xff) + 0x7f;
234 else if (kprec & KPREC_UF0)
235 return (kprec & 0xff) - 0x7f - 0xff;
236 else
237 return (kprec & 0xff) - 0x7f;
238}
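/*
 * Worked example (editorial note, not part of the original patch): KPREC is
 * preset to DEFAULT_KPREC (0x007f007f), so each rotary count field starts
 * at 0x7f.  If the encoder advances three detents before the next read,
 * the field holds 0x82 and, with neither OF nor UF set, rotary_delta()
 * returns 0x82 - 0x7f = +3; a reading of 0x7c likewise gives -3.  The
 * overflow/underflow branches only extend the range when the 8-bit counter
 * wraps between reads.
 */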
239
240static void report_rotary_event(struct pxa27x_keypad *keypad, int r, int delta)
241{
242 struct input_dev *dev = keypad->input_dev;
243
244 if (delta == 0)
245 return;
246
247 if (keypad->rotary_up_key[r] && keypad->rotary_down_key[r]) {
248 int keycode = (delta > 0) ? keypad->rotary_up_key[r] :
249 keypad->rotary_down_key[r];
250
251 /* simulate a press-n-release */
252 input_report_key(dev, keycode, 1);
253 input_sync(dev);
254 input_report_key(dev, keycode, 0);
255 input_sync(dev);
256 } else {
257 input_report_rel(dev, keypad->rotary_rel_code[r], delta);
258 input_sync(dev);
259 }
260}
261
262static void pxa27x_keypad_scan_rotary(struct pxa27x_keypad *keypad)
263{
264 struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
265 uint32_t kprec;
266
267 /* read and reset to default count value */
268 kprec = keypad_readl(KPREC);
269 keypad_writel(KPREC, DEFAULT_KPREC);
270
271 if (pdata->enable_rotary0)
272 report_rotary_event(keypad, 0, rotary_delta(kprec));
273
274 if (pdata->enable_rotary1)
275 report_rotary_event(keypad, 1, rotary_delta(kprec >> 16));
276}
277
278static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
279{
280 struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
281 unsigned int new_state;
282 uint32_t kpdk, bits_changed;
283 int i;
284
285 kpdk = keypad_readl(KPDK);
286
287 if (pdata->enable_rotary0 || pdata->enable_rotary1)
288 pxa27x_keypad_scan_rotary(keypad);
289
290 if (pdata->direct_key_map == NULL)
291 return;
292
293 new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
294 bits_changed = keypad->direct_key_state ^ new_state;
295
296 if (bits_changed == 0)
297 return;
298
299 for (i = 0; i < pdata->direct_key_num; i++) {
300 if (bits_changed & (1 << i))
301 input_report_key(keypad->input_dev,
302 pdata->direct_key_map[i],
303 (new_state & (1 << i)));
304 }
305 input_sync(keypad->input_dev);
306 keypad->direct_key_state = new_state;
307}
308
309static irqreturn_t pxa27x_keypad_irq_handler(int irq, void *dev_id)
310{
311 struct pxa27x_keypad *keypad = dev_id;
312 unsigned long kpc = keypad_readl(KPC);
313
314 if (kpc & KPC_DI)
315 pxa27x_keypad_scan_direct(keypad);
316
317 if (kpc & KPC_MI)
318 pxa27x_keypad_scan_matrix(keypad);
319
320 return IRQ_HANDLED;
321}
322
323static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
324{
325 struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
326 unsigned int mask = 0, direct_key_num = 0;
327 unsigned long kpc = 0;
328
329 /* enable matrix keys with automatic scan */
330 if (pdata->matrix_key_rows && pdata->matrix_key_cols) {
331 kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
332 kpc |= KPC_MKRN(pdata->matrix_key_rows) |
333 KPC_MKCN(pdata->matrix_key_cols);
334 }
335
336 /* enable rotary key, debounce interval same as direct keys */
337 if (pdata->enable_rotary0) {
338 mask |= 0x03;
339 direct_key_num = 2;
340 kpc |= KPC_REE0;
341 }
342
343 if (pdata->enable_rotary1) {
344 mask |= 0x0c;
345 direct_key_num = 4;
346 kpc |= KPC_REE1;
347 }
348
349 if (pdata->direct_key_num > direct_key_num)
350 direct_key_num = pdata->direct_key_num;
351
352 keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
353
354 /* enable direct key */
355 if (direct_key_num)
356 kpc |= KPC_DE | KPC_DIE | KPC_DKN(direct_key_num);
357
358 keypad_writel(KPC, kpc | KPC_RE_ZERO_DEB);
359 keypad_writel(KPREC, DEFAULT_KPREC);
360 keypad_writel(KPKDI, pdata->debounce_interval);
361}
362
363static int pxa27x_keypad_open(struct input_dev *dev)
364{
365 struct pxa27x_keypad *keypad = input_get_drvdata(dev);
366
367 /* Enable unit clock */
368 clk_enable(keypad->clk);
369 pxa27x_keypad_config(keypad);
370
371 return 0;
372}
373
374static void pxa27x_keypad_close(struct input_dev *dev)
375{
376 struct pxa27x_keypad *keypad = input_get_drvdata(dev);
377
378 /* Disable clock unit */
379 clk_disable(keypad->clk);
380}
381
382#ifdef CONFIG_PM
383static int pxa27x_keypad_suspend(struct platform_device *pdev, pm_message_t state)
384{
385 struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
386
387 clk_disable(keypad->clk);
388 return 0;
389}
390
391static int pxa27x_keypad_resume(struct platform_device *pdev)
392{
393 struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
394 struct input_dev *input_dev = keypad->input_dev;
395
396 mutex_lock(&input_dev->mutex);
397
398 if (input_dev->users) {
399 /* Enable unit clock */
400 clk_enable(keypad->clk);
401 pxa27x_keypad_config(keypad);
402 }
403
404 mutex_unlock(&input_dev->mutex);
405
406 return 0;
407}
408#else
409#define pxa27x_keypad_suspend NULL
410#define pxa27x_keypad_resume NULL
411#endif
412
413#define res_size(res) ((res)->end - (res)->start + 1)
414
415static int __devinit pxa27x_keypad_probe(struct platform_device *pdev)
416{
417 struct pxa27x_keypad *keypad;
418 struct input_dev *input_dev;
419 struct resource *res;
420 int irq, error;
421
422 keypad = kzalloc(sizeof(struct pxa27x_keypad), GFP_KERNEL);
423 if (keypad == NULL) {
424 dev_err(&pdev->dev, "failed to allocate driver data\n");
425 return -ENOMEM;
426 }
427
428 keypad->pdata = pdev->dev.platform_data;
429 if (keypad->pdata == NULL) {
430 dev_err(&pdev->dev, "no platform data defined\n");
431 error = -EINVAL;
432 goto failed_free;
433 }
434
435 irq = platform_get_irq(pdev, 0);
436 if (irq < 0) {
437 dev_err(&pdev->dev, "failed to get keypad irq\n");
438 error = -ENXIO;
439 goto failed_free;
440 }
441
442 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
443 if (res == NULL) {
444 dev_err(&pdev->dev, "failed to get I/O memory\n");
445 error = -ENXIO;
446 goto failed_free;
447 }
448
449 res = request_mem_region(res->start, res_size(res), pdev->name);
450 if (res == NULL) {
451 dev_err(&pdev->dev, "failed to request I/O memory\n");
452 error = -EBUSY;
453 goto failed_free;
454 }
455
456 keypad->mmio_base = ioremap(res->start, res_size(res));
457 if (keypad->mmio_base == NULL) {
458 dev_err(&pdev->dev, "failed to remap I/O memory\n");
459 error = -ENXIO;
460 goto failed_free_mem;
461 }
462
463 keypad->clk = clk_get(&pdev->dev, "KBDCLK");
464 if (IS_ERR(keypad->clk)) {
465 dev_err(&pdev->dev, "failed to get keypad clock\n");
466 error = PTR_ERR(keypad->clk);
467 goto failed_free_io;
468 }
469
470 /* Create and register the input driver. */
471 input_dev = input_allocate_device();
472 if (!input_dev) {
473 dev_err(&pdev->dev, "failed to allocate input device\n");
474 error = -ENOMEM;
475 goto failed_put_clk;
476 }
477
478 input_dev->name = pdev->name;
479 input_dev->id.bustype = BUS_HOST;
480 input_dev->open = pxa27x_keypad_open;
481 input_dev->close = pxa27x_keypad_close;
482 input_dev->dev.parent = &pdev->dev;
483
484 keypad->input_dev = input_dev;
485 input_set_drvdata(input_dev, keypad);
486
487 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
488 BIT_MASK(EV_REL);
489
490 pxa27x_keypad_build_keycode(keypad);
491 platform_set_drvdata(pdev, keypad);
492
493 error = request_irq(irq, pxa27x_keypad_irq_handler, IRQF_DISABLED,
494 pdev->name, keypad);
495 if (error) {
496 dev_err(&pdev->dev, "failed to request IRQ\n");
497 goto failed_free_dev;
498 }
499
500 /* Register the input device */
501 error = input_register_device(input_dev);
502 if (error) {
503 dev_err(&pdev->dev, "failed to register input device\n");
504 goto failed_free_irq;
505 }
506
507 return 0;
508
509failed_free_irq:
510 free_irq(irq, keypad);
511 platform_set_drvdata(pdev, NULL);
512failed_free_dev:
513 input_free_device(input_dev);
514failed_put_clk:
515 clk_put(keypad->clk);
516failed_free_io:
517 iounmap(keypad->mmio_base);
518failed_free_mem:
519 release_mem_region(res->start, res_size(res));
520failed_free:
521 kfree(keypad);
522 return error;
523}
524
525static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
526{
527 struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
528 struct resource *res;
529
530 free_irq(platform_get_irq(pdev, 0), keypad);
531
532 clk_disable(keypad->clk);
533 clk_put(keypad->clk);
534
535 input_unregister_device(keypad->input_dev);
536 input_free_device(keypad->input_dev);
537
538 iounmap(keypad->mmio_base);
539
540 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
541 release_mem_region(res->start, res_size(res));
542
543 platform_set_drvdata(pdev, NULL);
544 kfree(keypad);
545 return 0;
546}
547
548static struct platform_driver pxa27x_keypad_driver = {
549 .probe = pxa27x_keypad_probe,
550 .remove = __devexit_p(pxa27x_keypad_remove),
551 .suspend = pxa27x_keypad_suspend,
552 .resume = pxa27x_keypad_resume,
553 .driver = {
554 .name = "pxa27x-keypad",
555 },
556};
557
558static int __init pxa27x_keypad_init(void)
559{
560 return platform_driver_register(&pxa27x_keypad_driver);
561}
562
563static void __exit pxa27x_keypad_exit(void)
564{
565 platform_driver_unregister(&pxa27x_keypad_driver);
566}
567
568module_init(pxa27x_keypad_init);
569module_exit(pxa27x_keypad_exit);
570
571MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
572MODULE_LICENSE("GPL");
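A minimal board-support sketch, inferred from pxa27x_keypad_build_keycode() above: each matrix_key_map entry packs the row into bits 28-31, the column into bits 24-27 and the key code into the low 24 bits. The MATRIX_KEY() helper and the values below are illustrative assumptions, not taken from <asm/arch/pxa27x_keypad.h> or from any real board file.

/* hypothetical board file fragment; only fields used by the driver above */
#define MATRIX_KEY(row, col, code) \
	((((row) & 0xf) << 28) | (((col) & 0xf) << 24) | ((code) & 0xffffff))

static unsigned int board_matrix_keys[] = {
	MATRIX_KEY(0, 0, KEY_A),
	MATRIX_KEY(0, 1, KEY_B),
	MATRIX_KEY(1, 0, KEY_ENTER),
	MATRIX_KEY(1, 1, KEY_BACKSPACE),
};

static struct pxa27x_keypad_platform_data board_keypad_info = {
	.matrix_key_rows	= 2,
	.matrix_key_cols	= 2,
	.matrix_key_map		= board_matrix_keys,
	.matrix_key_map_size	= ARRAY_SIZE(board_matrix_keys),
	.debounce_interval	= 30,	/* written verbatim into KPKDI */
};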
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
new file mode 100644
index 000000000000..3884d1e3f070
--- /dev/null
+++ b/drivers/input/keyboard/tosakbd.c
@@ -0,0 +1,415 @@
1/*
2 * Keyboard driver for Sharp Tosa models (SL-6000x)
3 *
4 * Copyright (c) 2005 Dirk Opfer
5 * Copyright (c) 2007 Dmitry Baryshkov
6 *
7 * Based on xtkbd.c/locomkbd.c/corgikbd.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/input.h>
19#include <linux/delay.h>
20#include <linux/interrupt.h>
21
22#include <asm/arch/gpio.h>
23#include <asm/arch/tosa.h>
24
25#define KB_ROWMASK(r) (1 << (r))
26#define SCANCODE(r, c) (((r)<<4) + (c) + 1)
27#define NR_SCANCODES (SCANCODE(TOSA_KEY_SENSE_NUM - 1, TOSA_KEY_STROBE_NUM - 1) + 1)
28
29#define SCAN_INTERVAL (HZ/10)
30
31#define KB_DISCHARGE_DELAY 10
32#define KB_ACTIVATE_DELAY 10
33
34static unsigned int tosakbd_keycode[NR_SCANCODES] = {
350,
360, KEY_W, 0, 0, 0, KEY_K, KEY_BACKSPACE, KEY_P,
370, 0, 0, 0, 0, 0, 0, 0,
38KEY_Q, KEY_E, KEY_T, KEY_Y, 0, KEY_O, KEY_I, KEY_COMMA,
390, 0, 0, 0, 0, 0, 0, 0,
40KEY_A, KEY_D, KEY_G, KEY_U, 0, KEY_L, KEY_ENTER, KEY_DOT,
410, 0, 0, 0, 0, 0, 0, 0,
42KEY_Z, KEY_C, KEY_V, KEY_J, TOSA_KEY_ADDRESSBOOK, TOSA_KEY_CANCEL, TOSA_KEY_CENTER, TOSA_KEY_OK,
43KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, 0,
44KEY_S, KEY_R, KEY_B, KEY_N, TOSA_KEY_CALENDAR, TOSA_KEY_HOMEPAGE, KEY_LEFTCTRL, TOSA_KEY_LIGHT,
450, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0,
46KEY_TAB, KEY_SLASH, KEY_H, KEY_M, TOSA_KEY_MENU, 0, KEY_UP, 0,
470, 0, TOSA_KEY_FN, 0, 0, 0, 0, 0,
48KEY_X, KEY_F, KEY_SPACE, KEY_APOSTROPHE, TOSA_KEY_MAIL, KEY_LEFT, KEY_DOWN, KEY_RIGHT,
490, 0, 0,
50};
51
52struct tosakbd {
53 unsigned int keycode[ARRAY_SIZE(tosakbd_keycode)];
54 struct input_dev *input;
55
56 spinlock_t lock; /* protect kbd scanning */
57 struct timer_list timer;
58};
59
60
61/* Helper functions for reading the keyboard matrix
62 * Note: We should really be using pxa_gpio_mode to alter GPDR but it
63 * requires a function call per GPIO bit which is excessive
64 * when we need to access 12 bits at once, multiple times.
65 * These functions must be called within local_irq_save()/local_irq_restore()
66 * or similar.
67 */
68#define GET_ROWS_STATUS(c) ((GPLR2 & TOSA_GPIO_ALL_SENSE_BIT) >> TOSA_GPIO_ALL_SENSE_RSHIFT)
69
70static inline void tosakbd_discharge_all(void)
71{
72 /* STROBE All HiZ */
73 GPCR1 = TOSA_GPIO_HIGH_STROBE_BIT;
74 GPDR1 &= ~TOSA_GPIO_HIGH_STROBE_BIT;
75 GPCR2 = TOSA_GPIO_LOW_STROBE_BIT;
76 GPDR2 &= ~TOSA_GPIO_LOW_STROBE_BIT;
77}
78
79static inline void tosakbd_activate_all(void)
80{
81 /* STROBE ALL -> High */
82 GPSR1 = TOSA_GPIO_HIGH_STROBE_BIT;
83 GPDR1 |= TOSA_GPIO_HIGH_STROBE_BIT;
84 GPSR2 = TOSA_GPIO_LOW_STROBE_BIT;
85 GPDR2 |= TOSA_GPIO_LOW_STROBE_BIT;
86
87 udelay(KB_DISCHARGE_DELAY);
88
89 /* STATE CLEAR */
90 GEDR2 |= TOSA_GPIO_ALL_SENSE_BIT;
91}
92
93static inline void tosakbd_activate_col(int col)
94{
95 if (col <= 5) {
96 /* STROBE col -> High, not col -> HiZ */
97 GPSR1 = TOSA_GPIO_STROBE_BIT(col);
98 GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
99 } else {
100 /* STROBE col -> High, not col -> HiZ */
101 GPSR2 = TOSA_GPIO_STROBE_BIT(col);
102 GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
103 }
104}
105
106static inline void tosakbd_reset_col(int col)
107{
108 if (col <= 5) {
109 /* STROBE col -> Low */
110 GPCR1 = TOSA_GPIO_STROBE_BIT(col);
111 /* STROBE col -> out, not col -> HiZ */
112 GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
113 } else {
114 /* STROBE col -> Low */
115 GPCR2 = TOSA_GPIO_STROBE_BIT(col);
116 /* STROBE col -> out, not col -> HiZ */
117 GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
118 }
119}
120/*
121 * The tosa keyboard only generates interrupts when a key is pressed.
122 * So when a key is pressed, we enable a timer. This timer scans the
123 * keyboard, and this is how we detect when the key is released.
124 */
125
126/* Scan the hardware keyboard and push any changes up through the input layer */
127static void tosakbd_scankeyboard(struct platform_device *dev)
128{
129 struct tosakbd *tosakbd = platform_get_drvdata(dev);
130 unsigned int row, col, rowd;
131 unsigned long flags;
132 unsigned int num_pressed = 0;
133
134 spin_lock_irqsave(&tosakbd->lock, flags);
135
136 for (col = 0; col < TOSA_KEY_STROBE_NUM; col++) {
137 /*
138 * Discharge the output driver capacitance
139 * in the keyboard matrix. (Yes, it is significant.)
140 */
141 tosakbd_discharge_all();
142 udelay(KB_DISCHARGE_DELAY);
143
144 tosakbd_activate_col(col);
145 udelay(KB_ACTIVATE_DELAY);
146
147 rowd = GET_ROWS_STATUS(col);
148
149 for (row = 0; row < TOSA_KEY_SENSE_NUM; row++) {
150 unsigned int scancode, pressed;
151 scancode = SCANCODE(row, col);
152 pressed = rowd & KB_ROWMASK(row);
153
154 if (pressed && !tosakbd->keycode[scancode])
155 dev_warn(&dev->dev,
156 "unhandled scancode: 0x%02x\n",
157 scancode);
158
159 input_report_key(tosakbd->input,
160 tosakbd->keycode[scancode],
161 pressed);
162 if (pressed)
163 num_pressed++;
164 }
165
166 tosakbd_reset_col(col);
167 }
168
169 tosakbd_activate_all();
170
171 input_sync(tosakbd->input);
172
173 /* if any keys are pressed, enable the timer */
174 if (num_pressed)
175 mod_timer(&tosakbd->timer, jiffies + SCAN_INTERVAL);
176
177 spin_unlock_irqrestore(&tosakbd->lock, flags);
178}
179
180/*
181 * tosa keyboard interrupt handler.
182 */
183static irqreturn_t tosakbd_interrupt(int irq, void *__dev)
184{
185 struct platform_device *dev = __dev;
186 struct tosakbd *tosakbd = platform_get_drvdata(dev);
187
188 if (!timer_pending(&tosakbd->timer)) {
189 /* wait out the key-contact chatter (debounce delay) */
190 udelay(20);
191 tosakbd_scankeyboard(dev);
192 }
193
194 return IRQ_HANDLED;
195}
196
197/*
198 * tosa timer checking for released keys
199 */
200static void tosakbd_timer_callback(unsigned long __dev)
201{
202 struct platform_device *dev = (struct platform_device *)__dev;
203 tosakbd_scankeyboard(dev);
204}
205
206#ifdef CONFIG_PM
207static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
208{
209 struct tosakbd *tosakbd = platform_get_drvdata(dev);
210
211 del_timer_sync(&tosakbd->timer);
212
213 return 0;
214}
215
216static int tosakbd_resume(struct platform_device *dev)
217{
218 tosakbd_scankeyboard(dev);
219
220 return 0;
221}
222#else
223#define tosakbd_suspend NULL
224#define tosakbd_resume NULL
225#endif
226
227static int __devinit tosakbd_probe(struct platform_device *pdev) {
228
229 int i;
230 struct tosakbd *tosakbd;
231 struct input_dev *input_dev;
232 int error;
233
234 tosakbd = kzalloc(sizeof(struct tosakbd), GFP_KERNEL);
235 if (!tosakbd)
236 return -ENOMEM;
237
238 input_dev = input_allocate_device();
239 if (!input_dev) {
240 kfree(tosakbd);
241 return -ENOMEM;
242 }
243
244 platform_set_drvdata(pdev, tosakbd);
245
246 spin_lock_init(&tosakbd->lock);
247
248 /* Init Keyboard rescan timer */
249 init_timer(&tosakbd->timer);
250 tosakbd->timer.function = tosakbd_timer_callback;
251 tosakbd->timer.data = (unsigned long) pdev;
252
253 tosakbd->input = input_dev;
254
255 input_set_drvdata(input_dev, tosakbd);
256 input_dev->name = "Tosa Keyboard";
257 input_dev->phys = "tosakbd/input0";
258 input_dev->dev.parent = &pdev->dev;
259
260 input_dev->id.bustype = BUS_HOST;
261 input_dev->id.vendor = 0x0001;
262 input_dev->id.product = 0x0001;
263 input_dev->id.version = 0x0100;
264
265 input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
266 input_dev->keycode = tosakbd->keycode;
267 input_dev->keycodesize = sizeof(unsigned int);
268 input_dev->keycodemax = ARRAY_SIZE(tosakbd_keycode);
269
270 memcpy(tosakbd->keycode, tosakbd_keycode, sizeof(tosakbd_keycode));
271
272 for (i = 0; i < ARRAY_SIZE(tosakbd_keycode); i++)
273 __set_bit(tosakbd->keycode[i], input_dev->keybit);
274 clear_bit(0, input_dev->keybit);
275
276 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
277 for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
278 int gpio = TOSA_GPIO_KEY_SENSE(i);
279 int irq;
280 error = gpio_request(gpio, "tosakbd");
281 if (error < 0) {
282 printk(KERN_ERR "tosakbd: failed to request GPIO %d,"
283 " error %d\n", gpio, error);
284 goto fail;
285 }
286
287 error = gpio_direction_input(TOSA_GPIO_KEY_SENSE(i));
288 if (error < 0) {
289 printk(KERN_ERR "tosakbd: failed to configure input"
290 " direction for GPIO %d, error %d\n",
291 gpio, error);
292 gpio_free(gpio);
293 goto fail;
294 }
295
296 irq = gpio_to_irq(gpio);
297 if (irq < 0) {
298 error = irq;
299 printk(KERN_ERR "gpio-keys: Unable to get irq number"
300 " for GPIO %d, error %d\n",
301 gpio, error);
302 gpio_free(gpio);
303 goto fail;
304 }
305
306 error = request_irq(irq, tosakbd_interrupt,
307 IRQF_DISABLED | IRQF_TRIGGER_RISING,
308 "tosakbd", pdev);
309
310 if (error) {
311 printk("tosakbd: Can't get IRQ: %d: error %d!\n",
312 irq, error);
313 gpio_free(gpio);
314 goto fail;
315 }
316 }
317
318 /* Set Strobe lines as outputs - set high */
319 for (i = 0; i < TOSA_KEY_STROBE_NUM; i++) {
320 int gpio = TOSA_GPIO_KEY_STROBE(i);
321 error = gpio_request(gpio, "tosakbd");
322 if (error < 0) {
323 printk(KERN_ERR "tosakbd: failed to request GPIO %d,"
324 " error %d\n", gpio, error);
325 goto fail2;
326 }
327
328 error = gpio_direction_output(gpio, 1);
329 if (error < 0) {
330 printk(KERN_ERR "tosakbd: failed to configure input"
331 " direction for GPIO %d, error %d\n",
332 gpio, error);
333 gpio_free(gpio);
334 goto fail2;
335 }
336
337 }
338
339 error = input_register_device(input_dev);
340 if (error) {
341 printk(KERN_ERR "tosakbd: Unable to register input device, "
342 "error: %d\n", error);
343 goto fail2;
344 }
345
346 printk(KERN_INFO "input: Tosa Keyboard Registered\n");
347
348 return 0;
349
350fail2:
351 while (--i >= 0)
352 gpio_free(TOSA_GPIO_KEY_STROBE(i));
353
354 i = TOSA_KEY_SENSE_NUM;
355fail:
356 while (--i >= 0) {
357 free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), pdev);
358 gpio_free(TOSA_GPIO_KEY_SENSE(i));
359 }
360
361 platform_set_drvdata(pdev, NULL);
362 input_free_device(input_dev);
363 kfree(tosakbd);
364
365 return error;
366}
367
368static int __devexit tosakbd_remove(struct platform_device *dev) {
369
370 int i;
371 struct tosakbd *tosakbd = platform_get_drvdata(dev);
372
373 for (i = 0; i < TOSA_KEY_STROBE_NUM; i++)
374 gpio_free(TOSA_GPIO_KEY_STROBE(i));
375
376 for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
377 free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), dev);
378 gpio_free(TOSA_GPIO_KEY_SENSE(i));
379 }
380
381 del_timer_sync(&tosakbd->timer);
382
383 input_unregister_device(tosakbd->input);
384
385 kfree(tosakbd);
386
387 return 0;
388}
389
390static struct platform_driver tosakbd_driver = {
391 .probe = tosakbd_probe,
392 .remove = __devexit_p(tosakbd_remove),
393 .suspend = tosakbd_suspend,
394 .resume = tosakbd_resume,
395 .driver = {
396 .name = "tosa-keyboard",
397 },
398};
399
400static int __init tosakbd_init(void)
401{
402 return platform_driver_register(&tosakbd_driver);
403}
404
405static void __exit tosakbd_exit(void)
406{
407 platform_driver_unregister(&tosakbd_driver);
408}
409
410module_init(tosakbd_init);
411module_exit(tosakbd_exit);
412
413MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
414MODULE_DESCRIPTION("Tosa Keyboard Driver");
415MODULE_LICENSE("GPL v2");
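A quick sanity check of the scancode packing above (editorial example, not from the original patch): SCANCODE(row, col) evaluates to ((row << 4) + col + 1), so sense line 0 with strobe 1 gives scancode 2, which indexes KEY_W in tosakbd_keycode[], while sense line 1 with strobe 0 gives 17 and maps to KEY_Q. Unwired positions stay 0 (KEY_RESERVED), which is exactly what the "unhandled scancode" warning in tosakbd_scankeyboard() checks for.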
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 8f5c7b90187d..8b10d9f23bef 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -40,6 +40,20 @@ config INPUT_M68K_BEEP
40 tristate "M68k Beeper support" 40 tristate "M68k Beeper support"
41 depends on M68K 41 depends on M68K
42 42
43config INPUT_APANEL
44 tristate "Fujitsu Lifebook Application Panel buttons"
45 depends on X86
46 select I2C_I801
47 select INPUT_POLLDEV
48 select CHECK_SIGNATURE
49 help
50 Say Y here for support of the Application Panel buttons, used on
51 Fujitsu Lifebook. These are attached to the mainboard through
52 an SMBus interface managed by the I2C Intel ICH (i801) driver.
53
54 To compile this driver as a module, choose M here: the module will
55 be called apanel.
56
43config INPUT_IXP4XX_BEEPER 57config INPUT_IXP4XX_BEEPER
44 tristate "IXP4XX Beeper support" 58 tristate "IXP4XX Beeper support"
45 depends on ARCH_IXP4XX 59 depends on ARCH_IXP4XX
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 3585b5038418..ebd39f291d25 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
18obj-$(CONFIG_INPUT_YEALINK) += yealink.o 18obj-$(CONFIG_INPUT_YEALINK) += yealink.o
19obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o 19obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
20obj-$(CONFIG_INPUT_UINPUT) += uinput.o 20obj-$(CONFIG_INPUT_UINPUT) += uinput.o
21obj-$(CONFIG_INPUT_APANEL) += apanel.o
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
new file mode 100644
index 000000000000..9531d8c7444f
--- /dev/null
+++ b/drivers/input/misc/apanel.c
@@ -0,0 +1,378 @@
1/*
 2 * Fujitsu Lifebook Application Panel button driver
3 *
4 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
5 * Copyright (C) 2001-2003 Jochen Eisinger <jochen@penguin-breeder.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * Many Fujitsu Lifebook laptops have a small panel of buttons that are
12 * accessible via the i2c/smbus interface. This driver polls those
13 * buttons and generates input events.
14 *
15 * For more details see:
16 * http://apanel.sourceforge.net/tech.php
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/ioport.h>
22#include <linux/io.h>
24#include <linux/input-polldev.h>
25#include <linux/i2c.h>
26#include <linux/workqueue.h>
27#include <linux/leds.h>
28
29#define APANEL_NAME "Fujitsu Application Panel"
30#define APANEL_VERSION "1.3.1"
31#define APANEL "apanel"
32
33/* How often we poll keys - msecs */
34#define POLL_INTERVAL_DEFAULT 1000
35
36/* Magic constants in BIOS that tell about buttons */
37enum apanel_devid {
38 APANEL_DEV_NONE = 0,
39 APANEL_DEV_APPBTN = 1,
40 APANEL_DEV_CDBTN = 2,
41 APANEL_DEV_LCD = 3,
42 APANEL_DEV_LED = 4,
43
44 APANEL_DEV_MAX,
45};
46
47enum apanel_chip {
48 CHIP_NONE = 0,
49 CHIP_OZ992C = 1,
50 CHIP_OZ163T = 2,
51 CHIP_OZ711M3 = 4,
52};
53
54/* Result of BIOS snooping/probing -- what features are supported */
55static enum apanel_chip device_chip[APANEL_DEV_MAX];
56
57#define MAX_PANEL_KEYS 12
58
59struct apanel {
60 struct input_polled_dev *ipdev;
61 struct i2c_client client;
62 unsigned short keymap[MAX_PANEL_KEYS];
63 u16 nkeys;
64 u16 led_bits;
65 struct work_struct led_work;
66 struct led_classdev mail_led;
67};
68
69
70static int apanel_probe(struct i2c_adapter *, int, int);
71
72/* for now, we only support one address */
73static unsigned short normal_i2c[] = {0, I2C_CLIENT_END};
74static unsigned short ignore = I2C_CLIENT_END;
75static struct i2c_client_address_data addr_data = {
76 .normal_i2c = normal_i2c,
77 .probe = &ignore,
78 .ignore = &ignore,
79};
80
81static void report_key(struct input_dev *input, unsigned keycode)
82{
83 pr_debug(APANEL ": report key %#x\n", keycode);
84 input_report_key(input, keycode, 1);
85 input_sync(input);
86
87 input_report_key(input, keycode, 0);
88 input_sync(input);
89}
90
91/* Poll for key changes
92 *
93 * Read Application keys via SMI
94 * A (0x4), B (0x8), Internet (0x2), Email (0x1).
95 *
96 * CD keys:
97 * Forward (0x100), Rewind (0x200), Stop (0x400), Pause (0x800)
98 */
99static void apanel_poll(struct input_polled_dev *ipdev)
100{
101 struct apanel *ap = ipdev->private;
102 struct input_dev *idev = ipdev->input;
103 u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
104 s32 data;
105 int i;
106
107 data = i2c_smbus_read_word_data(&ap->client, cmd);
108 if (data < 0)
109 return; /* ignore errors (due to ACPI??) */
110
111 /* write back to clear latch */
112 i2c_smbus_write_word_data(&ap->client, cmd, 0);
113
114 if (!data)
115 return;
116
117 dev_dbg(&idev->dev, APANEL ": data %#x\n", data);
118 for (i = 0; i < idev->keycodemax; i++)
119 if ((1u << i) & data)
120 report_key(idev, ap->keymap[i]);
121}
122
123/* Track state changes of LED */
124static void led_update(struct work_struct *work)
125{
126 struct apanel *ap = container_of(work, struct apanel, led_work);
127
128 i2c_smbus_write_word_data(&ap->client, 0x10, ap->led_bits);
129}
130
131static void mail_led_set(struct led_classdev *led,
132 enum led_brightness value)
133{
134 struct apanel *ap = container_of(led, struct apanel, mail_led);
135
136 if (value != LED_OFF)
137 ap->led_bits |= 0x8000;
138 else
139 ap->led_bits &= ~0x8000;
140
141 schedule_work(&ap->led_work);
142}
143
144static int apanel_detach_client(struct i2c_client *client)
145{
146 struct apanel *ap = i2c_get_clientdata(client);
147
148 if (device_chip[APANEL_DEV_LED] != CHIP_NONE)
149 led_classdev_unregister(&ap->mail_led);
150
151 input_unregister_polled_device(ap->ipdev);
152 i2c_detach_client(&ap->client);
153 input_free_polled_device(ap->ipdev);
154
155 return 0;
156}
157
158/* Function is invoked for every i2c adapter. */
159static int apanel_attach_adapter(struct i2c_adapter *adap)
160{
161 dev_dbg(&adap->dev, APANEL ": attach adapter id=%d\n", adap->id);
162
163 /* Our device is connected only to i801 on laptop */
164 if (adap->id != I2C_HW_SMBUS_I801)
165 return -ENODEV;
166
167 return i2c_probe(adap, &addr_data, apanel_probe);
168}
169
170static void apanel_shutdown(struct i2c_client *client)
171{
172 apanel_detach_client(client);
173}
174
175static struct i2c_driver apanel_driver = {
176 .driver = {
177 .name = APANEL,
178 },
179 .attach_adapter = &apanel_attach_adapter,
180 .detach_client = &apanel_detach_client,
181 .shutdown = &apanel_shutdown,
182};
183
184static struct apanel apanel = {
185 .client = {
186 .driver = &apanel_driver,
187 .name = APANEL,
188 },
189 .keymap = {
190 [0] = KEY_MAIL,
191 [1] = KEY_WWW,
192 [2] = KEY_PROG2,
193 [3] = KEY_PROG1,
194
195 [8] = KEY_FORWARD,
196 [9] = KEY_REWIND,
197 [10] = KEY_STOPCD,
198 [11] = KEY_PLAYPAUSE,
199
200 },
201 .mail_led = {
202 .name = "mail:blue",
203 .brightness_set = mail_led_set,
204 },
205};
206
207/* NB: Only one panel on the i2c. */
208static int apanel_probe(struct i2c_adapter *bus, int address, int kind)
209{
210 struct apanel *ap;
211 struct input_polled_dev *ipdev;
212 struct input_dev *idev;
213 u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
214 int i, err = -ENOMEM;
215
216 dev_dbg(&bus->dev, APANEL ": probe adapter %p addr %d kind %d\n",
217 bus, address, kind);
218
219 ap = &apanel;
220
221 ipdev = input_allocate_polled_device();
222 if (!ipdev)
223 goto out1;
224
225 ap->ipdev = ipdev;
226 ap->client.adapter = bus;
227 ap->client.addr = address;
228
229 i2c_set_clientdata(&ap->client, ap);
230
231 err = i2c_attach_client(&ap->client);
232 if (err)
233 goto out2;
234
235 err = i2c_smbus_write_word_data(&ap->client, cmd, 0);
236 if (err) {
237 dev_warn(&ap->client.dev, APANEL ": smbus write error %d\n",
238 err);
239 goto out3;
240 }
241
242 ipdev->poll = apanel_poll;
243 ipdev->poll_interval = POLL_INTERVAL_DEFAULT;
244 ipdev->private = ap;
245
246 idev = ipdev->input;
247 idev->name = APANEL_NAME " buttons";
248 idev->phys = "apanel/input0";
249 idev->id.bustype = BUS_HOST;
250 idev->dev.parent = &ap->client.dev;
251
252 set_bit(EV_KEY, idev->evbit);
253
254 idev->keycode = ap->keymap;
255 idev->keycodesize = sizeof(ap->keymap[0]);
256 idev->keycodemax = (device_chip[APANEL_DEV_CDBTN] != CHIP_NONE) ? 12 : 4;
257
258 for (i = 0; i < idev->keycodemax; i++)
259 if (ap->keymap[i])
260 set_bit(ap->keymap[i], idev->keybit);
261
262 err = input_register_polled_device(ipdev);
263 if (err)
264 goto out3;
265
266 INIT_WORK(&ap->led_work, led_update);
267 if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
268 err = led_classdev_register(&ap->client.dev, &ap->mail_led);
269 if (err)
270 goto out4;
271 }
272
273 return 0;
274out4:
275 input_unregister_polled_device(ipdev);
276out3:
277 i2c_detach_client(&ap->client);
278out2:
279 input_free_polled_device(ipdev);
280out1:
281 return err;
282}
283
284/* Scan the system ROM for the signature "FJKEYINF" */
285static __init const void __iomem *bios_signature(const void __iomem *bios)
286{
287 ssize_t offset;
288 const unsigned char signature[] = "FJKEYINF";
289
290 for (offset = 0; offset < 0x10000; offset += 0x10) {
291 if (check_signature(bios + offset, signature,
292 sizeof(signature)-1))
293 return bios + offset;
294 }
295 pr_notice(APANEL ": Fujitsu BIOS signature '%s' not found...\n",
296 signature);
297 return NULL;
298}
299
300static int __init apanel_init(void)
301{
302 void __iomem *bios;
303 const void __iomem *p;
304 u8 devno;
305 int found = 0;
306
307 bios = ioremap(0xF0000, 0x10000); /* Can't fail */
308
309 p = bios_signature(bios);
310 if (!p) {
311 iounmap(bios);
312 return -ENODEV;
313 }
314
315 /* just use the first address */
316 p += 8;
317 normal_i2c[0] = readb(p+3) >> 1;
318
319 for ( ; (devno = readb(p)) & 0x7f; p += 4) {
320 unsigned char method, slave, chip;
321
322 method = readb(p + 1);
323 chip = readb(p + 2);
324 slave = readb(p + 3) >> 1;
325
326 if (slave != normal_i2c[0]) {
327 pr_notice(APANEL ": only one SMBus slave "
328 "address supported, skiping device...\n");
329 continue;
330 }
331
332 /* translate alternative device numbers */
333 switch (devno) {
334 case 6:
335 devno = APANEL_DEV_APPBTN;
336 break;
337 case 7:
338 devno = APANEL_DEV_LED;
339 break;
340 }
341
342 if (devno >= APANEL_DEV_MAX)
343 pr_notice(APANEL ": unknown device %u found\n", devno);
344 else if (device_chip[devno] != CHIP_NONE)
345 pr_warning(APANEL ": duplicate entry for devno %u\n", devno);
346
347 else if (method != 1 && method != 2 && method != 4) {
348 pr_notice(APANEL ": unknown method %u for devno %u\n",
349 method, devno);
350 } else {
351 device_chip[devno] = (enum apanel_chip) chip;
352 ++found;
353 }
354 }
355 iounmap(bios);
356
357 if (found == 0) {
358 pr_info(APANEL ": no input devices reported by BIOS\n");
359 return -EIO;
360 }
361
362 return i2c_add_driver(&apanel_driver);
363}
364module_init(apanel_init);
365
366static void __exit apanel_cleanup(void)
367{
368 i2c_del_driver(&apanel_driver);
369}
370module_exit(apanel_cleanup);
371
372MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
373MODULE_DESCRIPTION(APANEL_NAME " driver");
374MODULE_LICENSE("GPL");
375MODULE_VERSION(APANEL_VERSION);
376
377MODULE_ALIAS("dmi:*:svnFUJITSU:pnLifeBook*:pvr*:rvnFUJITSU:*");
378MODULE_ALIAS("dmi:*:svnFUJITSU:pnLifebook*:pvr*:rvnFUJITSU:*");
diff --git a/drivers/input/misc/ati_remote.c b/drivers/input/misc/ati_remote.c
index 3a7937481ad8..f3b86c2b0797 100644
--- a/drivers/input/misc/ati_remote.c
+++ b/drivers/input/misc/ati_remote.c
@@ -90,7 +90,6 @@
90#include <linux/init.h> 90#include <linux/init.h>
91#include <linux/slab.h> 91#include <linux/slab.h>
92#include <linux/module.h> 92#include <linux/module.h>
93#include <linux/moduleparam.h>
94#include <linux/usb/input.h> 93#include <linux/usb/input.h>
95#include <linux/wait.h> 94#include <linux/wait.h>
96#include <linux/jiffies.h> 95#include <linux/jiffies.h>
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 4e3ad657ed80..1b871917340a 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -29,9 +29,10 @@
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <acpi/acpi_drivers.h> 30#include <acpi/acpi_drivers.h>
31 31
32#define ACPI_ATLAS_NAME "Atlas ACPI" 32#define ACPI_ATLAS_NAME "Atlas ACPI"
33#define ACPI_ATLAS_CLASS "Atlas" 33#define ACPI_ATLAS_CLASS "Atlas"
34 34
35static unsigned short atlas_keymap[16];
35static struct input_dev *input_dev; 36static struct input_dev *input_dev;
36 37
37/* button handling code */ 38/* button handling code */
@@ -50,12 +51,15 @@ static acpi_status acpi_atlas_button_handler(u32 function,
50 void *handler_context, void *region_context) 51 void *handler_context, void *region_context)
51{ 52{
52 acpi_status status; 53 acpi_status status;
53 int keycode;
54 54
55 if (function == ACPI_WRITE) { 55 if (function == ACPI_WRITE) {
56 keycode = KEY_F1 + (address & 0x0F); 56 int code = address & 0x0f;
57 input_report_key(input_dev, keycode, !(address & 0x10)); 57 int key_down = !(address & 0x10);
58
59 input_event(input_dev, EV_MSC, MSC_SCAN, code);
60 input_report_key(input_dev, atlas_keymap[code], key_down);
58 input_sync(input_dev); 61 input_sync(input_dev);
62
59 status = 0; 63 status = 0;
60 } else { 64 } else {
61 printk(KERN_WARNING "atlas: shrugged on unexpected function" 65 printk(KERN_WARNING "atlas: shrugged on unexpected function"
@@ -70,6 +74,7 @@ static acpi_status acpi_atlas_button_handler(u32 function,
70static int atlas_acpi_button_add(struct acpi_device *device) 74static int atlas_acpi_button_add(struct acpi_device *device)
71{ 75{
72 acpi_status status; 76 acpi_status status;
77 int i;
73 int err; 78 int err;
74 79
75 input_dev = input_allocate_device(); 80 input_dev = input_allocate_device();
@@ -81,17 +86,19 @@ static int atlas_acpi_button_add(struct acpi_device *device)
81 input_dev->name = "Atlas ACPI button driver"; 86 input_dev->name = "Atlas ACPI button driver";
82 input_dev->phys = "ASIM0000/atlas/input0"; 87 input_dev->phys = "ASIM0000/atlas/input0";
83 input_dev->id.bustype = BUS_HOST; 88 input_dev->id.bustype = BUS_HOST;
84 input_dev->evbit[BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY); 89 input_dev->keycode = atlas_keymap;
85 90 input_dev->keycodesize = sizeof(unsigned short);
86 set_bit(KEY_F1, input_dev->keybit); 91 input_dev->keycodemax = ARRAY_SIZE(atlas_keymap);
87 set_bit(KEY_F2, input_dev->keybit); 92
88 set_bit(KEY_F3, input_dev->keybit); 93 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
89 set_bit(KEY_F4, input_dev->keybit); 94 __set_bit(EV_KEY, input_dev->evbit);
90 set_bit(KEY_F5, input_dev->keybit); 95 for (i = 0; i < ARRAY_SIZE(atlas_keymap); i++) {
91 set_bit(KEY_F6, input_dev->keybit); 96 if (i < 9) {
92 set_bit(KEY_F7, input_dev->keybit); 97 atlas_keymap[i] = KEY_F1 + i;
93 set_bit(KEY_F8, input_dev->keybit); 98 __set_bit(KEY_F1 + i, input_dev->keybit);
94 set_bit(KEY_F9, input_dev->keybit); 99 } else
100 atlas_keymap[i] = KEY_RESERVED;
101 }
95 102
96 err = input_register_device(input_dev); 103 err = input_register_device(input_dev);
97 if (err) { 104 if (err) {
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 1aef97ed5e84..4833b1a82623 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -27,55 +27,48 @@
27#define BUTTONS_COUNT_THRESHOLD 3 27#define BUTTONS_COUNT_THRESHOLD 3
28#define BUTTONS_STATUS_MASK 0xfe000000 28#define BUTTONS_STATUS_MASK 0xfe000000
29 29
30static const unsigned short cobalt_map[] = {
31 KEY_RESERVED,
32 KEY_RESTART,
33 KEY_LEFT,
34 KEY_UP,
35 KEY_DOWN,
36 KEY_RIGHT,
37 KEY_ENTER,
38 KEY_SELECT
39};
40
30struct buttons_dev { 41struct buttons_dev {
31 struct input_polled_dev *poll_dev; 42 struct input_polled_dev *poll_dev;
43 unsigned short keymap[ARRAY_SIZE(cobalt_map)];
44 int count[ARRAY_SIZE(cobalt_map)];
32 void __iomem *reg; 45 void __iomem *reg;
33}; 46};
34 47
35struct buttons_map {
36 uint32_t mask;
37 int keycode;
38 int count;
39};
40
41static struct buttons_map buttons_map[] = {
42 { 0x02000000, KEY_RESTART, },
43 { 0x04000000, KEY_LEFT, },
44 { 0x08000000, KEY_UP, },
45 { 0x10000000, KEY_DOWN, },
46 { 0x20000000, KEY_RIGHT, },
47 { 0x40000000, KEY_ENTER, },
48 { 0x80000000, KEY_SELECT, },
49};
50
51static void handle_buttons(struct input_polled_dev *dev) 48static void handle_buttons(struct input_polled_dev *dev)
52{ 49{
53 struct buttons_map *button = buttons_map;
54 struct buttons_dev *bdev = dev->private; 50 struct buttons_dev *bdev = dev->private;
55 struct input_dev *input = dev->input; 51 struct input_dev *input = dev->input;
56 uint32_t status; 52 uint32_t status;
57 int i; 53 int i;
58 54
59 status = readl(bdev->reg); 55 status = ~readl(bdev->reg) >> 24;
60 status = ~status & BUTTONS_STATUS_MASK;
61 56
62 for (i = 0; i < ARRAY_SIZE(buttons_map); i++) { 57 for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) {
63 if (status & button->mask) { 58 if (status & (1UL << i)) {
64 button->count++; 59 if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) {
60 input_event(input, EV_MSC, MSC_SCAN, i);
61 input_report_key(input, bdev->keymap[i], 1);
62 input_sync(input);
63 }
65 } else { 64 } else {
66 if (button->count >= BUTTONS_COUNT_THRESHOLD) { 65 if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) {
67 input_report_key(input, button->keycode, 0); 66 input_event(input, EV_MSC, MSC_SCAN, i);
67 input_report_key(input, bdev->keymap[i], 0);
68 input_sync(input); 68 input_sync(input);
69 } 69 }
70 button->count = 0; 70 bdev->count[i] = 0;
71 }
72
73 if (button->count == BUTTONS_COUNT_THRESHOLD) {
74 input_report_key(input, button->keycode, 1);
75 input_sync(input);
76 } 71 }
77
78 button++;
79 } 72 }
80} 73}
81 74
@@ -94,6 +87,8 @@ static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
94 goto err_free_mem; 87 goto err_free_mem;
95 } 88 }
96 89
90 memcpy(bdev->keymap, cobalt_map, sizeof(bdev->keymap));
91
97 poll_dev->private = bdev; 92 poll_dev->private = bdev;
98 poll_dev->poll = handle_buttons; 93 poll_dev->poll = handle_buttons;
99 poll_dev->poll_interval = BUTTONS_POLL_INTERVAL; 94 poll_dev->poll_interval = BUTTONS_POLL_INTERVAL;
@@ -104,11 +99,15 @@ static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
104 input->id.bustype = BUS_HOST; 99 input->id.bustype = BUS_HOST;
105 input->cdev.dev = &pdev->dev; 100 input->cdev.dev = &pdev->dev;
106 101
107 input->evbit[0] = BIT_MASK(EV_KEY); 102 input->keycode = bdev->keymap;
108 for (i = 0; i < ARRAY_SIZE(buttons_map); i++) { 103 input->keycodemax = ARRAY_SIZE(bdev->keymap);
109 set_bit(buttons_map[i].keycode, input->keybit); 104 input->keycodesize = sizeof(unsigned short);
110 buttons_map[i].count = 0; 105
111 } 106 input_set_capability(input, EV_MSC, MSC_SCAN);
107 __set_bit(EV_KEY, input->evbit);
108 for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++)
109 __set_bit(bdev->keymap[i], input->keybit);
110 __clear_bit(KEY_RESERVED, input->keybit);
112 111
113 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 112 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
114 if (!res) { 113 if (!res) {
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index fd74347047dd..952938a8e991 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -16,7 +16,6 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/usb/input.h> 19#include <linux/usb/input.h>
21 20
22#define DRIVER_VERSION "v0.1" 21#define DRIVER_VERSION "v0.1"
@@ -46,53 +45,12 @@ MODULE_PARM_DESC(debug, "Enable extra debug messages and information");
46 45
47#define RECV_SIZE 8 /* The UIA-11 type have a 8 byte limit. */ 46#define RECV_SIZE 8 /* The UIA-11 type have a 8 byte limit. */
48 47
49/* table of devices that work with this driver */
50static struct usb_device_id keyspan_table[] = {
51 { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) },
52 { } /* Terminating entry */
53};
54
55/* Structure to store all the real stuff that a remote sends to us. */
56struct keyspan_message {
57 u16 system;
58 u8 button;
59 u8 toggle;
60};
61
62/* Structure used for all the bit testing magic needed to be done. */
63struct bit_tester {
64 u32 tester;
65 int len;
66 int pos;
67 int bits_left;
68 u8 buffer[32];
69};
70
71/* Structure to hold all of our driver specific stuff */
72struct usb_keyspan {
73 char name[128];
74 char phys[64];
75 struct usb_device* udev;
76 struct input_dev *input;
77 struct usb_interface* interface;
78 struct usb_endpoint_descriptor* in_endpoint;
79 struct urb* irq_urb;
80 int open;
81 dma_addr_t in_dma;
82 unsigned char* in_buffer;
83
84 /* variables used to parse messages from remote. */
85 struct bit_tester data;
86 int stage;
87 int toggle;
88};
89
90/* 48/*
91 * Table that maps the 31 possible keycodes to input keys. 49 * Table that maps the 31 possible keycodes to input keys.
92 * Currently there are 15 and 17 button models so RESERVED codes 50 * Currently there are 15 and 17 button models so RESERVED codes
93 * are blank areas in the mapping. 51 * are blank areas in the mapping.
94 */ 52 */
95static const int keyspan_key_table[] = { 53static const unsigned short keyspan_key_table[] = {
96 KEY_RESERVED, /* 0 is just a place holder. */ 54 KEY_RESERVED, /* 0 is just a place holder. */
97 KEY_RESERVED, 55 KEY_RESERVED,
98 KEY_STOP, 56 KEY_STOP,
@@ -127,6 +85,48 @@ static const int keyspan_key_table[] = {
127 KEY_MENU 85 KEY_MENU
128}; 86};
129 87
88/* table of devices that work with this driver */
89static struct usb_device_id keyspan_table[] = {
90 { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) },
91 { } /* Terminating entry */
92};
93
94/* Structure to store all the real stuff that a remote sends to us. */
95struct keyspan_message {
96 u16 system;
97 u8 button;
98 u8 toggle;
99};
100
101/* Structure used for all the bit testing magic needed to be done. */
102struct bit_tester {
103 u32 tester;
104 int len;
105 int pos;
106 int bits_left;
107 u8 buffer[32];
108};
109
110/* Structure to hold all of our driver specific stuff */
111struct usb_keyspan {
112 char name[128];
113 char phys[64];
114 unsigned short keymap[ARRAY_SIZE(keyspan_key_table)];
115 struct usb_device *udev;
116 struct input_dev *input;
117 struct usb_interface *interface;
118 struct usb_endpoint_descriptor *in_endpoint;
119 struct urb* irq_urb;
120 int open;
121 dma_addr_t in_dma;
122 unsigned char *in_buffer;
123
124 /* variables used to parse messages from remote. */
125 struct bit_tester data;
126 int stage;
127 int toggle;
128};
129
130static struct usb_driver keyspan_driver; 130static struct usb_driver keyspan_driver;
131 131
132/* 132/*
@@ -173,6 +173,15 @@ static int keyspan_load_tester(struct usb_keyspan* dev, int bits_needed)
173 return 0; 173 return 0;
174} 174}
175 175
176static void keyspan_report_button(struct usb_keyspan *remote, int button, int press)
177{
178 struct input_dev *input = remote->input;
179
180 input_event(input, EV_MSC, MSC_SCAN, button);
181 input_report_key(input, remote->keymap[button], press);
182 input_sync(input);
183}
184
176/* 185/*
177 * Routine that handles all the logic needed to parse out the message from the remote. 186 * Routine that handles all the logic needed to parse out the message from the remote.
178 */ 187 */
@@ -311,9 +320,8 @@ static void keyspan_check_data(struct usb_keyspan *remote)
311 __FUNCTION__, message.system, message.button, message.toggle); 320 __FUNCTION__, message.system, message.button, message.toggle);
312 321
313 if (message.toggle != remote->toggle) { 322 if (message.toggle != remote->toggle) {
314 input_report_key(remote->input, keyspan_key_table[message.button], 1); 323 keyspan_report_button(remote, message.button, 1);
315 input_report_key(remote->input, keyspan_key_table[message.button], 0); 324 keyspan_report_button(remote, message.button, 0);
316 input_sync(remote->input);
317 remote->toggle = message.toggle; 325 remote->toggle = message.toggle;
318 } 326 }
319 327
@@ -491,16 +499,21 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
491 499
492 usb_make_path(udev, remote->phys, sizeof(remote->phys)); 500 usb_make_path(udev, remote->phys, sizeof(remote->phys));
493 strlcat(remote->phys, "/input0", sizeof(remote->phys)); 501 strlcat(remote->phys, "/input0", sizeof(remote->phys));
502 memcpy(remote->keymap, keyspan_key_table, sizeof(remote->keymap));
494 503
495 input_dev->name = remote->name; 504 input_dev->name = remote->name;
496 input_dev->phys = remote->phys; 505 input_dev->phys = remote->phys;
497 usb_to_input_id(udev, &input_dev->id); 506 usb_to_input_id(udev, &input_dev->id);
498 input_dev->dev.parent = &interface->dev; 507 input_dev->dev.parent = &interface->dev;
508 input_dev->keycode = remote->keymap;
509 input_dev->keycodesize = sizeof(unsigned short);
510 input_dev->keycodemax = ARRAY_SIZE(remote->keymap);
499 511
500 input_dev->evbit[0] = BIT_MASK(EV_KEY); /* We will only report KEY events. */ 512 input_set_capability(input_dev, EV_MSC, MSC_SCAN);
513 __set_bit(EV_KEY, input_dev->evbit);
501 for (i = 0; i < ARRAY_SIZE(keyspan_key_table); i++) 514 for (i = 0; i < ARRAY_SIZE(keyspan_key_table); i++)
502 if (keyspan_key_table[i] != KEY_RESERVED) 515 __set_bit(keyspan_key_table[i], input_dev->keybit);
503 set_bit(keyspan_key_table[i], input_dev->keybit); 516 __clear_bit(KEY_RESERVED, input_dev->keybit);
504 517
505 input_set_drvdata(input_dev, remote); 518 input_set_drvdata(input_dev, remote);
506 519
@@ -508,12 +521,14 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
508 input_dev->close = keyspan_close; 521 input_dev->close = keyspan_close;
509 522
510 /* 523 /*
511 * Initialize the URB to access the device. The urb gets sent to the device in keyspan_open() 524 * Initialize the URB to access the device.
525 * The urb gets sent to the device in keyspan_open()
512 */ 526 */
513 usb_fill_int_urb(remote->irq_urb, 527 usb_fill_int_urb(remote->irq_urb,
514 remote->udev, usb_rcvintpipe(remote->udev, remote->in_endpoint->bEndpointAddress), 528 remote->udev,
529 usb_rcvintpipe(remote->udev, endpoint->bEndpointAddress),
515 remote->in_buffer, RECV_SIZE, keyspan_irq_recv, remote, 530 remote->in_buffer, RECV_SIZE, keyspan_irq_recv, remote,
516 remote->in_endpoint->bInterval); 531 endpoint->bInterval);
517 remote->irq_urb->transfer_dma = remote->in_dma; 532 remote->irq_urb->transfer_dma = remote->in_dma;
518 remote->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 533 remote->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
519 534
diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c
index 26ec09529b51..06c35fc553c0 100644
--- a/drivers/input/mouse/inport.c
+++ b/drivers/input/mouse/inport.c
@@ -35,7 +35,6 @@
35 */ 35 */
36 36
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/moduleparam.h>
39#include <linux/ioport.h> 38#include <linux/ioport.h>
40#include <linux/init.h> 39#include <linux/init.h>
41#include <linux/interrupt.h> 40#include <linux/interrupt.h>
diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c
index 37e7c75b43bd..9ea895593b27 100644
--- a/drivers/input/mouse/logibm.c
+++ b/drivers/input/mouse/logibm.c
@@ -36,7 +36,6 @@
36 */ 36 */
37 37
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/delay.h> 39#include <linux/delay.h>
41#include <linux/ioport.h> 40#include <linux/ioport.h>
42#include <linux/init.h> 41#include <linux/init.h>
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index b8628252e10c..f5a6be1d3c46 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -13,7 +13,6 @@
13 13
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/slab.h> 16#include <linux/slab.h>
18#include <linux/interrupt.h> 17#include <linux/interrupt.h>
19#include <linux/input.h> 18#include <linux/input.h>
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 9ab5b5ea809d..26b845fc186a 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -11,7 +11,6 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/serio.h> 12#include <linux/serio.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/input.h> 14#include <linux/input.h>
16#include <linux/libps2.h> 15#include <linux/libps2.h>
17#include <linux/proc_fs.h> 16#include <linux/proc_fs.h>
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index be83516c776c..bbbe5e81adc1 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -16,7 +16,6 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/poll.h> 17#include <linux/poll.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/input.h> 20#include <linux/input.h>
22#include <linux/random.h> 21#include <linux/random.h>
@@ -1033,7 +1032,7 @@ static const struct input_device_id mousedev_ids[] = {
1033 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | 1032 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
1034 INPUT_DEVICE_ID_MATCH_KEYBIT | 1033 INPUT_DEVICE_ID_MATCH_KEYBIT |
1035 INPUT_DEVICE_ID_MATCH_ABSBIT, 1034 INPUT_DEVICE_ID_MATCH_ABSBIT,
1036 .evbit = { BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_SYN) }, 1035 .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
1037 .keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) }, 1036 .keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
1038 .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, 1037 .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
1039 }, /* Mouse-like device with absolute X and Y but ordinary 1038 }, /* Mouse-like device with absolute X and Y but ordinary
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index c5e68dcd88ac..662e84482c26 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -63,7 +63,7 @@ static inline void i8042_write_command(int val)
63 outb(val, I8042_COMMAND_REG); 63 outb(val, I8042_COMMAND_REG);
64} 64}
65 65
66#if defined(__i386__) 66#if defined(__i386__) || defined(__x86_64__)
67 67
68#include <linux/dmi.h> 68#include <linux/dmi.h>
69 69
@@ -186,6 +186,13 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
186 }, 186 },
187 }, 187 },
188 { 188 {
189 .ident = "Fujitsu-Siemens Amilo Pro 2010",
190 .matches = {
191 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
192 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
193 },
194 },
195 {
189 /* 196 /*
190 * No data is coming from the touchscreen unless KBC 197 * No data is coming from the touchscreen unless KBC
191 * is in legacy mode. 198 * is in legacy mode.
@@ -277,6 +284,57 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
277 284
278#endif 285#endif
279 286
287#ifdef CONFIG_X86
288
289#include <linux/dmi.h>
290
291/*
292 * Some Wistron based laptops need us to explicitly enable the 'Dritek
293 * keyboard extension' to make their extra keys start generating scancodes.
294 * Originally, this was just confined to older laptops, but a few Acer laptops
295 * have turned up in 2007 that also need this again.
296 */
297static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
298 {
299 .ident = "Acer Aspire 5630",
300 .matches = {
301 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
302 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
303 },
304 },
305 {
306 .ident = "Acer Aspire 5650",
307 .matches = {
308 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
309 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
310 },
311 },
312 {
313 .ident = "Acer Aspire 5680",
314 .matches = {
315 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
316 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
317 },
318 },
319 {
320 .ident = "Acer Aspire 9110",
321 .matches = {
322 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
323 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
324 },
325 },
326 {
327 .ident = "Acer TravelMate 2490",
328 .matches = {
329 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
330 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
331 },
332 },
333 { }
334};
335
336#endif /* CONFIG_X86 */
337
280 338
281#ifdef CONFIG_PNP 339#ifdef CONFIG_PNP
282#include <linux/pnp.h> 340#include <linux/pnp.h>
@@ -512,7 +570,7 @@ static int __init i8042_platform_init(void)
512 i8042_reset = 1; 570 i8042_reset = 1;
513#endif 571#endif
514 572
515#if defined(__i386__) 573#if defined(__i386__) || defined(__x86_64__)
516 if (dmi_check_system(i8042_dmi_noloop_table)) 574 if (dmi_check_system(i8042_dmi_noloop_table))
517 i8042_noloop = 1; 575 i8042_noloop = 1;
518 576
@@ -520,6 +578,11 @@ static int __init i8042_platform_init(void)
520 i8042_nomux = 1; 578 i8042_nomux = 1;
521#endif 579#endif
522 580
581#ifdef CONFIG_X86
582 if (dmi_check_system(i8042_dmi_dritek_table))
583 i8042_dritek = 1;
584#endif /* CONFIG_X86 */
585
523 return retval; 586 return retval;
524} 587}
525 588
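Besides enabling the existing DMI quirk tables on x86_64 and adding an Amilo Pro entry to the nomux list, the header above introduces i8042_dmi_dritek_table, a list of Acer models whose keyboard controller needs the Dritek extension switched on. The mechanism is the usual DMI quirk pattern: a zero-terminated array of struct dmi_system_id entries matched on vendor/product strings, consulted once at init time via dmi_check_system(), which returns non-zero when the running machine matches any entry. A minimal sketch of that pattern with made-up names:

#include <linux/dmi.h>
#include <linux/init.h>

static unsigned int example_quirk;	/* hypothetical flag, set when the table matches */

static struct dmi_system_id __initdata example_dmi_table[] = {
	{
		.ident = "Some Laptop Needing The Quirk",	/* illustrative entry */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},
	{ }	/* terminator - dmi_check_system() stops here */
};

static int __init example_platform_init(void)
{
	if (dmi_check_system(example_dmi_table))
		example_quirk = 1;
	return 0;
}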
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 1a0cea3c5294..2763394869d2 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -12,7 +12,6 @@
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/interrupt.h> 15#include <linux/interrupt.h>
17#include <linux/ioport.h> 16#include <linux/ioport.h>
18#include <linux/init.h> 17#include <linux/init.h>
@@ -64,6 +63,12 @@ static unsigned int i8042_blink_frequency = 500;
64module_param_named(panicblink, i8042_blink_frequency, uint, 0600); 63module_param_named(panicblink, i8042_blink_frequency, uint, 0600);
65MODULE_PARM_DESC(panicblink, "Frequency with which keyboard LEDs should blink when kernel panics"); 64MODULE_PARM_DESC(panicblink, "Frequency with which keyboard LEDs should blink when kernel panics");
66 65
66#ifdef CONFIG_X86
67static unsigned int i8042_dritek;
68module_param_named(dritek, i8042_dritek, bool, 0);
69MODULE_PARM_DESC(dritek, "Force enable the Dritek keyboard extension");
70#endif
71
67#ifdef CONFIG_PNP 72#ifdef CONFIG_PNP
68static int i8042_nopnp; 73static int i8042_nopnp;
69module_param_named(nopnp, i8042_nopnp, bool, 0); 74module_param_named(nopnp, i8042_nopnp, bool, 0);
@@ -280,7 +285,14 @@ static void i8042_stop(struct serio *serio)
280 struct i8042_port *port = serio->port_data; 285 struct i8042_port *port = serio->port_data;
281 286
282 port->exists = 0; 287 port->exists = 0;
283 synchronize_sched(); 288
289 /*
290 * We synchronize with both AUX and KBD IRQs because there is
291 * a (very unlikely) chance that AUX IRQ is raised for KBD port
292 * and vice versa.
293 */
294 synchronize_irq(I8042_AUX_IRQ);
295 synchronize_irq(I8042_KBD_IRQ);
284 port->serio = NULL; 296 port->serio = NULL;
285} 297}
286 298
@@ -1139,6 +1151,7 @@ static int __devinit i8042_setup_kbd(void)
1139static int __devinit i8042_probe(struct platform_device *dev) 1151static int __devinit i8042_probe(struct platform_device *dev)
1140{ 1152{
1141 int error; 1153 int error;
1154 char param;
1142 1155
1143 error = i8042_controller_selftest(); 1156 error = i8042_controller_selftest();
1144 if (error) 1157 if (error)
@@ -1159,7 +1172,14 @@ static int __devinit i8042_probe(struct platform_device *dev)
1159 if (error) 1172 if (error)
1160 goto out_fail; 1173 goto out_fail;
1161 } 1174 }
1162 1175#ifdef CONFIG_X86
1176 if (i8042_dritek) {
1177 param = 0x90;
1178 error = i8042_command(&param, 0x1059);
1179 if (error)
1180 goto out_fail;
1181 }
1182#endif
1163/* 1183/*
1164 * Ok, everything is ready, let's register all serio ports 1184 * Ok, everything is ready, let's register all serio ports
1165 */ 1185 */
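Two independent changes land in i8042.c above: a new x86-only "dritek" module parameter whose probe-time effect is sending command 0x1059 with argument 0x90 to the controller, and a rework of i8042_stop() that replaces synchronize_sched() with two synchronize_irq() calls, for the reason spelled out in the added comment. The teardown ordering it relies on is generic: clear the liveness flag first, wait out any handler already running on either interrupt line, and only then drop the state those handlers dereference. A hedged sketch of that ordering, with hypothetical names and IRQ numbers:

#include <linux/interrupt.h>

struct example_port {
	int exists;		/* checked by the IRQ handler before it touches state */
	void *state;		/* data the handler dereferences */
};

/* hypothetical IRQ numbers standing in for the two controller lines */
#define EXAMPLE_IRQ_A	1
#define EXAMPLE_IRQ_B	12

static void example_port_stop(struct example_port *port)
{
	/* 1. make new handler invocations bail out early */
	port->exists = 0;

	/*
	 * 2. wait for handlers already executing on either line; once
	 *    synchronize_irq() returns, no handler can still be running
	 *    a section that observed exists == 1.
	 */
	synchronize_irq(EXAMPLE_IRQ_A);
	synchronize_irq(EXAMPLE_IRQ_B);

	/* 3. only now is it safe to drop the state the handlers used */
	port->state = NULL;
}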
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 10d9d74ae43a..b819239d74dc 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -13,7 +13,6 @@
13 13
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/slab.h> 16#include <linux/slab.h>
18#include <linux/interrupt.h> 17#include <linux/interrupt.h>
19#include <linux/input.h> 18#include <linux/input.h>
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index fd9c5d51870a..58934a40f5ce 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -116,6 +116,7 @@ struct ads7846 {
116// FIXME remove "irq_disabled" 116// FIXME remove "irq_disabled"
117 unsigned irq_disabled:1; /* P: lock */ 117 unsigned irq_disabled:1; /* P: lock */
118 unsigned disabled:1; 118 unsigned disabled:1;
119 unsigned is_suspended:1;
119 120
120 int (*filter)(void *data, int data_idx, int *val); 121 int (*filter)(void *data, int data_idx, int *val);
121 void *filter_data; 122 void *filter_data;
@@ -203,7 +204,7 @@ static void ads7846_disable(struct ads7846 *ts);
203static int device_suspended(struct device *dev) 204static int device_suspended(struct device *dev)
204{ 205{
205 struct ads7846 *ts = dev_get_drvdata(dev); 206 struct ads7846 *ts = dev_get_drvdata(dev);
206 return dev->power.power_state.event != PM_EVENT_ON || ts->disabled; 207 return ts->is_suspended || ts->disabled;
207} 208}
208 209
209static int ads7846_read12_ser(struct device *dev, unsigned command) 210static int ads7846_read12_ser(struct device *dev, unsigned command)
@@ -794,7 +795,7 @@ static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
794 795
795 spin_lock_irq(&ts->lock); 796 spin_lock_irq(&ts->lock);
796 797
797 spi->dev.power.power_state = message; 798 ts->is_suspended = 1;
798 ads7846_disable(ts); 799 ads7846_disable(ts);
799 800
800 spin_unlock_irq(&ts->lock); 801 spin_unlock_irq(&ts->lock);
@@ -809,7 +810,7 @@ static int ads7846_resume(struct spi_device *spi)
809 810
810 spin_lock_irq(&ts->lock); 811 spin_lock_irq(&ts->lock);
811 812
812 spi->dev.power.power_state = PMSG_ON; 813 ts->is_suspended = 0;
813 ads7846_enable(ts); 814 ads7846_enable(ts);
814 815
815 spin_unlock_irq(&ts->lock); 816 spin_unlock_irq(&ts->lock);
@@ -871,7 +872,6 @@ static int __devinit ads7846_probe(struct spi_device *spi)
871 } 872 }
872 873
873 dev_set_drvdata(&spi->dev, ts); 874 dev_set_drvdata(&spi->dev, ts);
874 spi->dev.power.power_state = PMSG_ON;
875 875
876 ts->spi = spi; 876 ts->spi = spi;
877 ts->input = input_dev; 877 ts->input = input_dev;
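The ads7846 changes above stop the driver from reading and writing dev->power.power_state, a legacy field that was being phased out, and instead track suspension in a driver-private is_suspended bit updated under the same spinlock as the rest of the touchscreen state. A minimal sketch of that pattern with hypothetical names:

#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/spi/spi.h>

struct example_ts {			/* hypothetical driver state */
	spinlock_t lock;
	unsigned is_suspended:1;
	unsigned disabled:1;
};

/* replaces checks of dev->power.power_state with driver-owned state */
static int example_device_suspended(struct example_ts *ts)
{
	return ts->is_suspended || ts->disabled;
}

static int example_suspend(struct spi_device *spi, pm_message_t message)
{
	struct example_ts *ts = dev_get_drvdata(&spi->dev);

	spin_lock_irq(&ts->lock);
	ts->is_suspended = 1;		/* protected by ts->lock, like the rest */
	spin_unlock_irq(&ts->lock);
	return 0;
}

static int example_resume(struct spi_device *spi)
{
	struct example_ts *ts = dev_get_drvdata(&spi->dev);

	spin_lock_irq(&ts->lock);
	ts->is_suspended = 0;
	spin_unlock_irq(&ts->lock);
	return 0;
}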
diff --git a/drivers/input/touchscreen/mk712.c b/drivers/input/touchscreen/mk712.c
index 80a658868706..efd3aebaba5f 100644
--- a/drivers/input/touchscreen/mk712.c
+++ b/drivers/input/touchscreen/mk712.c
@@ -36,7 +36,6 @@
36 */ 36 */
37 37
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/kernel.h> 39#include <linux/kernel.h>
41#include <linux/init.h> 40#include <linux/init.h>
42#include <linux/errno.h> 41#include <linux/errno.h>
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 986a8365e37f..607f9933aa1f 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -15,7 +15,6 @@
15 */ 15 */
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/init.h> 18#include <linux/init.h>
20#include <linux/completion.h> 19#include <linux/completion.h>
21#include <linux/delay.h> 20#include <linux/delay.h>
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index 3e93f8058770..719b17ce83f8 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -241,8 +241,6 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
241 input_dev->evbit[0] = BIT_MASK(EV_KEY); 241 input_dev->evbit[0] = BIT_MASK(EV_KEY);
242 input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0); 242 input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
243 243
244 input_dev->private = cam;
245
246 error = input_register_device(cam->input); 244 error = input_register_device(cam->input);
247 if (error) { 245 if (error) {
248 warn("Failed to register camera's input device, err: %d\n", 246 warn("Failed to register camera's input device, err: %d\n",
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index 5e7b79501370..a2acba0bcc47 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -105,8 +105,6 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
105 input_dev->evbit[0] = BIT_MASK(EV_KEY); 105 input_dev->evbit[0] = BIT_MASK(EV_KEY);
106 input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0); 106 input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
107 107
108 input_dev->private = cam;
109
110 error = input_register_device(cam->input); 108 error = input_register_device(cam->input);
111 if (error) { 109 if (error) {
112 warn("Failed to register camera's input device, err: %d\n", 110 warn("Failed to register camera's input device, err: %d\n",
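Both webcam hunks above delete the input_dev->private assignment; the private member was dropped from struct input_dev, and drivers that need to reach their own state from input callbacks attach it with input_set_drvdata() and fetch it back with input_get_drvdata(). A minimal, hypothetical sketch of the registration and lookup halves of that pairing:

#include <linux/errno.h>
#include <linux/input.h>

struct example_cam {			/* hypothetical driver state */
	struct input_dev *input;
	int button_state;
};

static int example_register_input(struct example_cam *cam)
{
	struct input_dev *input_dev = input_allocate_device();
	int error;

	if (!input_dev)
		return -ENOMEM;

	input_dev->name = "example camera button";
	input_dev->evbit[0] = BIT_MASK(EV_KEY);
	input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);

	input_set_drvdata(input_dev, cam);	/* replaces input_dev->private = cam */

	error = input_register_device(input_dev);
	if (error) {
		input_free_device(input_dev);
		return error;
	}

	cam->input = input_dev;
	return 0;
}

static void example_report_button(struct input_dev *input_dev, int down)
{
	struct example_cam *cam = input_get_drvdata(input_dev);

	cam->button_state = down;
	input_report_key(input_dev, BTN_0, down);
	input_sync(input_dev);
}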
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 78cd33861766..7b5220ca7d7f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -285,4 +285,13 @@ config INTEL_MENLOW
285 285
286 If unsure, say N. 286 If unsure, say N.
287 287
288config ENCLOSURE_SERVICES
289 tristate "Enclosure Services"
290 default n
291 help
292 Provides support for intelligent enclosures (bays which
293 contain storage devices). You also need either a host
294 driver (SCSI/ATA) which supports enclosures
295 or a SCSI enclosure device (SES) to use these services.
296
288endif # MISC_DEVICES 297endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 1f41654aae4d..7f13549cc87e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
20obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o 20obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
21obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o 21obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
22obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o 22obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
23obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
new file mode 100644
index 000000000000..6fcb0e96adf4
--- /dev/null
+++ b/drivers/misc/enclosure.c
@@ -0,0 +1,484 @@
1/*
2 * Enclosure Services
3 *
4 * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
5 *
6**-----------------------------------------------------------------------------
7**
8** This program is free software; you can redistribute it and/or
9** modify it under the terms of the GNU General Public License
10** version 2 as published by the Free Software Foundation.
11**
12** This program is distributed in the hope that it will be useful,
13** but WITHOUT ANY WARRANTY; without even the implied warranty of
14** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15** GNU General Public License for more details.
16**
17** You should have received a copy of the GNU General Public License
18** along with this program; if not, write to the Free Software
19** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20**
21**-----------------------------------------------------------------------------
22*/
23#include <linux/device.h>
24#include <linux/enclosure.h>
25#include <linux/err.h>
26#include <linux/list.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/mutex.h>
30
31static LIST_HEAD(container_list);
32static DEFINE_MUTEX(container_list_lock);
33static struct class enclosure_class;
34static struct class enclosure_component_class;
35
36/**
37 * enclosure_find - find an enclosure given a device
38 * @dev: the device to find for
39 *
40 * Looks through the list of registered enclosures to see
41 * if it can find a match for a device. Returns NULL if no
42 * enclosure is found. Obtains a reference to the enclosure class
43 * device which must be released with class_device_put().
44 */
45struct enclosure_device *enclosure_find(struct device *dev)
46{
47 struct enclosure_device *edev = NULL;
48
49 mutex_lock(&container_list_lock);
50 list_for_each_entry(edev, &container_list, node) {
51 if (edev->cdev.dev == dev) {
52 class_device_get(&edev->cdev);
53 mutex_unlock(&container_list_lock);
54 return edev;
55 }
56 }
57 mutex_unlock(&container_list_lock);
58
59 return NULL;
60}
61EXPORT_SYMBOL_GPL(enclosure_find);
62
63/**
64 * enclosure_for_each_device - calls a function for each enclosure
65 * @fn: the function to call
66 * @data: the data to pass to each call
67 *
68 * Loops over all the enclosures calling the function.
69 *
70 * Note, this function uses a mutex which will be held across calls to
71 * @fn, so it must have non atomic context, and @fn may (although it
72 * should not) sleep or otherwise cause the mutex to be held for
73 * indefinite periods
74 */
75int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
76 void *data)
77{
78 int error = 0;
79 struct enclosure_device *edev;
80
81 mutex_lock(&container_list_lock);
82 list_for_each_entry(edev, &container_list, node) {
83 error = fn(edev, data);
84 if (error)
85 break;
86 }
87 mutex_unlock(&container_list_lock);
88
89 return error;
90}
91EXPORT_SYMBOL_GPL(enclosure_for_each_device);
92
93/**
94 * enclosure_register - register device as an enclosure
95 *
96 * @dev: device containing the enclosure
97 * @components: number of components in the enclosure
98 *
99 * This sets up the device for being an enclosure. Note that @dev does
100 * not have to be a dedicated enclosure device. It may be some other type
101 * of device that additionally responds to enclosure services
102 */
103struct enclosure_device *
104enclosure_register(struct device *dev, const char *name, int components,
105 struct enclosure_component_callbacks *cb)
106{
107 struct enclosure_device *edev =
108 kzalloc(sizeof(struct enclosure_device) +
109 sizeof(struct enclosure_component)*components,
110 GFP_KERNEL);
111 int err, i;
112
113 BUG_ON(!cb);
114
115 if (!edev)
116 return ERR_PTR(-ENOMEM);
117
118 edev->components = components;
119
120 edev->cdev.class = &enclosure_class;
121 edev->cdev.dev = get_device(dev);
122 edev->cb = cb;
123 snprintf(edev->cdev.class_id, BUS_ID_SIZE, "%s", name);
124 err = class_device_register(&edev->cdev);
125 if (err)
126 goto err;
127
128 for (i = 0; i < components; i++)
129 edev->component[i].number = -1;
130
131 mutex_lock(&container_list_lock);
132 list_add_tail(&edev->node, &container_list);
133 mutex_unlock(&container_list_lock);
134
135 return edev;
136
137 err:
138 put_device(edev->cdev.dev);
139 kfree(edev);
140 return ERR_PTR(err);
141}
142EXPORT_SYMBOL_GPL(enclosure_register);
143
144static struct enclosure_component_callbacks enclosure_null_callbacks;
145
146/**
147 * enclosure_unregister - remove an enclosure
148 *
149 * @edev: the registered enclosure to remove;
150 */
151void enclosure_unregister(struct enclosure_device *edev)
152{
153 int i;
154
155 mutex_lock(&container_list_lock);
156 list_del(&edev->node);
157 mutex_unlock(&container_list_lock);
158
159 for (i = 0; i < edev->components; i++)
160 if (edev->component[i].number != -1)
161 class_device_unregister(&edev->component[i].cdev);
162
163 /* prevent any callbacks into service user */
164 edev->cb = &enclosure_null_callbacks;
165 class_device_unregister(&edev->cdev);
166}
167EXPORT_SYMBOL_GPL(enclosure_unregister);
168
169static void enclosure_release(struct class_device *cdev)
170{
171 struct enclosure_device *edev = to_enclosure_device(cdev);
172
173 put_device(cdev->dev);
174 kfree(edev);
175}
176
177static void enclosure_component_release(struct class_device *cdev)
178{
179 if (cdev->dev)
180 put_device(cdev->dev);
181 class_device_put(cdev->parent);
182}
183
184/**
185 * enclosure_component_register - add a particular component to an enclosure
186 * @edev: the enclosure to add the component
187 * @num: the device number
188 * @type: the type of component being added
189 * @name: an optional name to appear in sysfs (leave NULL if none)
190 *
191 * Registers the component. The name is optional for enclosures that
192 * give their components a unique name. If not, leave the field NULL
193 * and a name will be assigned.
194 *
195 * Returns a pointer to the enclosure component or an error.
196 */
197struct enclosure_component *
198enclosure_component_register(struct enclosure_device *edev,
199 unsigned int number,
200 enum enclosure_component_type type,
201 const char *name)
202{
203 struct enclosure_component *ecomp;
204 struct class_device *cdev;
205 int err;
206
207 if (number >= edev->components)
208 return ERR_PTR(-EINVAL);
209
210 ecomp = &edev->component[number];
211
212 if (ecomp->number != -1)
213 return ERR_PTR(-EINVAL);
214
215 ecomp->type = type;
216 ecomp->number = number;
217 cdev = &ecomp->cdev;
218 cdev->parent = class_device_get(&edev->cdev);
219 cdev->class = &enclosure_component_class;
220 if (name)
221 snprintf(cdev->class_id, BUS_ID_SIZE, "%s", name);
222 else
223 snprintf(cdev->class_id, BUS_ID_SIZE, "%u", number);
224
225 err = class_device_register(cdev);
226 if (err)
227 return ERR_PTR(err);
228
229 return ecomp;
230}
231EXPORT_SYMBOL_GPL(enclosure_component_register);
232
233/**
234 * enclosure_add_device - add a device as being part of an enclosure
235 * @edev: the enclosure device being added to.
236 * @num: the number of the component
237 * @dev: the device being added
238 *
239 * Declares a real device to reside in slot (or identifier) @num of an
240 * enclosure. This will cause the relevant sysfs links to appear.
241 * This function may also be used to change a device associated with
242 * an enclosure without having to call enclosure_remove_device() in
243 * between.
244 *
245 * Returns zero on success or an error.
246 */
247int enclosure_add_device(struct enclosure_device *edev, int component,
248 struct device *dev)
249{
250 struct class_device *cdev;
251
252 if (!edev || component >= edev->components)
253 return -EINVAL;
254
255 cdev = &edev->component[component].cdev;
256
257 class_device_del(cdev);
258 if (cdev->dev)
259 put_device(cdev->dev);
260 cdev->dev = get_device(dev);
261 return class_device_add(cdev);
262}
263EXPORT_SYMBOL_GPL(enclosure_add_device);
264
265/**
266 * enclosure_remove_device - remove a device from an enclosure
267 * @edev: the enclosure device
268 * @num: the number of the component to remove
269 *
270 * Returns zero on success or an error.
271 *
272 */
273int enclosure_remove_device(struct enclosure_device *edev, int component)
274{
275 struct class_device *cdev;
276
277 if (!edev || component >= edev->components)
278 return -EINVAL;
279
280 cdev = &edev->component[component].cdev;
281
282 class_device_del(cdev);
283 if (cdev->dev)
284 put_device(cdev->dev);
285 cdev->dev = NULL;
286 return class_device_add(cdev);
287}
288EXPORT_SYMBOL_GPL(enclosure_remove_device);
289
290/*
291 * sysfs pieces below
292 */
293
294static ssize_t enclosure_show_components(struct class_device *cdev, char *buf)
295{
296 struct enclosure_device *edev = to_enclosure_device(cdev);
297
298 return snprintf(buf, 40, "%d\n", edev->components);
299}
300
301static struct class_device_attribute enclosure_attrs[] = {
302 __ATTR(components, S_IRUGO, enclosure_show_components, NULL),
303 __ATTR_NULL
304};
305
306static struct class enclosure_class = {
307 .name = "enclosure",
308 .owner = THIS_MODULE,
309 .release = enclosure_release,
310 .class_dev_attrs = enclosure_attrs,
311};
312
313static const char *const enclosure_status [] = {
314 [ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
315 [ENCLOSURE_STATUS_OK] = "OK",
316 [ENCLOSURE_STATUS_CRITICAL] = "critical",
317 [ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical",
318 [ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable",
319 [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
320 [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
321 [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
322};
323
324static const char *const enclosure_type [] = {
325 [ENCLOSURE_COMPONENT_DEVICE] = "device",
326 [ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
327};
328
329static ssize_t get_component_fault(struct class_device *cdev, char *buf)
330{
331 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
332 struct enclosure_component *ecomp = to_enclosure_component(cdev);
333
334 if (edev->cb->get_fault)
335 edev->cb->get_fault(edev, ecomp);
336 return snprintf(buf, 40, "%d\n", ecomp->fault);
337}
338
339static ssize_t set_component_fault(struct class_device *cdev, const char *buf,
340 size_t count)
341{
342 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
343 struct enclosure_component *ecomp = to_enclosure_component(cdev);
344 int val = simple_strtoul(buf, NULL, 0);
345
346 if (edev->cb->set_fault)
347 edev->cb->set_fault(edev, ecomp, val);
348 return count;
349}
350
351static ssize_t get_component_status(struct class_device *cdev, char *buf)
352{
353 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
354 struct enclosure_component *ecomp = to_enclosure_component(cdev);
355
356 if (edev->cb->get_status)
357 edev->cb->get_status(edev, ecomp);
358 return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]);
359}
360
361static ssize_t set_component_status(struct class_device *cdev, const char *buf,
362 size_t count)
363{
364 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
365 struct enclosure_component *ecomp = to_enclosure_component(cdev);
366 int i;
367
368 for (i = 0; enclosure_status[i]; i++) {
369 if (strncmp(buf, enclosure_status[i],
370 strlen(enclosure_status[i])) == 0 &&
371 (buf[strlen(enclosure_status[i])] == '\n' ||
372 buf[strlen(enclosure_status[i])] == '\0'))
373 break;
374 }
375
376 if (enclosure_status[i] && edev->cb->set_status) {
377 edev->cb->set_status(edev, ecomp, i);
378 return count;
379 } else
380 return -EINVAL;
381}
382
383static ssize_t get_component_active(struct class_device *cdev, char *buf)
384{
385 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
386 struct enclosure_component *ecomp = to_enclosure_component(cdev);
387
388 if (edev->cb->get_active)
389 edev->cb->get_active(edev, ecomp);
390 return snprintf(buf, 40, "%d\n", ecomp->active);
391}
392
393static ssize_t set_component_active(struct class_device *cdev, const char *buf,
394 size_t count)
395{
396 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
397 struct enclosure_component *ecomp = to_enclosure_component(cdev);
398 int val = simple_strtoul(buf, NULL, 0);
399
400 if (edev->cb->set_active)
401 edev->cb->set_active(edev, ecomp, val);
402 return count;
403}
404
405static ssize_t get_component_locate(struct class_device *cdev, char *buf)
406{
407 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
408 struct enclosure_component *ecomp = to_enclosure_component(cdev);
409
410 if (edev->cb->get_locate)
411 edev->cb->get_locate(edev, ecomp);
412 return snprintf(buf, 40, "%d\n", ecomp->locate);
413}
414
415static ssize_t set_component_locate(struct class_device *cdev, const char *buf,
416 size_t count)
417{
418 struct enclosure_device *edev = to_enclosure_device(cdev->parent);
419 struct enclosure_component *ecomp = to_enclosure_component(cdev);
420 int val = simple_strtoul(buf, NULL, 0);
421
422 if (edev->cb->set_locate)
423 edev->cb->set_locate(edev, ecomp, val);
424 return count;
425}
426
427static ssize_t get_component_type(struct class_device *cdev, char *buf)
428{
429 struct enclosure_component *ecomp = to_enclosure_component(cdev);
430
431 return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
432}
433
434
435static struct class_device_attribute enclosure_component_attrs[] = {
436 __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
437 set_component_fault),
438 __ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
439 set_component_status),
440 __ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
441 set_component_active),
442 __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
443 set_component_locate),
444 __ATTR(type, S_IRUGO, get_component_type, NULL),
445 __ATTR_NULL
446};
447
448static struct class enclosure_component_class = {
449 .name = "enclosure_component",
450 .owner = THIS_MODULE,
451 .class_dev_attrs = enclosure_component_attrs,
452 .release = enclosure_component_release,
453};
454
455static int __init enclosure_init(void)
456{
457 int err;
458
459 err = class_register(&enclosure_class);
460 if (err)
461 return err;
462 err = class_register(&enclosure_component_class);
463 if (err)
464 goto err_out;
465
466 return 0;
467 err_out:
468 class_unregister(&enclosure_class);
469
470 return err;
471}
472
473static void __exit enclosure_exit(void)
474{
475 class_unregister(&enclosure_component_class);
476 class_unregister(&enclosure_class);
477}
478
479module_init(enclosure_init);
480module_exit(enclosure_exit);
481
482MODULE_AUTHOR("James Bottomley");
483MODULE_DESCRIPTION("Enclosure Services");
484MODULE_LICENSE("GPL v2");
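The new enclosure.c above provides only the class infrastructure; a backing driver supplies the callbacks and the topology (the SCSI hook-up appears below, where drivers/scsi gains CONFIG_SCSI_ENCLOSURE and ses.o). A hedged sketch of how such a driver would use this API, with callback prototypes inferred from the edev->cb calls in the file above and all names hypothetical:

#include <linux/device.h>
#include <linux/enclosure.h>
#include <linux/err.h>

/* prototypes inferred from the calls enclosure.c makes through edev->cb */
static void example_get_fault(struct enclosure_device *edev,
			      struct enclosure_component *ecomp)
{
	ecomp->fault = 0;	/* a real driver would query its hardware here */
}

static void example_set_fault(struct enclosure_device *edev,
			      struct enclosure_component *ecomp, int val)
{
	/* a real driver would send the fault/identify request to the enclosure */
}

static struct enclosure_component_callbacks example_cb = {
	.get_fault = example_get_fault,
	.set_fault = example_set_fault,
	/* the remaining callbacks are optional; enclosure.c checks for NULL */
};

/* register an enclosure with two slots and bind a disk to slot 0 */
static int example_attach(struct device *enclosure_dev, struct device *disk_dev)
{
	struct enclosure_device *edev;
	struct enclosure_component *ecomp;

	edev = enclosure_register(enclosure_dev, "example", 2, &example_cb);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ecomp = enclosure_component_register(edev, 0,
					     ENCLOSURE_COMPONENT_DEVICE, NULL);
	if (IS_ERR(ecomp)) {
		enclosure_unregister(edev);
		return PTR_ERR(ecomp);
	}

	return enclosure_add_device(edev, 0, disk_dev);
}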
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 14fc7f39e83e..a5f0aaaf0dd4 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -179,7 +179,15 @@ config CHR_DEV_SCH
179 say M here and read <file:Documentation/kbuild/modules.txt> and 179 say M here and read <file:Documentation/kbuild/modules.txt> and
180 <file:Documentation/scsi/scsi.txt>. The module will be called ch.o. 180 <file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
181 If unsure, say N. 181 If unsure, say N.
182 182
183config SCSI_ENCLOSURE
184 tristate "SCSI Enclosure Support"
185 depends on SCSI && ENCLOSURE_SERVICES
186 help
187 Enclosures are devices sitting on or in SCSI backplanes that
188 manage devices. If you have a disk cage, the chances are that
189 it has an enclosure device. Selecting this option will just allow
190 certain enclosure conditions to be reported and is not required.
183 191
184comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs" 192comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
185 depends on SCSI 193 depends on SCSI
@@ -350,17 +358,6 @@ config SGIWD93_SCSI
350 If you have a Western Digital WD93 SCSI controller on 358 If you have a Western Digital WD93 SCSI controller on
351 an SGI MIPS system, say Y. Otherwise, say N. 359 an SGI MIPS system, say Y. Otherwise, say N.
352 360
353config SCSI_DECNCR
354 tristate "DEC NCR53C94 Scsi Driver"
355 depends on MACH_DECSTATION && SCSI && TC
356 help
357 Say Y here to support the NCR53C94 SCSI controller chips on IOASIC
358 based TURBOchannel DECstations and TURBOchannel PMAZ-A cards.
359
360config SCSI_DECSII
361 tristate "DEC SII Scsi Driver"
362 depends on MACH_DECSTATION && SCSI && 32BIT
363
364config BLK_DEV_3W_XXXX_RAID 361config BLK_DEV_3W_XXXX_RAID
365 tristate "3ware 5/6/7/8xxx ATA-RAID support" 362 tristate "3ware 5/6/7/8xxx ATA-RAID support"
366 depends on PCI && SCSI 363 depends on PCI && SCSI
@@ -1263,17 +1260,6 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
1263 not allow targets to disconnect is not reasonable if there is more 1260 not allow targets to disconnect is not reasonable if there is more
1264 than 1 device on a SCSI bus. The normal answer therefore is N. 1261 than 1 device on a SCSI bus. The normal answer therefore is N.
1265 1262
1266config SCSI_MCA_53C9X
1267 tristate "NCR MCA 53C9x SCSI support"
1268 depends on MCA_LEGACY && SCSI && BROKEN_ON_SMP
1269 help
1270 Some MicroChannel machines, notably the NCR 35xx line, use a SCSI
1271 controller based on the NCR 53C94. This driver will allow use of
1272 the controller on the 3550, and very possibly others.
1273
1274 To compile this driver as a module, choose M here: the
1275 module will be called mca_53c9x.
1276
1277config SCSI_PAS16 1263config SCSI_PAS16
1278 tristate "PAS16 SCSI support" 1264 tristate "PAS16 SCSI support"
1279 depends on ISA && SCSI 1265 depends on ISA && SCSI
@@ -1600,45 +1586,6 @@ config GVP11_SCSI
1600 To compile this driver as a module, choose M here: the 1586 To compile this driver as a module, choose M here: the
1601 module will be called gvp11. 1587 module will be called gvp11.
1602 1588
1603config CYBERSTORM_SCSI
1604 tristate "CyberStorm SCSI support"
1605 depends on ZORRO && SCSI
1606 help
1607 If you have an Amiga with an original (MkI) Phase5 Cyberstorm
1608 accelerator board and the optional Cyberstorm SCSI controller,
1609 answer Y. Otherwise, say N.
1610
1611config CYBERSTORMII_SCSI
1612 tristate "CyberStorm Mk II SCSI support"
1613 depends on ZORRO && SCSI
1614 help
1615 If you have an Amiga with a Phase5 Cyberstorm MkII accelerator board
1616 and the optional Cyberstorm SCSI controller, say Y. Otherwise,
1617 answer N.
1618
1619config BLZ2060_SCSI
1620 tristate "Blizzard 2060 SCSI support"
1621 depends on ZORRO && SCSI
1622 help
1623 If you have an Amiga with a Phase5 Blizzard 2060 accelerator board
1624 and want to use the onboard SCSI controller, say Y. Otherwise,
1625 answer N.
1626
1627config BLZ1230_SCSI
1628 tristate "Blizzard 1230IV/1260 SCSI support"
1629 depends on ZORRO && SCSI
1630 help
1631 If you have an Amiga 1200 with a Phase5 Blizzard 1230IV or Blizzard
1632 1260 accelerator, and the optional SCSI module, say Y. Otherwise,
1633 say N.
1634
1635config FASTLANE_SCSI
1636 tristate "Fastlane SCSI support"
1637 depends on ZORRO && SCSI
1638 help
1639 If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use
1640 one in the near future, say Y to this question. Otherwise, say N.
1641
1642config SCSI_A4000T 1589config SCSI_A4000T
1643 tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)" 1590 tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
1644 depends on AMIGA && SCSI && EXPERIMENTAL 1591 depends on AMIGA && SCSI && EXPERIMENTAL
@@ -1666,15 +1613,6 @@ config SCSI_ZORRO7XX
1666 accelerator card for the Amiga 1200, 1613 accelerator card for the Amiga 1200,
1667 - the SCSI controller on the GVP Turbo 040/060 accelerator. 1614 - the SCSI controller on the GVP Turbo 040/060 accelerator.
1668 1615
1669config OKTAGON_SCSI
1670 tristate "BSC Oktagon SCSI support (EXPERIMENTAL)"
1671 depends on ZORRO && SCSI && EXPERIMENTAL
1672 help
1673 If you have the BSC Oktagon SCSI disk controller for the Amiga, say
1674 Y to this question. If you're in doubt about whether you have one,
1675 see the picture at
1676 <http://amiga.resource.cx/exp/search.pl?product=oktagon>.
1677
1678config ATARI_SCSI 1616config ATARI_SCSI
1679 tristate "Atari native SCSI support" 1617 tristate "Atari native SCSI support"
1680 depends on ATARI && SCSI 1618 depends on ATARI && SCSI
@@ -1727,18 +1665,6 @@ config MAC_SCSI
1727 SCSI-HOWTO, available from 1665 SCSI-HOWTO, available from
1728 <http://www.tldp.org/docs.html#howto>. 1666 <http://www.tldp.org/docs.html#howto>.
1729 1667
1730config SCSI_MAC_ESP
1731 tristate "Macintosh NCR53c9[46] SCSI"
1732 depends on MAC && SCSI
1733 help
1734 This is the NCR 53c9x SCSI controller found on most of the 68040
1735 based Macintoshes. If you have one of these say Y and read the
1736 SCSI-HOWTO, available from
1737 <http://www.tldp.org/docs.html#howto>.
1738
1739 To compile this driver as a module, choose M here: the
1740 module will be called mac_esp.
1741
1742config MVME147_SCSI 1668config MVME147_SCSI
1743 bool "WD33C93 SCSI driver for MVME147" 1669 bool "WD33C93 SCSI driver for MVME147"
1744 depends on MVME147 && SCSI=y 1670 depends on MVME147 && SCSI=y
@@ -1779,6 +1705,7 @@ config SUN3_SCSI
1779config SUN3X_ESP 1705config SUN3X_ESP
1780 bool "Sun3x ESP SCSI" 1706 bool "Sun3x ESP SCSI"
1781 depends on SUN3X && SCSI=y 1707 depends on SUN3X && SCSI=y
1708 select SCSI_SPI_ATTRS
1782 help 1709 help
1783 The ESP was an on-board SCSI controller used on Sun 3/80 1710 The ESP was an on-board SCSI controller used on Sun 3/80
1784 machines. Say Y here to compile in support for it. 1711 machines. Say Y here to compile in support for it.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 93e1428d03fc..925c26b4fff9 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -44,15 +44,8 @@ obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
44obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o 44obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
45obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o 45obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
46obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o 46obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
47obj-$(CONFIG_CYBERSTORM_SCSI) += NCR53C9x.o cyberstorm.o
48obj-$(CONFIG_CYBERSTORMII_SCSI) += NCR53C9x.o cyberstormII.o
49obj-$(CONFIG_BLZ2060_SCSI) += NCR53C9x.o blz2060.o
50obj-$(CONFIG_BLZ1230_SCSI) += NCR53C9x.o blz1230.o
51obj-$(CONFIG_FASTLANE_SCSI) += NCR53C9x.o fastlane.o
52obj-$(CONFIG_OKTAGON_SCSI) += NCR53C9x.o oktagon_esp_mod.o
53obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o 47obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
54obj-$(CONFIG_MAC_SCSI) += mac_scsi.o 48obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
55obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
56obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o 49obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
57obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o 50obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
58obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o 51obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
@@ -95,7 +88,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
95obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o 88obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
96obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o 89obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
97obj-$(CONFIG_SCSI_7000FASST) += wd7000.o 90obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
98obj-$(CONFIG_SCSI_MCA_53C9X) += NCR53C9x.o mca_53c9x.o
99obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o 91obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o
100obj-$(CONFIG_SCSI_EATA) += eata.o 92obj-$(CONFIG_SCSI_EATA) += eata.o
101obj-$(CONFIG_SCSI_DC395x) += dc395x.o 93obj-$(CONFIG_SCSI_DC395x) += dc395x.o
@@ -112,13 +104,12 @@ obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
112obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o 104obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o
113obj-$(CONFIG_SCSI_MESH) += mesh.o 105obj-$(CONFIG_SCSI_MESH) += mesh.o
114obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o 106obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
115obj-$(CONFIG_SCSI_DECNCR) += NCR53C9x.o dec_esp.o
116obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o 107obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
117obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o 108obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
118obj-$(CONFIG_SCSI_PPA) += ppa.o 109obj-$(CONFIG_SCSI_PPA) += ppa.o
119obj-$(CONFIG_SCSI_IMM) += imm.o 110obj-$(CONFIG_SCSI_IMM) += imm.o
120obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o 111obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
121obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o 112obj-$(CONFIG_SUN3X_ESP) += esp_scsi.o sun3x_esp.o
122obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o 113obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
123obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o 114obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
124obj-$(CONFIG_SCSI_NSP32) += nsp32.o 115obj-$(CONFIG_SCSI_NSP32) += nsp32.o
@@ -138,6 +129,7 @@ obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o
138obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o 129obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
139obj-$(CONFIG_CHR_DEV_SG) += sg.o 130obj-$(CONFIG_CHR_DEV_SG) += sg.o
140obj-$(CONFIG_CHR_DEV_SCH) += ch.o 131obj-$(CONFIG_CHR_DEV_SCH) += ch.o
132obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
141 133
142# This goes last, so that "real" scsi devices probe earlier 134# This goes last, so that "real" scsi devices probe earlier
143obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o 135obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
deleted file mode 100644
index 5b0efc903918..000000000000
--- a/drivers/scsi/NCR53C9x.c
+++ /dev/null
@@ -1,3654 +0,0 @@
1/* NCR53C9x.c: Generic SCSI driver code for NCR53C9x chips.
2 *
3 * Originally esp.c : EnhancedScsiProcessor Sun SCSI driver code.
4 *
5 * Copyright (C) 1995, 1998 David S. Miller (davem@caip.rutgers.edu)
6 *
7 * Most DMA dependencies put in driver specific files by
8 * Jesper Skov (jskov@cygnus.co.uk)
9 *
10 * Set up to use esp_read/esp_write (preprocessor macros in NCR53c9x.h) by
11 * Tymm Twillman (tymm@coe.missouri.edu)
12 */
13
14/* TODO:
15 *
16 * 1) Maybe disable parity checking in config register one for SCSI1
17 * targets. (Gilmore says parity error on the SBus can lock up
18 * old sun4c's)
19 * 2) Add support for DMA2 pipelining.
20 * 3) Add tagged queueing.
21 * 4) Maybe change use of "esp" to something more "NCR"'ish.
22 */
23
24#include <linux/module.h>
25
26#include <linux/kernel.h>
27#include <linux/delay.h>
28#include <linux/types.h>
29#include <linux/string.h>
30#include <linux/slab.h>
31#include <linux/blkdev.h>
32#include <linux/interrupt.h>
33#include <linux/proc_fs.h>
34#include <linux/stat.h>
35#include <linux/init.h>
36
37#include "scsi.h"
38#include <scsi/scsi_host.h>
39#include "NCR53C9x.h"
40
41#include <asm/system.h>
42#include <asm/ptrace.h>
43#include <asm/pgtable.h>
44#include <asm/io.h>
45#include <asm/irq.h>
46
47/* Command phase enumeration. */
48enum {
49 not_issued = 0x00, /* Still in the issue_SC queue. */
50
51 /* Various forms of selecting a target. */
52#define in_slct_mask 0x10
53 in_slct_norm = 0x10, /* ESP is arbitrating, normal selection */
54 in_slct_stop = 0x11, /* ESP will select, then stop with IRQ */
55 in_slct_msg = 0x12, /* select, then send a message */
56 in_slct_tag = 0x13, /* select and send tagged queue msg */
57 in_slct_sneg = 0x14, /* select and acquire sync capabilities */
58
59 /* Any post selection activity. */
60#define in_phases_mask 0x20
61 in_datain = 0x20, /* Data is transferring from the bus */
62 in_dataout = 0x21, /* Data is transferring to the bus */
63 in_data_done = 0x22, /* Last DMA data operation done (maybe) */
64 in_msgin = 0x23, /* Eating message from target */
65 in_msgincont = 0x24, /* Eating more msg bytes from target */
66 in_msgindone = 0x25, /* Decide what to do with what we got */
67 in_msgout = 0x26, /* Sending message to target */
68 in_msgoutdone = 0x27, /* Done sending msg out */
69 in_cmdbegin = 0x28, /* Sending cmd after abnormal selection */
70 in_cmdend = 0x29, /* Done sending slow cmd */
71 in_status = 0x2a, /* Was in status phase, finishing cmd */
72 in_freeing = 0x2b, /* freeing the bus for cmd cmplt or disc */
73 in_the_dark = 0x2c, /* Don't know what bus phase we are in */
74
75 /* Special states, ie. not normal bus transitions... */
76#define in_spec_mask 0x80
77 in_abortone = 0x80, /* Aborting one command currently */
78 in_abortall = 0x81, /* Blowing away all commands we have */
79 in_resetdev = 0x82, /* SCSI target reset in progress */
80 in_resetbus = 0x83, /* SCSI bus reset in progress */
81 in_tgterror = 0x84, /* Target did something stupid */
82};
83
84enum {
85 /* Zero has special meaning, see skipahead[12]. */
86/*0*/ do_never,
87
88/*1*/ do_phase_determine,
89/*2*/ do_reset_bus,
90/*3*/ do_reset_complete,
91/*4*/ do_work_bus,
92/*5*/ do_intr_end
93};
94
95/* The master ring of all esp hosts we are managing in this driver. */
96static struct NCR_ESP *espchain;
97int nesps = 0, esps_in_use = 0, esps_running = 0;
98EXPORT_SYMBOL(nesps);
99EXPORT_SYMBOL(esps_running);
100
101irqreturn_t esp_intr(int irq, void *dev_id);
102
103/* Debugging routines */
104static struct esp_cmdstrings {
105 unchar cmdchar;
106 char *text;
107} esp_cmd_strings[] = {
108 /* Miscellaneous */
109 { ESP_CMD_NULL, "ESP_NOP", },
110 { ESP_CMD_FLUSH, "FIFO_FLUSH", },
111 { ESP_CMD_RC, "RSTESP", },
112 { ESP_CMD_RS, "RSTSCSI", },
113 /* Disconnected State Group */
114 { ESP_CMD_RSEL, "RESLCTSEQ", },
115 { ESP_CMD_SEL, "SLCTNATN", },
116 { ESP_CMD_SELA, "SLCTATN", },
117 { ESP_CMD_SELAS, "SLCTATNSTOP", },
118 { ESP_CMD_ESEL, "ENSLCTRESEL", },
119 { ESP_CMD_DSEL, "DISSELRESEL", },
120 { ESP_CMD_SA3, "SLCTATN3", },
121 { ESP_CMD_RSEL3, "RESLCTSEQ", },
122 /* Target State Group */
123 { ESP_CMD_SMSG, "SNDMSG", },
124 { ESP_CMD_SSTAT, "SNDSTATUS", },
125 { ESP_CMD_SDATA, "SNDDATA", },
126 { ESP_CMD_DSEQ, "DISCSEQ", },
127 { ESP_CMD_TSEQ, "TERMSEQ", },
128 { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", },
129 { ESP_CMD_DCNCT, "DISC", },
130 { ESP_CMD_RMSG, "RCVMSG", },
131 { ESP_CMD_RCMD, "RCVCMD", },
132 { ESP_CMD_RDATA, "RCVDATA", },
133 { ESP_CMD_RCSEQ, "RCVCMDSEQ", },
134 /* Initiator State Group */
135 { ESP_CMD_TI, "TRANSINFO", },
136 { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", },
137 { ESP_CMD_MOK, "MSGACCEPTED", },
138 { ESP_CMD_TPAD, "TPAD", },
139 { ESP_CMD_SATN, "SATN", },
140 { ESP_CMD_RATN, "RATN", },
141};
142#define NUM_ESP_COMMANDS ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings)))
143
144/* Print textual representation of an ESP command */
145static inline void esp_print_cmd(unchar espcmd)
146{
147 unchar dma_bit = espcmd & ESP_CMD_DMA;
148 int i;
149
150 espcmd &= ~dma_bit;
151 for(i=0; i<NUM_ESP_COMMANDS; i++)
152 if(esp_cmd_strings[i].cmdchar == espcmd)
153 break;
154 if(i==NUM_ESP_COMMANDS)
155 printk("ESP_Unknown");
156 else
157 printk("%s%s", esp_cmd_strings[i].text,
158 ((dma_bit) ? "+DMA" : ""));
159}
160
161/* Print the status register's value */
162static inline void esp_print_statreg(unchar statreg)
163{
164 unchar phase;
165
166 printk("STATUS<");
167 phase = statreg & ESP_STAT_PMASK;
168 printk("%s,", (phase == ESP_DOP ? "DATA-OUT" :
169 (phase == ESP_DIP ? "DATA-IN" :
170 (phase == ESP_CMDP ? "COMMAND" :
171 (phase == ESP_STATP ? "STATUS" :
172 (phase == ESP_MOP ? "MSG-OUT" :
173 (phase == ESP_MIP ? "MSG_IN" :
174 "unknown")))))));
175 if(statreg & ESP_STAT_TDONE)
176 printk("TRANS_DONE,");
177 if(statreg & ESP_STAT_TCNT)
178 printk("TCOUNT_ZERO,");
179 if(statreg & ESP_STAT_PERR)
180 printk("P_ERROR,");
181 if(statreg & ESP_STAT_SPAM)
182 printk("SPAM,");
183 if(statreg & ESP_STAT_INTR)
184 printk("IRQ,");
185 printk(">");
186}
187
188/* Print the interrupt register's value */
189static inline void esp_print_ireg(unchar intreg)
190{
191 printk("INTREG< ");
192 if(intreg & ESP_INTR_S)
193 printk("SLCT_NATN ");
194 if(intreg & ESP_INTR_SATN)
195 printk("SLCT_ATN ");
196 if(intreg & ESP_INTR_RSEL)
197 printk("RSLCT ");
198 if(intreg & ESP_INTR_FDONE)
199 printk("FDONE ");
200 if(intreg & ESP_INTR_BSERV)
201 printk("BSERV ");
202 if(intreg & ESP_INTR_DC)
203 printk("DISCNCT ");
204 if(intreg & ESP_INTR_IC)
205 printk("ILL_CMD ");
206 if(intreg & ESP_INTR_SR)
207 printk("SCSI_BUS_RESET ");
208 printk(">");
209}
210
211/* Print the sequence step registers contents */
212static inline void esp_print_seqreg(unchar stepreg)
213{
214 stepreg &= ESP_STEP_VBITS;
215 printk("STEP<%s>",
216 (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" :
217 (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" :
218 (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" :
219 (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" :
220 (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" :
221 "UNKNOWN"))))));
222}
223
224static char *phase_string(int phase)
225{
226 switch(phase) {
227 case not_issued:
228 return "UNISSUED";
229 case in_slct_norm:
230 return "SLCTNORM";
231 case in_slct_stop:
232 return "SLCTSTOP";
233 case in_slct_msg:
234 return "SLCTMSG";
235 case in_slct_tag:
236 return "SLCTTAG";
237 case in_slct_sneg:
238 return "SLCTSNEG";
239 case in_datain:
240 return "DATAIN";
241 case in_dataout:
242 return "DATAOUT";
243 case in_data_done:
244 return "DATADONE";
245 case in_msgin:
246 return "MSGIN";
247 case in_msgincont:
248 return "MSGINCONT";
249 case in_msgindone:
250 return "MSGINDONE";
251 case in_msgout:
252 return "MSGOUT";
253 case in_msgoutdone:
254 return "MSGOUTDONE";
255 case in_cmdbegin:
256 return "CMDBEGIN";
257 case in_cmdend:
258 return "CMDEND";
259 case in_status:
260 return "STATUS";
261 case in_freeing:
262 return "FREEING";
263 case in_the_dark:
264 return "CLUELESS";
265 case in_abortone:
266 return "ABORTONE";
267 case in_abortall:
268 return "ABORTALL";
269 case in_resetdev:
270 return "RESETDEV";
271 case in_resetbus:
272 return "RESETBUS";
273 case in_tgterror:
274 return "TGTERROR";
275 default:
276 return "UNKNOWN";
277 };
278}
279
280#ifdef DEBUG_STATE_MACHINE
281static inline void esp_advance_phase(Scsi_Cmnd *s, int newphase)
282{
283 ESPLOG(("<%s>", phase_string(newphase)));
284 s->SCp.sent_command = s->SCp.phase;
285 s->SCp.phase = newphase;
286}
287#else
288#define esp_advance_phase(__s, __newphase) \
289 (__s)->SCp.sent_command = (__s)->SCp.phase; \
290 (__s)->SCp.phase = (__newphase);
291#endif
292
293#ifdef DEBUG_ESP_CMDS
294static inline void esp_cmd(struct NCR_ESP *esp, struct ESP_regs *eregs,
295 unchar cmd)
296{
297 esp->espcmdlog[esp->espcmdent] = cmd;
298 esp->espcmdent = (esp->espcmdent + 1) & 31;
299 esp_write(eregs->esp_cmnd, cmd);
300}
301#else
302#define esp_cmd(__esp, __eregs, __cmd) esp_write((__eregs)->esp_cmnd, (__cmd))
303#endif
304
305/* How we use the various Linux SCSI data structures for operation.
306 *
307 * struct scsi_cmnd:
308 *
309 * We keep track of the syncronous capabilities of a target
310 * in the device member, using sync_min_period and
311 * sync_max_offset. These are the values we directly write
312 * into the ESP registers while running a command. If offset
313 * is zero the ESP will use asynchronous transfers.
314 * If the borken flag is set we assume we shouldn't even bother
315 * trying to negotiate for synchronous transfer as this target
316 * is really stupid. If we notice the target is dropping the
317 * bus, and we have been allowing it to disconnect, we clear
318 * the disconnect flag.
319 */
320
321/* Manipulation of the ESP command queues. Thanks to the aha152x driver
322 * and its author, Juergen E. Fischer, for the methods used here.
323 * Note that these are per-ESP queues, not global queues like
324 * the aha152x driver uses.
325 */
326static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
327{
328 Scsi_Cmnd *end;
329
330 new_SC->host_scribble = (unsigned char *) NULL;
331 if(!*SC)
332 *SC = new_SC;
333 else {
334 for(end=*SC;end->host_scribble;end=(Scsi_Cmnd *)end->host_scribble)
335 ;
336 end->host_scribble = (unsigned char *) new_SC;
337 }
338}
339
340static inline void prepend_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
341{
342 new_SC->host_scribble = (unsigned char *) *SC;
343 *SC = new_SC;
344}
345
346static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd **SC)
347{
348 Scsi_Cmnd *ptr;
349
350 ptr = *SC;
351 if(ptr)
352 *SC = (Scsi_Cmnd *) (*SC)->host_scribble;
353 return ptr;
354}
355
356static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, int target, int lun)
357{
358 Scsi_Cmnd *ptr, *prev;
359
360 for(ptr = *SC, prev = NULL;
361 ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
362 prev = ptr, ptr = (Scsi_Cmnd *) ptr->host_scribble)
363 ;
364 if(ptr) {
365 if(prev)
366 prev->host_scribble=ptr->host_scribble;
367 else
368 *SC=(Scsi_Cmnd *)ptr->host_scribble;
369 }
370 return ptr;
371}
372
373/* Resetting various pieces of the ESP scsi driver chipset */
374
375/* Reset the ESP chip, _not_ the SCSI bus. */
376static void esp_reset_esp(struct NCR_ESP *esp, struct ESP_regs *eregs)
377{
378 int family_code, version, i;
379 volatile int trash;
380
381 /* Now reset the ESP chip */
382 esp_cmd(esp, eregs, ESP_CMD_RC);
383 esp_cmd(esp, eregs, ESP_CMD_NULL | ESP_CMD_DMA);
384 if(esp->erev == fast)
385 esp_write(eregs->esp_cfg2, ESP_CONFIG2_FENAB);
386 esp_cmd(esp, eregs, ESP_CMD_NULL | ESP_CMD_DMA);
387
388 /* This is the only point at which it is reliable to read
389 * the ID-code for a fast ESP chip variant.
390 */
391 esp->max_period = ((35 * esp->ccycle) / 1000);
392 if(esp->erev == fast) {
393 char *erev2string[] = {
394 "Emulex FAS236",
395 "Emulex FPESP100A",
396 "fast",
397 "QLogic FAS366",
398 "Emulex FAS216",
399 "Symbios Logic 53CF9x-2",
400 "unknown!"
401 };
402
403 version = esp_read(eregs->esp_uid);
404 family_code = (version & 0xf8) >> 3;
405 if(family_code == 0x02) {
406 if ((version & 7) == 2)
407 esp->erev = fas216;
408 else
409 esp->erev = fas236;
410 } else if(family_code == 0x0a)
411 esp->erev = fas366; /* Version is usually '5'. */
412 else if(family_code == 0x00) {
413 if ((version & 7) == 2)
414 esp->erev = fas100a; /* NCR53C9X */
415 else
416 esp->erev = espunknown;
417 } else if(family_code == 0x14) {
418 if ((version & 7) == 2)
419 esp->erev = fsc;
420 else
421 esp->erev = espunknown;
422 } else if(family_code == 0x00) {
423 if ((version & 7) == 2)
424 esp->erev = fas100a; /* NCR53C9X */
425 else
426 esp->erev = espunknown;
427 } else
428 esp->erev = espunknown;
429 ESPLOG(("esp%d: FAST chip is %s (family=%d, version=%d)\n",
430 esp->esp_id, erev2string[esp->erev - fas236],
431 family_code, (version & 7)));
432
433 esp->min_period = ((4 * esp->ccycle) / 1000);
434 } else {
435 esp->min_period = ((5 * esp->ccycle) / 1000);
436 }
437
438 /* Reload the configuration registers */
439 esp_write(eregs->esp_cfact, esp->cfact);
440 esp->prev_stp = 0;
441 esp_write(eregs->esp_stp, 0);
442 esp->prev_soff = 0;
443 esp_write(eregs->esp_soff, 0);
444 esp_write(eregs->esp_timeo, esp->neg_defp);
445 esp->max_period = (esp->max_period + 3)>>2;
446 esp->min_period = (esp->min_period + 3)>>2;
447
448 esp_write(eregs->esp_cfg1, esp->config1);
449 switch(esp->erev) {
450 case esp100:
451 /* nothing to do */
452 break;
453 case esp100a:
454 esp_write(eregs->esp_cfg2, esp->config2);
455 break;
456 case esp236:
457 /* Slow 236 */
458 esp_write(eregs->esp_cfg2, esp->config2);
459 esp->prev_cfg3 = esp->config3[0];
460 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
461 break;
462 case fas366:
463 panic("esp: FAS366 support not present, please notify "
464 "jongk@cs.utwente.nl");
465 break;
466 case fas216:
467 case fas236:
468 case fsc:
469 /* Fast ESP variants */
470 esp_write(eregs->esp_cfg2, esp->config2);
471 for(i=0; i<8; i++)
472 esp->config3[i] |= ESP_CONFIG3_FCLK;
473 esp->prev_cfg3 = esp->config3[0];
474 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
475 if(esp->diff)
476 esp->radelay = 0;
477 else
478 esp->radelay = 16;
479 /* Different timeout constant for these chips */
480 esp->neg_defp =
481 FSC_NEG_DEFP(esp->cfreq,
482 (esp->cfact == ESP_CCF_F0 ?
483 ESP_CCF_F7 + 1 : esp->cfact));
484 esp_write(eregs->esp_timeo, esp->neg_defp);
485 /* Enable Active Negotiation if possible */
486 if((esp->erev == fsc) && !esp->diff)
487 esp_write(eregs->esp_cfg4, ESP_CONFIG4_EAN);
488 break;
489 case fas100a:
490 /* Fast 100a */
491 esp_write(eregs->esp_cfg2, esp->config2);
492 for(i=0; i<8; i++)
493 esp->config3[i] |= ESP_CONFIG3_FCLOCK;
494 esp->prev_cfg3 = esp->config3[0];
495 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
496 esp->radelay = 32;
497 break;
498 default:
499 panic("esp: what could it be... I wonder...");
500 break;
501 };
502
503 /* Eat any bitrot in the chip */
504 trash = esp_read(eregs->esp_intrpt);
505 udelay(100);
506}
507
508/* This places the ESP into a known state at boot time. */
509void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs)
510{
511 volatile unchar trash;
512
513 /* Reset the DMA */
514 if(esp->dma_reset)
515 esp->dma_reset(esp);
516
517 /* Reset the ESP */
518 esp_reset_esp(esp, eregs);
519
520 /* Reset the SCSI bus, but tell ESP not to generate an irq */
521 esp_write(eregs->esp_cfg1, (esp_read(eregs->esp_cfg1) | ESP_CONFIG1_SRRDISAB));
522 esp_cmd(esp, eregs, ESP_CMD_RS);
523 udelay(400);
524 esp_write(eregs->esp_cfg1, esp->config1);
525
526 /* Eat any bitrot in the chip and we are done... */
527 trash = esp_read(eregs->esp_intrpt);
528}
529EXPORT_SYMBOL(esp_bootup_reset);
530
531/* Allocate structure and insert basic data such as SCSI chip frequency
532 * data and a pointer to the device
533 */
534struct NCR_ESP* esp_allocate(struct scsi_host_template *tpnt, void *esp_dev,
535 int hotplug)
536{
537 struct NCR_ESP *esp, *elink;
538 struct Scsi_Host *esp_host;
539
540 if (hotplug)
541 esp_host = scsi_host_alloc(tpnt, sizeof(struct NCR_ESP));
542 else
543 esp_host = scsi_register(tpnt, sizeof(struct NCR_ESP));
544 if(!esp_host)
545 panic("Cannot register ESP SCSI host");
546 esp = (struct NCR_ESP *) esp_host->hostdata;
547 if(!esp)
548 panic("No esp in hostdata");
549 esp->ehost = esp_host;
550 esp->edev = esp_dev;
551 esp->esp_id = nesps++;
552
553 /* Set bitshift value (only used on Amiga with multiple ESPs) */
554 esp->shift = 2;
555
556 /* Put into the chain of esp chips detected */
557 if(espchain) {
558 elink = espchain;
559 while(elink->next) elink = elink->next;
560 elink->next = esp;
561 } else {
562 espchain = esp;
563 }
564 esp->next = NULL;
565
566 return esp;
567}
568
569void esp_deallocate(struct NCR_ESP *esp)
570{
571 struct NCR_ESP *elink;
572
573 if(espchain == esp) {
574 espchain = NULL;
575 } else {
576 for(elink = espchain; elink && (elink->next != esp); elink = elink->next);
577 if(elink)
578 elink->next = esp->next;
579 }
580 nesps--;
581}
582
583/* Complete initialization of ESP structure and device
584 * Caller must have initialized appropriate parts of the ESP structure
585 * between the call to esp_allocate and this function.
586 */
587void esp_initialize(struct NCR_ESP *esp)
588{
589 struct ESP_regs *eregs = esp->eregs;
590 unsigned int fmhz;
591 unchar ccf;
592 int i;
593
594 /* Check out the clock properties of the chip. */
595
596 /* This is getting messy but it has to be done
597 * correctly or else you get weird behavior all
598 * over the place. We are trying to basically
599 * figure out three pieces of information.
600 *
601 * a) Clock Conversion Factor
602 *
603 * This is a representation of the input
604 * crystal clock frequency going into the
605 * ESP on this machine. Any operation whose
606 * timing is longer than 400ns depends on this
607 * value being correct. For example, you'll
608 * get blips for arbitration/selection during
609 * high load or with multiple targets if this
610 * is not set correctly.
611 *
612 * b) Selection Time-Out
613 *
614 * The ESP isn't very bright and will arbitrate
615 * for the bus and try to select a target
616 * forever if you let it. This value tells
617 * the ESP when it has taken too long to
618 * negotiate and that it should interrupt
619 * the CPU so we can see what happened.
620 * The value is computed as follows (from
621 * NCR/Symbios chip docs).
622 *
623 * (Time Out Period) * (Input Clock)
624 * STO = ----------------------------------
625 * (8192) * (Clock Conversion Factor)
626 *
627 * You usually want the time out period to be
628 * around 250ms, I think we'll set it a little
629 * bit higher to account for fully loaded SCSI
630 * bus's and slow devices that don't respond so
631 * quickly to selection attempts. (yeah, I know
632 * this is out of spec. but there is a lot of
633 * buggy pieces of firmware out there so bite me)
634 *
635 * c) Imperical constants for synchronous offset
636 * and transfer period register values
637 *
638 * This entails the smallest and largest sync
639 * period we could ever handle on this ESP.
640 */
641
642 fmhz = esp->cfreq;
643
644 if(fmhz <= (5000000))
645 ccf = 0;
646 else
647 ccf = (((5000000 - 1) + (fmhz))/(5000000));
648 if(!ccf || ccf > 8) {
649 /* If we can't find anything reasonable,
650 * just assume 20MHZ. This is the clock
651 * frequency of the older sun4c's where I've
652 * been unable to find the clock-frequency
653 * PROM property. All other machines provide
654 * useful values it seems.
655 */
656 ccf = ESP_CCF_F4;
657 fmhz = (20000000);
658 }
659 if(ccf==(ESP_CCF_F7+1))
660 esp->cfact = ESP_CCF_F0;
661 else if(ccf == ESP_CCF_NEVER)
662 esp->cfact = ESP_CCF_F2;
663 else
664 esp->cfact = ccf;
665 esp->cfreq = fmhz;
666 esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
667 esp->ctick = ESP_TICK(ccf, esp->ccycle);
668 esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
669 esp->sync_defp = SYNC_DEFP_SLOW;
670
671 printk("SCSI ID %d Clk %dMHz CCF=%d TOut %d ",
672 esp->scsi_id, (esp->cfreq / 1000000),
673 ccf, (int) esp->neg_defp);
674
675 /* Fill in ehost data */
676 esp->ehost->base = (unsigned long)eregs;
677 esp->ehost->this_id = esp->scsi_id;
678 esp->ehost->irq = esp->irq;
679
680 /* SCSI id mask */
681 esp->scsi_id_mask = (1 << esp->scsi_id);
682
683 /* Probe the revision of this esp */
684 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
685 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
686 esp_write(eregs->esp_cfg2, esp->config2);
687 if((esp_read(eregs->esp_cfg2) & ~(ESP_CONFIG2_MAGIC)) !=
688 (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
689 printk("NCR53C90(esp100)\n");
690 esp->erev = esp100;
691 } else {
692 esp->config2 = 0;
693 esp_write(eregs->esp_cfg2, 0);
694 esp_write(eregs->esp_cfg3, 5);
695 if(esp_read(eregs->esp_cfg3) != 5) {
696 printk("NCR53C90A(esp100a)\n");
697 esp->erev = esp100a;
698 } else {
699 int target;
700
701 for(target=0; target<8; target++)
702 esp->config3[target] = 0;
703 esp->prev_cfg3 = 0;
704 esp_write(eregs->esp_cfg3, 0);
705 if(ccf > ESP_CCF_F5) {
706 printk("NCR53C9XF(espfast)\n");
707 esp->erev = fast;
708 esp->sync_defp = SYNC_DEFP_FAST;
709 } else {
710 printk("NCR53C9x(esp236)\n");
711 esp->erev = esp236;
712 }
713 }
714 }
715
716 /* Initialize the command queues */
717 esp->current_SC = NULL;
718 esp->disconnected_SC = NULL;
719 esp->issue_SC = NULL;
720
721 /* Clear the state machines. */
722 esp->targets_present = 0;
723 esp->resetting_bus = 0;
724 esp->snip = 0;
725
726 init_waitqueue_head(&esp->reset_queue);
727
728 esp->fas_premature_intr_workaround = 0;
729 for(i = 0; i < 32; i++)
730 esp->espcmdlog[i] = 0;
731 esp->espcmdent = 0;
732 for(i = 0; i < 16; i++) {
733 esp->cur_msgout[i] = 0;
734 esp->cur_msgin[i] = 0;
735 }
736 esp->prevmsgout = esp->prevmsgin = 0;
737 esp->msgout_len = esp->msgin_len = 0;
738
739 /* Clear the one behind caches to hold unmatchable values. */
740 esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff;
741
742 /* Reset the thing before we try anything... */
743 esp_bootup_reset(esp, eregs);
744
745 esps_in_use++;
746}
747
748/* The info function will return whatever useful
749 * information the developer sees fit. If not provided, then
750 * the name field will be used instead.
751 */
752const char *esp_info(struct Scsi_Host *host)
753{
754 struct NCR_ESP *esp;
755
756 esp = (struct NCR_ESP *) host->hostdata;
757 switch(esp->erev) {
758 case esp100:
759 return "ESP100 (NCR53C90)";
760 case esp100a:
761 return "ESP100A (NCR53C90A)";
762 case esp236:
763 return "ESP236 (NCR53C9x)";
764 case fas216:
765 return "Emulex FAS216";
766 case fas236:
767 return "Emulex FAS236";
768 case fas366:
769 return "QLogic FAS366";
770 case fas100a:
771 return "FPESP100A";
772 case fsc:
773 return "Symbios Logic 53CF9x-2";
774 default:
775 panic("Bogon ESP revision");
776 };
777}
778EXPORT_SYMBOL(esp_info);
779
780/* From Wolfgang Stanglmeier's NCR scsi driver. */
781struct info_str
782{
783 char *buffer;
784 int length;
785 int offset;
786 int pos;
787};
788
789static void copy_mem_info(struct info_str *info, char *data, int len)
790{
791 if (info->pos + len > info->length)
792 len = info->length - info->pos;
793
794 if (info->pos + len < info->offset) {
795 info->pos += len;
796 return;
797 }
798 if (info->pos < info->offset) {
799 data += (info->offset - info->pos);
800 len -= (info->offset - info->pos);
801 }
802
803 if (len > 0) {
804 memcpy(info->buffer + info->pos, data, len);
805 info->pos += len;
806 }
807}
808
809static int copy_info(struct info_str *info, char *fmt, ...)
810{
811 va_list args;
812 char buf[81];
813 int len;
814
815 va_start(args, fmt);
816 len = vsprintf(buf, fmt, args);
817 va_end(args);
818
819 copy_mem_info(info, buf, len);
820 return len;
821}
822
823static int esp_host_info(struct NCR_ESP *esp, char *ptr, off_t offset, int len)
824{
825 struct scsi_device *sdev;
826 struct info_str info;
827 int i;
828
829 info.buffer = ptr;
830 info.length = len;
831 info.offset = offset;
832 info.pos = 0;
833
834 copy_info(&info, "ESP Host Adapter:\n");
835 copy_info(&info, "\tESP Model\t\t");
836 switch(esp->erev) {
837 case esp100:
838 copy_info(&info, "ESP100 (NCR53C90)\n");
839 break;
840 case esp100a:
841 copy_info(&info, "ESP100A (NCR53C90A)\n");
842 break;
843 case esp236:
844 copy_info(&info, "ESP236 (NCR53C9x)\n");
845 break;
846 case fas216:
847 copy_info(&info, "Emulex FAS216\n");
848 break;
849 case fas236:
850 copy_info(&info, "Emulex FAS236\n");
851 break;
852 case fas100a:
853 copy_info(&info, "FPESP100A\n");
854 break;
855 case fast:
856 copy_info(&info, "Generic FAST\n");
857 break;
858 case fas366:
859 copy_info(&info, "QLogic FAS366\n");
860 break;
861 case fsc:
862			copy_info(&info, "Symbios Logic 53CF9x-2\n");
863 break;
864 case espunknown:
865 default:
866 copy_info(&info, "Unknown!\n");
867 break;
868 };
869 copy_info(&info, "\tLive Targets\t\t[ ");
870 for(i = 0; i < 15; i++) {
871 if(esp->targets_present & (1 << i))
872 copy_info(&info, "%d ", i);
873 }
874 copy_info(&info, "]\n\n");
875
876 /* Now describe the state of each existing target. */
877 copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\n");
878
879 shost_for_each_device(sdev, esp->ehost) {
880 struct esp_device *esp_dev = sdev->hostdata;
881 uint id = sdev->id;
882
883 if (!(esp->targets_present & (1 << id)))
884 continue;
885
886 copy_info(&info, "%d\t\t", id);
887 copy_info(&info, "%08lx\t", esp->config3[id]);
888 copy_info(&info, "[%02lx,%02lx]\t\t\t",
889 esp_dev->sync_max_offset,
890 esp_dev->sync_min_period);
891 copy_info(&info, "%s\n", esp_dev->disconnect ? "yes" : "no");
892 }
893
894 return info.pos > info.offset? info.pos - info.offset : 0;
895}
896
897/* ESP proc filesystem code. */
898int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length,
899 int inout)
900{
901 struct NCR_ESP *esp = (struct NCR_ESP *)shost->hostdata;
902
903 if(inout)
904 return -EINVAL; /* not yet */
905 if(start)
906 *start = buffer;
907 return esp_host_info(esp, buffer, offset, length);
908}
909EXPORT_SYMBOL(esp_proc_info);
910
911static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
912{
913 if(sp->use_sg == 0) {
914 sp->SCp.this_residual = sp->request_bufflen;
915 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
916 sp->SCp.buffers_residual = 0;
917 if (esp->dma_mmu_get_scsi_one)
918 esp->dma_mmu_get_scsi_one(esp, sp);
919 else
920 sp->SCp.ptr =
921 (char *) virt_to_phys(sp->request_buffer);
922 } else {
923 sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
924 sp->SCp.buffers_residual = sp->use_sg - 1;
925 sp->SCp.this_residual = sp->SCp.buffer->length;
926 if (esp->dma_mmu_get_scsi_sgl)
927 esp->dma_mmu_get_scsi_sgl(esp, sp);
928 else
929 sp->SCp.ptr =
930 (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
931 }
932}
933
934static void esp_release_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
935{
936 if(sp->use_sg == 0) {
937 if (esp->dma_mmu_release_scsi_one)
938 esp->dma_mmu_release_scsi_one(esp, sp);
939 } else {
940 if (esp->dma_mmu_release_scsi_sgl)
941 esp->dma_mmu_release_scsi_sgl(esp, sp);
942 }
943}
944
945static void esp_restore_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
946{
947 struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)];
948
949 sp->SCp.ptr = ep->saved_ptr;
950 sp->SCp.buffer = ep->saved_buffer;
951 sp->SCp.this_residual = ep->saved_this_residual;
952 sp->SCp.buffers_residual = ep->saved_buffers_residual;
953}
954
955static void esp_save_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
956{
957 struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)];
958
959 ep->saved_ptr = sp->SCp.ptr;
960 ep->saved_buffer = sp->SCp.buffer;
961 ep->saved_this_residual = sp->SCp.this_residual;
962 ep->saved_buffers_residual = sp->SCp.buffers_residual;
963}
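/* Added note (not part of the original source): these two helpers keep a
 * per-target copy of the active data pointer state.  esp_queue() saves the
 * pointers when a command is accepted, and a reconnecting target gets them
 * restored in esp_do_reconnect(), which is the driver's equivalent of the
 * SCSI "restore pointers" operation after a disconnect.
 */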
964
965/* Some rules:
966 *
967 * 1) Never ever panic while something is live on the bus.
968 * If there is to be any chance of syncing the disks this
969 * rule is to be obeyed.
970 *
971 * 2) Any target that causes a foul condition will no longer
972 * have synchronous transfers done to it, no questions
973 * asked.
974 *
975 * 3) Keep register accesses to a minimum. Think about some
976 * day when we have Xbus machines this is running on and
977 * the ESP chip is on the other end of the machine on a
978 * different board from the cpu where this is running.
979 */
980
981/* Fire off a command. We assume the bus is free and that the only
982 * case where we could see an interrupt is where we have disconnected
983 * commands active and they are trying to reselect us.
984 */
985static inline void esp_check_cmd(struct NCR_ESP *esp, Scsi_Cmnd *sp)
986{
987 switch(sp->cmd_len) {
988 case 6:
989 case 10:
990 case 12:
991 esp->esp_slowcmd = 0;
992 break;
993
994 default:
995 esp->esp_slowcmd = 1;
996 esp->esp_scmdleft = sp->cmd_len;
997 esp->esp_scmdp = &sp->cmnd[0];
998 break;
999 };
1000}
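/* Added example (illustrative): a 6-byte TEST UNIT READY or a 10-byte
 * READ(10) CDB falls into the "not slow" cases above and can be fed to the
 * chip in one go, while an unusual length (say a 16-byte CDB coming from the
 * sg driver) is flagged as a slow command, with esp_scmdp/esp_scmdleft
 * tracking the command bytes that remain to be sent.
 */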
1001
1002static inline void build_sync_nego_msg(struct NCR_ESP *esp, int period, int offset)
1003{
1004 esp->cur_msgout[0] = EXTENDED_MESSAGE;
1005 esp->cur_msgout[1] = 3;
1006 esp->cur_msgout[2] = EXTENDED_SDTR;
1007 esp->cur_msgout[3] = period;
1008 esp->cur_msgout[4] = offset;
1009 esp->msgout_len = 5;
1010}
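/* Added example (byte values assumed from the standard SCSI message codes,
 * not taken from this file): build_sync_nego_msg(esp, 0x19, 15) would lay
 * down the extended SDTR message
 *
 *	0x01 0x03 0x01 0x19 0x0f
 *	 EXT  len  SDTR  per  off
 *
 * i.e. EXTENDED_MESSAGE, a length of 3, EXTENDED_SDTR, a transfer period
 * factor of 0x19 (25 x 4ns = 100ns) and a sync offset of 15.
 */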
1011
1012static void esp_exec_cmd(struct NCR_ESP *esp)
1013{
1014 struct ESP_regs *eregs = esp->eregs;
1015 struct esp_device *esp_dev;
1016 Scsi_Cmnd *SCptr;
1017 struct scsi_device *SDptr;
1018 volatile unchar *cmdp = esp->esp_command;
1019 unsigned char the_esp_command;
1020 int lun, target;
1021 int i;
1022
1023 /* Hold off if we have disconnected commands and
1024 * an IRQ is showing...
1025 */
1026 if(esp->disconnected_SC && esp->dma_irq_p(esp))
1027 return;
1028
1029 /* Grab first member of the issue queue. */
1030 SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC);
1031
1032 /* Safe to panic here because current_SC is null. */
1033 if(!SCptr)
1034 panic("esp: esp_exec_cmd and issue queue is NULL");
1035
1036 SDptr = SCptr->device;
1037 esp_dev = SDptr->hostdata;
1038 lun = SCptr->device->lun;
1039 target = SCptr->device->id;
1040
1041 esp->snip = 0;
1042 esp->msgout_len = 0;
1043
1044 /* Send it out whole, or piece by piece? The ESP
1045 * only knows how to automatically send out 6, 10,
1046 * and 12 byte commands. I used to think that the
1047 * Linux SCSI code would never throw anything other
1048 * than that to us, but then again there is the
1049 * SCSI generic driver which can send us anything.
1050 */
1051 esp_check_cmd(esp, SCptr);
1052
1053 /* If arbitration/selection is successful, the ESP will leave
1054 * ATN asserted, causing the target to go into message out
1055 * phase. The ESP will feed the target the identify and then
1056 * the target can only legally go to one of command,
1057 * datain/out, status, or message in phase, or stay in message
1058 * out phase (should we be trying to send a sync negotiation
1059 * message after the identify). It is not allowed to drop
1060 * BSY, but some buggy targets do and we check for this
1061 * condition in the selection complete code. Most of the time
1062 * we'll make the command bytes available to the ESP and it
1063	 * will not interrupt us until it finishes command phase; we
1064	 * cannot do this for command sizes the ESP does not
1065	 * understand, and in that case we'll get interrupted right
1066 * when the target goes into command phase.
1067 *
1068 * It is absolutely _illegal_ in the presence of SCSI-2 devices
1069 * to use the ESP select w/o ATN command. When SCSI-2 devices are
1070 * present on the bus we _must_ always go straight to message out
1071	 * phase with an identify message for the target. Since
1072	 * selection attempts in SCSI-1 w/o ATN were optional, doing SCSI-2
1073	 * selections should not confuse SCSI-1 devices, we hope.
1074 */
1075
1076 if(esp_dev->sync) {
1077		/* this target's sync is known */
1078#ifdef CONFIG_SCSI_MAC_ESP
1079do_sync_known:
1080#endif
1081 if(esp_dev->disconnect)
1082 *cmdp++ = IDENTIFY(1, lun);
1083 else
1084 *cmdp++ = IDENTIFY(0, lun);
1085
1086 if(esp->esp_slowcmd) {
1087 the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
1088 esp_advance_phase(SCptr, in_slct_stop);
1089 } else {
1090 the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
1091 esp_advance_phase(SCptr, in_slct_norm);
1092 }
1093 } else if(!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) {
1094 /* After the bootup SCSI code sends both the
1095 * TEST_UNIT_READY and INQUIRY commands we want
1096 * to at least attempt allowing the device to
1097 * disconnect.
1098 */
1099 ESPMISC(("esp: Selecting device for first time. target=%d "
1100 "lun=%d\n", target, SCptr->device->lun));
1101 if(!SDptr->borken && !esp_dev->disconnect)
1102 esp_dev->disconnect = 1;
1103
1104 *cmdp++ = IDENTIFY(0, lun);
1105 esp->prevmsgout = NOP;
1106 esp_advance_phase(SCptr, in_slct_norm);
1107 the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
1108
1109 /* Take no chances... */
1110 esp_dev->sync_max_offset = 0;
1111 esp_dev->sync_min_period = 0;
1112 } else {
1113 int toshiba_cdrom_hwbug_wkaround = 0;
1114
1115#ifdef CONFIG_SCSI_MAC_ESP
1116 /* Never allow synchronous transfers (disconnect OK) on
1117		 * Macintosh. Well, maybe later once we've figured out how to
1118 * do DMA on the machines that support it ...
1119 */
1120 esp_dev->disconnect = 1;
1121 esp_dev->sync_max_offset = 0;
1122 esp_dev->sync_min_period = 0;
1123 esp_dev->sync = 1;
1124 esp->snip = 0;
1125 goto do_sync_known;
1126#endif
1127 /* We've talked to this guy before,
1128 * but never negotiated. Let's try
1129 * sync negotiation.
1130 */
1131 if(!SDptr->borken) {
1132 if((SDptr->type == TYPE_ROM) &&
1133 (!strncmp(SDptr->vendor, "TOSHIBA", 7))) {
1134 /* Nice try sucker... */
1135 ESPMISC(("esp%d: Disabling sync for buggy "
1136 "Toshiba CDROM.\n", esp->esp_id));
1137 toshiba_cdrom_hwbug_wkaround = 1;
1138 build_sync_nego_msg(esp, 0, 0);
1139 } else {
1140 build_sync_nego_msg(esp, esp->sync_defp, 15);
1141 }
1142 } else {
1143 build_sync_nego_msg(esp, 0, 0);
1144 }
1145 esp_dev->sync = 1;
1146 esp->snip = 1;
1147
1148 /* A fix for broken SCSI1 targets, when they disconnect
1149 * they lock up the bus and confuse ESP. So disallow
1150 * disconnects for SCSI1 targets for now until we
1151 * find a better fix.
1152 *
1153 * Addendum: This is funny, I figured out what was going
1154 * on. The blotzed SCSI1 target would disconnect,
1155 * one of the other SCSI2 targets or both would be
1156 * disconnected as well. The SCSI1 target would
1157 * stay disconnected long enough that we start
1158 * up a command on one of the SCSI2 targets. As
1159 * the ESP is arbitrating for the bus the SCSI1
1160 * target begins to arbitrate as well to reselect
1161		 * the ESP. The SCSI1 target refuses to drop its
1162 * ID bit on the data bus even though the ESP is
1163 * at ID 7 and is the obvious winner for any
1164 * arbitration. The ESP is a poor sport and refuses
1165		 * to lose arbitration; it will continue indefinitely
1166 * trying to arbitrate for the bus and can only be
1167 * stopped via a chip reset or SCSI bus reset.
1168 * Therefore _no_ disconnects for SCSI1 targets
1169 * thank you very much. ;-)
1170 */
1171 if(((SDptr->scsi_level < 3) && (SDptr->type != TYPE_TAPE)) ||
1172 toshiba_cdrom_hwbug_wkaround || SDptr->borken) {
1173 ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d "
1174 "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
1175 esp_dev->disconnect = 0;
1176 *cmdp++ = IDENTIFY(0, lun);
1177 } else {
1178 *cmdp++ = IDENTIFY(1, lun);
1179 }
1180
1181 /* ESP fifo is only so big...
1182 * Make this look like a slow command.
1183 */
1184 esp->esp_slowcmd = 1;
1185 esp->esp_scmdleft = SCptr->cmd_len;
1186 esp->esp_scmdp = &SCptr->cmnd[0];
1187
1188 the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
1189 esp_advance_phase(SCptr, in_slct_msg);
1190 }
1191
1192 if(!esp->esp_slowcmd)
1193 for(i = 0; i < SCptr->cmd_len; i++)
1194 *cmdp++ = SCptr->cmnd[i];
1195
1196 esp_write(eregs->esp_busid, (target & 7));
1197 if (esp->prev_soff != esp_dev->sync_max_offset ||
1198 esp->prev_stp != esp_dev->sync_min_period ||
1199 (esp->erev > esp100a &&
1200 esp->prev_cfg3 != esp->config3[target])) {
1201 esp->prev_soff = esp_dev->sync_max_offset;
1202 esp_write(eregs->esp_soff, esp->prev_soff);
1203 esp->prev_stp = esp_dev->sync_min_period;
1204 esp_write(eregs->esp_stp, esp->prev_stp);
1205 if(esp->erev > esp100a) {
1206 esp->prev_cfg3 = esp->config3[target];
1207 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
1208 }
1209 }
1210 i = (cmdp - esp->esp_command);
1211
1212 /* Set up the DMA and ESP counters */
1213 if(esp->do_pio_cmds){
1214 int j = 0;
1215
1216 /*
1217 * XXX MSch:
1218 *
1219 * It seems this is required, at least to clean up
1220 * after failed commands when using PIO mode ...
1221 */
1222 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
1223
1224 for(;j<i;j++)
1225 esp_write(eregs->esp_fdata, esp->esp_command[j]);
1226 the_esp_command &= ~ESP_CMD_DMA;
1227
1228 /* Tell ESP to "go". */
1229 esp_cmd(esp, eregs, the_esp_command);
1230 } else {
1231 /* Set up the ESP counters */
1232 esp_write(eregs->esp_tclow, i);
1233 esp_write(eregs->esp_tcmed, 0);
1234 esp->dma_init_write(esp, esp->esp_command_dvma, i);
1235
1236 /* Tell ESP to "go". */
1237 esp_cmd(esp, eregs, the_esp_command);
1238 }
1239}
1240
1241/* Queue a SCSI command delivered from the mid-level Linux SCSI code. */
1242int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
1243{
1244 struct NCR_ESP *esp;
1245
1246 /* Set up func ptr and initial driver cmd-phase. */
1247 SCpnt->scsi_done = done;
1248 SCpnt->SCp.phase = not_issued;
1249
1250 esp = (struct NCR_ESP *) SCpnt->device->host->hostdata;
1251
1252 if(esp->dma_led_on)
1253 esp->dma_led_on(esp);
1254
1255 /* We use the scratch area. */
1256 ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->lun));
1257 ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->lun));
1258
1259 esp_get_dmabufs(esp, SCpnt);
1260 esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */
1261
1262 SCpnt->SCp.Status = CHECK_CONDITION;
1263 SCpnt->SCp.Message = 0xff;
1264 SCpnt->SCp.sent_command = 0;
1265
1266 /* Place into our queue. */
1267 if(SCpnt->cmnd[0] == REQUEST_SENSE) {
1268 ESPQUEUE(("RQSENSE\n"));
1269 prepend_SC(&esp->issue_SC, SCpnt);
1270 } else {
1271 ESPQUEUE(("\n"));
1272 append_SC(&esp->issue_SC, SCpnt);
1273 }
1274
1275 /* Run it now if we can. */
1276 if(!esp->current_SC && !esp->resetting_bus)
1277 esp_exec_cmd(esp);
1278
1279 return 0;
1280}
1281
1282/* Dump driver state. */
1283static void esp_dump_cmd(Scsi_Cmnd *SCptr)
1284{
1285 ESPLOG(("[tgt<%02x> lun<%02x> "
1286 "pphase<%s> cphase<%s>]",
1287 SCptr->device->id, SCptr->device->lun,
1288 phase_string(SCptr->SCp.sent_command),
1289 phase_string(SCptr->SCp.phase)));
1290}
1291
1292static void esp_dump_state(struct NCR_ESP *esp,
1293 struct ESP_regs *eregs)
1294{
1295 Scsi_Cmnd *SCptr = esp->current_SC;
1296#ifdef DEBUG_ESP_CMDS
1297 int i;
1298#endif
1299
1300 ESPLOG(("esp%d: dumping state\n", esp->esp_id));
1301
1302 /* Print DMA status */
1303 esp->dma_dump_state(esp);
1304
1305 ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
1306 esp->esp_id, esp->sreg, esp->seqreg, esp->ireg));
1307 ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
1308 esp->esp_id, esp_read(eregs->esp_status), esp_read(eregs->esp_sstep),
1309 esp_read(eregs->esp_intrpt)));
1310#ifdef DEBUG_ESP_CMDS
1311 printk("esp%d: last ESP cmds [", esp->esp_id);
1312 i = (esp->espcmdent - 1) & 31;
1313 printk("<");
1314 esp_print_cmd(esp->espcmdlog[i]);
1315 printk(">");
1316 i = (i - 1) & 31;
1317 printk("<");
1318 esp_print_cmd(esp->espcmdlog[i]);
1319 printk(">");
1320 i = (i - 1) & 31;
1321 printk("<");
1322 esp_print_cmd(esp->espcmdlog[i]);
1323 printk(">");
1324 i = (i - 1) & 31;
1325 printk("<");
1326 esp_print_cmd(esp->espcmdlog[i]);
1327 printk(">");
1328 printk("]\n");
1329#endif /* (DEBUG_ESP_CMDS) */
1330
1331 if(SCptr) {
1332 ESPLOG(("esp%d: current command ", esp->esp_id));
1333 esp_dump_cmd(SCptr);
1334 }
1335 ESPLOG(("\n"));
1336 SCptr = esp->disconnected_SC;
1337 ESPLOG(("esp%d: disconnected ", esp->esp_id));
1338 while(SCptr) {
1339 esp_dump_cmd(SCptr);
1340 SCptr = (Scsi_Cmnd *) SCptr->host_scribble;
1341 }
1342 ESPLOG(("\n"));
1343}
1344
1345/* Abort a command. The host_lock is acquired by caller. */
1346int esp_abort(Scsi_Cmnd *SCptr)
1347{
1348 struct NCR_ESP *esp = (struct NCR_ESP *) SCptr->device->host->hostdata;
1349 struct ESP_regs *eregs = esp->eregs;
1350 int don;
1351
1352 ESPLOG(("esp%d: Aborting command\n", esp->esp_id));
1353 esp_dump_state(esp, eregs);
1354
1355 /* Wheee, if this is the current command on the bus, the
1356 * best we can do is assert ATN and wait for msgout phase.
1357 * This should even fix a hung SCSI bus when we lose state
1358 * in the driver and timeout because the eventual phase change
1359 * will cause the ESP to (eventually) give an interrupt.
1360 */
1361 if(esp->current_SC == SCptr) {
1362 esp->cur_msgout[0] = ABORT;
1363 esp->msgout_len = 1;
1364 esp->msgout_ctr = 0;
1365 esp_cmd(esp, eregs, ESP_CMD_SATN);
1366 return SUCCESS;
1367 }
1368
1369 /* If it is still in the issue queue then we can safely
1370 * call the completion routine and report abort success.
1371 */
1372 don = esp->dma_ports_p(esp);
1373 if(don) {
1374 esp->dma_ints_off(esp);
1375 synchronize_irq(esp->irq);
1376 }
1377 if(esp->issue_SC) {
1378 Scsi_Cmnd **prev, *this;
1379 for(prev = (&esp->issue_SC), this = esp->issue_SC;
1380 this;
1381 prev = (Scsi_Cmnd **) &(this->host_scribble),
1382 this = (Scsi_Cmnd *) this->host_scribble) {
1383 if(this == SCptr) {
1384 *prev = (Scsi_Cmnd *) this->host_scribble;
1385 this->host_scribble = NULL;
1386 esp_release_dmabufs(esp, this);
1387 this->result = DID_ABORT << 16;
1388 this->scsi_done(this);
1389 if(don)
1390 esp->dma_ints_on(esp);
1391 return SUCCESS;
1392 }
1393 }
1394 }
1395
1396	/* Yuck, the command to abort is disconnected; it is not
1397 * worth trying to abort it now if something else is live
1398 * on the bus at this time. So, we let the SCSI code wait
1399 * a little bit and try again later.
1400 */
1401 if(esp->current_SC) {
1402 if(don)
1403 esp->dma_ints_on(esp);
1404 return FAILED;
1405 }
1406
1407	/* It's disconnected; we would have to reconnect to re-establish
1408 * the nexus and tell the device to abort. However, we really
1409 * cannot 'reconnect' per se. Don't try to be fancy, just
1410 * indicate failure, which causes our caller to reset the whole
1411 * bus.
1412 */
1413
1414 if(don)
1415 esp->dma_ints_on(esp);
1416 return FAILED;
1417}
1418
1419/* We've sent ESP_CMD_RS to the ESP and the interrupt has just
1420 * arrived, indicating the end of the SCSI bus reset. Our job
1421 * is to clean out the command queues and begin re-execution
1422 * of SCSI commands once more.
1423 */
1424static int esp_finish_reset(struct NCR_ESP *esp,
1425 struct ESP_regs *eregs)
1426{
1427 Scsi_Cmnd *sp = esp->current_SC;
1428
1429 /* Clean up currently executing command, if any. */
1430 if (sp != NULL) {
1431 esp_release_dmabufs(esp, sp);
1432 sp->result = (DID_RESET << 16);
1433 sp->scsi_done(sp);
1434 esp->current_SC = NULL;
1435 }
1436
1437	/* Clean up the disconnected queue; its commands have been
1438	 * invalidated by the bus reset.
1439 */
1440 if (esp->disconnected_SC) {
1441 while((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) {
1442 esp_release_dmabufs(esp, sp);
1443 sp->result = (DID_RESET << 16);
1444 sp->scsi_done(sp);
1445 }
1446 }
1447
1448 /* SCSI bus reset is complete. */
1449 esp->resetting_bus = 0;
1450 wake_up(&esp->reset_queue);
1451
1452 /* Ok, now it is safe to get commands going once more. */
1453 if(esp->issue_SC)
1454 esp_exec_cmd(esp);
1455
1456 return do_intr_end;
1457}
1458
1459static int esp_do_resetbus(struct NCR_ESP *esp,
1460 struct ESP_regs *eregs)
1461{
1462 ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id));
1463 esp->resetting_bus = 1;
1464 esp_cmd(esp, eregs, ESP_CMD_RS);
1465
1466 return do_intr_end;
1467}
1468
1469/* Reset ESP chip, reset hanging bus, then kill active and
1470 * disconnected commands for targets without soft reset.
1471 *
1472 * The host_lock is acquired by caller.
1473 */
1474int esp_reset(Scsi_Cmnd *SCptr)
1475{
1476 struct NCR_ESP *esp = (struct NCR_ESP *) SCptr->device->host->hostdata;
1477
1478 spin_lock_irq(esp->ehost->host_lock);
1479 (void) esp_do_resetbus(esp, esp->eregs);
1480 spin_unlock_irq(esp->ehost->host_lock);
1481
1482 wait_event(esp->reset_queue, (esp->resetting_bus == 0));
1483
1484 return SUCCESS;
1485}
1486
1487/* Internal ESP done function. */
1488static void esp_done(struct NCR_ESP *esp, int error)
1489{
1490 Scsi_Cmnd *done_SC;
1491
1492 if(esp->current_SC) {
1493 done_SC = esp->current_SC;
1494 esp->current_SC = NULL;
1495 esp_release_dmabufs(esp, done_SC);
1496 done_SC->result = error;
1497 done_SC->scsi_done(done_SC);
1498
1499 /* Bus is free, issue any commands in the queue. */
1500 if(esp->issue_SC && !esp->current_SC)
1501 esp_exec_cmd(esp);
1502 } else {
1503 /* Panic is safe as current_SC is null so we may still
1504 * be able to accept more commands to sync disk buffers.
1505 */
1506		ESPLOG(("panicking\n"));
1507 panic("esp: done() called with NULL esp->current_SC");
1508 }
1509}
1510
1511/* Wheee, ESP interrupt engine. */
1512
1513/* Forward declarations. */
1514static int esp_do_phase_determine(struct NCR_ESP *esp,
1515 struct ESP_regs *eregs);
1516static int esp_do_data_finale(struct NCR_ESP *esp, struct ESP_regs *eregs);
1517static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs);
1518static int esp_do_status(struct NCR_ESP *esp, struct ESP_regs *eregs);
1519static int esp_do_msgin(struct NCR_ESP *esp, struct ESP_regs *eregs);
1520static int esp_do_msgindone(struct NCR_ESP *esp, struct ESP_regs *eregs);
1521static int esp_do_msgout(struct NCR_ESP *esp, struct ESP_regs *eregs);
1522static int esp_do_cmdbegin(struct NCR_ESP *esp, struct ESP_regs *eregs);
1523
1524#define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP)
1525#define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP)
1526
1527/* We try to avoid some interrupts by jumping ahead and seeing if the ESP
1528 * has gotten far enough yet. Hence the following.
1529 */
1530static inline int skipahead1(struct NCR_ESP *esp, struct ESP_regs *eregs,
1531 Scsi_Cmnd *scp, int prev_phase, int new_phase)
1532{
1533 if(scp->SCp.sent_command != prev_phase)
1534 return 0;
1535
1536 if(esp->dma_irq_p(esp)) {
1537 /* Yes, we are able to save an interrupt. */
1538 esp->sreg = (esp_read(eregs->esp_status) & ~(ESP_STAT_INTR));
1539 esp->ireg = esp_read(eregs->esp_intrpt);
1540 if(!(esp->ireg & ESP_INTR_SR))
1541 return 0;
1542 else
1543 return do_reset_complete;
1544 }
1545 /* Ho hum, target is taking forever... */
1546 scp->SCp.sent_command = new_phase; /* so we don't recurse... */
1547 return do_intr_end;
1548}
1549
1550static inline int skipahead2(struct NCR_ESP *esp,
1551 struct ESP_regs *eregs,
1552 Scsi_Cmnd *scp, int prev_phase1, int prev_phase2,
1553 int new_phase)
1554{
1555 if(scp->SCp.sent_command != prev_phase1 &&
1556 scp->SCp.sent_command != prev_phase2)
1557 return 0;
1558 if(esp->dma_irq_p(esp)) {
1559 /* Yes, we are able to save an interrupt. */
1560 esp->sreg = (esp_read(eregs->esp_status) & ~(ESP_STAT_INTR));
1561 esp->ireg = esp_read(eregs->esp_intrpt);
1562 if(!(esp->ireg & ESP_INTR_SR))
1563 return 0;
1564 else
1565 return do_reset_complete;
1566 }
1567 /* Ho hum, target is taking forever... */
1568 scp->SCp.sent_command = new_phase; /* so we don't recurse... */
1569 return do_intr_end;
1570}
1571
1572/* Misc. esp helper macros. */
1573#define esp_setcount(__eregs, __cnt) \
1574 esp_write((__eregs)->esp_tclow, ((__cnt) & 0xff)); \
1575 esp_write((__eregs)->esp_tcmed, (((__cnt) >> 8) & 0xff))
1576
1577#define esp_getcount(__eregs) \
1578 ((esp_read((__eregs)->esp_tclow)&0xff) | \
1579 ((esp_read((__eregs)->esp_tcmed)&0xff) << 8))
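/* Added usage note: these two macros just split/join the 16-bit transfer
 * counter across the TCLOW/TCMED registers, e.g. esp_setcount(eregs, 0x1234)
 * writes 0x34 to esp_tclow and 0x12 to esp_tcmed, and esp_getcount() puts
 * the two bytes back together.
 */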
1580
1581#define fcount(__esp, __eregs) \
1582 (esp_read((__eregs)->esp_fflags) & ESP_FF_FBYTES)
1583
1584#define fnzero(__esp, __eregs) \
1585 (esp_read((__eregs)->esp_fflags) & ESP_FF_ONOTZERO)
1586
1587/* XXX speculative nops unnecessary when continuing amidst a data phase
1588 * XXX even on esp100!!! another case of flooding the bus with I/O reg
1589 * XXX writes...
1590 */
1591#define esp_maybe_nop(__esp, __eregs) \
1592 if((__esp)->erev == esp100) \
1593 esp_cmd((__esp), (__eregs), ESP_CMD_NULL)
1594
1595#define sreg_to_dataphase(__sreg) \
1596 ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain)
1597
1598/* The ESP100, when in synchronous data phase, can mistake a long final
1599 * REQ pulse from the target as an extra byte, it places whatever is on
1600 * the data lines into the fifo. For now, we will assume when this
1601 * happens that the target is a bit quirky and we don't want to
1602 * be talking synchronously to it anyway. Regardless, we need to
1603 * tell the ESP to eat the extraneous byte so that we can proceed
1604 * to the next phase.
1605 */
1606static inline int esp100_sync_hwbug(struct NCR_ESP *esp, struct ESP_regs *eregs,
1607 Scsi_Cmnd *sp, int fifocnt)
1608{
1609 /* Do not touch this piece of code. */
1610 if((!(esp->erev == esp100)) ||
1611 (!(sreg_datainp((esp->sreg = esp_read(eregs->esp_status))) && !fifocnt) &&
1612 !(sreg_dataoutp(esp->sreg) && !fnzero(esp, eregs)))) {
1613 if(sp->SCp.phase == in_dataout)
1614 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
1615 return 0;
1616 } else {
1617 /* Async mode for this guy. */
1618 build_sync_nego_msg(esp, 0, 0);
1619
1620 /* Ack the bogus byte, but set ATN first. */
1621 esp_cmd(esp, eregs, ESP_CMD_SATN);
1622 esp_cmd(esp, eregs, ESP_CMD_MOK);
1623 return 1;
1624 }
1625}
1626
1627/* This closes the window during a selection with a reselect pending; because
1628 * we use DMA for the selection process, the FIFO should hold the correct
1629 * contents if we get reselected during this process. So we just need to
1630 * ack the possible illegal cmd interrupt pending on the esp100.
1631 */
1632static inline int esp100_reconnect_hwbug(struct NCR_ESP *esp,
1633 struct ESP_regs *eregs)
1634{
1635 volatile unchar junk;
1636
1637 if(esp->erev != esp100)
1638 return 0;
1639 junk = esp_read(eregs->esp_intrpt);
1640
1641 if(junk & ESP_INTR_SR)
1642 return 1;
1643 return 0;
1644}
1645
1646/* This verifies the BUSID bits during a reselection so that we know which
1647 * target is talking to us.
1648 */
1649static inline int reconnect_target(struct NCR_ESP *esp, struct ESP_regs *eregs)
1650{
1651 int it, me = esp->scsi_id_mask, targ = 0;
1652
1653 if(2 != fcount(esp, eregs))
1654 return -1;
1655 it = esp_read(eregs->esp_fdata);
1656 if(!(it & me))
1657 return -1;
1658 it &= ~me;
1659 if(it & (it - 1))
1660 return -1;
1661 while(!(it & 1))
1662 targ++, it >>= 1;
1663 return targ;
1664}
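/* Added example (IDs picked for illustration): with the host adapter at
 * SCSI ID 7 (scsi_id_mask = 0x80), a reselection by target 2 leaves two bits
 * in the fifo byte: 0x80 | 0x04 = 0x84.  Stripping our own bit gives 0x04,
 * the "exactly one bit set" test passes, and shifting it down yields
 * targ = 2.  Anything else (zero, our bit missing, or more than one other
 * bit set) is rejected with -1.
 */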
1665
1666/* This verifies the identify from the target so that we know which lun is
1667 * being reconnected.
1668 */
1669static inline int reconnect_lun(struct NCR_ESP *esp, struct ESP_regs *eregs)
1670{
1671 int lun;
1672
1673 if((esp->sreg & ESP_STAT_PMASK) != ESP_MIP)
1674 return -1;
1675 lun = esp_read(eregs->esp_fdata);
1676
1677 /* Yes, you read this correctly. We report lun of zero
1678	 * if we see a parity error. The ESP reports a parity error for
1679 * the lun byte, and this is the only way to hope to recover
1680 * because the target is connected.
1681 */
1682 if(esp->sreg & ESP_STAT_PERR)
1683 return 0;
1684
1685 /* Check for illegal bits being set in the lun. */
1686 if((lun & 0x40) || !(lun & 0x80))
1687 return -1;
1688
1689 return lun & 7;
1690}
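/* Added example: a reselecting target identifies its lun with an IDENTIFY
 * message byte such as 0x81 -- bit 7 set as required, bit 6 clear, and the
 * low bits giving lun 1.  A byte with bit 7 clear or bit 6 set fails the
 * check above and the reconnect is rejected with -1.
 */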
1691
1692/* This puts the driver in a state where it can revitalize a command that
1693 * is being continued due to reselection.
1694 */
1695static inline void esp_connect(struct NCR_ESP *esp, struct ESP_regs *eregs,
1696 Scsi_Cmnd *sp)
1697{
1698 struct scsi_device *dp = sp->device;
1699 struct esp_device *esp_dev = dp->hostdata;
1700
1701 if(esp->prev_soff != esp_dev->sync_max_offset ||
1702 esp->prev_stp != esp_dev->sync_min_period ||
1703 (esp->erev > esp100a &&
1704 esp->prev_cfg3 != esp->config3[scmd_id(sp)])) {
1705 esp->prev_soff = esp_dev->sync_max_offset;
1706 esp_write(eregs->esp_soff, esp->prev_soff);
1707 esp->prev_stp = esp_dev->sync_min_period;
1708 esp_write(eregs->esp_stp, esp->prev_stp);
1709 if(esp->erev > esp100a) {
1710 esp->prev_cfg3 = esp->config3[scmd_id(sp)];
1711 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
1712 }
1713 }
1714 esp->current_SC = sp;
1715}
1716
1717/* This will place the current working command back into the issue queue
1718 * if we are to receive a reselection amidst a selection attempt.
1719 */
1720static inline void esp_reconnect(struct NCR_ESP *esp, Scsi_Cmnd *sp)
1721{
1722 if(!esp->disconnected_SC)
1723 ESPLOG(("esp%d: Weird, being reselected but disconnected "
1724 "command queue is empty.\n", esp->esp_id));
1725 esp->snip = 0;
1726 esp->current_SC = NULL;
1727 sp->SCp.phase = not_issued;
1728 append_SC(&esp->issue_SC, sp);
1729}
1730
1731/* Begin message in phase. */
1732static int esp_do_msgin(struct NCR_ESP *esp, struct ESP_regs *eregs)
1733{
1734 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
1735 esp_maybe_nop(esp, eregs);
1736 esp_cmd(esp, eregs, ESP_CMD_TI);
1737 esp->msgin_len = 1;
1738 esp->msgin_ctr = 0;
1739 esp_advance_phase(esp->current_SC, in_msgindone);
1740 return do_work_bus;
1741}
1742
1743static inline void advance_sg(struct NCR_ESP *esp, Scsi_Cmnd *sp)
1744{
1745 ++sp->SCp.buffer;
1746 --sp->SCp.buffers_residual;
1747 sp->SCp.this_residual = sp->SCp.buffer->length;
1748 if (esp->dma_advance_sg)
1749 esp->dma_advance_sg (sp);
1750 else
1751 sp->SCp.ptr = (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
1752
1753}
1754
1755/* Please note that the way I've coded these routines is that I _always_
1756 * check for a disconnect during any and all information transfer
1757 * phases. The SCSI standard states that the target _can_ cause a BUS
1758 * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note
1759 * that during information transfer phases the target controls every
1760 * change in phase, the only thing the initiator can do is "ask" for
1761 * a message out phase by driving ATN true. The target can, and sometimes
1762 * will, completely ignore this request so we cannot assume anything when
1763 * we try to force a message out phase to abort/reset a target. Most of
1764 * the time the target will eventually be nice and go to message out, so
1765 * we may have to hold on to our state about what we want to tell the target
1766 * for some period of time.
1767 */
1768
1769/* I think I have things working here correctly. Even partial transfers
1770 * within a buffer or sub-buffer should not upset us at all no matter
1771 * how badly the target and/or ESP fucks things up.
1772 */
1773static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
1774{
1775 Scsi_Cmnd *SCptr = esp->current_SC;
1776 int thisphase, hmuch;
1777
1778 ESPDATA(("esp_do_data: "));
1779 esp_maybe_nop(esp, eregs);
1780 thisphase = sreg_to_dataphase(esp->sreg);
1781 esp_advance_phase(SCptr, thisphase);
1782 ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT"));
1783 hmuch = esp->dma_can_transfer(esp, SCptr);
1784
1785 /*
1786 * XXX MSch: cater for PIO transfer here; PIO used if hmuch == 0
1787 */
1788 if (hmuch) { /* DMA */
1789 /*
1790 * DMA
1791 */
1792 ESPDATA(("hmuch<%d> ", hmuch));
1793 esp->current_transfer_size = hmuch;
1794 esp_setcount(eregs, (esp->fas_premature_intr_workaround ?
1795 (hmuch + 0x40) : hmuch));
1796 esp->dma_setup(esp, (__u32)((unsigned long)SCptr->SCp.ptr),
1797 hmuch, (thisphase == in_datain));
1798 ESPDATA(("DMA|TI --> do_intr_end\n"));
1799 esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
1800 return do_intr_end;
1801 /*
1802 * end DMA
1803 */
1804 } else {
1805 /*
1806 * PIO
1807 */
1808 int oldphase, i = 0; /* or where we left off last time ?? esp->current_data ?? */
1809 int fifocnt = 0;
1810 unsigned char *p = phys_to_virt((unsigned long)SCptr->SCp.ptr);
1811
1812 oldphase = esp_read(eregs->esp_status) & ESP_STAT_PMASK;
1813
1814 /*
1815 * polled transfer; ugly, can we make this happen in a DRQ
1816 * interrupt handler ??
1817 * requires keeping track of state information in host or
1818 * command struct!
1819 * Problem: I've never seen a DRQ happen on Mac, not even
1820 * with ESP_CMD_DMA ...
1821 */
1822
1823 /* figure out how much needs to be transferred */
1824 hmuch = SCptr->SCp.this_residual;
1825 ESPDATA(("hmuch<%d> pio ", hmuch));
1826 esp->current_transfer_size = hmuch;
1827
1828 /* tell the ESP ... */
1829 esp_setcount(eregs, hmuch);
1830
1831 /* loop */
1832 while (hmuch) {
1833 int j, fifo_stuck = 0, newphase;
1834 unsigned long timeout;
1835#if 0
1836 unsigned long flags;
1837#endif
1838#if 0
1839 if ( i % 10 )
1840 ESPDATA(("\r"));
1841 else
1842 ESPDATA(( /*"\n"*/ "\r"));
1843#endif
1844#if 0
1845 local_irq_save(flags);
1846#endif
1847 if(thisphase == in_datain) {
1848 /* 'go' ... */
1849 esp_cmd(esp, eregs, ESP_CMD_TI);
1850
1851 /* wait for data */
1852 timeout = 1000000;
1853 while (!((esp->sreg=esp_read(eregs->esp_status)) & ESP_STAT_INTR) && --timeout)
1854 udelay(2);
1855 if (timeout == 0)
1856 printk("DRQ datain timeout! \n");
1857
1858 newphase = esp->sreg & ESP_STAT_PMASK;
1859
1860 /* see how much we got ... */
1861 fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
1862
1863 if (!fifocnt)
1864 fifo_stuck++;
1865 else
1866 fifo_stuck = 0;
1867
1868 ESPDATA(("\rgot %d st %x ph %x", fifocnt, esp->sreg, newphase));
1869
1870 /* read fifo */
1871 for(j=0;j<fifocnt;j++)
1872 p[i++] = esp_read(eregs->esp_fdata);
1873
1874 ESPDATA(("(%d) ", i));
1875
1876 /* how many to go ?? */
1877 hmuch -= fifocnt;
1878
1879 /* break if status phase !! */
1880 if(newphase == ESP_STATP) {
1881 /* clear int. */
1882 esp->ireg = esp_read(eregs->esp_intrpt);
1883 break;
1884 }
1885 } else {
1886#define MAX_FIFO 8
1887 /* how much will fit ? */
1888 int this_count = MAX_FIFO - fifocnt;
1889 if (this_count > hmuch)
1890 this_count = hmuch;
1891
1892 /* fill fifo */
1893 for(j=0;j<this_count;j++)
1894 esp_write(eregs->esp_fdata, p[i++]);
1895
1896 /* how many left if this goes out ?? */
1897 hmuch -= this_count;
1898
1899 /* 'go' ... */
1900 esp_cmd(esp, eregs, ESP_CMD_TI);
1901
1902 /* wait for 'got it' */
1903 timeout = 1000000;
1904 while (!((esp->sreg=esp_read(eregs->esp_status)) & ESP_STAT_INTR) && --timeout)
1905 udelay(2);
1906 if (timeout == 0)
1907 printk("DRQ dataout timeout! \n");
1908
1909 newphase = esp->sreg & ESP_STAT_PMASK;
1910
1911 /* need to check how much was sent ?? */
1912 fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
1913
1914 ESPDATA(("\rsent %d st %x ph %x", this_count - fifocnt, esp->sreg, newphase));
1915
1916 ESPDATA(("(%d) ", i));
1917
1918 /* break if status phase !! */
1919 if(newphase == ESP_STATP) {
1920 /* clear int. */
1921 esp->ireg = esp_read(eregs->esp_intrpt);
1922 break;
1923 }
1924
1925 }
1926
1927 /* clear int. */
1928 esp->ireg = esp_read(eregs->esp_intrpt);
1929
1930 ESPDATA(("ir %x ... ", esp->ireg));
1931
1932 if (hmuch == 0)
1933 ESPDATA(("done! \n"));
1934
1935#if 0
1936 local_irq_restore(flags);
1937#endif
1938
1939 /* check new bus phase */
1940 if (newphase != oldphase && i < esp->current_transfer_size) {
1941 /* something happened; disconnect ?? */
1942 ESPDATA(("phase change, dropped out with %d done ... ", i));
1943 break;
1944 }
1945
1946 /* check int. status */
1947 if (esp->ireg & ESP_INTR_DC) {
1948 /* disconnect */
1949 ESPDATA(("disconnect; %d transferred ... ", i));
1950 break;
1951 } else if (esp->ireg & ESP_INTR_FDONE) {
1952 /* function done */
1953 ESPDATA(("function done; %d transferred ... ", i));
1954 break;
1955 }
1956
1957 /* XXX fixme: bail out on stall */
1958 if (fifo_stuck > 10) {
1959 /* we're stuck */
1960 ESPDATA(("fifo stall; %d transferred ... ", i));
1961 break;
1962 }
1963 }
1964
1965 ESPDATA(("\n"));
1966 /* check successful completion ?? */
1967
1968 if (thisphase == in_dataout)
1969 hmuch += fifocnt; /* stuck?? adjust data pointer ...*/
1970
1971 /* tell do_data_finale how much was transferred */
1972 esp->current_transfer_size -= hmuch;
1973
1974 /* still not completely sure on this one ... */
1975 return /*do_intr_end*/ do_work_bus /*do_phase_determine*/ ;
1976
1977 /*
1978 * end PIO
1979 */
1980 }
1981 return do_intr_end;
1982}
1983
1984/* See how successful the data transfer was. */
1985static int esp_do_data_finale(struct NCR_ESP *esp,
1986 struct ESP_regs *eregs)
1987{
1988 Scsi_Cmnd *SCptr = esp->current_SC;
1989 struct esp_device *esp_dev = SCptr->device->hostdata;
1990 int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0;
1991
1992 if(esp->dma_led_off)
1993 esp->dma_led_off(esp);
1994
1995 ESPDATA(("esp_do_data_finale: "));
1996
1997 if(SCptr->SCp.phase == in_datain) {
1998 if(esp->sreg & ESP_STAT_PERR) {
1999 /* Yuck, parity error. The ESP asserts ATN
2000 * so that we can go to message out phase
2001 * immediately and inform the target that
2002 * something bad happened.
2003 */
2004 ESPLOG(("esp%d: data bad parity detected.\n",
2005 esp->esp_id));
2006 esp->cur_msgout[0] = INITIATOR_ERROR;
2007 esp->msgout_len = 1;
2008 }
2009 if(esp->dma_drain)
2010 esp->dma_drain(esp);
2011 }
2012 if(esp->dma_invalidate)
2013 esp->dma_invalidate(esp);
2014
2015 /* This could happen for the above parity error case. */
2016 if(!(esp->ireg == ESP_INTR_BSERV)) {
2017 /* Please go to msgout phase, please please please... */
2018 ESPLOG(("esp%d: !BSERV after data, probably to msgout\n",
2019 esp->esp_id));
2020 return esp_do_phase_determine(esp, eregs);
2021 }
2022
2023 /* Check for partial transfers and other horrible events. */
2024 fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
2025 ecount = esp_getcount(eregs);
2026 if(esp->fas_premature_intr_workaround)
2027 ecount -= 0x40;
2028 bytes_sent = esp->current_transfer_size;
2029
2030 ESPDATA(("trans_sz=%d, ", bytes_sent));
2031 if(!(esp->sreg & ESP_STAT_TCNT))
2032 bytes_sent -= ecount;
2033 if(SCptr->SCp.phase == in_dataout)
2034 bytes_sent -= fifocnt;
2035
2036 ESPDATA(("bytes_sent=%d (ecount=%d, fifocnt=%d), ", bytes_sent,
2037 ecount, fifocnt));
2038
2039 /* If we were in synchronous mode, check for peculiarities. */
2040 if(esp_dev->sync_max_offset)
2041 bogus_data = esp100_sync_hwbug(esp, eregs, SCptr, fifocnt);
2042 else
2043 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
2044
2045 /* Until we are sure of what has happened, we are certainly
2046 * in the dark.
2047 */
2048 esp_advance_phase(SCptr, in_the_dark);
2049
2050 /* Check for premature interrupt condition. Can happen on FAS2x6
2051 * chips. QLogic recommends a workaround by overprogramming the
2052 * transfer counters, but this makes doing scatter-gather impossible.
2053 * Until there is a way to disable scatter-gather for a single target,
2054 * and not only for the entire host adapter as it is now, the workaround
2055 * is way too expensive performance-wise.
2056 * Instead, it turns out that when this happens the target has disconnected
2057 * already but it doesn't show in the interrupt register. Compensate for
2058 * that here to try and avoid a SCSI bus reset.
2059 */
2060 if(!esp->fas_premature_intr_workaround && (fifocnt == 1) &&
2061 sreg_dataoutp(esp->sreg)) {
2062 ESPLOG(("esp%d: Premature interrupt, enabling workaround\n",
2063 esp->esp_id));
2064#if 0
2065 /* Disable scatter-gather operations, they are not possible
2066 * when using this workaround.
2067 */
2068 esp->ehost->sg_tablesize = 0;
2069 esp->ehost->use_clustering = ENABLE_CLUSTERING;
2070 esp->fas_premature_intr_workaround = 1;
2071 bytes_sent = 0;
2072 if(SCptr->use_sg) {
2073 ESPLOG(("esp%d: Aborting scatter-gather operation\n",
2074 esp->esp_id));
2075 esp->cur_msgout[0] = ABORT;
2076 esp->msgout_len = 1;
2077 esp->msgout_ctr = 0;
2078 esp_cmd(esp, eregs, ESP_CMD_SATN);
2079 esp_setcount(eregs, 0xffff);
2080 esp_cmd(esp, eregs, ESP_CMD_NULL);
2081 esp_cmd(esp, eregs, ESP_CMD_TPAD | ESP_CMD_DMA);
2082 return do_intr_end;
2083 }
2084#else
2085 /* Just set the disconnected bit. That's what appears to
2086 * happen anyway. The state machine will pick it up when
2087 * we return.
2088 */
2089 esp->ireg |= ESP_INTR_DC;
2090#endif
2091 }
2092
2093 if(bytes_sent < 0) {
2094 /* I've seen this happen due to lost state in this
2095 * driver. No idea why it happened, but allowing
2096 * this value to be negative caused things to
2097 * lock up. This allows greater chance of recovery.
2098 * In fact every time I've seen this, it has been
2099 * a driver bug without question.
2100 */
2101 ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id));
2102 ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n",
2103 esp->esp_id,
2104 esp->current_transfer_size, fifocnt, ecount));
2105 ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n",
2106 esp->esp_id,
2107 SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual));
2108 ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id,
2109 SCptr->device->id));
2110 SCptr->device->borken = 1;
2111 esp_dev->sync = 0;
2112 bytes_sent = 0;
2113 }
2114
2115 /* Update the state of our transfer. */
2116 SCptr->SCp.ptr += bytes_sent;
2117 SCptr->SCp.this_residual -= bytes_sent;
2118 if(SCptr->SCp.this_residual < 0) {
2119		/* Data overrun; clamp the residual. */
2120 ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id));
2121 SCptr->SCp.this_residual = 0;
2122 }
2123
2124 /* Maybe continue. */
2125 if(!bogus_data) {
2126 ESPDATA(("!bogus_data, "));
2127 /* NO MATTER WHAT, we advance the scatterlist,
2128 * if the target should decide to disconnect
2129 * in between scatter chunks (which is common)
2130 * we could die horribly! I used to have the sg
2131 * advance occur only if we are going back into
2132 * (or are staying in) a data phase, you can
2133 * imagine the hell I went through trying to
2134 * figure this out.
2135 */
2136 if(!SCptr->SCp.this_residual && SCptr->SCp.buffers_residual)
2137 advance_sg(esp, SCptr);
2138#ifdef DEBUG_ESP_DATA
2139 if(sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) {
2140 ESPDATA(("to more data\n"));
2141 } else {
2142 ESPDATA(("to new phase\n"));
2143 }
2144#endif
2145 return esp_do_phase_determine(esp, eregs);
2146 }
2147 /* Bogus data, just wait for next interrupt. */
2148 ESPLOG(("esp%d: bogus_data during end of data phase\n",
2149 esp->esp_id));
2150 return do_intr_end;
2151}
2152
2153/* We received a non-good status return at the end of
2154 * running a SCSI command. This is used to decide if
2155 * we should clear our synchronous transfer state for
2156 * such a device when that happens.
2157 *
2158 * The idea is that when spinning up a disk or rewinding
2159 * a tape, we don't want to go into a loop re-negotiating
2160 * synchronous capabilities over and over.
2161 */
2162static int esp_should_clear_sync(Scsi_Cmnd *sp)
2163{
2164 unchar cmd = sp->cmnd[0];
2165
2166 /* These cases are for spinning up a disk and
2167 * waiting for that spinup to complete.
2168 */
2169 if(cmd == START_STOP)
2170 return 0;
2171
2172 if(cmd == TEST_UNIT_READY)
2173 return 0;
2174
2175 /* One more special case for SCSI tape drives,
2176 * this is what is used to probe the device for
2177 * completion of a rewind or tape load operation.
2178 */
2179 if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
2180 return 0;
2181
2182 return 1;
2183}
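/* Added example: a CHECK CONDITION returned for TEST_UNIT_READY or
 * START_STOP while a disk spins up (or for MODE_SENSE while a tape loads)
 * leaves the negotiated sync parameters alone, whereas the same status on an
 * ordinary command makes the caller clear esp_dev->sync, so the next command
 * to that target renegotiates synchronous transfers from scratch.
 */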
2184
2185/* Either a command is completing or a target is dropping off the bus
2186 * to continue the command in the background so we can do other work.
2187 */
2188static int esp_do_freebus(struct NCR_ESP *esp, struct ESP_regs *eregs)
2189{
2190 Scsi_Cmnd *SCptr = esp->current_SC;
2191 int rval;
2192
2193 rval = skipahead2(esp, eregs, SCptr, in_status, in_msgindone, in_freeing);
2194 if(rval)
2195 return rval;
2196
2197 if(esp->ireg != ESP_INTR_DC) {
2198 ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id));
2199 return do_reset_bus; /* target will not drop BSY... */
2200 }
2201 esp->msgout_len = 0;
2202 esp->prevmsgout = NOP;
2203 if(esp->prevmsgin == COMMAND_COMPLETE) {
2204 struct esp_device *esp_dev = SCptr->device->hostdata;
2205 /* Normal end of nexus. */
2206 if(esp->disconnected_SC)
2207 esp_cmd(esp, eregs, ESP_CMD_ESEL);
2208
2209 if(SCptr->SCp.Status != GOOD &&
2210 SCptr->SCp.Status != CONDITION_GOOD &&
2211 ((1<<scmd_id(SCptr)) & esp->targets_present) &&
2212 esp_dev->sync && esp_dev->sync_max_offset) {
2213 /* SCSI standard says that the synchronous capabilities
2214 * should be renegotiated at this point. Most likely
2215 * we are about to request sense from this target
2216 * in which case we want to avoid using sync
2217 * transfers until we are sure of the current target
2218 * state.
2219 */
2220 ESPMISC(("esp: Status <%d> for target %d lun %d\n",
2221 SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun));
2222
2223 /* But don't do this when spinning up a disk at
2224 * boot time while we poll for completion as it
2225 * fills up the console with messages. Also, tapes
2226 * can report not ready many times right after
2227 * loading up a tape.
2228 */
2229 if(esp_should_clear_sync(SCptr) != 0)
2230 esp_dev->sync = 0;
2231 }
2232 ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
2233 esp_done(esp, ((SCptr->SCp.Status & 0xff) |
2234 ((SCptr->SCp.Message & 0xff)<<8) |
2235 (DID_OK << 16)));
2236 } else if(esp->prevmsgin == DISCONNECT) {
2237 /* Normal disconnect. */
2238 esp_cmd(esp, eregs, ESP_CMD_ESEL);
2239 ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
2240 append_SC(&esp->disconnected_SC, SCptr);
2241 esp->current_SC = NULL;
2242 if(esp->issue_SC)
2243 esp_exec_cmd(esp);
2244 } else {
2245 /* Driver bug, we do not expect a disconnect here
2246 * and should not have advanced the state engine
2247 * to in_freeing.
2248 */
2249 ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n",
2250 esp->esp_id));
2251 return do_reset_bus;
2252 }
2253 return do_intr_end;
2254}
2255
2256/* When a reselect occurs, and we cannot find the command to
2257 * reconnect to in our queues, we do this.
2258 */
2259static int esp_bad_reconnect(struct NCR_ESP *esp)
2260{
2261 Scsi_Cmnd *sp;
2262
2263 ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n",
2264 esp->esp_id));
2265 ESPLOG(("QUEUE DUMP\n"));
2266 sp = esp->issue_SC;
2267 ESPLOG(("esp%d: issue_SC[", esp->esp_id));
2268 while(sp) {
2269 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2270 sp = (Scsi_Cmnd *) sp->host_scribble;
2271 }
2272 ESPLOG(("]\n"));
2273 sp = esp->current_SC;
2274 ESPLOG(("esp%d: current_SC[", esp->esp_id));
2275 while(sp) {
2276 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2277 sp = (Scsi_Cmnd *) sp->host_scribble;
2278 }
2279 ESPLOG(("]\n"));
2280 sp = esp->disconnected_SC;
2281 ESPLOG(("esp%d: disconnected_SC[", esp->esp_id));
2282 while(sp) {
2283 ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
2284 sp = (Scsi_Cmnd *) sp->host_scribble;
2285 }
2286 ESPLOG(("]\n"));
2287 return do_reset_bus;
2288}
2289
2290/* Do the needy when a target tries to reconnect to us. */
2291static int esp_do_reconnect(struct NCR_ESP *esp,
2292 struct ESP_regs *eregs)
2293{
2294 int lun, target;
2295 Scsi_Cmnd *SCptr;
2296
2297 /* Check for all bogus conditions first. */
2298 target = reconnect_target(esp, eregs);
2299 if(target < 0) {
2300 ESPDISC(("bad bus bits\n"));
2301 return do_reset_bus;
2302 }
2303 lun = reconnect_lun(esp, eregs);
2304 if(lun < 0) {
2305 ESPDISC(("target=%2x, bad identify msg\n", target));
2306 return do_reset_bus;
2307 }
2308
2309 /* Things look ok... */
2310 ESPDISC(("R<%02x,%02x>", target, lun));
2311
2312 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
2313 if(esp100_reconnect_hwbug(esp, eregs))
2314 return do_reset_bus;
2315 esp_cmd(esp, eregs, ESP_CMD_NULL);
2316
2317 SCptr = remove_SC(&esp->disconnected_SC, (unchar) target, (unchar) lun);
2318 if(!SCptr)
2319 return esp_bad_reconnect(esp);
2320
2321 esp_connect(esp, eregs, SCptr);
2322 esp_cmd(esp, eregs, ESP_CMD_MOK);
2323
2324 /* Reconnect implies a restore pointers operation. */
2325 esp_restore_pointers(esp, SCptr);
2326
2327 esp->snip = 0;
2328 esp_advance_phase(SCptr, in_the_dark);
2329 return do_intr_end;
2330}
2331
2332/* End of NEXUS (hopefully), pick up status + message byte then leave if
2333 * all goes well.
2334 */
2335static int esp_do_status(struct NCR_ESP *esp, struct ESP_regs *eregs)
2336{
2337 Scsi_Cmnd *SCptr = esp->current_SC;
2338 int intr, rval;
2339
2340 rval = skipahead1(esp, eregs, SCptr, in_the_dark, in_status);
2341 if(rval)
2342 return rval;
2343
2344 intr = esp->ireg;
2345 ESPSTAT(("esp_do_status: "));
2346 if(intr != ESP_INTR_DC) {
2347 int message_out = 0; /* for parity problems */
2348
2349 /* Ack the message. */
2350 ESPSTAT(("ack msg, "));
2351 esp_cmd(esp, eregs, ESP_CMD_MOK);
2352
2353 if(esp->dma_poll)
2354 esp->dma_poll(esp, (unsigned char *) esp->esp_command);
2355
2356 ESPSTAT(("got something, "));
2357 /* ESP chimes in with one of
2358 *
2359 * 1) function done interrupt:
2360 * both status and message in bytes
2361 * are available
2362 *
2363 * 2) bus service interrupt:
2364 * only status byte was acquired
2365 *
2366 * 3) Anything else:
2367 * can't happen, but we test for it
2368		 *         anyway
2369 *
2370 * ALSO: If bad parity was detected on either
2371 * the status _or_ the message byte then
2372 * the ESP has asserted ATN on the bus
2373 * and we must therefore wait for the
2374 * next phase change.
2375 */
2376 if(intr & ESP_INTR_FDONE) {
2377			/* We got it all, hallelujah. */
2378 ESPSTAT(("got both, "));
2379 SCptr->SCp.Status = esp->esp_command[0];
2380 SCptr->SCp.Message = esp->esp_command[1];
2381 esp->prevmsgin = SCptr->SCp.Message;
2382 esp->cur_msgin[0] = SCptr->SCp.Message;
2383 if(esp->sreg & ESP_STAT_PERR) {
2384 /* There was bad parity for the
2385 * message byte, the status byte
2386 * was ok.
2387 */
2388 message_out = MSG_PARITY_ERROR;
2389 }
2390 } else if(intr == ESP_INTR_BSERV) {
2391 /* Only got status byte. */
2392 ESPLOG(("esp%d: got status only, ", esp->esp_id));
2393 if(!(esp->sreg & ESP_STAT_PERR)) {
2394 SCptr->SCp.Status = esp->esp_command[0];
2395 SCptr->SCp.Message = 0xff;
2396 } else {
2397 /* The status byte had bad parity.
2398 * we leave the scsi_pointer Status
2399 * field alone as we set it to a default
2400 * of CHECK_CONDITION in esp_queue.
2401 */
2402 message_out = INITIATOR_ERROR;
2403 }
2404 } else {
2405 /* This shouldn't happen ever. */
2406 ESPSTAT(("got bolixed\n"));
2407 esp_advance_phase(SCptr, in_the_dark);
2408 return esp_do_phase_determine(esp, eregs);
2409 }
2410
2411 if(!message_out) {
2412 ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status,
2413 SCptr->SCp.Message));
2414 if(SCptr->SCp.Message == COMMAND_COMPLETE) {
2415 ESPSTAT(("and was COMMAND_COMPLETE\n"));
2416 esp_advance_phase(SCptr, in_freeing);
2417 return esp_do_freebus(esp, eregs);
2418 } else {
2419 ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n",
2420 esp->esp_id));
2421 esp->msgin_len = esp->msgin_ctr = 1;
2422 esp_advance_phase(SCptr, in_msgindone);
2423 return esp_do_msgindone(esp, eregs);
2424 }
2425 } else {
2426 /* With luck we'll be able to let the target
2427		 * know that bad parity happened; it will know
2428		 * which byte caused the problem and send it
2429 * again. For the case where the status byte
2430 * receives bad parity, I do not believe most
2431 * targets recover very well. We'll see.
2432 */
2433 ESPLOG(("esp%d: bad parity somewhere mout=%2x\n",
2434 esp->esp_id, message_out));
2435 esp->cur_msgout[0] = message_out;
2436 esp->msgout_len = esp->msgout_ctr = 1;
2437 esp_advance_phase(SCptr, in_the_dark);
2438 return esp_do_phase_determine(esp, eregs);
2439 }
2440 } else {
2441 /* If we disconnect now, all hell breaks loose. */
2442 ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id));
2443 esp_advance_phase(SCptr, in_the_dark);
2444 return esp_do_phase_determine(esp, eregs);
2445 }
2446}
2447
2448static int esp_enter_status(struct NCR_ESP *esp,
2449 struct ESP_regs *eregs)
2450{
2451 unchar thecmd = ESP_CMD_ICCSEQ;
2452
2453 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
2454
2455 if(esp->do_pio_cmds) {
2456 esp_advance_phase(esp->current_SC, in_status);
2457 esp_cmd(esp, eregs, thecmd);
2458 while(!(esp_read(esp->eregs->esp_status) & ESP_STAT_INTR));
2459 esp->esp_command[0] = esp_read(eregs->esp_fdata);
2460 while(!(esp_read(esp->eregs->esp_status) & ESP_STAT_INTR));
2461 esp->esp_command[1] = esp_read(eregs->esp_fdata);
2462 } else {
2463 esp->esp_command[0] = esp->esp_command[1] = 0xff;
2464 esp_write(eregs->esp_tclow, 2);
2465 esp_write(eregs->esp_tcmed, 0);
2466 esp->dma_init_read(esp, esp->esp_command_dvma, 2);
2467 thecmd |= ESP_CMD_DMA;
2468 esp_cmd(esp, eregs, thecmd);
2469 esp_advance_phase(esp->current_SC, in_status);
2470 }
2471
2472 return esp_do_status(esp, eregs);
2473}
2474
2475static int esp_disconnect_amidst_phases(struct NCR_ESP *esp,
2476 struct ESP_regs *eregs)
2477{
2478 Scsi_Cmnd *sp = esp->current_SC;
2479 struct esp_device *esp_dev = sp->device->hostdata;
2480
2481 /* This means real problems if we see this
2482 * here. Unless we were actually trying
2483 * to force the device to abort/reset.
2484 */
2485 ESPLOG(("esp%d: Disconnect amidst phases, ", esp->esp_id));
2486 ESPLOG(("pphase<%s> cphase<%s>, ",
2487 phase_string(sp->SCp.phase),
2488 phase_string(sp->SCp.sent_command)));
2489
2490 if(esp->disconnected_SC)
2491 esp_cmd(esp, eregs, ESP_CMD_ESEL);
2492
2493 switch(esp->cur_msgout[0]) {
2494 default:
2495 /* We didn't expect this to happen at all. */
2496 ESPLOG(("device is bolixed\n"));
2497 esp_advance_phase(sp, in_tgterror);
2498 esp_done(esp, (DID_ERROR << 16));
2499 break;
2500
2501 case BUS_DEVICE_RESET:
2502 ESPLOG(("device reset successful\n"));
2503 esp_dev->sync_max_offset = 0;
2504 esp_dev->sync_min_period = 0;
2505 esp_dev->sync = 0;
2506 esp_advance_phase(sp, in_resetdev);
2507 esp_done(esp, (DID_RESET << 16));
2508 break;
2509
2510 case ABORT:
2511 ESPLOG(("device abort successful\n"));
2512 esp_advance_phase(sp, in_abortone);
2513 esp_done(esp, (DID_ABORT << 16));
2514 break;
2515
2516 };
2517 return do_intr_end;
2518}
2519
2520static int esp_enter_msgout(struct NCR_ESP *esp,
2521 struct ESP_regs *eregs)
2522{
2523 esp_advance_phase(esp->current_SC, in_msgout);
2524 return esp_do_msgout(esp, eregs);
2525}
2526
2527static int esp_enter_msgin(struct NCR_ESP *esp,
2528 struct ESP_regs *eregs)
2529{
2530 esp_advance_phase(esp->current_SC, in_msgin);
2531 return esp_do_msgin(esp, eregs);
2532}
2533
2534static int esp_enter_cmd(struct NCR_ESP *esp,
2535 struct ESP_regs *eregs)
2536{
2537 esp_advance_phase(esp->current_SC, in_cmdbegin);
2538 return esp_do_cmdbegin(esp, eregs);
2539}
2540
2541static int esp_enter_badphase(struct NCR_ESP *esp,
2542 struct ESP_regs *eregs)
2543{
2544 ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id,
2545 esp->sreg & ESP_STAT_PMASK));
2546 return do_reset_bus;
2547}
2548
2549typedef int (*espfunc_t)(struct NCR_ESP *,
2550 struct ESP_regs *);
2551
2552static espfunc_t phase_vector[] = {
2553 esp_do_data, /* ESP_DOP */
2554 esp_do_data, /* ESP_DIP */
2555 esp_enter_cmd, /* ESP_CMDP */
2556 esp_enter_status, /* ESP_STATP */
2557 esp_enter_badphase, /* ESP_STAT_PMSG */
2558 esp_enter_badphase, /* ESP_STAT_PMSG | ESP_STAT_PIO */
2559 esp_enter_msgout, /* ESP_MOP */
2560 esp_enter_msgin, /* ESP_MIP */
2561};
2562
2563/* The target has control of the bus and we have to see where it has
2564 * taken us.
2565 */
2566static int esp_do_phase_determine(struct NCR_ESP *esp,
2567 struct ESP_regs *eregs)
2568{
2569 if ((esp->ireg & ESP_INTR_DC) != 0)
2570 return esp_disconnect_amidst_phases(esp, eregs);
2571 return phase_vector[esp->sreg & ESP_STAT_PMASK](esp, eregs);
2572}
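/* Illustrative sketch, not part of the original driver: phase_vector[]
 * above is indexed with the three low status-register bits (ESP_STAT_PIO,
 * ESP_STAT_PCD and ESP_STAT_PMSG, masked by ESP_STAT_PMASK), which encode
 * the standard SCSI information transfer phases. A hypothetical helper
 * that decodes the same index might look like this:
 */
static const char *example_phase_name(unchar sreg)
{
	static const char *names[] = {
		"data out",	/* ESP_DOP:   MSG=0 CD=0 IO=0 */
		"data in",	/* ESP_DIP:   MSG=0 CD=0 IO=1 */
		"command",	/* ESP_CMDP:  MSG=0 CD=1 IO=0 */
		"status",	/* ESP_STATP: MSG=0 CD=1 IO=1 */
		"bad phase",	/* ESP_STAT_PMSG alone, reserved */
		"bad phase",	/* ESP_STAT_PMSG | ESP_STAT_PIO, reserved */
		"message out",	/* ESP_MOP:   MSG=1 CD=1 IO=0 */
		"message in",	/* ESP_MIP:   MSG=1 CD=1 IO=1 */
	};
	return names[sreg & ESP_STAT_PMASK];
}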
2573
2574/* First interrupt after exec'ing a cmd comes here. */
2575static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs)
2576{
2577 Scsi_Cmnd *SCptr = esp->current_SC;
2578 struct esp_device *esp_dev = SCptr->device->hostdata;
2579 int cmd_bytes_sent, fcnt;
2580
2581 fcnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
2582 cmd_bytes_sent = esp->dma_bytes_sent(esp, fcnt);
2583 if(esp->dma_invalidate)
2584 esp->dma_invalidate(esp);
2585
2586 /* Let's check to see if a reselect happened
2587	 * while we were trying to select. This must
2588 * be checked first.
2589 */
2590 if(esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
2591 esp_reconnect(esp, SCptr);
2592 return esp_do_reconnect(esp, eregs);
2593 }
2594
2595 /* Looks like things worked, we should see a bus service &
2596 * a function complete interrupt at this point. Note we
2597 * are doing a direct comparison because we don't want to
2598 * be fooled into thinking selection was successful if
2599 * ESP_INTR_DC is set, see below.
2600 */
2601 if(esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
2602 /* target speaks... */
2603 esp->targets_present |= (1<<scmd_id(SCptr));
2604
2605 /* What if the target ignores the sdtr? */
2606 if(esp->snip)
2607 esp_dev->sync = 1;
2608
2609 /* See how far, if at all, we got in getting
2610 * the information out to the target.
2611 */
2612 switch(esp->seqreg) {
2613 default:
2614
2615 case ESP_STEP_ASEL:
2616 /* Arbitration won, target selected, but
2617 * we are in some phase which is not command
2618 * phase nor is it message out phase.
2619 *
2620 * XXX We've confused the target, obviously.
2621			 * XXX So clear its state, but we also end
2622			 * XXX up clearing everyone else's. That isn't
2623			 * XXX so nice. I'd like to just reset this
2624			 * XXX target, but if I cannot even get its
2625 * XXX attention and finish selection to talk
2626 * XXX to it, there is not much more I can do.
2627 * XXX If we have a loaded bus we're going to
2628 * XXX spend the next second or so renegotiating
2629 * XXX for synchronous transfers.
2630 */
2631 ESPLOG(("esp%d: STEP_ASEL for tgt %d\n",
2632 esp->esp_id, SCptr->device->id));
2633
2634 case ESP_STEP_SID:
2635 /* Arbitration won, target selected, went
2636 * to message out phase, sent one message
2637 * byte, then we stopped. ATN is asserted
2638 * on the SCSI bus and the target is still
2639 * there hanging on. This is a legal
2640 * sequence step if we gave the ESP a select
2641 * and stop command.
2642 *
2643 * XXX See above, I could set the borken flag
2644 * XXX in the device struct and retry the
2645 * XXX command. But would that help for
2646 * XXX tagged capable targets?
2647 */
2648
2649 case ESP_STEP_NCMD:
2650 /* Arbitration won, target selected, maybe
2651 * sent the one message byte in message out
2652 * phase, but we did not go to command phase
2653 * in the end. Actually, we could have sent
2654 * only some of the message bytes if we tried
2655 * to send out the entire identify and tag
2656 * message using ESP_CMD_SA3.
2657 */
2658 cmd_bytes_sent = 0;
2659 break;
2660
2661 case ESP_STEP_PPC:
2662 /* No, not the powerPC pinhead. Arbitration
2663 * won, all message bytes sent if we went to
2664 * message out phase, went to command phase
2665 * but only part of the command was sent.
2666 *
2667 * XXX I've seen this, but usually in conjunction
2668 * XXX with a gross error which appears to have
2669 * XXX occurred between the time I told the
2670 * XXX ESP to arbitrate and when I got the
2671 * XXX interrupt. Could I have misloaded the
2672 * XXX command bytes into the fifo? Actually,
2673 * XXX I most likely missed a phase, and therefore
2674 * XXX went into never never land and didn't even
2675 * XXX know it. That was the old driver though.
2676 * XXX What is even more peculiar is that the ESP
2677 * XXX showed the proper function complete and
2678 * XXX bus service bits in the interrupt register.
2679 */
2680
2681 case ESP_STEP_FINI4:
2682 case ESP_STEP_FINI5:
2683 case ESP_STEP_FINI6:
2684 case ESP_STEP_FINI7:
2685 /* Account for the identify message */
2686 if(SCptr->SCp.phase == in_slct_norm)
2687 cmd_bytes_sent -= 1;
2688 };
2689 esp_cmd(esp, eregs, ESP_CMD_NULL);
2690
2691 /* Be careful, we could really get fucked during synchronous
2692 * data transfers if we try to flush the fifo now.
2693 */
2694 if(!fcnt && /* Fifo is empty and... */
2695 /* either we are not doing synchronous transfers or... */
2696 (!esp_dev->sync_max_offset ||
2697 /* We are not going into data in phase. */
2698 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
2699 esp_cmd(esp, eregs, ESP_CMD_FLUSH); /* flush is safe */
2700
2701 /* See how far we got if this is not a slow command. */
2702 if(!esp->esp_slowcmd) {
2703 if(cmd_bytes_sent < 0)
2704 cmd_bytes_sent = 0;
2705 if(cmd_bytes_sent != SCptr->cmd_len) {
2706 /* Crapola, mark it as a slowcmd
2707 * so that we have some chance of
2708 * keeping the command alive with
2709 * good luck.
2710 *
2711 * XXX Actually, if we didn't send it all
2712 * XXX this means either we didn't set things
2713 * XXX up properly (driver bug) or the target
2714 * XXX or the ESP detected parity on one of
2715 * XXX the command bytes. This makes much
2716 * XXX more sense, and therefore this code
2717 * XXX should be changed to send out a
2718 * XXX parity error message or if the status
2719 * XXX register shows no parity error then
2720 * XXX just expect the target to bring the
2721 * XXX bus into message in phase so that it
2722 * XXX can send us the parity error message.
2723 * XXX SCSI sucks...
2724 */
2725 esp->esp_slowcmd = 1;
2726 esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]);
2727 esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent);
2728 }
2729 }
2730
2731 /* Now figure out where we went. */
2732 esp_advance_phase(SCptr, in_the_dark);
2733 return esp_do_phase_determine(esp, eregs);
2734 }
2735
2736 /* Did the target even make it? */
2737 if(esp->ireg == ESP_INTR_DC) {
2738 /* wheee... nobody there or they didn't like
2739 * what we told it to do, clean up.
2740 */
2741
2742 /* If anyone is off the bus, but working on
2743 * a command in the background for us, tell
2744 * the ESP to listen for them.
2745 */
2746 if(esp->disconnected_SC)
2747 esp_cmd(esp, eregs, ESP_CMD_ESEL);
2748
2749 if(((1<<SCptr->device->id) & esp->targets_present) &&
2750 esp->seqreg && esp->cur_msgout[0] == EXTENDED_MESSAGE &&
2751 (SCptr->SCp.phase == in_slct_msg ||
2752 SCptr->SCp.phase == in_slct_stop)) {
2753 /* shit */
2754 esp->snip = 0;
2755 ESPLOG(("esp%d: Failed synchronous negotiation for target %d "
2756 "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
2757 esp_dev->sync_max_offset = 0;
2758 esp_dev->sync_min_period = 0;
2759 esp_dev->sync = 1; /* so we don't negotiate again */
2760
2761 /* Run the command again, this time though we
2762 * won't try to negotiate for synchronous transfers.
2763 *
2764 * XXX I'd like to do something like send an
2765 * XXX INITIATOR_ERROR or ABORT message to the
2766 * XXX target to tell it, "Sorry I confused you,
2767 * XXX please come back and I will be nicer next
2768 * XXX time". But that requires having the target
2769 * XXX on the bus, and it has dropped BSY on us.
2770 */
2771 esp->current_SC = NULL;
2772 esp_advance_phase(SCptr, not_issued);
2773 prepend_SC(&esp->issue_SC, SCptr);
2774 esp_exec_cmd(esp);
2775 return do_intr_end;
2776 }
2777
2778 /* Ok, this is normal, this is what we see during boot
2779		 * or whenever we are scanning the bus for targets.
2780 * But first make sure that is really what is happening.
2781 */
2782 if(((1<<SCptr->device->id) & esp->targets_present)) {
2783 ESPLOG(("esp%d: Warning, live target %d not responding to "
2784 "selection.\n", esp->esp_id, SCptr->device->id));
2785
2786 /* This _CAN_ happen. The SCSI standard states that
2787 * the target is to _not_ respond to selection if
2788 * _it_ detects bad parity on the bus for any reason.
2789 * Therefore, we assume that if we've talked successfully
2790 * to this target before, bad parity is the problem.
2791 */
2792 esp_done(esp, (DID_PARITY << 16));
2793 } else {
2794 /* Else, there really isn't anyone there. */
2795 ESPMISC(("esp: selection failure, maybe nobody there?\n"));
2796 ESPMISC(("esp: target %d lun %d\n",
2797 SCptr->device->id, SCptr->device->lun));
2798 esp_done(esp, (DID_BAD_TARGET << 16));
2799 }
2800 return do_intr_end;
2801 }
2802
2803
2804 ESPLOG(("esp%d: Selection failure.\n", esp->esp_id));
2805 printk("esp%d: Currently -- ", esp->esp_id);
2806 esp_print_ireg(esp->ireg);
2807 printk(" ");
2808 esp_print_statreg(esp->sreg);
2809 printk(" ");
2810 esp_print_seqreg(esp->seqreg);
2811 printk("\n");
2812 printk("esp%d: New -- ", esp->esp_id);
2813 esp->sreg = esp_read(eregs->esp_status);
2814 esp->seqreg = esp_read(eregs->esp_sstep);
2815 esp->ireg = esp_read(eregs->esp_intrpt);
2816 esp_print_ireg(esp->ireg);
2817 printk(" ");
2818 esp_print_statreg(esp->sreg);
2819 printk(" ");
2820 esp_print_seqreg(esp->seqreg);
2821 printk("\n");
2822 ESPLOG(("esp%d: resetting bus\n", esp->esp_id));
2823 return do_reset_bus; /* ugh... */
2824}
2825
2826/* Continue reading bytes for msgin phase. */
2827static int esp_do_msgincont(struct NCR_ESP *esp, struct ESP_regs *eregs)
2828{
2829 if(esp->ireg & ESP_INTR_BSERV) {
2830 /* in the right phase too? */
2831 if((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) {
2832 /* phew... */
2833 esp_cmd(esp, eregs, ESP_CMD_TI);
2834 esp_advance_phase(esp->current_SC, in_msgindone);
2835 return do_intr_end;
2836 }
2837
2838 /* We changed phase but ESP shows bus service,
2839 * in this case it is most likely that we, the
2840 * hacker who has been up for 20hrs straight
2841 * staring at the screen, drowned in coffee
2842		 * smelling like wretched cigarette ashes
2843 * have miscoded something..... so, try to
2844 * recover as best we can.
2845 */
2846 ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id));
2847 }
2848 esp_advance_phase(esp->current_SC, in_the_dark);
2849 return do_phase_determine;
2850}
2851
2852static int check_singlebyte_msg(struct NCR_ESP *esp,
2853 struct ESP_regs *eregs)
2854{
2855 esp->prevmsgin = esp->cur_msgin[0];
2856 if(esp->cur_msgin[0] & 0x80) {
2857 /* wheee... */
2858 ESPLOG(("esp%d: target sends identify amidst phases\n",
2859 esp->esp_id));
2860 esp_advance_phase(esp->current_SC, in_the_dark);
2861 return 0;
2862 } else if(((esp->cur_msgin[0] & 0xf0) == 0x20) ||
2863 (esp->cur_msgin[0] == EXTENDED_MESSAGE)) {
2864 esp->msgin_len = 2;
2865 esp_advance_phase(esp->current_SC, in_msgincont);
2866 return 0;
2867 }
2868 esp_advance_phase(esp->current_SC, in_the_dark);
2869 switch(esp->cur_msgin[0]) {
2870 default:
2871 /* We don't want to hear about it. */
2872 ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id,
2873 esp->cur_msgin[0]));
2874 return MESSAGE_REJECT;
2875
2876 case NOP:
2877 ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id,
2878 esp->current_SC->device->id));
2879 return 0;
2880
2881 case RESTORE_POINTERS:
2882 /* In this case we might also have to backup the
2883 * "slow command" pointer. It is rare to get such
2884 * a save/restore pointer sequence so early in the
2885 * bus transition sequences, but cover it.
2886 */
2887 if(esp->esp_slowcmd) {
2888 esp->esp_scmdleft = esp->current_SC->cmd_len;
2889 esp->esp_scmdp = &esp->current_SC->cmnd[0];
2890 }
2891 esp_restore_pointers(esp, esp->current_SC);
2892 return 0;
2893
2894 case SAVE_POINTERS:
2895 esp_save_pointers(esp, esp->current_SC);
2896 return 0;
2897
2898 case COMMAND_COMPLETE:
2899 case DISCONNECT:
2900 /* Freeing the bus, let it go. */
2901 esp->current_SC->SCp.phase = in_freeing;
2902 return 0;
2903
2904 case MESSAGE_REJECT:
2905 ESPMISC(("msg reject, "));
2906 if(esp->prevmsgout == EXTENDED_MESSAGE) {
2907 struct esp_device *esp_dev = esp->current_SC->device->hostdata;
2908
2909 /* Doesn't look like this target can
2910 * do synchronous or WIDE transfers.
2911 */
2912 ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n"));
2913 esp_dev->sync = 1;
2914 esp_dev->wide = 1;
2915 esp_dev->sync_min_period = 0;
2916 esp_dev->sync_max_offset = 0;
2917 return 0;
2918 } else {
2919 ESPMISC(("not sync nego, sending ABORT\n"));
2920 return ABORT;
2921 }
2922 };
2923}
2924
2925/* Target negotiates for synchronous transfers before we do; this
2926 * is legal although very strange. What is even funnier is that
2927 * the SCSI2 standard specifically recommends against targets doing
2928 * this because so many initiators cannot cope with this occurring.
2929 */
2930static int target_with_ants_in_pants(struct NCR_ESP *esp,
2931 Scsi_Cmnd *SCptr,
2932 struct esp_device *esp_dev)
2933{
2934 if(esp_dev->sync || SCptr->device->borken) {
2935 /* sorry, no can do */
2936 ESPSDTR(("forcing to async, "));
2937 build_sync_nego_msg(esp, 0, 0);
2938 esp_dev->sync = 1;
2939 esp->snip = 1;
2940 ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id));
2941 esp_advance_phase(SCptr, in_the_dark);
2942 return EXTENDED_MESSAGE;
2943 }
2944
2945 /* Ok, we'll check them out... */
2946 return 0;
2947}
2948
2949static void sync_report(struct NCR_ESP *esp)
2950{
2951 int msg3, msg4;
2952 char *type;
2953
2954 msg3 = esp->cur_msgin[3];
2955 msg4 = esp->cur_msgin[4];
2956 if(msg4) {
2957 int hz = 1000000000 / (msg3 * 4);
2958 int integer = hz / 1000000;
2959 int fraction = (hz - (integer * 1000000)) / 10000;
2960 if((msg3 * 4) < 200) {
2961 type = "FAST";
2962 } else {
2963 type = "synchronous";
2964 }
2965
2966 /* Do not transform this back into one big printk
2967 * again, it triggers a bug in our sparc64-gcc272
2968 * sibling call optimization. -DaveM
2969 */
2970 ESPLOG((KERN_INFO "esp%d: target %d ",
2971 esp->esp_id, esp->current_SC->device->id));
2972 ESPLOG(("[period %dns offset %d %d.%02dMHz ",
2973 (int) msg3 * 4, (int) msg4,
2974 integer, fraction));
2975 ESPLOG(("%s SCSI%s]\n", type,
2976 (((msg3 * 4) < 200) ? "-II" : "")));
2977 } else {
2978 ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n",
2979 esp->esp_id, esp->current_SC->device->id));
2980 }
2981}
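/* Worked example (illustrative, assumed values): for an SDTR response with
 * period factor cur_msgin[3] = 12 and offset cur_msgin[4] = 8, the period
 * is 12 * 4 = 48ns, hz = 1000000000 / 48 = 20833333, integer = 20 and
 * fraction = 83; since 48 < 200 the type is "FAST" with the "-II" suffix,
 * so sync_report() would log something like:
 *
 *	esp0: target 1 [period 48ns offset 8 20.83MHz FAST SCSI-II]
 *
 * (esp0 and target 1 are made-up identifiers for the example.)
 */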
2982
2983static int check_multibyte_msg(struct NCR_ESP *esp,
2984 struct ESP_regs *eregs)
2985{
2986 Scsi_Cmnd *SCptr = esp->current_SC;
2987 struct esp_device *esp_dev = SCptr->device->hostdata;
2988 unchar regval = 0;
2989 int message_out = 0;
2990
2991 ESPSDTR(("chk multibyte msg: "));
2992 if(esp->cur_msgin[2] == EXTENDED_SDTR) {
2993 int period = esp->cur_msgin[3];
2994 int offset = esp->cur_msgin[4];
2995
2996 ESPSDTR(("is sync nego response, "));
2997 if(!esp->snip) {
2998 int rval;
2999
3000 /* Target negotiates first! */
3001 ESPSDTR(("target jumps the gun, "));
3002 message_out = EXTENDED_MESSAGE; /* we must respond */
3003 rval = target_with_ants_in_pants(esp, SCptr, esp_dev);
3004 if(rval)
3005 return rval;
3006 }
3007
3008 ESPSDTR(("examining sdtr, "));
3009
3010 /* Offset cannot be larger than ESP fifo size. */
3011 if(offset > 15) {
3012 ESPSDTR(("offset too big %2x, ", offset));
3013 offset = 15;
3014 ESPSDTR(("sending back new offset\n"));
3015 build_sync_nego_msg(esp, period, offset);
3016 return EXTENDED_MESSAGE;
3017 }
3018
3019 if(offset && period > esp->max_period) {
3020 /* Yeee, async for this slow device. */
3021 ESPSDTR(("period too long %2x, ", period));
3022 build_sync_nego_msg(esp, 0, 0);
3023 ESPSDTR(("hoping for msgout\n"));
3024 esp_advance_phase(esp->current_SC, in_the_dark);
3025 return EXTENDED_MESSAGE;
3026 } else if (offset && period < esp->min_period) {
3027 ESPSDTR(("period too short %2x, ", period));
3028 period = esp->min_period;
3029 if(esp->erev > esp236)
3030 regval = 4;
3031 else
3032 regval = 5;
3033 } else if(offset) {
3034 int tmp;
3035
3036 ESPSDTR(("period is ok, "));
3037 tmp = esp->ccycle / 1000;
3038 regval = (((period << 2) + tmp - 1) / tmp);
3039 if(regval && (esp->erev > esp236)) {
3040 if(period >= 50)
3041 regval--;
3042 }
3043 }
3044
3045 if(offset) {
3046 unchar bit;
3047
3048 esp_dev->sync_min_period = (regval & 0x1f);
3049 esp_dev->sync_max_offset = (offset | esp->radelay);
3050 if(esp->erev > esp236) {
3051 if(esp->erev == fas100a)
3052 bit = ESP_CONFIG3_FAST;
3053 else
3054 bit = ESP_CONFIG3_FSCSI;
3055 if(period < 50)
3056 esp->config3[SCptr->device->id] |= bit;
3057 else
3058 esp->config3[SCptr->device->id] &= ~bit;
3059 esp->prev_cfg3 = esp->config3[SCptr->device->id];
3060 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
3061 }
3062 esp->prev_soff = esp_dev->sync_min_period;
3063 esp_write(eregs->esp_soff, esp->prev_soff);
3064 esp->prev_stp = esp_dev->sync_max_offset;
3065 esp_write(eregs->esp_stp, esp->prev_stp);
3066
3067 ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n",
3068 esp_dev->sync_max_offset,
3069 esp_dev->sync_min_period,
3070 esp->config3[scmd_id(SCptr)]));
3071
3072 esp->snip = 0;
3073 } else if(esp_dev->sync_max_offset) {
3074 unchar bit;
3075
3076 /* back to async mode */
3077			ESPSDTR(("unacceptable sync nego, forcing async\n"));
3078 esp_dev->sync_max_offset = 0;
3079 esp_dev->sync_min_period = 0;
3080 esp->prev_soff = 0;
3081 esp_write(eregs->esp_soff, 0);
3082 esp->prev_stp = 0;
3083 esp_write(eregs->esp_stp, 0);
3084 if(esp->erev > esp236) {
3085 if(esp->erev == fas100a)
3086 bit = ESP_CONFIG3_FAST;
3087 else
3088 bit = ESP_CONFIG3_FSCSI;
3089 esp->config3[SCptr->device->id] &= ~bit;
3090 esp->prev_cfg3 = esp->config3[SCptr->device->id];
3091 esp_write(eregs->esp_cfg3, esp->prev_cfg3);
3092 }
3093 }
3094
3095 sync_report(esp);
3096
3097 ESPSDTR(("chk multibyte msg: sync is known, "));
3098 esp_dev->sync = 1;
3099
3100 if(message_out) {
3101 ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n",
3102 esp->esp_id));
3103 build_sync_nego_msg(esp, period, offset);
3104 esp_advance_phase(SCptr, in_the_dark);
3105 return EXTENDED_MESSAGE;
3106 }
3107
3108 ESPSDTR(("returning zero\n"));
3109 esp_advance_phase(SCptr, in_the_dark); /* ...or else! */
3110 return 0;
3111 } else if(esp->cur_msgin[2] == EXTENDED_WDTR) {
3112 ESPLOG(("esp%d: AIEEE wide msg received\n", esp->esp_id));
3113 message_out = MESSAGE_REJECT;
3114 } else if(esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) {
3115 ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id));
3116 message_out = MESSAGE_REJECT;
3117 }
3118 esp_advance_phase(SCptr, in_the_dark);
3119 return message_out;
3120}
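/* Illustrative sketch, not from the original source: the SDTR handling in
 * check_multibyte_msg() rounds the negotiated period up to a whole number
 * of chip clocks before writing the transfer-period (esp_stp) register.
 * Assuming esp->ccycle / 1000 yields one chip clock in nanoseconds (for
 * example 40ns on a 25MHz clock), the conversion is:
 */
static inline unchar example_stp_value(int period, int clock_ns)
{
	/* period is the SDTR factor in units of 4ns; e.g. period = 50
	 * (200ns) with clock_ns = 40 gives (200 + 39) / 40 = 5 clocks.
	 */
	return (((period << 2) + clock_ns - 1) / clock_ns);
}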
3121
3122static int esp_do_msgindone(struct NCR_ESP *esp, struct ESP_regs *eregs)
3123{
3124 Scsi_Cmnd *SCptr = esp->current_SC;
3125 int message_out = 0, it = 0, rval;
3126
3127 rval = skipahead1(esp, eregs, SCptr, in_msgin, in_msgindone);
3128 if(rval)
3129 return rval;
3130 if(SCptr->SCp.sent_command != in_status) {
3131 if(!(esp->ireg & ESP_INTR_DC)) {
3132 if(esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) {
3133 message_out = MSG_PARITY_ERROR;
3134 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
3135 } else if((it = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES))!=1) {
3136 /* We certainly dropped the ball somewhere. */
3137 message_out = INITIATOR_ERROR;
3138 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
3139 } else if(!esp->msgin_len) {
3140 it = esp_read(eregs->esp_fdata);
3141 esp_advance_phase(SCptr, in_msgincont);
3142 } else {
3143 /* it is ok and we want it */
3144 it = esp->cur_msgin[esp->msgin_ctr] =
3145 esp_read(eregs->esp_fdata);
3146 esp->msgin_ctr++;
3147 }
3148 } else {
3149 esp_advance_phase(SCptr, in_the_dark);
3150 return do_work_bus;
3151 }
3152 } else {
3153 it = esp->cur_msgin[0];
3154 }
3155 if(!message_out && esp->msgin_len) {
3156 if(esp->msgin_ctr < esp->msgin_len) {
3157 esp_advance_phase(SCptr, in_msgincont);
3158 } else if(esp->msgin_len == 1) {
3159 message_out = check_singlebyte_msg(esp, eregs);
3160 } else if(esp->msgin_len == 2) {
3161 if(esp->cur_msgin[0] == EXTENDED_MESSAGE) {
3162 if((it+2) >= 15) {
3163 message_out = MESSAGE_REJECT;
3164 } else {
3165 esp->msgin_len = (it + 2);
3166 esp_advance_phase(SCptr, in_msgincont);
3167 }
3168 } else {
3169 message_out = MESSAGE_REJECT; /* foo on you */
3170 }
3171 } else {
3172 message_out = check_multibyte_msg(esp, eregs);
3173 }
3174 }
3175 if(message_out < 0) {
3176 return -message_out;
3177 } else if(message_out) {
3178 if(((message_out != 1) &&
3179 ((message_out < 0x20) || (message_out & 0x80))))
3180 esp->msgout_len = 1;
3181 esp->cur_msgout[0] = message_out;
3182 esp_cmd(esp, eregs, ESP_CMD_SATN);
3183 esp_advance_phase(SCptr, in_the_dark);
3184 esp->msgin_len = 0;
3185 }
3186 esp->sreg = esp_read(eregs->esp_status);
3187 esp->sreg &= ~(ESP_STAT_INTR);
3188 if((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD))
3189 esp_cmd(esp, eregs, ESP_CMD_MOK);
3190 if((SCptr->SCp.sent_command == in_msgindone) &&
3191 (SCptr->SCp.phase == in_freeing))
3192 return esp_do_freebus(esp, eregs);
3193 return do_intr_end;
3194}
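/* Worked example (illustrative): once the first two bytes of an extended
 * message are in, "it" holds the extended-message length byte and
 * msgin_len becomes (it + 2), i.e. the length plus the two header bytes
 * already read. For a synchronous transfer request the target sends
 * EXTENDED_MESSAGE, 0x03, EXTENDED_SDTR, period, offset, so msgin_len is
 * set to 3 + 2 = 5 and the code keeps taking bytes via in_msgincont until
 * all five have arrived and check_multibyte_msg() can run.
 */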
3195
3196static int esp_do_cmdbegin(struct NCR_ESP *esp, struct ESP_regs *eregs)
3197{
3198 unsigned char tmp;
3199 Scsi_Cmnd *SCptr = esp->current_SC;
3200
3201 esp_advance_phase(SCptr, in_cmdend);
3202 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
3203 tmp = *esp->esp_scmdp++;
3204 esp->esp_scmdleft--;
3205 esp_write(eregs->esp_fdata, tmp);
3206 esp_cmd(esp, eregs, ESP_CMD_TI);
3207 return do_intr_end;
3208}
3209
3210static int esp_do_cmddone(struct NCR_ESP *esp, struct ESP_regs *eregs)
3211{
3212 esp_cmd(esp, eregs, ESP_CMD_NULL);
3213 if(esp->ireg & ESP_INTR_BSERV) {
3214 esp_advance_phase(esp->current_SC, in_the_dark);
3215 return esp_do_phase_determine(esp, eregs);
3216 }
3217 ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n",
3218 esp->esp_id));
3219 return do_reset_bus;
3220}
3221
3222static int esp_do_msgout(struct NCR_ESP *esp, struct ESP_regs *eregs)
3223{
3224 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
3225 switch(esp->msgout_len) {
3226 case 1:
3227 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3228 esp_cmd(esp, eregs, ESP_CMD_TI);
3229 break;
3230
3231 case 2:
3232 if(esp->do_pio_cmds){
3233 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3234 esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
3235 esp_cmd(esp, eregs, ESP_CMD_TI);
3236 } else {
3237 esp->esp_command[0] = esp->cur_msgout[0];
3238 esp->esp_command[1] = esp->cur_msgout[1];
3239 esp->dma_setup(esp, esp->esp_command_dvma, 2, 0);
3240 esp_setcount(eregs, 2);
3241 esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
3242 }
3243 break;
3244
3245 case 4:
3246 esp->snip = 1;
3247 if(esp->do_pio_cmds){
3248 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3249 esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
3250 esp_write(eregs->esp_fdata, esp->cur_msgout[2]);
3251 esp_write(eregs->esp_fdata, esp->cur_msgout[3]);
3252 esp_cmd(esp, eregs, ESP_CMD_TI);
3253 } else {
3254 esp->esp_command[0] = esp->cur_msgout[0];
3255 esp->esp_command[1] = esp->cur_msgout[1];
3256 esp->esp_command[2] = esp->cur_msgout[2];
3257 esp->esp_command[3] = esp->cur_msgout[3];
3258 esp->dma_setup(esp, esp->esp_command_dvma, 4, 0);
3259 esp_setcount(eregs, 4);
3260 esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
3261 }
3262 break;
3263
3264 case 5:
3265 esp->snip = 1;
3266 if(esp->do_pio_cmds){
3267 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3268 esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
3269 esp_write(eregs->esp_fdata, esp->cur_msgout[2]);
3270 esp_write(eregs->esp_fdata, esp->cur_msgout[3]);
3271 esp_write(eregs->esp_fdata, esp->cur_msgout[4]);
3272 esp_cmd(esp, eregs, ESP_CMD_TI);
3273 } else {
3274 esp->esp_command[0] = esp->cur_msgout[0];
3275 esp->esp_command[1] = esp->cur_msgout[1];
3276 esp->esp_command[2] = esp->cur_msgout[2];
3277 esp->esp_command[3] = esp->cur_msgout[3];
3278 esp->esp_command[4] = esp->cur_msgout[4];
3279 esp->dma_setup(esp, esp->esp_command_dvma, 5, 0);
3280 esp_setcount(eregs, 5);
3281 esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
3282 }
3283 break;
3284
3285 default:
3286 /* whoops */
3287 ESPMISC(("bogus msgout sending NOP\n"));
3288 esp->cur_msgout[0] = NOP;
3289 esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
3290 esp->msgout_len = 1;
3291 esp_cmd(esp, eregs, ESP_CMD_TI);
3292 break;
3293 }
3294 esp_advance_phase(esp->current_SC, in_msgoutdone);
3295 return do_intr_end;
3296}
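/* Worked example (illustrative): the five-byte case above is the
 * synchronous negotiation message, which per the SCSI spec is
 * EXTENDED_MESSAGE, 0x03, EXTENDED_SDTR, period, offset, presumably as
 * filled into cur_msgout[] by build_sync_nego_msg(); that is also why the
 * longer cases set esp->snip to note that a sync negotiation is in flight
 * before the bytes are pushed out by PIO or DMA.
 */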
3297
3298static int esp_do_msgoutdone(struct NCR_ESP *esp,
3299 struct ESP_regs *eregs)
3300{
3301 if((esp->msgout_len > 1) && esp->dma_barrier)
3302 esp->dma_barrier(esp);
3303
3304 if(!(esp->ireg & ESP_INTR_DC)) {
3305 esp_cmd(esp, eregs, ESP_CMD_NULL);
3306 switch(esp->sreg & ESP_STAT_PMASK) {
3307 case ESP_MOP:
3308 /* whoops, parity error */
3309 ESPLOG(("esp%d: still in msgout, parity error assumed\n",
3310 esp->esp_id));
3311 if(esp->msgout_len > 1)
3312 esp_cmd(esp, eregs, ESP_CMD_SATN);
3313 esp_advance_phase(esp->current_SC, in_msgout);
3314 return do_work_bus;
3315
3316 case ESP_DIP:
3317 break;
3318
3319 default:
3320 if(!fcount(esp, eregs) &&
3321 !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset))
3322 esp_cmd(esp, eregs, ESP_CMD_FLUSH);
3323 break;
3324
3325 };
3326 }
3327
3328 /* If we sent out a synchronous negotiation message, update
3329 * our state.
3330 */
3331 if(esp->cur_msgout[2] == EXTENDED_MESSAGE &&
3332 esp->cur_msgout[4] == EXTENDED_SDTR) {
3333 esp->snip = 1; /* anal retentiveness... */
3334 }
3335
3336 esp->prevmsgout = esp->cur_msgout[0];
3337 esp->msgout_len = 0;
3338 esp_advance_phase(esp->current_SC, in_the_dark);
3339 return esp_do_phase_determine(esp, eregs);
3340}
3341
3342static int esp_bus_unexpected(struct NCR_ESP *esp, struct ESP_regs *eregs)
3343{
3344 ESPLOG(("esp%d: command in weird state %2x\n",
3345 esp->esp_id, esp->current_SC->SCp.phase));
3346 return do_reset_bus;
3347}
3348
3349static espfunc_t bus_vector[] = {
3350 esp_do_data_finale,
3351 esp_do_data_finale,
3352 esp_bus_unexpected,
3353 esp_do_msgin,
3354 esp_do_msgincont,
3355 esp_do_msgindone,
3356 esp_do_msgout,
3357 esp_do_msgoutdone,
3358 esp_do_cmdbegin,
3359 esp_do_cmddone,
3360 esp_do_status,
3361 esp_do_freebus,
3362 esp_do_phase_determine,
3363 esp_bus_unexpected,
3364 esp_bus_unexpected,
3365 esp_bus_unexpected,
3366};
3367
3368/* This is the second tier in our dual-level SCSI state machine. */
3369static int esp_work_bus(struct NCR_ESP *esp, struct ESP_regs *eregs)
3370{
3371 Scsi_Cmnd *SCptr = esp->current_SC;
3372 unsigned int phase;
3373
3374 ESPBUS(("esp_work_bus: "));
3375 if(!SCptr) {
3376 ESPBUS(("reconnect\n"));
3377 return esp_do_reconnect(esp, eregs);
3378 }
3379 phase = SCptr->SCp.phase;
3380 if ((phase & 0xf0) == in_phases_mask)
3381 return bus_vector[(phase & 0x0f)](esp, eregs);
3382 else if((phase & 0xf0) == in_slct_mask)
3383 return esp_select_complete(esp, eregs);
3384 else
3385 return esp_bus_unexpected(esp, eregs);
3386}
3387
3388static espfunc_t isvc_vector[] = {
3389 NULL,
3390 esp_do_phase_determine,
3391 esp_do_resetbus,
3392 esp_finish_reset,
3393 esp_work_bus
3394};
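/* Descriptive note (inferred from the table above and the state-machine
 * loop in esp_handle() below): the do_* codes returned by the second-tier
 * handlers index isvc_vector[], with entry 0 unused, so do_phase_determine,
 * do_reset_bus, do_reset_complete and do_work_bus dispatch to
 * esp_do_phase_determine(), esp_do_resetbus(), esp_finish_reset() and
 * esp_work_bus() respectively, while do_intr_end terminates the loop.
 */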
3395
3396/* Main interrupt handler for an esp adapter. */
3397void esp_handle(struct NCR_ESP *esp)
3398{
3399 struct ESP_regs *eregs;
3400 Scsi_Cmnd *SCptr;
3401 int what_next = do_intr_end;
3402 eregs = esp->eregs;
3403 SCptr = esp->current_SC;
3404
3405 if(esp->dma_irq_entry)
3406 esp->dma_irq_entry(esp);
3407
3408 /* Check for errors. */
3409 esp->sreg = esp_read(eregs->esp_status);
3410 esp->sreg &= (~ESP_STAT_INTR);
3411 esp->seqreg = (esp_read(eregs->esp_sstep) & ESP_STEP_VBITS);
3412 esp->ireg = esp_read(eregs->esp_intrpt); /* Unlatch intr and stat regs */
3413 ESPIRQ(("handle_irq: [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
3414 esp->sreg, esp->seqreg, esp->ireg));
3415 if(esp->sreg & (ESP_STAT_SPAM)) {
3416 /* Gross error, could be due to one of:
3417 *
3418 * - top of fifo overwritten, could be because
3419 * we tried to do a synchronous transfer with
3420 * an offset greater than ESP fifo size
3421 *
3422 * - top of command register overwritten
3423 *
3424 * - DMA setup to go in one direction, SCSI
3425 * bus points in the other, whoops
3426 *
3427 * - weird phase change during asynchronous
3428 * data phase while we are initiator
3429 */
3430 ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg));
3431
3432 /* If a command is live on the bus we cannot safely
3433 * reset the bus, so we'll just let the pieces fall
3434 * where they may. Here we are hoping that the
3435 * target will be able to cleanly go away soon
3436 * so we can safely reset things.
3437 */
3438 if(!SCptr) {
3439 ESPLOG(("esp%d: No current cmd during gross error, "
3440 "resetting bus\n", esp->esp_id));
3441 what_next = do_reset_bus;
3442 goto state_machine;
3443 }
3444 }
3445
3446 /* No current cmd is only valid at this point when there are
3447 * commands off the bus or we are trying a reset.
3448 */
3449 if(!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) {
3450 /* Panic is safe, since current_SC is null. */
3451 ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id));
3452 panic("esp_handle: current_SC == penguin within interrupt!");
3453 }
3454
3455 if(esp->ireg & (ESP_INTR_IC)) {
3456 /* Illegal command fed to ESP. Outside of obvious
3457 * software bugs that could cause this, there is
3458 * a condition with ESP100 where we can confuse the
3459 * ESP into an erroneous illegal command interrupt
3460 * because it does not scrape the FIFO properly
3461 * for reselection. See esp100_reconnect_hwbug()
3462 * to see how we try very hard to avoid this.
3463 */
3464 ESPLOG(("esp%d: invalid command\n", esp->esp_id));
3465
3466 esp_dump_state(esp, eregs);
3467
3468 if(SCptr) {
3469 /* Devices with very buggy firmware can drop BSY
3470 * during a scatter list interrupt when using sync
3471 * mode transfers. We continue the transfer as
3472 * expected, the target drops the bus, the ESP
3473 * gets confused, and we get a illegal command
3474 * interrupt because the bus is in the disconnected
3475 * state now and ESP_CMD_TI is only allowed when
3476 * a nexus is alive on the bus.
3477 */
3478 ESPLOG(("esp%d: Forcing async and disabling disconnect for "
3479 "target %d\n", esp->esp_id, SCptr->device->id));
3480 SCptr->device->borken = 1; /* foo on you */
3481 }
3482
3483 what_next = do_reset_bus;
3484 } else if(!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) {
3485 int phase;
3486
3487 if(SCptr) {
3488 phase = SCptr->SCp.phase;
3489 if(phase & in_phases_mask) {
3490 what_next = esp_work_bus(esp, eregs);
3491 } else if(phase & in_slct_mask) {
3492 what_next = esp_select_complete(esp, eregs);
3493 } else {
3494 ESPLOG(("esp%d: interrupt for no good reason...\n",
3495 esp->esp_id));
3496 what_next = do_intr_end;
3497 }
3498 } else {
3499 ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n",
3500 esp->esp_id));
3501 what_next = do_reset_bus;
3502 }
3503 } else if(esp->ireg & ESP_INTR_SR) {
3504 ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id));
3505 what_next = do_reset_complete;
3506 } else if(esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) {
3507 ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n",
3508 esp->esp_id));
3509 what_next = do_reset_bus;
3510 } else if(esp->ireg & ESP_INTR_RSEL) {
3511 if(!SCptr) {
3512 /* This is ok. */
3513 what_next = esp_do_reconnect(esp, eregs);
3514 } else if(SCptr->SCp.phase & in_slct_mask) {
3515 /* Only selection code knows how to clean
3516 * up properly.
3517 */
3518 ESPDISC(("Reselected during selection attempt\n"));
3519 what_next = esp_select_complete(esp, eregs);
3520 } else {
3521 ESPLOG(("esp%d: Reselected while bus is busy\n",
3522 esp->esp_id));
3523 what_next = do_reset_bus;
3524 }
3525 }
3526
3527 /* This is tier-one in our dual level SCSI state machine. */
3528state_machine:
3529 while(what_next != do_intr_end) {
3530 if (what_next >= do_phase_determine &&
3531 what_next < do_intr_end)
3532 what_next = isvc_vector[what_next](esp, eregs);
3533 else {
3534 /* state is completely lost ;-( */
3535 ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n",
3536 esp->esp_id));
3537 what_next = do_reset_bus;
3538 }
3539 }
3540 if(esp->dma_irq_exit)
3541 esp->dma_irq_exit(esp);
3542}
3543EXPORT_SYMBOL(esp_handle);
3544
3545#ifndef CONFIG_SMP
3546irqreturn_t esp_intr(int irq, void *dev_id)
3547{
3548 struct NCR_ESP *esp;
3549 unsigned long flags;
3550 int again;
3551 struct Scsi_Host *dev = dev_id;
3552
3553 /* Handle all ESP interrupts showing at this IRQ level. */
3554 spin_lock_irqsave(dev->host_lock, flags);
3555repeat:
3556 again = 0;
3557 for_each_esp(esp) {
3558#ifndef __mips__
3559 if(((esp)->irq & 0xff) == irq) {
3560#endif
3561 if(esp->dma_irq_p(esp)) {
3562 again = 1;
3563
3564 esp->dma_ints_off(esp);
3565
3566 ESPIRQ(("I%d(", esp->esp_id));
3567 esp_handle(esp);
3568 ESPIRQ((")"));
3569
3570 esp->dma_ints_on(esp);
3571 }
3572#ifndef __mips__
3573 }
3574#endif
3575 }
3576 if(again)
3577 goto repeat;
3578 spin_unlock_irqrestore(dev->host_lock, flags);
3579 return IRQ_HANDLED;
3580}
3581#else
3582/* For SMP we only service one ESP on the list at our IRQ level! */
3583irqreturn_t esp_intr(int irq, void *dev_id)
3584{
3585 struct NCR_ESP *esp;
3586 unsigned long flags;
3587 struct Scsi_Host *dev = dev_id;
3588
3589 /* Handle all ESP interrupts showing at this IRQ level. */
3590 spin_lock_irqsave(dev->host_lock, flags);
3591 for_each_esp(esp) {
3592 if(((esp)->irq & 0xf) == irq) {
3593 if(esp->dma_irq_p(esp)) {
3594 esp->dma_ints_off(esp);
3595
3596 ESPIRQ(("I[%d:%d](",
3597 smp_processor_id(), esp->esp_id));
3598 esp_handle(esp);
3599 ESPIRQ((")"));
3600
3601 esp->dma_ints_on(esp);
3602 goto out;
3603 }
3604 }
3605 }
3606out:
3607 spin_unlock_irqrestore(dev->host_lock, flags);
3608 return IRQ_HANDLED;
3609}
3610#endif
3611
3612int esp_slave_alloc(struct scsi_device *SDptr)
3613{
3614 struct esp_device *esp_dev =
3615 kzalloc(sizeof(struct esp_device), GFP_ATOMIC);
3616
3617 if (!esp_dev)
3618 return -ENOMEM;
3619 SDptr->hostdata = esp_dev;
3620 return 0;
3621}
3622
3623void esp_slave_destroy(struct scsi_device *SDptr)
3624{
3625 struct NCR_ESP *esp = (struct NCR_ESP *) SDptr->host->hostdata;
3626
3627 esp->targets_present &= ~(1 << sdev_id(SDptr));
3628 kfree(SDptr->hostdata);
3629 SDptr->hostdata = NULL;
3630}
3631
3632#ifdef MODULE
3633int init_module(void) { return 0; }
3634void cleanup_module(void) {}
3635void esp_release(void)
3636{
3637 esps_in_use--;
3638 esps_running = esps_in_use;
3639}
3640EXPORT_SYMBOL(esp_release);
3641#endif
3642
3643EXPORT_SYMBOL(esp_abort);
3644EXPORT_SYMBOL(esp_allocate);
3645EXPORT_SYMBOL(esp_deallocate);
3646EXPORT_SYMBOL(esp_initialize);
3647EXPORT_SYMBOL(esp_intr);
3648EXPORT_SYMBOL(esp_queue);
3649EXPORT_SYMBOL(esp_reset);
3650EXPORT_SYMBOL(esp_slave_alloc);
3651EXPORT_SYMBOL(esp_slave_destroy);
3652EXPORT_SYMBOL(esps_in_use);
3653
3654MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/NCR53C9x.h b/drivers/scsi/NCR53C9x.h
deleted file mode 100644
index 00a0ba040dba..000000000000
--- a/drivers/scsi/NCR53C9x.h
+++ /dev/null
@@ -1,668 +0,0 @@
1/* NCR53C9x.h: Defines and structures for the NCR53C9x generic driver.
2 *
3 * Originally esp.h: Defines and structures for the Sparc ESP
4 * (Enhanced SCSI Processor) driver under Linux.
5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
7 *
8 * Generalization by Jesper Skov (jskov@cygnus.co.uk)
9 *
10 * More generalization (for i386 stuff) by Tymm Twillman (tymm@computer.org)
11 */
12
13#ifndef NCR53C9X_H
14#define NCR53C9X_H
15
16#include <linux/interrupt.h>
17
18/* djweis for mac driver */
19#if defined(CONFIG_MAC)
20#define PAD_SIZE 15
21#else
22#define PAD_SIZE 3
23#endif
24
25/* Handle multiple hostadapters on Amiga
26 * generally PAD_SIZE = 3
27 * but there is one exception: Oktagon (PAD_SIZE = 1) */
28#if defined(CONFIG_OKTAGON_SCSI) || defined(CONFIG_OKTAGON_SCSI_MODULE)
29#undef PAD_SIZE
30#if defined(CONFIG_BLZ1230_SCSI) || defined(CONFIG_BLZ1230_SCSI_MODULE) || \
31 defined(CONFIG_BLZ2060_SCSI) || defined(CONFIG_BLZ2060_SCSI_MODULE) || \
32 defined(CONFIG_CYBERSTORM_SCSI) || defined(CONFIG_CYBERSTORM_SCSI_MODULE) || \
33 defined(CONFIG_CYBERSTORMII_SCSI) || defined(CONFIG_CYBERSTORMII_SCSI_MODULE) || \
34 defined(CONFIG_FASTLANE_SCSI) || defined(CONFIG_FASTLANE_SCSI_MODULE)
35#define MULTIPLE_PAD_SIZES
36#else
37#define PAD_SIZE 1
38#endif
39#endif
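/* Worked example (illustrative): PAD_SIZE pads each one-byte register in
 * struct ESP_regs below so consecutive registers land on the addresses the
 * hardware actually decodes. With the default PAD_SIZE of 3 every register
 * occupies 4 bytes (offsets 0x00, 0x04, 0x08, ...); the Mac value of 15
 * spreads them 16 bytes apart, and the Oktagon value of 1 gives a 2-byte
 * stride.
 */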
40
41/* Macros for debugging messages */
42
43#define DEBUG_ESP
44/* #define DEBUG_ESP_DATA */
45/* #define DEBUG_ESP_QUEUE */
46/* #define DEBUG_ESP_DISCONNECT */
47/* #define DEBUG_ESP_STATUS */
48/* #define DEBUG_ESP_PHASES */
49/* #define DEBUG_ESP_WORKBUS */
50/* #define DEBUG_STATE_MACHINE */
51/* #define DEBUG_ESP_CMDS */
52/* #define DEBUG_ESP_IRQS */
53/* #define DEBUG_SDTR */
54/* #define DEBUG_ESP_SG */
55
56/* Use the following to sprinkle debugging messages in a way which
57 * suits you if combinations of the above become too verbose when
58 * trying to track down a specific problem.
59 */
60/* #define DEBUG_ESP_MISC */
61
62#if defined(DEBUG_ESP)
63#define ESPLOG(foo) printk foo
64#else
65#define ESPLOG(foo)
66#endif /* (DEBUG_ESP) */
67
68#if defined(DEBUG_ESP_DATA)
69#define ESPDATA(foo) printk foo
70#else
71#define ESPDATA(foo)
72#endif
73
74#if defined(DEBUG_ESP_QUEUE)
75#define ESPQUEUE(foo) printk foo
76#else
77#define ESPQUEUE(foo)
78#endif
79
80#if defined(DEBUG_ESP_DISCONNECT)
81#define ESPDISC(foo) printk foo
82#else
83#define ESPDISC(foo)
84#endif
85
86#if defined(DEBUG_ESP_STATUS)
87#define ESPSTAT(foo) printk foo
88#else
89#define ESPSTAT(foo)
90#endif
91
92#if defined(DEBUG_ESP_PHASES)
93#define ESPPHASE(foo) printk foo
94#else
95#define ESPPHASE(foo)
96#endif
97
98#if defined(DEBUG_ESP_WORKBUS)
99#define ESPBUS(foo) printk foo
100#else
101#define ESPBUS(foo)
102#endif
103
104#if defined(DEBUG_ESP_IRQS)
105#define ESPIRQ(foo) printk foo
106#else
107#define ESPIRQ(foo)
108#endif
109
110#if defined(DEBUG_SDTR)
111#define ESPSDTR(foo) printk foo
112#else
113#define ESPSDTR(foo)
114#endif
115
116#if defined(DEBUG_ESP_MISC)
117#define ESPMISC(foo) printk foo
118#else
119#define ESPMISC(foo)
120#endif
121
122/*
123 * padding for register structure
124 */
125#ifdef CONFIG_JAZZ_ESP
126#define EREGS_PAD(n)
127#else
128#ifndef MULTIPLE_PAD_SIZES
129#define EREGS_PAD(n) unchar n[PAD_SIZE];
130#endif
131#endif
132
133/* The ESP SCSI controllers have their register sets in three
134 * "classes":
135 *
136 * 1) Registers which are both read and write.
137 * 2) Registers which are read only.
138 * 3) Registers which are write only.
139 *
140 * Yet, they all live within the same IO space.
141 */
142
143#if !defined(__i386__) && !defined(__x86_64__)
144
145#ifndef MULTIPLE_PAD_SIZES
146
147#ifdef CONFIG_CPU_HAS_WB
148#include <asm/wbflush.h>
149#define esp_write(__reg, __val) do{(__reg) = (__val); wbflush();} while(0)
150#else
151#define esp_write(__reg, __val) ((__reg) = (__val))
152#endif
153#define esp_read(__reg) (__reg)
154
155struct ESP_regs {
156 /* Access Description Offset */
157 volatile unchar esp_tclow; /* rw Low bits of the transfer count 0x00 */
158 EREGS_PAD(tlpad1);
159 volatile unchar esp_tcmed; /* rw Mid bits of the transfer count 0x04 */
160 EREGS_PAD(fdpad);
161 volatile unchar esp_fdata; /* rw FIFO data bits 0x08 */
162 EREGS_PAD(cbpad);
163 volatile unchar esp_cmnd; /* rw SCSI command bits 0x0c */
164 EREGS_PAD(stpad);
165 volatile unchar esp_status; /* ro ESP status register 0x10 */
166#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
167 EREGS_PAD(irqpd);
168 volatile unchar esp_intrpt; /* ro Kind of interrupt 0x14 */
169#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
170 EREGS_PAD(sspad);
171 volatile unchar esp_sstep; /* ro Sequence step register 0x18 */
172#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
173 EREGS_PAD(ffpad);
174 volatile unchar esp_fflags; /* ro Bits of current FIFO info 0x1c */
175#define esp_soff esp_fflags /* wo Sync offset 0x1c */
176 EREGS_PAD(cf1pd);
177 volatile unchar esp_cfg1; /* rw First configuration register 0x20 */
178 EREGS_PAD(cfpad);
179 volatile unchar esp_cfact; /* wo Clock conversion factor 0x24 */
180 EREGS_PAD(ctpad);
181 volatile unchar esp_ctest; /* wo Chip test register 0x28 */
182 EREGS_PAD(cf2pd);
183 volatile unchar esp_cfg2; /* rw Second configuration register 0x2c */
184 EREGS_PAD(cf3pd);
185
186 /* The following is only found on the 53C9X series SCSI chips */
187 volatile unchar esp_cfg3; /* rw Third configuration register 0x30 */
188 EREGS_PAD(cf4pd);
189 volatile unchar esp_cfg4; /* rw Fourth configuration register 0x34 */
190 EREGS_PAD(thpd);
191 /* The following is found on all chips except the NCR53C90 (ESP100) */
192 volatile unchar esp_tchi; /* rw High bits of transfer count 0x38 */
193#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
194 EREGS_PAD(fgpad);
195 volatile unchar esp_fgrnd; /* rw Data base for fifo 0x3c */
196};
197
198#else /* MULTIPLE_PAD_SIZES */
199
200#define esp_write(__reg, __val) (*(__reg) = (__val))
201#define esp_read(__reg) (*(__reg))
202
203struct ESP_regs {
204 unsigned char io_addr[64]; /* dummy */
205 /* Access Description Offset */
206#define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */
207#define esp_tcmed io_addr + (1<<(esp->shift)) /* rw Mid bits of the transfer count 0x04 */
208#define esp_fdata io_addr + (2<<(esp->shift)) /* rw FIFO data bits 0x08 */
209#define esp_cmnd io_addr + (3<<(esp->shift)) /* rw SCSI command bits 0x0c */
210#define esp_status io_addr + (4<<(esp->shift)) /* ro ESP status register 0x10 */
211#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
212#define esp_intrpt io_addr + (5<<(esp->shift)) /* ro Kind of interrupt 0x14 */
213#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
214#define esp_sstep io_addr + (6<<(esp->shift)) /* ro Sequence step register 0x18 */
215#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
216#define esp_fflags io_addr + (7<<(esp->shift)) /* ro Bits of current FIFO info 0x1c */
217#define esp_soff esp_fflags /* wo Sync offset 0x1c */
218#define esp_cfg1 io_addr + (8<<(esp->shift)) /* rw First configuration register 0x20 */
219#define esp_cfact io_addr + (9<<(esp->shift)) /* wo Clock conversion factor 0x24 */
220#define esp_ctest io_addr + (10<<(esp->shift)) /* wo Chip test register 0x28 */
221#define esp_cfg2 io_addr + (11<<(esp->shift)) /* rw Second configuration register 0x2c */
222
223 /* The following is only found on the 53C9X series SCSI chips */
224#define esp_cfg3 io_addr + (12<<(esp->shift)) /* rw Third configuration register 0x30 */
225#define esp_cfg4 io_addr + (13<<(esp->shift)) /* rw Fourth configuration register 0x34 */
226
227 /* The following is found on all chips except the NCR53C90 (ESP100) */
228#define esp_tchi io_addr + (14<<(esp->shift)) /* rw High bits of transfer count 0x38 */
229#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
230#define esp_fgrnd io_addr + (15<<(esp->shift)) /* rw Data base for fifo 0x3c */
231};
232
233#endif
234
235#else /* !defined(__i386__) && !defined(__x86_64__) */
236
237#define esp_write(__reg, __val) outb((__val), (__reg))
238#define esp_read(__reg) inb((__reg))
239
240struct ESP_regs {
241 unsigned int io_addr;
242 /* Access Description Offset */
243#define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */
244#define esp_tcmed io_addr + 1 /* rw Mid bits of the transfer count 0x04 */
245#define esp_fdata io_addr + 2 /* rw FIFO data bits 0x08 */
246#define esp_cmnd io_addr + 3 /* rw SCSI command bits 0x0c */
247#define esp_status io_addr + 4 /* ro ESP status register 0x10 */
248#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
249#define esp_intrpt io_addr + 5 /* ro Kind of interrupt 0x14 */
250#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
251#define esp_sstep io_addr + 6 /* ro Sequence step register 0x18 */
252#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
253#define esp_fflags io_addr + 7 /* ro Bits of current FIFO info 0x1c */
254#define esp_soff esp_fflags /* wo Sync offset 0x1c */
255#define esp_cfg1 io_addr + 8 /* rw First configuration register 0x20 */
256#define esp_cfact io_addr + 9 /* wo Clock conversion factor 0x24 */
257#define esp_ctest io_addr + 10 /* wo Chip test register 0x28 */
258#define esp_cfg2 io_addr + 11 /* rw Second configuration register 0x2c */
259
260 /* The following is only found on the 53C9X series SCSI chips */
261#define esp_cfg3 io_addr + 12 /* rw Third configuration register 0x30 */
262#define esp_cfg4 io_addr + 13 /* rw Fourth configuration register 0x34 */
263
264 /* The following is found on all chips except the NCR53C90 (ESP100) */
265#define esp_tchi io_addr + 14 /* rw High bits of transfer count 0x38 */
266#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
267#define esp_fgrnd io_addr + 15 /* rw Data base for fifo 0x3c */
268};
269
270#endif /* !defined(__i386__) && !defined(__x86_64__) */
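/* Illustrative usage (not part of the original header): whichever register
 * model is selected above, driver code goes through the same accessors,
 * for example:
 *
 *	esp->sreg = esp_read(eregs->esp_status);
 *	esp_write(eregs->esp_soff, esp->prev_soff);
 *
 * On most platforms these expand to volatile memory accesses into the
 * padded ESP_regs structure (optionally scaled by esp->shift), while on
 * i386/x86_64 they become inb()/outb() port I/O relative to io_addr.
 */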
271
272/* Various revisions of the ESP board. */
273enum esp_rev {
274 esp100 = 0x00, /* NCR53C90 - very broken */
275 esp100a = 0x01, /* NCR53C90A */
276 esp236 = 0x02,
277 fas236 = 0x03,
278 fas100a = 0x04,
279 fast = 0x05,
280 fas366 = 0x06,
281 fas216 = 0x07,
282 fsc = 0x08, /* SYM53C94-2 */
283 espunknown = 0x09
284};
285
286/* We allocate one of these for each scsi device and attach it to
287 * SDptr->hostdata for use in the driver
288 */
289struct esp_device {
290 unsigned char sync_min_period;
291 unsigned char sync_max_offset;
292 unsigned sync:1;
293 unsigned wide:1;
294 unsigned disconnect:1;
295};
296
297/* We get one of these for each ESP probed. */
298struct NCR_ESP {
299 struct NCR_ESP *next; /* Next ESP on probed or NULL */
300 struct ESP_regs *eregs; /* All esp registers */
301 int dma; /* Who I do transfers with. */
302 void *dregs; /* And his registers. */
303 struct Scsi_Host *ehost; /* Backpointer to SCSI Host */
304
305 void *edev; /* Pointer to controller base/SBus */
306 int esp_id; /* Unique per-ESP ID number */
307
308 /* ESP Configuration Registers */
309 unsigned char config1; /* Copy of the 1st config register */
310 unsigned char config2; /* Copy of the 2nd config register */
311 unsigned char config3[16]; /* Copy of the 3rd config register */
312
313 /* The current command we are sending to the ESP chip. This esp_command
314 * ptr needs to be mapped in DVMA area so we can send commands and read
315 * from the ESP fifo without burning precious CPU cycles. Programmed I/O
316 * sucks when we have the DVMA to do it for us. The ESP is stupid and will
317 * only send out 6, 10, and 12 byte SCSI commands, others we need to send
318 * one byte at a time. esp_slowcmd being set says that we are doing one
319 * of the command types ESP doesn't understand, esp_scmdp keeps track of
320 * which byte we are sending, esp_scmdleft says how many bytes to go.
321 */
322 volatile unchar *esp_command; /* Location of command (CPU view) */
323 __u32 esp_command_dvma; /* Location of command (DVMA view) */
324 unsigned char esp_clen; /* Length of this command */
325 unsigned char esp_slowcmd;
326 unsigned char *esp_scmdp;
327 unsigned char esp_scmdleft;
328
329 /* The following are used to determine the cause of an IRQ. Upon every
330 * IRQ entry we synchronize these with the hardware registers.
331 */
332 unchar ireg; /* Copy of ESP interrupt register */
333 unchar sreg; /* Same for ESP status register */
334 unchar seqreg; /* The ESP sequence register */
335
336 /* The following is set when a premature interrupt condition is detected
337 * in some FAS revisions.
338 */
339 unchar fas_premature_intr_workaround;
340
341 /* To save register writes to the ESP, which can be expensive, we
342 * keep track of the previous value that various registers had for
343 * the last target we connected to. If they are the same for the
344 * current target, we skip the register writes as they are not needed.
345 */
346 unchar prev_soff, prev_stp, prev_cfg3;
347
348 /* For each target we keep track of save/restore data
349 * pointer information. This needs to be updated majorly
350 * when we add support for tagged queueing. -DaveM
351 */
352 struct esp_pointers {
353 char *saved_ptr;
354 struct scatterlist *saved_buffer;
355 int saved_this_residual;
356 int saved_buffers_residual;
357 } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/;
358
359 /* Clock periods, frequencies, synchronization, etc. */
360 unsigned int cfreq; /* Clock frequency in HZ */
361 unsigned int cfact; /* Clock conversion factor */
362 unsigned int ccycle; /* One ESP clock cycle */
363 unsigned int ctick; /* One ESP clock time */
364 unsigned int radelay; /* FAST chip req/ack delay */
365 unsigned int neg_defp; /* Default negotiation period */
366 unsigned int sync_defp; /* Default sync transfer period */
367 unsigned int max_period; /* longest our period can be */
368 unsigned int min_period; /* shortest period we can withstand */
369 /* For slow to medium speed input clock rates we shoot for 5mb/s,
370 * but for high input clock rates we try to do 10mb/s although I
371 * don't think a transfer can even run that fast with an ESP even
372 * with DMA2 scatter gather pipelining.
373 */
374#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
375#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
376
377 unsigned int snip; /* Sync. negotiation in progress */
378 unsigned int wnip; /* WIDE negotiation in progress */
379 unsigned int targets_present; /* targets spoken to before */
380
381 int current_transfer_size; /* Set at beginning of data dma */
382
383 unchar espcmdlog[32]; /* Log of current esp cmds sent. */
384 unchar espcmdent; /* Current entry in esp cmd log. */
385
386 /* Misc. info about this ESP */
387 enum esp_rev erev; /* ESP revision */
388 int irq; /* IRQ for this ESP */
389 int scsi_id; /* Who am I as initiator? */
390 int scsi_id_mask; /* Bitmask of 'me'. */
391 int diff; /* Differential SCSI bus? */
392 int slot; /* Slot the adapter occupies */
393
394 /* Our command queues, only one cmd lives in the current_SC queue. */
395 Scsi_Cmnd *issue_SC; /* Commands to be issued */
396 Scsi_Cmnd *current_SC; /* Who is currently working the bus */
397 Scsi_Cmnd *disconnected_SC; /* Commands disconnected from the bus */
398
399 /* Message goo */
400 unchar cur_msgout[16];
401 unchar cur_msgin[16];
402 unchar prevmsgout, prevmsgin;
403 unchar msgout_len, msgin_len;
404 unchar msgout_ctr, msgin_ctr;
405
406 /* States that we cannot keep in the per cmd structure because they
407 * cannot be associated with any specific command.
408 */
409 unchar resetting_bus;
410 wait_queue_head_t reset_queue;
411
412 unchar do_pio_cmds; /* Do command transfer with pio */
413
414	/* How many bits we have to shift the registers */
415 unsigned char shift;
416
417 /* Functions handling DMA
418 */
419 /* Required functions */
420 int (*dma_bytes_sent)(struct NCR_ESP *, int);
421 int (*dma_can_transfer)(struct NCR_ESP *, Scsi_Cmnd *);
422 void (*dma_dump_state)(struct NCR_ESP *);
423 void (*dma_init_read)(struct NCR_ESP *, __u32, int);
424 void (*dma_init_write)(struct NCR_ESP *, __u32, int);
425 void (*dma_ints_off)(struct NCR_ESP *);
426 void (*dma_ints_on)(struct NCR_ESP *);
427 int (*dma_irq_p)(struct NCR_ESP *);
428 int (*dma_ports_p)(struct NCR_ESP *);
429 void (*dma_setup)(struct NCR_ESP *, __u32, int, int);
430
431 /* Optional functions (i.e. may be initialized to 0) */
432 void (*dma_barrier)(struct NCR_ESP *);
433 void (*dma_drain)(struct NCR_ESP *);
434 void (*dma_invalidate)(struct NCR_ESP *);
435 void (*dma_irq_entry)(struct NCR_ESP *);
436 void (*dma_irq_exit)(struct NCR_ESP *);
437 void (*dma_led_off)(struct NCR_ESP *);
438 void (*dma_led_on)(struct NCR_ESP *);
439 void (*dma_poll)(struct NCR_ESP *, unsigned char *);
440 void (*dma_reset)(struct NCR_ESP *);
441
442 /* Optional virtual DMA functions */
443 void (*dma_mmu_get_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *);
444 void (*dma_mmu_get_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *);
445 void (*dma_mmu_release_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *);
446 void (*dma_mmu_release_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *);
447 void (*dma_advance_sg)(Scsi_Cmnd *);
448};
449
450/* Bitfield meanings for the above registers. */
451
452/* ESP config reg 1, read-write, found on all ESP chips */
453#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
454#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
455#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
456#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
457#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
458#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
459
460/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236+fsc chips */
461#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236,fsc) */
462#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236,fsc) */
463#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
464#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */
465#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
466#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
467#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236,fsc) */
468#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216,fsc) */
469#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */
470#define ESP_CONFIG2_RFB 0x80 /* Reserve FIFO byte (fsc) */
471#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
472
473/* ESP config register 3 read-write, found only esp236+fas236+fas100a+fsc chips */
474#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/fas366) */
475#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236/fsc) */
476#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a) */
477#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236/fsc) */
478#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a) */
479#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236/fsc) */
480#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a) */
481#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236/fsc) */
482#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a) */
483#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236/fsc) */
484#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236/fsc) */
485#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236/fsc) */
486#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236/fsc) */
487
488/* ESP config register 4 read-write, found only on fsc chips */
489#define ESP_CONFIG4_BBTE 0x01 /* Back-to-Back transfer enable */
490#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode */
491#define ESP_CONFIG4_EAN 0x04 /* Enable Active Negotiation */
492
493/* ESP command register read-write */
494/* Group 1 commands: These may be sent at any point in time to the ESP
495 * chip. None of them can generate interrupts except
496 * the "SCSI bus reset" command, and then only if you have not disabled
497 * SCSI reset interrupts in the config1 ESP register.
498 */
499#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
500#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
501#define ESP_CMD_RC 0x02 /* Chip reset */
502#define ESP_CMD_RS 0x03 /* SCSI bus reset */
503
504/* Group 2 commands: ESP must be an initiator and connected to a target
505 * for these commands to work.
506 */
507#define ESP_CMD_TI 0x10 /* Transfer Information */
508#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
509#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
510#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
511#define ESP_CMD_SATN 0x1a /* Set ATN */
512#define ESP_CMD_RATN 0x1b /* De-assert ATN */
513
514/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
515 * to a target as the initiator for these commands to work.
516 */
517#define ESP_CMD_SMSG 0x20 /* Send message */
518#define ESP_CMD_SSTAT 0x21 /* Send status */
519#define ESP_CMD_SDATA 0x22 /* Send data */
520#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
521#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
522#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
523#define ESP_CMD_DCNCT 0x27 /* Disconnect */
524#define ESP_CMD_RMSG 0x28 /* Receive Message */
525#define ESP_CMD_RCMD 0x29 /* Receive Command */
526#define ESP_CMD_RDATA 0x2a /* Receive Data */
527#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
528
529/* Group 4 commands: The ESP must be in the disconnected state and must
530 * not be connected to any targets as initiator for
531 * these commands to work.
532 */
533#define ESP_CMD_RSEL 0x40 /* Reselect */
534#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
535#define ESP_CMD_SELA 0x42 /* Select w/ATN */
536#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
537#define ESP_CMD_ESEL 0x44 /* Enable selection */
538#define ESP_CMD_DSEL 0x45 /* Disable selections */
539#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
540#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
541
542/* This bit enables the ESP's DMA */
543#define ESP_CMD_DMA 0x80 /* Do DMA? */
544
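ESP_CMD_DMA is not a command of its own: it is ORed into one of the opcodes above so the chip moves the data through the external DMA engine rather than the FIFO. A hedged sketch of the idiom, assuming the esp_write() accessor used by the board drivers later in this diff and transfer-counter/command register fields named esp_tclow/esp_tcmed/esp_cmnd as in the related sparc esp driver:

/* Sketch only: start a DMA-assisted Transfer Information phase. */
static void esp_start_dma_ti(struct NCR_ESP *esp, __u32 addr, int count, int write)
{
	/* board-specific glue programs the external DMA engine */
	esp->dma_setup(esp, addr, count, write);

	/* load the chip's transfer counter, low byte then middle byte */
	esp_write(esp->eregs->esp_tclow, count & 0xff);
	esp_write(esp->eregs->esp_tcmed, (count >> 8) & 0xff);

	/* issue the command with the DMA bit set */
	esp_write(esp->eregs->esp_cmnd, ESP_CMD_TI | ESP_CMD_DMA);
}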
545/* ESP status register read-only */
546#define ESP_STAT_PIO 0x01 /* IO phase bit */
547#define ESP_STAT_PCD 0x02 /* CD phase bit */
548#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
549#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
550#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
551#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
552#define ESP_STAT_PERR 0x20 /* Parity error */
553#define ESP_STAT_SPAM 0x40 /* Real bad error */
554/* This indicates the 'interrupt pending' condition; it is a reserved
555 * bit on old revs of the ESP (ESP100, ESP100A, FAS100A).
556 */
557#define ESP_STAT_INTR 0x80 /* Interrupt */
558
559/* The status register can be masked with ESP_STAT_PMASK and compared
560 * with the following values to determine the current phase the ESP
561 * (at least thinks it) is in. For our purposes we also add our own
562 * software 'done' bit for our phase management engine.
563 */
564#define ESP_DOP (0) /* Data Out */
565#define ESP_DIP (ESP_STAT_PIO) /* Data In */
566#define ESP_CMDP (ESP_STAT_PCD) /* Command */
567#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
568#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
569#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
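As the comment above says, masking the status register with ESP_STAT_PMASK and comparing against these values is how the driver decides what the bus is doing. A small sketch of the decode, using the esp_read()/esp_status accessors that appear in the board code later in this diff:

/* Sketch: name the SCSI bus phase the chip currently reports. */
static const char *esp_phase_name(struct NCR_ESP *esp)
{
	switch (esp_read(esp->eregs->esp_status) & ESP_STAT_PMASK) {
	case ESP_DOP:	return "data out";
	case ESP_DIP:	return "data in";
	case ESP_CMDP:	return "command";
	case ESP_STATP:	return "status";
	case ESP_MOP:	return "message out";
	case ESP_MIP:	return "message in";
	default:	return "reserved phase";
	}
}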
570
571/* ESP interrupt register read-only */
572#define ESP_INTR_S 0x01 /* Select w/o ATN */
573#define ESP_INTR_SATN 0x02 /* Select w/ATN */
574#define ESP_INTR_RSEL 0x04 /* Reselected */
575#define ESP_INTR_FDONE 0x08 /* Function done */
576#define ESP_INTR_BSERV 0x10 /* Bus service */
577#define ESP_INTR_DC 0x20 /* Disconnect */
578#define ESP_INTR_IC 0x40 /* Illegal command given */
579#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
580
581/* Interrupt status macros */
582#define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR))
583#define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC))
584#define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN))
585#define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S))
586#define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \
587 (ESP_SELECT_WITHOUT_ATN_IRQ(esp)))
588#define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL))
589
590/* ESP sequence step register read-only */
591#define ESP_STEP_VBITS 0x07 /* Valid bits */
592#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
593#define ESP_STEP_SID 0x01 /* One msg byte sent */
594#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
595#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
596 * bytes to be lost
597 */
598#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
599
600/* Ho hum, some ESP's set the step register to this as well... */
601#define ESP_STEP_FINI5 0x05
602#define ESP_STEP_FINI6 0x06
603#define ESP_STEP_FINI7 0x07
604#define ESP_STEP_SOM 0x08 /* Synchronous Offset Max */
605
606/* ESP chip-test register read-write */
607#define ESP_TEST_TARG 0x01 /* Target test mode */
608#define ESP_TEST_INI 0x02 /* Initiator test mode */
609#define ESP_TEST_TS 0x04 /* Tristate test mode */
610
611/* ESP unique ID register read-only, found on fas236+fas100a+fsc only */
612#define ESP_UID_F100A 0x00 /* FAS100A */
613#define ESP_UID_F236 0x02 /* FAS236 */
614#define ESP_UID_FSC 0xa2 /* NCR53CF9x-2 */
615#define ESP_UID_REV 0x07 /* ESP revision */
616#define ESP_UID_FAM 0xf8 /* ESP family */
617
618/* ESP fifo flags register read-only */
619/* Note that the following implies a 16 byte FIFO on the ESP. */
620#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
621#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100,fsc) */
622#define ESP_FF_SSTEP 0xe0 /* Sequence step */
623
624/* ESP clock conversion factor register write-only */
625#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
626#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
627#define ESP_CCF_F2 0x02 /* 10MHz */
628#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
629#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
630#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
631#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
632#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
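The factor programmed here is, in effect, the chip clock divided by 5 MHz and rounded up, with the value 8 encoded as 0; that is why ESP_CCF_F0 covers the 35.01MHz-40MHz band. A hedged sketch of the derivation (the exact clamping policy belongs to the core driver, not this header):

/* Sketch: clock conversion factor from the board clock in Hz. */
static unsigned char esp_ccf_from_hz(unsigned long cfreq)
{
	unsigned long mhz = cfreq / 1000000;
	unsigned char ccf = (mhz + 4) / 5;	/* divide by 5 MHz, round up */

	if (ccf < 2)
		ccf = 2;		/* ESP_CCF_NEVER (1) must not be used */
	return ccf & 7;			/* 8 is written as 0 == ESP_CCF_F0 */
}

For the 40 MHz clock blz1230.c uses below, this yields (40 + 4) / 5 = 8, written as ESP_CCF_F0.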
633
634#define ESP_BUS_TIMEOUT 275 /* In milliseconds */
635#define ESP_TIMEO_CONST 8192
636#define FSC_TIMEO_CONST 7668
637#define ESP_NEG_DEFP(mhz, cfact) \
638 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
639#define FSC_NEG_DEFP(mhz, cfact) \
640 ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (7668 * (cfact)))
641#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
642#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
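Plugging in the 40 MHz clock that the Blizzard code at the end of this diff uses makes the intent of these macros concrete. ESP_NEG_DEFP/FSC_NEG_DEFP are derived from the 275 ms ESP_BUS_TIMEOUT above and are presumably the value the core driver programs as its selection time-out; ESP_MHZ_TO_CYCLE gives the clock period in picoseconds:

/* Worked example: cfreq = 40000000 (Hz), clock conversion factor 8.
 *
 *   ESP_NEG_DEFP(40000000, 8)
 *     = (275 * (40000000 / 1000)) / (8192 * 8)
 *     = 11000000 / 65536
 *     = 167
 *
 *   ESP_MHZ_TO_CYCLE(40000000)
 *     = 1000000000 / (40000000 / 1000)
 *     = 25000        (25 ns clock period, expressed in picoseconds)
 */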
643
644
645/* UGLY, UGLY, UGLY! */
646extern int nesps, esps_in_use, esps_running;
647
648/* For our interrupt engine. */
649#define for_each_esp(esp) \
650 for((esp) = espchain; (esp); (esp) = (esp)->next)
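espchain is the global chain of registered chips that the macro walks via the per-chip next pointer; it exists so the shared interrupt path can poll every ESP in the system. A short sketch of the idiom:

/* Sketch: count how many registered chips currently assert an interrupt. */
static int esp_pending_irqs(void)
{
	struct NCR_ESP *esp;
	int pending = 0;

	for_each_esp(esp) {
		if (esp->dma_irq_p(esp))
			pending++;
	}
	return pending;
}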
651
652
653/* External functions */
654extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs);
655extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *, int);
656extern void esp_deallocate(struct NCR_ESP *);
657extern void esp_release(void);
658extern void esp_initialize(struct NCR_ESP *);
659extern irqreturn_t esp_intr(int, void *);
660extern const char *esp_info(struct Scsi_Host *);
661extern int esp_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
662extern int esp_abort(Scsi_Cmnd *);
663extern int esp_reset(Scsi_Cmnd *);
664extern int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length,
665 int inout);
666extern int esp_slave_alloc(struct scsi_device *);
667extern void esp_slave_destroy(struct scsi_device *);
668#endif /* !(NCR53C9X_H) */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index d7235f42cf5f..bfd0e64964ac 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -859,44 +859,31 @@ static int setinqserial(struct aac_dev *dev, void *data, int cid)
859 le32_to_cpu(dev->adapter_info.serial[0]), cid); 859 le32_to_cpu(dev->adapter_info.serial[0]), cid);
860} 860}
861 861
862static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code, 862static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
863 u8 a_sense_code, u8 incorrect_length, 863 u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
864 u8 bit_pointer, u16 field_pointer,
865 u32 residue)
866{ 864{
867 sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */ 865 u8 *sense_buf = (u8 *)sense_data;
866 /* Sense data valid, err code 70h */
867 sense_buf[0] = 0x70; /* No info field */
868 sense_buf[1] = 0; /* Segment number, always zero */ 868 sense_buf[1] = 0; /* Segment number, always zero */
869 869
870 if (incorrect_length) { 870 sense_buf[2] = sense_key; /* Sense key */
871 sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
872 sense_buf[3] = BYTE3(residue);
873 sense_buf[4] = BYTE2(residue);
874 sense_buf[5] = BYTE1(residue);
875 sense_buf[6] = BYTE0(residue);
876 } else
877 sense_buf[2] = sense_key; /* Sense key */
878
879 if (sense_key == ILLEGAL_REQUEST)
880 sense_buf[7] = 10; /* Additional sense length */
881 else
882 sense_buf[7] = 6; /* Additional sense length */
883 871
884 sense_buf[12] = sense_code; /* Additional sense code */ 872 sense_buf[12] = sense_code; /* Additional sense code */
885 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */ 873 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
874
886 if (sense_key == ILLEGAL_REQUEST) { 875 if (sense_key == ILLEGAL_REQUEST) {
887 sense_buf[15] = 0; 876 sense_buf[7] = 10; /* Additional sense length */
888 877
889 if (sense_code == SENCODE_INVALID_PARAM_FIELD) 878 sense_buf[15] = bit_pointer;
890 sense_buf[15] = 0x80;/* Std sense key specific field */
891 /* Illegal parameter is in the parameter block */ 879 /* Illegal parameter is in the parameter block */
892
893 if (sense_code == SENCODE_INVALID_CDB_FIELD) 880 if (sense_code == SENCODE_INVALID_CDB_FIELD)
894 sense_buf[15] = 0xc0;/* Std sense key specific field */ 881 sense_buf[15] |= 0xc0;/* Std sense key specific field */
895 /* Illegal parameter is in the CDB block */ 882 /* Illegal parameter is in the CDB block */
896 sense_buf[15] |= bit_pointer;
897 sense_buf[16] = field_pointer >> 8; /* MSB */ 883 sense_buf[16] = field_pointer >> 8; /* MSB */
898 sense_buf[17] = field_pointer; /* LSB */ 884 sense_buf[17] = field_pointer; /* LSB */
899 } 885 } else
886 sense_buf[7] = 6; /* Additional sense length */
900} 887}
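The rewritten helper drops the old 0xF0 "valid information field" variant and the incorrect-length (ILI/residue) handling, and always emits plain fixed-format sense data. For reference, a self-contained sketch of the byte layout it now produces for an ILLEGAL REQUEST / INVALID FIELD IN CDB condition with a field pointer; offsets follow the SPC fixed-format sense definition, and the numeric key/ASC values are the standard SCSI ones, not constants from this driver:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char sense[18];
	unsigned short field_pointer = 2;	/* offending CDB byte */
	unsigned int i;

	memset(sense, 0, sizeof(sense));
	sense[0]  = 0x70;	/* current error, fixed format, INFO not valid */
	sense[2]  = 0x05;	/* sense key: ILLEGAL REQUEST */
	sense[7]  = 10;		/* additional sense length */
	sense[12] = 0x24;	/* ASC: INVALID FIELD IN CDB */
	sense[13] = 0x00;	/* ASCQ */
	sense[15] = 0xc0;	/* SKSV | C/D: the error is in the CDB */
	sense[16] = field_pointer >> 8;		/* field pointer, MSB */
	sense[17] = field_pointer & 0xff;	/* field pointer, LSB */

	for (i = 0; i < sizeof(sense); i++)
		printf("%02x ", sense[i]);
	printf("\n");
	return 0;
}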
901 888
902static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba) 889static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
@@ -906,11 +893,9 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
906 dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); 893 dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
907 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 894 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
908 SAM_STAT_CHECK_CONDITION; 895 SAM_STAT_CHECK_CONDITION;
909 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 896 set_sense(&dev->fsa_dev[cid].sense_data,
910 HARDWARE_ERROR, 897 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
911 SENCODE_INTERNAL_TARGET_FAILURE, 898 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
912 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
913 0, 0);
914 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 899 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
915 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 900 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
916 SCSI_SENSE_BUFFERSIZE)); 901 SCSI_SENSE_BUFFERSIZE));
@@ -1520,11 +1505,9 @@ static void io_callback(void *context, struct fib * fibptr)
1520 le32_to_cpu(readreply->status)); 1505 le32_to_cpu(readreply->status));
1521#endif 1506#endif
1522 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1507 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1523 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1508 set_sense(&dev->fsa_dev[cid].sense_data,
1524 HARDWARE_ERROR, 1509 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1525 SENCODE_INTERNAL_TARGET_FAILURE, 1510 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1526 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1527 0, 0);
1528 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1511 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1529 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1512 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1530 SCSI_SENSE_BUFFERSIZE)); 1513 SCSI_SENSE_BUFFERSIZE));
@@ -1733,11 +1716,9 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1733 le32_to_cpu(synchronizereply->status)); 1716 le32_to_cpu(synchronizereply->status));
1734 cmd->result = DID_OK << 16 | 1717 cmd->result = DID_OK << 16 |
1735 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1718 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1736 set_sense((u8 *)&dev->fsa_dev[cid].sense_data, 1719 set_sense(&dev->fsa_dev[cid].sense_data,
1737 HARDWARE_ERROR, 1720 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
1738 SENCODE_INTERNAL_TARGET_FAILURE, 1721 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
1739 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1740 0, 0);
1741 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1722 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1742 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1723 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1743 SCSI_SENSE_BUFFERSIZE)); 1724 SCSI_SENSE_BUFFERSIZE));
@@ -1945,10 +1926,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1945 { 1926 {
1946 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0])); 1927 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
1947 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 1928 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1948 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1929 set_sense(&dev->fsa_dev[cid].sense_data,
1949 ILLEGAL_REQUEST, 1930 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
1950 SENCODE_INVALID_COMMAND, 1931 ASENCODE_INVALID_COMMAND, 0, 0);
1951 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1952 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 1932 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1953 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 1933 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
1954 SCSI_SENSE_BUFFERSIZE)); 1934 SCSI_SENSE_BUFFERSIZE));
@@ -1995,10 +1975,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1995 scsicmd->result = DID_OK << 16 | 1975 scsicmd->result = DID_OK << 16 |
1996 COMMAND_COMPLETE << 8 | 1976 COMMAND_COMPLETE << 8 |
1997 SAM_STAT_CHECK_CONDITION; 1977 SAM_STAT_CHECK_CONDITION;
1998 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 1978 set_sense(&dev->fsa_dev[cid].sense_data,
1999 ILLEGAL_REQUEST, 1979 ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
2000 SENCODE_INVALID_CDB_FIELD, 1980 ASENCODE_NO_SENSE, 7, 2);
2001 ASENCODE_NO_SENSE, 0, 7, 2, 0);
2002 memcpy(scsicmd->sense_buffer, 1981 memcpy(scsicmd->sense_buffer,
2003 &dev->fsa_dev[cid].sense_data, 1982 &dev->fsa_dev[cid].sense_data,
2004 min_t(size_t, 1983 min_t(size_t,
@@ -2254,9 +2233,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
2254 */ 2233 */
2255 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0])); 2234 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
2256 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 2235 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
2257 set_sense((u8 *) &dev->fsa_dev[cid].sense_data, 2236 set_sense(&dev->fsa_dev[cid].sense_data,
2258 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, 2237 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
2259 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0); 2238 ASENCODE_INVALID_COMMAND, 0, 0);
2260 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2239 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
2261 min_t(size_t, 2240 min_t(size_t,
2262 sizeof(dev->fsa_dev[cid].sense_data), 2241 sizeof(dev->fsa_dev[cid].sense_data),
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index f8afa358b6b6..abef05146d75 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
243 * Search the list of AdapterFibContext addresses on the adapter 243 * Search the list of AdapterFibContext addresses on the adapter
244 * to be sure this is a valid address 244 * to be sure this is a valid address
245 */ 245 */
246 spin_lock_irqsave(&dev->fib_lock, flags);
246 entry = dev->fib_list.next; 247 entry = dev->fib_list.next;
247 fibctx = NULL; 248 fibctx = NULL;
248 249
@@ -251,24 +252,25 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
251 /* 252 /*
252 * Extract the AdapterFibContext from the Input parameters. 253 * Extract the AdapterFibContext from the Input parameters.
253 */ 254 */
254 if (fibctx->unique == f.fibctx) { /* We found a winner */ 255 if (fibctx->unique == f.fibctx) { /* We found a winner */
255 break; 256 break;
256 } 257 }
257 entry = entry->next; 258 entry = entry->next;
258 fibctx = NULL; 259 fibctx = NULL;
259 } 260 }
260 if (!fibctx) { 261 if (!fibctx) {
262 spin_unlock_irqrestore(&dev->fib_lock, flags);
261 dprintk ((KERN_INFO "Fib Context not found\n")); 263 dprintk ((KERN_INFO "Fib Context not found\n"));
262 return -EINVAL; 264 return -EINVAL;
263 } 265 }
264 266
265 if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || 267 if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
266 (fibctx->size != sizeof(struct aac_fib_context))) { 268 (fibctx->size != sizeof(struct aac_fib_context))) {
269 spin_unlock_irqrestore(&dev->fib_lock, flags);
267 dprintk ((KERN_INFO "Fib Context corrupt?\n")); 270 dprintk ((KERN_INFO "Fib Context corrupt?\n"));
268 return -EINVAL; 271 return -EINVAL;
269 } 272 }
270 status = 0; 273 status = 0;
271 spin_lock_irqsave(&dev->fib_lock, flags);
272 /* 274 /*
273 * If there are no fibs to send back, then either wait or return 275 * If there are no fibs to send back, then either wait or return
274 * -EAGAIN 276 * -EAGAIN
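The substance of this hunk is lock coverage: the walk over dev->fib_list, and the sanity checks on whatever entry it finds, now happen entirely under dev->fib_lock, and each early-exit path added above releases the lock before returning. Reduced to a hedged sketch (the list-membership field name is an assumption, not taken from this hunk):

/* Sketch of the locking rule this hunk establishes. */
static struct aac_fib_context *aac_find_fibctx(struct aac_dev *dev, u32 unique,
						unsigned long *flags)
{
	struct list_head *entry;

	spin_lock_irqsave(&dev->fib_lock, *flags);
	for (entry = dev->fib_list.next; entry != &dev->fib_list;
	     entry = entry->next) {
		struct aac_fib_context *fibctx =
			list_entry(entry, struct aac_fib_context, next);

		if (fibctx->unique == unique)
			return fibctx;		/* caller still holds fib_lock */
	}
	spin_unlock_irqrestore(&dev->fib_lock, *flags);
	return NULL;				/* lock dropped on the failure path */
}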
@@ -414,8 +416,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
414 * @arg: ioctl arguments 416 * @arg: ioctl arguments
415 * 417 *
416 * This routine returns the driver version. 418 * This routine returns the driver version.
417 * Under Linux, there have been no version incompatibilities, so this is 419 * Under Linux, there have been no version incompatibilities, so this is
418 * simple! 420 * simple!
419 */ 421 */
420 422
421static int check_revision(struct aac_dev *dev, void __user *arg) 423static int check_revision(struct aac_dev *dev, void __user *arg)
@@ -463,7 +465,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
463 u32 data_dir; 465 u32 data_dir;
464 void __user *sg_user[32]; 466 void __user *sg_user[32];
465 void *sg_list[32]; 467 void *sg_list[32];
466 u32 sg_indx = 0; 468 u32 sg_indx = 0;
467 u32 byte_count = 0; 469 u32 byte_count = 0;
468 u32 actual_fibsize64, actual_fibsize = 0; 470 u32 actual_fibsize64, actual_fibsize = 0;
469 int i; 471 int i;
@@ -517,11 +519,11 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
517 // Fix up srb for endian and force some values 519 // Fix up srb for endian and force some values
518 520
519 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this 521 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
520 srbcmd->channel = cpu_to_le32(user_srbcmd->channel); 522 srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
521 srbcmd->id = cpu_to_le32(user_srbcmd->id); 523 srbcmd->id = cpu_to_le32(user_srbcmd->id);
522 srbcmd->lun = cpu_to_le32(user_srbcmd->lun); 524 srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
523 srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); 525 srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
524 srbcmd->flags = cpu_to_le32(flags); 526 srbcmd->flags = cpu_to_le32(flags);
525 srbcmd->retry_limit = 0; // Obsolete parameter 527 srbcmd->retry_limit = 0; // Obsolete parameter
526 srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); 528 srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
527 memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); 529 memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
@@ -786,9 +788,9 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
786 pci_info.bus = dev->pdev->bus->number; 788 pci_info.bus = dev->pdev->bus->number;
787 pci_info.slot = PCI_SLOT(dev->pdev->devfn); 789 pci_info.slot = PCI_SLOT(dev->pdev->devfn);
788 790
789 if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) { 791 if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
790 dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n")); 792 dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
791 return -EFAULT; 793 return -EFAULT;
792 } 794 }
793 return 0; 795 return 0;
794} 796}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index fb0886140dd7..e80d2a0c46af 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1130,31 +1130,29 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
1130 if (error < 0) 1130 if (error < 0)
1131 goto out_deinit; 1131 goto out_deinit;
1132 1132
1133 if (!(aac->adapter_info.options & AAC_OPT_NEW_COMM)) {
1134 error = pci_set_dma_max_seg_size(pdev, 65536);
1135 if (error)
1136 goto out_deinit;
1137 }
1138
1139 /* 1133 /*
1140 * Lets override negotiations and drop the maximum SG limit to 34 1134 * Lets override negotiations and drop the maximum SG limit to 34
1141 */ 1135 */
1142 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) && 1136 if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
1143 (aac->scsi_host_ptr->sg_tablesize > 34)) { 1137 (shost->sg_tablesize > 34)) {
1144 aac->scsi_host_ptr->sg_tablesize = 34; 1138 shost->sg_tablesize = 34;
1145 aac->scsi_host_ptr->max_sectors 1139 shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1146 = (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
1147 } 1140 }
1148 1141
1149 if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) && 1142 if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
1150 (aac->scsi_host_ptr->sg_tablesize > 17)) { 1143 (shost->sg_tablesize > 17)) {
1151 aac->scsi_host_ptr->sg_tablesize = 17; 1144 shost->sg_tablesize = 17;
1152 aac->scsi_host_ptr->max_sectors 1145 shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1153 = (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
1154 } 1146 }
1155 1147
1148 error = pci_set_dma_max_seg_size(pdev,
1149 (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
1150 (shost->max_sectors << 9) : 65536);
1151 if (error)
1152 goto out_deinit;
1153
1156 /* 1154 /*
1157 * Firware printf works only with older firmware. 1155 * Firmware printf works only with older firmware.
1158 */ 1156 */
1159 if (aac_drivers[index].quirks & AAC_QUIRK_34SG) 1157 if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
1160 aac->printf_enabled = 1; 1158 aac->printf_enabled = 1;
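Moving the pci_set_dma_max_seg_size() call below the quirk handling matters because, for NEW_COMM adapters, the segment limit is now derived from a max_sectors value the quirks may just have lowered. Worked through for the AAC_QUIRK_34SG case:

/*
 * sg_tablesize = 34
 * max_sectors  = 34 * 8 + 112 = 384 sectors
 * DMA segment limit, AAC_OPT_NEW_COMM set   = 384 << 9 = 196608 bytes (192 KiB)
 * DMA segment limit, AAC_OPT_NEW_COMM clear = 65536 bytes, as before
 */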
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 374ed025dc5a..ccef891d642f 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -12261,7 +12261,7 @@ static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
12261/* 12261/*
12262 * Write the EEPROM from 'cfg_buf'. 12262 * Write the EEPROM from 'cfg_buf'.
12263 */ 12263 */
12264void __devinit 12264static void __devinit
12265AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf) 12265AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
12266{ 12266{
12267 ushort *wbuf; 12267 ushort *wbuf;
@@ -12328,7 +12328,7 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
12328/* 12328/*
12329 * Write the EEPROM from 'cfg_buf'. 12329 * Write the EEPROM from 'cfg_buf'.
12330 */ 12330 */
12331void __devinit 12331static void __devinit
12332AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf) 12332AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
12333{ 12333{
12334 ushort *wbuf; 12334 ushort *wbuf;
@@ -12395,7 +12395,7 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
12395/* 12395/*
12396 * Write the EEPROM from 'cfg_buf'. 12396 * Write the EEPROM from 'cfg_buf'.
12397 */ 12397 */
12398void __devinit 12398static void __devinit
12399AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf) 12399AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
12400{ 12400{
12401 ushort *wbuf; 12401 ushort *wbuf;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a67e29f83ae5..57786502e3ec 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct class_device_attribute;
48/*The limit of outstanding scsi command that firmware can handle*/ 48/*The limit of outstanding scsi command that firmware can handle*/
49#define ARCMSR_MAX_OUTSTANDING_CMD 256 49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 320 50#define ARCMSR_MAX_FREECCB_NUM 320
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/08/30" 51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24"
52#define ARCMSR_SCSI_INITIATOR_ID 255 52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512 53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_XFER_SECTORS_B 4096 54#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -248,6 +248,7 @@ struct FIRMWARE_INFO
248#define ARCMSR_MESSAGE_START_BGRB 0x00060008 248#define ARCMSR_MESSAGE_START_BGRB 0x00060008
249#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008 249#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
250#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008 250#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
251#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
251/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */ 252/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
252#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000 253#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
253/* ioctl transfer */ 254/* ioctl transfer */
@@ -256,6 +257,7 @@ struct FIRMWARE_INFO
256#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002 257#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
257#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004 258#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
258#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008 259#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
260#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
259 261
260/* data tunnel buffer between user space program and its firmware */ 262/* data tunnel buffer between user space program and its firmware */
261/* user space data to iop 128bytes */ 263/* user space data to iop 128bytes */
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f4a202e8df26..4f9ff32cfed0 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -315,9 +315,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
315 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F)); 315 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
316 } 316 }
317 317
318 reg = (struct MessageUnit_B *)(dma_coherent +
319 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
320
321 dma_addr = dma_coherent_handle; 318 dma_addr = dma_coherent_handle;
322 ccb_tmp = (struct CommandControlBlock *)dma_coherent; 319 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
323 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 320 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
@@ -371,8 +368,8 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
371 368
372out: 369out:
373 dma_free_coherent(&acb->pdev->dev, 370 dma_free_coherent(&acb->pdev->dev,
374 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20, 371 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
375 acb->dma_coherent, acb->dma_coherent_handle); 372 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
376 return -ENOMEM; 373 return -ENOMEM;
377} 374}
378 375
@@ -509,6 +506,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
509 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 506 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
510 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN 507 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
511 , reg->iop2drv_doorbell_reg); 508 , reg->iop2drv_doorbell_reg);
509 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
512 return 0x00; 510 return 0x00;
513 } 511 }
514 msleep(10); 512 msleep(10);
@@ -748,6 +746,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t fla
748 , ccb->startdone 746 , ccb->startdone
749 , atomic_read(&acb->ccboutstandingcount)); 747 , atomic_read(&acb->ccboutstandingcount));
750 } 748 }
749 else
751 arcmsr_report_ccb_state(acb, ccb, flag_ccb); 750 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
752} 751}
753 752
@@ -886,7 +885,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
886 } 885 }
887} 886}
888 887
889static void arcmsr_build_ccb(struct AdapterControlBlock *acb, 888static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
890 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd) 889 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
891{ 890{
892 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; 891 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
@@ -906,6 +905,8 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
906 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); 905 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
907 906
908 nseg = scsi_dma_map(pcmd); 907 nseg = scsi_dma_map(pcmd);
908 if (nseg > ARCMSR_MAX_SG_ENTRIES)
909 return FAILED;
909 BUG_ON(nseg < 0); 910 BUG_ON(nseg < 0);
910 911
911 if (nseg) { 912 if (nseg) {
@@ -946,6 +947,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
946 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; 947 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
947 ccb->ccb_flags |= CCB_FLAG_WRITE; 948 ccb->ccb_flags |= CCB_FLAG_WRITE;
948 } 949 }
950 return SUCCESS;
949} 951}
950 952
951static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) 953static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
@@ -1036,18 +1038,22 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1036 switch (acb->adapter_type) { 1038 switch (acb->adapter_type) {
1037 case ACB_ADAPTER_TYPE_A: { 1039 case ACB_ADAPTER_TYPE_A: {
1038 iounmap(acb->pmuA); 1040 iounmap(acb->pmuA);
1041 dma_free_coherent(&acb->pdev->dev,
1042 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
1043 acb->dma_coherent,
1044 acb->dma_coherent_handle);
1039 break; 1045 break;
1040 } 1046 }
1041 case ACB_ADAPTER_TYPE_B: { 1047 case ACB_ADAPTER_TYPE_B: {
1042 struct MessageUnit_B *reg = acb->pmuB; 1048 struct MessageUnit_B *reg = acb->pmuB;
1043 iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); 1049 iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
1044 iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); 1050 iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
1051 dma_free_coherent(&acb->pdev->dev,
1052 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
1053 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
1045 } 1054 }
1046 } 1055 }
1047 dma_free_coherent(&acb->pdev->dev, 1056
1048 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
1049 acb->dma_coherent,
1050 acb->dma_coherent_handle);
1051} 1057}
1052 1058
1053void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 1059void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
@@ -1273,7 +1279,9 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1273 return 1; 1279 return 1;
1274 1280
1275 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); 1281 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
1276 1282 /* if the last doorbell-interrupt clear is still a posted (cached) write, this read forces the hardware to commit the clear bit */
1283 readl(reg->iop2drv_doorbell_reg);
1284 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
1277 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1285 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1278 arcmsr_iop2drv_data_wrote_handle(acb); 1286 arcmsr_iop2drv_data_wrote_handle(acb);
1279 } 1287 }
@@ -1380,12 +1388,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1380 1388
1381 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1389 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1382 unsigned long *ver_addr; 1390 unsigned long *ver_addr;
1383 dma_addr_t buf_handle;
1384 uint8_t *pQbuffer, *ptmpQbuffer; 1391 uint8_t *pQbuffer, *ptmpQbuffer;
1385 int32_t allxfer_len = 0; 1392 int32_t allxfer_len = 0;
1393 void *tmp;
1386 1394
1387 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); 1395 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
1388 if (!ver_addr) { 1396 ver_addr = (unsigned long *)tmp;
1397 if (!tmp) {
1389 retvalue = ARCMSR_MESSAGE_FAIL; 1398 retvalue = ARCMSR_MESSAGE_FAIL;
1390 goto message_out; 1399 goto message_out;
1391 } 1400 }
@@ -1421,18 +1430,19 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1421 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len); 1430 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
1422 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1431 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1423 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1432 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1424 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); 1433 kfree(tmp);
1425 } 1434 }
1426 break; 1435 break;
1427 1436
1428 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1437 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1429 unsigned long *ver_addr; 1438 unsigned long *ver_addr;
1430 dma_addr_t buf_handle;
1431 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1439 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1432 uint8_t *pQbuffer, *ptmpuserbuffer; 1440 uint8_t *pQbuffer, *ptmpuserbuffer;
1441 void *tmp;
1433 1442
1434 ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle); 1443 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
1435 if (!ver_addr) { 1444 ver_addr = (unsigned long *)tmp;
1445 if (!tmp) {
1436 retvalue = ARCMSR_MESSAGE_FAIL; 1446 retvalue = ARCMSR_MESSAGE_FAIL;
1437 goto message_out; 1447 goto message_out;
1438 } 1448 }
@@ -1482,7 +1492,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1482 retvalue = ARCMSR_MESSAGE_FAIL; 1492 retvalue = ARCMSR_MESSAGE_FAIL;
1483 } 1493 }
1484 } 1494 }
1485 pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle); 1495 kfree(tmp);
1486 } 1496 }
1487 break; 1497 break;
1488 1498
@@ -1682,8 +1692,11 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1682 ccb = arcmsr_get_freeccb(acb); 1692 ccb = arcmsr_get_freeccb(acb);
1683 if (!ccb) 1693 if (!ccb)
1684 return SCSI_MLQUEUE_HOST_BUSY; 1694 return SCSI_MLQUEUE_HOST_BUSY;
1685 1695 if ( arcmsr_build_ccb( acb, ccb, cmd ) == FAILED ) {
1686 arcmsr_build_ccb(acb, ccb, cmd); 1696 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
1697 cmd->scsi_done(cmd);
1698 return 0;
1699 }
1687 arcmsr_post_ccb(acb, ccb); 1700 arcmsr_post_ccb(acb, ccb);
1688 return 0; 1701 return 0;
1689} 1702}
@@ -1844,7 +1857,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
1844 } 1857 }
1845} 1858}
1846 1859
1847static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \ 1860static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
1848 struct CommandControlBlock *poll_ccb) 1861 struct CommandControlBlock *poll_ccb)
1849{ 1862{
1850 struct MessageUnit_B *reg = acb->pmuB; 1863 struct MessageUnit_B *reg = acb->pmuB;
@@ -1878,7 +1891,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
1878 (acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1891 (acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
1879 poll_ccb_done = (ccb == poll_ccb) ? 1:0; 1892 poll_ccb_done = (ccb == poll_ccb) ? 1:0;
1880 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 1893 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
1881 if (ccb->startdone == ARCMSR_CCB_ABORTED) { 1894 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
1882 printk(KERN_NOTICE "arcmsr%d: \ 1895 printk(KERN_NOTICE "arcmsr%d: \
1883 scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n" 1896 scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
1884 ,acb->host->host_no 1897 ,acb->host->host_no
@@ -1901,7 +1914,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
1901 } /*drain reply FIFO*/ 1914 } /*drain reply FIFO*/
1902} 1915}
1903 1916
1904static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \ 1917static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1905 struct CommandControlBlock *poll_ccb) 1918 struct CommandControlBlock *poll_ccb)
1906{ 1919{
1907 switch (acb->adapter_type) { 1920 switch (acb->adapter_type) {
@@ -2026,6 +2039,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2026 do { 2039 do {
2027 firmware_state = readl(reg->iop2drv_doorbell_reg); 2040 firmware_state = readl(reg->iop2drv_doorbell_reg);
2028 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); 2041 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
2042 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
2029 } 2043 }
2030 break; 2044 break;
2031 } 2045 }
@@ -2090,19 +2104,39 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2090 } 2104 }
2091} 2105}
2092 2106
2107static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2108{
2109 switch (acb->adapter_type) {
2110 case ACB_ADAPTER_TYPE_A:
2111 return;
2112 case ACB_ADAPTER_TYPE_B:
2113 {
2114 struct MessageUnit_B *reg = acb->pmuB;
2115 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
2116 if(arcmsr_hbb_wait_msgint_ready(acb)) {
2117 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
2118 return;
2119 }
2120 }
2121 break;
2122 }
2123 return;
2124}
2125
2093static void arcmsr_iop_init(struct AdapterControlBlock *acb) 2126static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2094{ 2127{
2095 uint32_t intmask_org; 2128 uint32_t intmask_org;
2096 2129
2097 arcmsr_wait_firmware_ready(acb);
2098 arcmsr_iop_confirm(acb);
2099 /* disable all outbound interrupt */ 2130 /* disable all outbound interrupt */
2100 intmask_org = arcmsr_disable_outbound_ints(acb); 2131 intmask_org = arcmsr_disable_outbound_ints(acb);
2132 arcmsr_wait_firmware_ready(acb);
2133 arcmsr_iop_confirm(acb);
2101 arcmsr_get_firmware_spec(acb); 2134 arcmsr_get_firmware_spec(acb);
2102 /*start background rebuild*/ 2135 /*start background rebuild*/
2103 arcmsr_start_adapter_bgrb(acb); 2136 arcmsr_start_adapter_bgrb(acb);
2104 /* empty doorbell Qbuffer if door bell ringed */ 2137 /* empty doorbell Qbuffer if door bell ringed */
2105 arcmsr_clear_doorbell_queue_buffer(acb); 2138 arcmsr_clear_doorbell_queue_buffer(acb);
2139 arcmsr_enable_eoi_mode(acb);
2106 /* enable outbound Post Queue,outbound doorbell Interrupt */ 2140 /* enable outbound Post Queue,outbound doorbell Interrupt */
2107 arcmsr_enable_outbound_ints(acb, intmask_org); 2141 arcmsr_enable_outbound_ints(acb, intmask_org);
2108 acb->acb_flags |= ACB_F_IOP_INITED; 2142 acb->acb_flags |= ACB_F_IOP_INITED;
@@ -2275,6 +2309,7 @@ static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
2275 arcmsr_start_adapter_bgrb(acb); 2309 arcmsr_start_adapter_bgrb(acb);
2276 /* empty doorbell Qbuffer if door bell ringed */ 2310 /* empty doorbell Qbuffer if door bell ringed */
2277 arcmsr_clear_doorbell_queue_buffer(acb); 2311 arcmsr_clear_doorbell_queue_buffer(acb);
2312 arcmsr_enable_eoi_mode(acb);
2278 /* enable outbound Post Queue,outbound doorbell Interrupt */ 2313 /* enable outbound Post Queue,outbound doorbell Interrupt */
2279 arcmsr_enable_outbound_ints(acb, intmask_org); 2314 arcmsr_enable_outbound_ints(acb, intmask_org);
2280 acb->acb_flags |= ACB_F_IOP_INITED; 2315 acb->acb_flags |= ACB_F_IOP_INITED;
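Several of the hunks above add the same three-step acknowledgement for type-B (HBB) boards: clear the doorbell source, read the register back so the clear is not left as a pending posted write, then raise the new END_OF_INTERRUPT doorbell so the IOP knows the interrupt was consumed. Collected into one hedged sketch (the wrapper function is hypothetical; its body mirrors the added lines):

/* Sketch: acknowledge an HBB doorbell interrupt end to end. */
static void arcmsr_hbb_ack_doorbell(struct MessageUnit_B *reg,
				    uint32_t outbound_doorbell)
{
	writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);	/* clear the source bits */
	readl(reg->iop2drv_doorbell_reg);			/* flush the posted clear */
	writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
	       reg->drv2iop_doorbell_reg);			/* signal end of interrupt */
}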
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index eceacf6d49ea..3bedf2466bd1 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1790,7 +1790,7 @@ int acornscsi_starttransfer(AS_Host *host)
1790 return 0; 1790 return 0;
1791 } 1791 }
1792 1792
1793 residual = host->SCpnt->request_bufflen - host->scsi.SCp.scsi_xferred; 1793 residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred;
1794 1794
1795 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer); 1795 sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
1796 sbic_arm_writenext(host->scsi.io_port, residual >> 16); 1796 sbic_arm_writenext(host->scsi.io_port, residual >> 16);
@@ -2270,7 +2270,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2270 case 0x4b: /* -> PHASE_STATUSIN */ 2270 case 0x4b: /* -> PHASE_STATUSIN */
2271 case 0x8b: /* -> PHASE_STATUSIN */ 2271 case 0x8b: /* -> PHASE_STATUSIN */
2272 /* DATA IN -> STATUS */ 2272 /* DATA IN -> STATUS */
2273 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2273 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2274 acornscsi_sbic_xfcount(host); 2274 acornscsi_sbic_xfcount(host);
2275 acornscsi_dma_stop(host); 2275 acornscsi_dma_stop(host);
2276 acornscsi_readstatusbyte(host); 2276 acornscsi_readstatusbyte(host);
@@ -2281,7 +2281,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2281 case 0x4e: /* -> PHASE_MSGOUT */ 2281 case 0x4e: /* -> PHASE_MSGOUT */
2282 case 0x8e: /* -> PHASE_MSGOUT */ 2282 case 0x8e: /* -> PHASE_MSGOUT */
2283 /* DATA IN -> MESSAGE OUT */ 2283 /* DATA IN -> MESSAGE OUT */
2284 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2284 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2285 acornscsi_sbic_xfcount(host); 2285 acornscsi_sbic_xfcount(host);
2286 acornscsi_dma_stop(host); 2286 acornscsi_dma_stop(host);
2287 acornscsi_sendmessage(host); 2287 acornscsi_sendmessage(host);
@@ -2291,7 +2291,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2291 case 0x4f: /* message in */ 2291 case 0x4f: /* message in */
2292 case 0x8f: /* message in */ 2292 case 0x8f: /* message in */
2293 /* DATA IN -> MESSAGE IN */ 2293 /* DATA IN -> MESSAGE IN */
2294 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2294 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2295 acornscsi_sbic_xfcount(host); 2295 acornscsi_sbic_xfcount(host);
2296 acornscsi_dma_stop(host); 2296 acornscsi_dma_stop(host);
2297 acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */ 2297 acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */
@@ -2319,7 +2319,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2319 case 0x4b: /* -> PHASE_STATUSIN */ 2319 case 0x4b: /* -> PHASE_STATUSIN */
2320 case 0x8b: /* -> PHASE_STATUSIN */ 2320 case 0x8b: /* -> PHASE_STATUSIN */
2321 /* DATA OUT -> STATUS */ 2321 /* DATA OUT -> STATUS */
2322 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2322 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2323 acornscsi_sbic_xfcount(host); 2323 acornscsi_sbic_xfcount(host);
2324 acornscsi_dma_stop(host); 2324 acornscsi_dma_stop(host);
2325 acornscsi_dma_adjust(host); 2325 acornscsi_dma_adjust(host);
@@ -2331,7 +2331,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2331 case 0x4e: /* -> PHASE_MSGOUT */ 2331 case 0x4e: /* -> PHASE_MSGOUT */
2332 case 0x8e: /* -> PHASE_MSGOUT */ 2332 case 0x8e: /* -> PHASE_MSGOUT */
2333 /* DATA OUT -> MESSAGE OUT */ 2333 /* DATA OUT -> MESSAGE OUT */
2334 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2334 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2335 acornscsi_sbic_xfcount(host); 2335 acornscsi_sbic_xfcount(host);
2336 acornscsi_dma_stop(host); 2336 acornscsi_dma_stop(host);
2337 acornscsi_dma_adjust(host); 2337 acornscsi_dma_adjust(host);
@@ -2342,7 +2342,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
2342 case 0x4f: /* message in */ 2342 case 0x4f: /* message in */
2343 case 0x8f: /* message in */ 2343 case 0x8f: /* message in */
2344 /* DATA OUT -> MESSAGE IN */ 2344 /* DATA OUT -> MESSAGE IN */
2345 host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen - 2345 host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
2346 acornscsi_sbic_xfcount(host); 2346 acornscsi_sbic_xfcount(host);
2347 acornscsi_dma_stop(host); 2347 acornscsi_dma_stop(host);
2348 acornscsi_dma_adjust(host); 2348 acornscsi_dma_adjust(host);
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index bb6550e31926..138a521ba1a8 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -18,17 +18,32 @@
18 * The scatter-gather list handling. This contains all 18 * The scatter-gather list handling. This contains all
19 * the yucky stuff that needs to be fixed properly. 19 * the yucky stuff that needs to be fixed properly.
20 */ 20 */
21
22/*
23 * copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
24 * entries of uninitialized memory. SCp is from scsi-ml and has a valid
25 * (possibly chained) sg-list
26 */
21static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max) 27static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
22{ 28{
23 int bufs = SCp->buffers_residual; 29 int bufs = SCp->buffers_residual;
24 30
31 /* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg()
32 * and to remove this BUG_ON. Use min() in its place.
33 */
25 BUG_ON(bufs + 1 > max); 34 BUG_ON(bufs + 1 > max);
26 35
27 sg_set_buf(sg, SCp->ptr, SCp->this_residual); 36 sg_set_buf(sg, SCp->ptr, SCp->this_residual);
28 37
29 if (bufs) 38 if (bufs) {
30 memcpy(sg + 1, SCp->buffer + 1, 39 struct scatterlist *src_sg;
31 sizeof(struct scatterlist) * bufs); 40 unsigned i;
41
42 for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i)
43 *(++sg) = *src_sg;
44 sg_mark_end(sg);
45 }
46
32 return bufs + 1; 47 return bufs + 1;
33} 48}
34 49
@@ -36,7 +51,7 @@ static inline int next_SCp(struct scsi_pointer *SCp)
36{ 51{
37 int ret = SCp->buffers_residual; 52 int ret = SCp->buffers_residual;
38 if (ret) { 53 if (ret) {
39 SCp->buffer++; 54 SCp->buffer = sg_next(SCp->buffer);
40 SCp->buffers_residual--; 55 SCp->buffers_residual--;
41 SCp->ptr = sg_virt(SCp->buffer); 56 SCp->ptr = sg_virt(SCp->buffer);
42 SCp->this_residual = SCp->buffer->length; 57 SCp->this_residual = SCp->buffer->length;
@@ -68,46 +83,46 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
68{ 83{
69 memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer)); 84 memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
70 85
71 if (SCpnt->use_sg) { 86 if (scsi_bufflen(SCpnt)) {
72 unsigned long len = 0; 87 unsigned long len = 0;
73 int buf;
74 88
75 SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer; 89 SCpnt->SCp.buffer = scsi_sglist(SCpnt);
76 SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1; 90 SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
77 SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer); 91 SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
78 SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length; 92 SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
79 SCpnt->SCp.phase = SCpnt->request_bufflen; 93 SCpnt->SCp.phase = scsi_bufflen(SCpnt);
80 94
81#ifdef BELT_AND_BRACES 95#ifdef BELT_AND_BRACES
82 /* 96 { /*
83 * Calculate correct buffer length. Some commands 97 * Calculate correct buffer length. Some commands
84 * come in with the wrong request_bufflen. 98 * come in with the wrong scsi_bufflen.
85 */ 99 */
86 for (buf = 0; buf <= SCpnt->SCp.buffers_residual; buf++) 100 struct scatterlist *sg;
87 len += SCpnt->SCp.buffer[buf].length; 101 unsigned i, sg_count = scsi_sg_count(SCpnt);
88 102
89 if (SCpnt->request_bufflen != len) 103 scsi_for_each_sg(SCpnt, sg, sg_count, i)
90 printk(KERN_WARNING "scsi%d.%c: bad request buffer " 104 len += sg->length;
91 "length %d, should be %ld\n", SCpnt->device->host->host_no, 105
92 '0' + SCpnt->device->id, SCpnt->request_bufflen, len); 106 if (scsi_bufflen(SCpnt) != len) {
93 SCpnt->request_bufflen = len; 107 printk(KERN_WARNING
108 "scsi%d.%c: bad request buffer "
109 "length %d, should be %ld\n",
110 SCpnt->device->host->host_no,
111 '0' + SCpnt->device->id,
112 scsi_bufflen(SCpnt), len);
113 /*
114 * FIXME: Totally naive fixup. We should abort
115 * with error
116 */
117 SCpnt->SCp.phase =
118 min_t(unsigned long, len,
119 scsi_bufflen(SCpnt));
120 }
121 }
94#endif 122#endif
95 } else { 123 } else {
96 SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer;
97 SCpnt->SCp.this_residual = SCpnt->request_bufflen;
98 SCpnt->SCp.phase = SCpnt->request_bufflen;
99 }
100
101 /*
102 * If the upper SCSI layers pass a buffer, but zero length,
103 * we aren't interested in the buffer pointer.
104 */
105 if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.ptr) {
106#if 0 //def BELT_AND_BRACES
107 printk(KERN_WARNING "scsi%d.%c: zero length buffer passed for "
108 "command ", SCpnt->host->host_no, '0' + SCpnt->target);
109 __scsi_print_command(SCpnt->cmnd);
110#endif
111 SCpnt->SCp.ptr = NULL; 124 SCpnt->SCp.ptr = NULL;
125 SCpnt->SCp.this_residual = 0;
126 SCpnt->SCp.phase = 0;
112 } 127 }
113} 128}
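The reason the flat memcpy() and the SCp.buffer++ pointer arithmetic had to go is that, since the scatterlist-chaining work, an sg table is no longer guaranteed to be one contiguous array: an entry may be a chain link to the next chunk, so entries must be visited with sg_next()/for_each_sg(). A minimal sketch of the safe walking idiom:

#include <linux/scatterlist.h>

/* Sketch: total byte count of a possibly chained scatterlist. */
static unsigned long sg_total_length(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	unsigned long total = 0;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)	/* follows chain links via sg_next() */
		total += sg->length;	/* never sgl[i]: entries may live in
					 * different chained chunks */
	return total;
}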
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c
deleted file mode 100644
index 23f7c24ab809..000000000000
--- a/drivers/scsi/blz1230.c
+++ /dev/null
@@ -1,353 +0,0 @@
1/* blz1230.c: Driver for Blizzard 1230 SCSI IV Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on the CyberStorm driver, hence the occasional
6 * reference to CyberStorm.
7 */
8
9/* TODO:
10 *
11 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
12 * to the caches and the Sparc MMU mapping.
13 * 2) Make as few routines required outside the generic driver. A lot of the
14 * routines in this file used to be inline!
15 */
16
17#include <linux/module.h>
18
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/delay.h>
22#include <linux/types.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/blkdev.h>
26#include <linux/proc_fs.h>
27#include <linux/stat.h>
28#include <linux/interrupt.h>
29
30#include "scsi.h"
31#include <scsi/scsi_host.h>
32#include "NCR53C9x.h"
33
34#include <linux/zorro.h>
35#include <asm/irq.h>
36#include <asm/amigaints.h>
37#include <asm/amigahw.h>
38
39#include <asm/pgtable.h>
40
41#define MKIV 1
42
43/* The controller registers can be found in the Z2 config area at these
44 * offsets:
45 */
46#define BLZ1230_ESP_ADDR 0x8000
47#define BLZ1230_DMA_ADDR 0x10000
48#define BLZ1230II_ESP_ADDR 0x10000
49#define BLZ1230II_DMA_ADDR 0x10021
50
51
52/* The Blizzard 1230 DMA interface
53 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
54 * Only two things can be programmed in the Blizzard DMA:
55 * 1) The data direction is controlled by the status of bit 31 (1 = write)
56 * 2) The source/dest address (word aligned, shifted one right) in bits 30-0
57 *
58 * Program DMA by first latching the highest byte of the address/direction
59 * (i.e. bits 31-24 of the long word constructed as described in steps 1+2
60 * above). Then write each byte of the address/direction (starting with the
61 * top byte, working down) to the DMA address register.
62 *
63 * Figure out interrupt status by reading the ESP status byte.
64 */
65struct blz1230_dma_registers {
66 volatile unsigned char dma_addr; /* DMA address [0x0000] */
67 unsigned char dmapad2[0x7fff];
68 volatile unsigned char dma_latch; /* DMA latch [0x8000] */
69};
70
71struct blz1230II_dma_registers {
72 volatile unsigned char dma_addr; /* DMA address [0x0000] */
73 unsigned char dmapad2[0xf];
74 volatile unsigned char dma_latch; /* DMA latch [0x0010] */
75};
76
77#define BLZ1230_DMA_WRITE 0x80000000
78
79static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
80static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
81static void dma_dump_state(struct NCR_ESP *esp);
82static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
83static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
84static void dma_ints_off(struct NCR_ESP *esp);
85static void dma_ints_on(struct NCR_ESP *esp);
86static int dma_irq_p(struct NCR_ESP *esp);
87static int dma_ports_p(struct NCR_ESP *esp);
88static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
89
90static volatile unsigned char cmd_buffer[16];
91 /* This is where all commands are put
92 * before they are transferred to the ESP chip
93 * via PIO.
94 */
95
96/***************************************************************** Detection */
97int __init blz1230_esp_detect(struct scsi_host_template *tpnt)
98{
99 struct NCR_ESP *esp;
100 struct zorro_dev *z = NULL;
101 unsigned long address;
102 struct ESP_regs *eregs;
103 unsigned long board;
104
105#if MKIV
106#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260
107#define REAL_BLZ1230_ESP_ADDR BLZ1230_ESP_ADDR
108#define REAL_BLZ1230_DMA_ADDR BLZ1230_DMA_ADDR
109#else
110#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060
111#define REAL_BLZ1230_ESP_ADDR BLZ1230II_ESP_ADDR
112#define REAL_BLZ1230_DMA_ADDR BLZ1230II_DMA_ADDR
113#endif
114
115 if ((z = zorro_find_device(REAL_BLZ1230_ID, z))) {
116 board = z->resource.start;
117 if (request_mem_region(board+REAL_BLZ1230_ESP_ADDR,
118 sizeof(struct ESP_regs), "NCR53C9x")) {
119 /* Do some magic to figure out if the blizzard is
120 * equipped with a SCSI controller
121 */
122 address = ZTWO_VADDR(board);
123 eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR);
124 esp = esp_allocate(tpnt, (void *)board + REAL_BLZ1230_ESP_ADDR,
125 0);
126
127 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
128 udelay(5);
129 if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
130 goto err_out;
131
132 /* Do command transfer with programmed I/O */
133 esp->do_pio_cmds = 1;
134
135 /* Required functions */
136 esp->dma_bytes_sent = &dma_bytes_sent;
137 esp->dma_can_transfer = &dma_can_transfer;
138 esp->dma_dump_state = &dma_dump_state;
139 esp->dma_init_read = &dma_init_read;
140 esp->dma_init_write = &dma_init_write;
141 esp->dma_ints_off = &dma_ints_off;
142 esp->dma_ints_on = &dma_ints_on;
143 esp->dma_irq_p = &dma_irq_p;
144 esp->dma_ports_p = &dma_ports_p;
145 esp->dma_setup = &dma_setup;
146
147 /* Optional functions */
148 esp->dma_barrier = 0;
149 esp->dma_drain = 0;
150 esp->dma_invalidate = 0;
151 esp->dma_irq_entry = 0;
152 esp->dma_irq_exit = 0;
153 esp->dma_led_on = 0;
154 esp->dma_led_off = 0;
155 esp->dma_poll = 0;
156 esp->dma_reset = 0;
157
158 /* SCSI chip speed */
159 esp->cfreq = 40000000;
160
161 /* The DMA registers on the Blizzard are mapped
162 * relative to the device (i.e. in the same Zorro
163 * I/O block).
164 */
165 esp->dregs = (void *)(address + REAL_BLZ1230_DMA_ADDR);
166
167 /* ESP register base */
168 esp->eregs = eregs;
169
170 /* Set the command buffer */
171 esp->esp_command = cmd_buffer;
172 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
173
174 esp->irq = IRQ_AMIGA_PORTS;
175 esp->slot = board+REAL_BLZ1230_ESP_ADDR;
176 if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
177 "Blizzard 1230 SCSI IV", esp->ehost))
178 goto err_out;
179
180 /* Figure out our scsi ID on the bus */
181 esp->scsi_id = 7;
182
183 /* We don't have a differential SCSI-bus. */
184 esp->diff = 0;
185
186 esp_initialize(esp);
187
188 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
189 esps_running = esps_in_use;
190 return esps_in_use;
191 }
192 }
193 return 0;
194
195 err_out:
196 scsi_unregister(esp->ehost);
197 esp_deallocate(esp);
198 release_mem_region(board+REAL_BLZ1230_ESP_ADDR,
199 sizeof(struct ESP_regs));
200 return 0;
201}
202
203/************************************************************* DMA Functions */
204static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
205{
206 /* Since the Blizzard DMA is fully dedicated to the ESP chip,
207 * the number of bytes sent (to the ESP chip) equals the number
208 * of bytes in the FIFO - there is no buffering in the DMA controller.
209 * XXXX Do I read this right? It is from host to ESP, right?
210 */
211 return fifo_count;
212}
213
214static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
215{
216 /* I don't think there's any limit on the Blizzard DMA. So we use what
217 * the ESP chip can handle (24 bit).
218 */
219 unsigned long sz = sp->SCp.this_residual;
220 if(sz > 0x1000000)
221 sz = 0x1000000;
222 return sz;
223}
224
225static void dma_dump_state(struct NCR_ESP *esp)
226{
227 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
228 amiga_custom.intreqr, amiga_custom.intenar));
229}
230
 231static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
232{
233#if MKIV
234 struct blz1230_dma_registers *dregs =
235 (struct blz1230_dma_registers *) (esp->dregs);
236#else
237 struct blz1230II_dma_registers *dregs =
238 (struct blz1230II_dma_registers *) (esp->dregs);
239#endif
240
241 cache_clear(addr, length);
242
243 addr >>= 1;
244 addr &= ~(BLZ1230_DMA_WRITE);
245
246 /* First set latch */
247 dregs->dma_latch = (addr >> 24) & 0xff;
248
249 /* Then pump the address to the DMA address register */
250#if MKIV
251 dregs->dma_addr = (addr >> 24) & 0xff;
252#endif
253 dregs->dma_addr = (addr >> 16) & 0xff;
254 dregs->dma_addr = (addr >> 8) & 0xff;
255 dregs->dma_addr = (addr ) & 0xff;
256}
257
 258static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
259{
260#if MKIV
261 struct blz1230_dma_registers *dregs =
262 (struct blz1230_dma_registers *) (esp->dregs);
263#else
264 struct blz1230II_dma_registers *dregs =
265 (struct blz1230II_dma_registers *) (esp->dregs);
266#endif
267
268 cache_push(addr, length);
269
270 addr >>= 1;
271 addr |= BLZ1230_DMA_WRITE;
272
273 /* First set latch */
274 dregs->dma_latch = (addr >> 24) & 0xff;
275
276 /* Then pump the address to the DMA address register */
277#if MKIV
278 dregs->dma_addr = (addr >> 24) & 0xff;
279#endif
280 dregs->dma_addr = (addr >> 16) & 0xff;
281 dregs->dma_addr = (addr >> 8) & 0xff;
282 dregs->dma_addr = (addr ) & 0xff;
283}
284
285static void dma_ints_off(struct NCR_ESP *esp)
286{
287 disable_irq(esp->irq);
288}
289
290static void dma_ints_on(struct NCR_ESP *esp)
291{
292 enable_irq(esp->irq);
293}
294
295static int dma_irq_p(struct NCR_ESP *esp)
296{
297 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
298}
299
300static int dma_ports_p(struct NCR_ESP *esp)
301{
302 return ((amiga_custom.intenar) & IF_PORTS);
303}
304
305static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
306{
307 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
308 * so when (write) is true, it actually means READ!
309 */
310 if(write){
311 dma_init_read(esp, addr, count);
312 } else {
313 dma_init_write(esp, addr, count);
314 }
315}
316
317#define HOSTS_C
318
319int blz1230_esp_release(struct Scsi_Host *instance)
320{
321#ifdef MODULE
322 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
323 esp_deallocate((struct NCR_ESP *)instance->hostdata);
324 esp_release();
325 release_mem_region(address, sizeof(struct ESP_regs));
326 free_irq(IRQ_AMIGA_PORTS, esp_intr);
327#endif
328 return 1;
329}
330
331
332static struct scsi_host_template driver_template = {
333 .proc_name = "esp-blz1230",
334 .proc_info = esp_proc_info,
335 .name = "Blizzard1230 SCSI IV",
336 .detect = blz1230_esp_detect,
337 .slave_alloc = esp_slave_alloc,
338 .slave_destroy = esp_slave_destroy,
339 .release = blz1230_esp_release,
340 .queuecommand = esp_queue,
341 .eh_abort_handler = esp_abort,
342 .eh_bus_reset_handler = esp_reset,
343 .can_queue = 7,
344 .this_id = 7,
345 .sg_tablesize = SG_ALL,
346 .cmd_per_lun = 1,
347 .use_clustering = ENABLE_CLUSTERING
348};
349
350
351#include "scsi_module.c"
352
353MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c
deleted file mode 100644
index b6203ec00961..000000000000
--- a/drivers/scsi/blz2060.c
+++ /dev/null
@@ -1,306 +0,0 @@
1/* blz2060.c: Driver for Blizzard 2060 SCSI Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on the CyberStorm driver, hence the occasional
6 * reference to CyberStorm.
7 */
8
9/* TODO:
10 *
11 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
12 * to the caches and the Sparc MMU mapping.
 13 * 2) Keep the number of routines required outside the generic driver to a
 14 *    minimum. A lot of the routines in this file used to be inline!
15 */
16
17#include <linux/module.h>
18
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/delay.h>
22#include <linux/types.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/blkdev.h>
26#include <linux/proc_fs.h>
27#include <linux/stat.h>
28#include <linux/interrupt.h>
29
30#include "scsi.h"
31#include <scsi/scsi_host.h>
32#include "NCR53C9x.h"
33
34#include <linux/zorro.h>
35#include <asm/irq.h>
36#include <asm/amigaints.h>
37#include <asm/amigahw.h>
38
39#include <asm/pgtable.h>
40
41/* The controller registers can be found in the Z2 config area at these
42 * offsets:
43 */
44#define BLZ2060_ESP_ADDR 0x1ff00
45#define BLZ2060_DMA_ADDR 0x1ffe0
46
47
48/* The Blizzard 2060 DMA interface
49 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
50 * Only two things can be programmed in the Blizzard DMA:
51 * 1) The data direction is controlled by the status of bit 31 (1 = write)
52 * 2) The source/dest address (word aligned, shifted one right) in bits 30-0
53 *
54 * Figure out interrupt status by reading the ESP status byte.
55 */
56struct blz2060_dma_registers {
57 volatile unsigned char dma_led_ctrl; /* DMA led control [0x000] */
58 unsigned char dmapad1[0x0f];
59 volatile unsigned char dma_addr0; /* DMA address (MSB) [0x010] */
60 unsigned char dmapad2[0x03];
61 volatile unsigned char dma_addr1; /* DMA address [0x014] */
62 unsigned char dmapad3[0x03];
63 volatile unsigned char dma_addr2; /* DMA address [0x018] */
64 unsigned char dmapad4[0x03];
65 volatile unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */
66};
67
68#define BLZ2060_DMA_WRITE 0x80000000
69
70/* DMA control bits */
71#define BLZ2060_DMA_LED 0x02 /* HD led control 1 = off */
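/* Editor's note: the helper below is an illustrative sketch, not part of
 * the original driver. It restates how the register layout above is
 * programmed: the physical address is halved to a word address in bits
 * 30-0, the transfer direction lives in bit 31 (BLZ2060_DMA_WRITE), and
 * the four address bytes are then written LSB first. dma_init_read() and
 * dma_init_write() further down do exactly this; the helper name is
 * hypothetical.
 */
static inline void blz2060_dma_prog(struct blz2060_dma_registers *dregs,
				    __u32 addr, int set_write_bit)
{
	addr >>= 1;				/* word address goes in bits 30-0 */
	if (set_write_bit)
		addr |= BLZ2060_DMA_WRITE;	/* bit 31 selects the direction */
	else
		addr &= ~(BLZ2060_DMA_WRITE);
	dregs->dma_addr3 = (addr      ) & 0xff;	/* LSB */
	dregs->dma_addr2 = (addr >>  8) & 0xff;
	dregs->dma_addr1 = (addr >> 16) & 0xff;
	dregs->dma_addr0 = (addr >> 24) & 0xff;	/* MSB */
}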
72
73static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
74static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
75static void dma_dump_state(struct NCR_ESP *esp);
76static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
77static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
78static void dma_ints_off(struct NCR_ESP *esp);
79static void dma_ints_on(struct NCR_ESP *esp);
80static int dma_irq_p(struct NCR_ESP *esp);
81static void dma_led_off(struct NCR_ESP *esp);
82static void dma_led_on(struct NCR_ESP *esp);
83static int dma_ports_p(struct NCR_ESP *esp);
84static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
85
86static volatile unsigned char cmd_buffer[16];
87 /* This is where all commands are put
88 * before they are transferred to the ESP chip
89 * via PIO.
90 */
91
92/***************************************************************** Detection */
93int __init blz2060_esp_detect(struct scsi_host_template *tpnt)
94{
95 struct NCR_ESP *esp;
96 struct zorro_dev *z = NULL;
97 unsigned long address;
98
99 if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_2060, z))) {
100 unsigned long board = z->resource.start;
101 if (request_mem_region(board+BLZ2060_ESP_ADDR,
102 sizeof(struct ESP_regs), "NCR53C9x")) {
103 esp = esp_allocate(tpnt, (void *)board + BLZ2060_ESP_ADDR, 0);
104
105 /* Do command transfer with programmed I/O */
106 esp->do_pio_cmds = 1;
107
108 /* Required functions */
109 esp->dma_bytes_sent = &dma_bytes_sent;
110 esp->dma_can_transfer = &dma_can_transfer;
111 esp->dma_dump_state = &dma_dump_state;
112 esp->dma_init_read = &dma_init_read;
113 esp->dma_init_write = &dma_init_write;
114 esp->dma_ints_off = &dma_ints_off;
115 esp->dma_ints_on = &dma_ints_on;
116 esp->dma_irq_p = &dma_irq_p;
117 esp->dma_ports_p = &dma_ports_p;
118 esp->dma_setup = &dma_setup;
119
120 /* Optional functions */
121 esp->dma_barrier = 0;
122 esp->dma_drain = 0;
123 esp->dma_invalidate = 0;
124 esp->dma_irq_entry = 0;
125 esp->dma_irq_exit = 0;
126 esp->dma_led_on = &dma_led_on;
127 esp->dma_led_off = &dma_led_off;
128 esp->dma_poll = 0;
129 esp->dma_reset = 0;
130
131 /* SCSI chip speed */
132 esp->cfreq = 40000000;
133
134 /* The DMA registers on the Blizzard are mapped
135 * relative to the device (i.e. in the same Zorro
136 * I/O block).
137 */
138 address = (unsigned long)ZTWO_VADDR(board);
139 esp->dregs = (void *)(address + BLZ2060_DMA_ADDR);
140
141 /* ESP register base */
142 esp->eregs = (struct ESP_regs *)(address + BLZ2060_ESP_ADDR);
143
144 /* Set the command buffer */
145 esp->esp_command = cmd_buffer;
146 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
147
148 esp->irq = IRQ_AMIGA_PORTS;
149 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
150 "Blizzard 2060 SCSI", esp->ehost);
151
152 /* Figure out our scsi ID on the bus */
153 esp->scsi_id = 7;
154
155 /* We don't have a differential SCSI-bus. */
156 esp->diff = 0;
157
158 esp_initialize(esp);
159
160 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
161 esps_running = esps_in_use;
162 return esps_in_use;
163 }
164 }
165 return 0;
166}
167
168/************************************************************* DMA Functions */
169static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
170{
171 /* Since the Blizzard DMA is fully dedicated to the ESP chip,
172 * the number of bytes sent (to the ESP chip) equals the number
173 * of bytes in the FIFO - there is no buffering in the DMA controller.
174 * XXXX Do I read this right? It is from host to ESP, right?
175 */
176 return fifo_count;
177}
178
179static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
180{
181 /* I don't think there's any limit on the Blizzard DMA. So we use what
182 * the ESP chip can handle (24 bit).
183 */
184 unsigned long sz = sp->SCp.this_residual;
185 if(sz > 0x1000000)
186 sz = 0x1000000;
187 return sz;
188}
189
190static void dma_dump_state(struct NCR_ESP *esp)
191{
192 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
193 amiga_custom.intreqr, amiga_custom.intenar));
194}
195
196static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
197{
198 struct blz2060_dma_registers *dregs =
199 (struct blz2060_dma_registers *) (esp->dregs);
200
201 cache_clear(addr, length);
202
203 addr >>= 1;
204 addr &= ~(BLZ2060_DMA_WRITE);
205 dregs->dma_addr3 = (addr ) & 0xff;
206 dregs->dma_addr2 = (addr >> 8) & 0xff;
207 dregs->dma_addr1 = (addr >> 16) & 0xff;
208 dregs->dma_addr0 = (addr >> 24) & 0xff;
209}
210
211static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
212{
213 struct blz2060_dma_registers *dregs =
214 (struct blz2060_dma_registers *) (esp->dregs);
215
216 cache_push(addr, length);
217
218 addr >>= 1;
219 addr |= BLZ2060_DMA_WRITE;
220 dregs->dma_addr3 = (addr ) & 0xff;
221 dregs->dma_addr2 = (addr >> 8) & 0xff;
222 dregs->dma_addr1 = (addr >> 16) & 0xff;
223 dregs->dma_addr0 = (addr >> 24) & 0xff;
224}
225
226static void dma_ints_off(struct NCR_ESP *esp)
227{
228 disable_irq(esp->irq);
229}
230
231static void dma_ints_on(struct NCR_ESP *esp)
232{
233 enable_irq(esp->irq);
234}
235
236static int dma_irq_p(struct NCR_ESP *esp)
237{
238 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
239}
240
241static void dma_led_off(struct NCR_ESP *esp)
242{
243 ((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl =
244 BLZ2060_DMA_LED;
245}
246
247static void dma_led_on(struct NCR_ESP *esp)
248{
249 ((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl = 0;
250}
251
252static int dma_ports_p(struct NCR_ESP *esp)
253{
254 return ((amiga_custom.intenar) & IF_PORTS);
255}
256
257static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
258{
259 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
260 * so when (write) is true, it actually means READ!
261 */
262 if(write){
263 dma_init_read(esp, addr, count);
264 } else {
265 dma_init_write(esp, addr, count);
266 }
267}
268
269#define HOSTS_C
270
271int blz2060_esp_release(struct Scsi_Host *instance)
272{
273#ifdef MODULE
274 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
275
276 esp_deallocate((struct NCR_ESP *)instance->hostdata);
277 esp_release();
278 release_mem_region(address, sizeof(struct ESP_regs));
279 free_irq(IRQ_AMIGA_PORTS, esp_intr);
280#endif
281 return 1;
282}
283
284
285static struct scsi_host_template driver_template = {
286 .proc_name = "esp-blz2060",
287 .proc_info = esp_proc_info,
288 .name = "Blizzard2060 SCSI",
289 .detect = blz2060_esp_detect,
290 .slave_alloc = esp_slave_alloc,
291 .slave_destroy = esp_slave_destroy,
292 .release = blz2060_esp_release,
293 .queuecommand = esp_queue,
294 .eh_abort_handler = esp_abort,
295 .eh_bus_reset_handler = esp_reset,
296 .can_queue = 7,
297 .this_id = 7,
298 .sg_tablesize = SG_ALL,
299 .cmd_per_lun = 1,
300 .use_clustering = ENABLE_CLUSTERING
301};
302
303
304#include "scsi_module.c"
305
306MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c
deleted file mode 100644
index c6b98a42e89d..000000000000
--- a/drivers/scsi/cyberstorm.c
+++ /dev/null
@@ -1,377 +0,0 @@
1/* cyberstorm.c: Driver for CyberStorm SCSI Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * The CyberStorm SCSI driver is based on David S. Miller's ESP driver
6 * for the Sparc computers.
7 *
8 * This work was made possible by Phase5 who willingly (and most generously)
9 * supported me with hardware and all the information I needed.
10 */
11
12/* TODO:
13 *
14 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
15 * to the caches and the Sparc MMU mapping.
 16 * 2) Keep the number of routines required outside the generic driver to a
 17 *    minimum. A lot of the routines in this file used to be inline!
18 */
19
20#include <linux/module.h>
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/types.h>
26#include <linux/string.h>
27#include <linux/slab.h>
28#include <linux/blkdev.h>
29#include <linux/proc_fs.h>
30#include <linux/stat.h>
31#include <linux/interrupt.h>
32
33#include "scsi.h"
34#include <scsi/scsi_host.h>
35#include "NCR53C9x.h"
36
37#include <linux/zorro.h>
38#include <asm/irq.h>
39#include <asm/amigaints.h>
40#include <asm/amigahw.h>
41
42#include <asm/pgtable.h>
43
44/* The controller registers can be found in the Z2 config area at these
45 * offsets:
46 */
47#define CYBER_ESP_ADDR 0xf400
48#define CYBER_DMA_ADDR 0xf800
49
50
51/* The CyberStorm DMA interface */
52struct cyber_dma_registers {
53 volatile unsigned char dma_addr0; /* DMA address (MSB) [0x000] */
54 unsigned char dmapad1[1];
55 volatile unsigned char dma_addr1; /* DMA address [0x002] */
56 unsigned char dmapad2[1];
57 volatile unsigned char dma_addr2; /* DMA address [0x004] */
58 unsigned char dmapad3[1];
59 volatile unsigned char dma_addr3; /* DMA address (LSB) [0x006] */
60 unsigned char dmapad4[0x3fb];
61 volatile unsigned char cond_reg; /* DMA cond (ro) [0x402] */
62#define ctrl_reg cond_reg /* DMA control (wo) [0x402] */
63};
64
65/* DMA control bits */
66#define CYBER_DMA_LED 0x80 /* HD led control 1 = on */
67#define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */
68#define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */
69
70/* DMA status bits */
71#define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */
72
 73/* The bits below appear to be Phase5 debug bits only; they were not
74 * described by Phase5 so using them may seem a bit stupid...
75 */
76#define CYBER_HOST_ID 0x02 /* If set, host ID should be 7, otherwise
77 * it should be 6.
78 */
79#define CYBER_SLOW_CABLE 0x08 /* If *not* set, assume SLOW_CABLE */
80
81static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
82static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
83static void dma_dump_state(struct NCR_ESP *esp);
84static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
85static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
86static void dma_ints_off(struct NCR_ESP *esp);
87static void dma_ints_on(struct NCR_ESP *esp);
88static int dma_irq_p(struct NCR_ESP *esp);
89static void dma_led_off(struct NCR_ESP *esp);
90static void dma_led_on(struct NCR_ESP *esp);
91static int dma_ports_p(struct NCR_ESP *esp);
92static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
93
 94static unsigned char ctrl_data = 0;	/* Shadow copy of the value written
 95					 * to ctrl_reg, which is write-only.
 96					 * Always update this variable when
 97					 * writing to the hardware register!
 98					 */
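/* Editor's note: an illustrative sketch (not part of the original driver)
 * of the shadow-register pattern used throughout this file. ctrl_reg is
 * write-only (reads of the same address return cond_reg), so every change
 * goes through ctrl_data and the full value is then written back. The
 * helper name is hypothetical.
 */
static inline void cyber_update_ctrl(struct cyber_dma_registers *dregs,
				     unsigned char set, unsigned char clear)
{
	ctrl_data = (ctrl_data & ~clear) | set;	/* update the shadow copy */
	dregs->ctrl_reg = ctrl_data;		/* then mirror it to the hardware */
}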
99
100static volatile unsigned char cmd_buffer[16];
101 /* This is where all commands are put
102 * before they are transferred to the ESP chip
103 * via PIO.
104 */
105
106/***************************************************************** Detection */
107int __init cyber_esp_detect(struct scsi_host_template *tpnt)
108{
109 struct NCR_ESP *esp;
110 struct zorro_dev *z = NULL;
111 unsigned long address;
112
113 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
114 unsigned long board = z->resource.start;
115 if ((z->id == ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM ||
116 z->id == ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060) &&
117 request_mem_region(board+CYBER_ESP_ADDR,
118 sizeof(struct ESP_regs), "NCR53C9x")) {
119 /* Figure out if this is a CyberStorm or really a
120 * Fastlane/Blizzard Mk II by looking at the board size.
121 * CyberStorm maps 64kB
122 * (ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM does anyway)
123 */
124 if(z->resource.end-board != 0xffff) {
125 release_mem_region(board+CYBER_ESP_ADDR,
126 sizeof(struct ESP_regs));
127 return 0;
128 }
129 esp = esp_allocate(tpnt, (void *)board + CYBER_ESP_ADDR, 0);
130
131 /* Do command transfer with programmed I/O */
132 esp->do_pio_cmds = 1;
133
134 /* Required functions */
135 esp->dma_bytes_sent = &dma_bytes_sent;
136 esp->dma_can_transfer = &dma_can_transfer;
137 esp->dma_dump_state = &dma_dump_state;
138 esp->dma_init_read = &dma_init_read;
139 esp->dma_init_write = &dma_init_write;
140 esp->dma_ints_off = &dma_ints_off;
141 esp->dma_ints_on = &dma_ints_on;
142 esp->dma_irq_p = &dma_irq_p;
143 esp->dma_ports_p = &dma_ports_p;
144 esp->dma_setup = &dma_setup;
145
146 /* Optional functions */
147 esp->dma_barrier = 0;
148 esp->dma_drain = 0;
149 esp->dma_invalidate = 0;
150 esp->dma_irq_entry = 0;
151 esp->dma_irq_exit = 0;
152 esp->dma_led_on = &dma_led_on;
153 esp->dma_led_off = &dma_led_off;
154 esp->dma_poll = 0;
155 esp->dma_reset = 0;
156
157 /* SCSI chip speed */
158 esp->cfreq = 40000000;
159
160 /* The DMA registers on the CyberStorm are mapped
161 * relative to the device (i.e. in the same Zorro
162 * I/O block).
163 */
164 address = (unsigned long)ZTWO_VADDR(board);
165 esp->dregs = (void *)(address + CYBER_DMA_ADDR);
166
167 /* ESP register base */
168 esp->eregs = (struct ESP_regs *)(address + CYBER_ESP_ADDR);
169
170 /* Set the command buffer */
171 esp->esp_command = cmd_buffer;
172 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
173
174 esp->irq = IRQ_AMIGA_PORTS;
175 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
176 "CyberStorm SCSI", esp->ehost);
177 /* Figure out our scsi ID on the bus */
178 /* The DMA cond flag contains a hardcoded jumper bit
179 * which can be used to select host number 6 or 7.
180 * However, even though it may change, we use a hardcoded
181 * value of 7.
182 */
183 esp->scsi_id = 7;
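			/* Editor's note (sketch only): if one wanted to honor the
			 * jumper instead of hardcoding 7, the CYBER_HOST_ID flag in
			 * cond_reg could be consulted, roughly:
			 *	esp->scsi_id = (((struct cyber_dma_registers *)
			 *			 (esp->dregs))->cond_reg & CYBER_HOST_ID)
			 *			? 7 : 6;
			 */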
184
185 /* We don't have a differential SCSI-bus. */
186 esp->diff = 0;
187
188 esp_initialize(esp);
189
190 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
191 esps_running = esps_in_use;
192 return esps_in_use;
193 }
194 }
195 return 0;
196}
197
198/************************************************************* DMA Functions */
199static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
200{
201 /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
202 * the number of bytes sent (to the ESP chip) equals the number
203 * of bytes in the FIFO - there is no buffering in the DMA controller.
204 * XXXX Do I read this right? It is from host to ESP, right?
205 */
206 return fifo_count;
207}
208
209static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
210{
211 /* I don't think there's any limit on the CyberDMA. So we use what
212 * the ESP chip can handle (24 bit).
213 */
214 unsigned long sz = sp->SCp.this_residual;
215 if(sz > 0x1000000)
216 sz = 0x1000000;
217 return sz;
218}
219
220static void dma_dump_state(struct NCR_ESP *esp)
221{
222 ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
223 esp->esp_id, ((struct cyber_dma_registers *)
224 (esp->dregs))->cond_reg));
225 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
226 amiga_custom.intreqr, amiga_custom.intenar));
227}
228
229static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
230{
231 struct cyber_dma_registers *dregs =
232 (struct cyber_dma_registers *) esp->dregs;
233
234 cache_clear(addr, length);
235
236 addr &= ~(1);
237 dregs->dma_addr0 = (addr >> 24) & 0xff;
238 dregs->dma_addr1 = (addr >> 16) & 0xff;
239 dregs->dma_addr2 = (addr >> 8) & 0xff;
240 dregs->dma_addr3 = (addr ) & 0xff;
241 ctrl_data &= ~(CYBER_DMA_WRITE);
242
243 /* Check if physical address is outside Z2 space and of
244 * block length/block aligned in memory. If this is the
245 * case, enable 32 bit transfer. In all other cases, fall back
246 * to 16 bit transfer.
247 * Obviously 32 bit transfer should be enabled if the DMA address
248 * and length are 32 bit aligned. However, this leads to some
249 * strange behavior. Even 64 bit aligned addr/length fails.
250 * Until I've found a reason for this, 32 bit transfer is only
251 * used for full-block transfers (1kB).
252 * -jskov
253 */
254#if 0
255 if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
256 (addr < 0xff0000)))
257 ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
258 else
259 ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
260#else
261 ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
262#endif
263 dregs->ctrl_reg = ctrl_data;
264}
265
266static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
267{
268 struct cyber_dma_registers *dregs =
269 (struct cyber_dma_registers *) esp->dregs;
270
271 cache_push(addr, length);
272
273 addr |= 1;
274 dregs->dma_addr0 = (addr >> 24) & 0xff;
275 dregs->dma_addr1 = (addr >> 16) & 0xff;
276 dregs->dma_addr2 = (addr >> 8) & 0xff;
277 dregs->dma_addr3 = (addr ) & 0xff;
278 ctrl_data |= CYBER_DMA_WRITE;
279
280 /* See comment above */
281#if 0
282 if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
283 (addr < 0xff0000)))
284 ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
285 else
286 ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
287#else
288 ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
289#endif
290 dregs->ctrl_reg = ctrl_data;
291}
292
293static void dma_ints_off(struct NCR_ESP *esp)
294{
295 disable_irq(esp->irq);
296}
297
298static void dma_ints_on(struct NCR_ESP *esp)
299{
300 enable_irq(esp->irq);
301}
302
303static int dma_irq_p(struct NCR_ESP *esp)
304{
305 /* It's important to check the DMA IRQ bit in the correct way! */
306 return ((esp_read(esp->eregs->esp_status) & ESP_STAT_INTR) &&
307 ((((struct cyber_dma_registers *)(esp->dregs))->cond_reg) &
308 CYBER_DMA_HNDL_INTR));
309}
310
311static void dma_led_off(struct NCR_ESP *esp)
312{
313 ctrl_data &= ~CYBER_DMA_LED;
314 ((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
315}
316
317static void dma_led_on(struct NCR_ESP *esp)
318{
319 ctrl_data |= CYBER_DMA_LED;
320 ((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
321}
322
323static int dma_ports_p(struct NCR_ESP *esp)
324{
325 return ((amiga_custom.intenar) & IF_PORTS);
326}
327
328static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
329{
330 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
331 * so when (write) is true, it actually means READ!
332 */
333 if(write){
334 dma_init_read(esp, addr, count);
335 } else {
336 dma_init_write(esp, addr, count);
337 }
338}
339
340#define HOSTS_C
341
342int cyber_esp_release(struct Scsi_Host *instance)
343{
344#ifdef MODULE
345 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
346
347 esp_deallocate((struct NCR_ESP *)instance->hostdata);
348 esp_release();
349 release_mem_region(address, sizeof(struct ESP_regs));
350 free_irq(IRQ_AMIGA_PORTS, esp_intr);
351#endif
352 return 1;
353}
354
355
356static struct scsi_host_template driver_template = {
357 .proc_name = "esp-cyberstorm",
358 .proc_info = esp_proc_info,
359 .name = "CyberStorm SCSI",
360 .detect = cyber_esp_detect,
361 .slave_alloc = esp_slave_alloc,
362 .slave_destroy = esp_slave_destroy,
363 .release = cyber_esp_release,
364 .queuecommand = esp_queue,
365 .eh_abort_handler = esp_abort,
366 .eh_bus_reset_handler = esp_reset,
367 .can_queue = 7,
368 .this_id = 7,
369 .sg_tablesize = SG_ALL,
370 .cmd_per_lun = 1,
371 .use_clustering = ENABLE_CLUSTERING
372};
373
374
375#include "scsi_module.c"
376
377MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c
deleted file mode 100644
index e336e853e66f..000000000000
--- a/drivers/scsi/cyberstormII.c
+++ /dev/null
@@ -1,314 +0,0 @@
1/* cyberstormII.c: Driver for CyberStorm SCSI Mk II
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on cyberstorm.c
6 */
7
8/* TODO:
9 *
10 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
11 * to the caches and the Sparc MMU mapping.
 12 * 2) Keep the number of routines required outside the generic driver to a
 13 *    minimum. A lot of the routines in this file used to be inline!
14 */
15
16#include <linux/module.h>
17
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/delay.h>
21#include <linux/types.h>
22#include <linux/string.h>
23#include <linux/slab.h>
24#include <linux/blkdev.h>
25#include <linux/proc_fs.h>
26#include <linux/stat.h>
27#include <linux/interrupt.h>
28
29#include "scsi.h"
30#include <scsi/scsi_host.h>
31#include "NCR53C9x.h"
32
33#include <linux/zorro.h>
34#include <asm/irq.h>
35#include <asm/amigaints.h>
36#include <asm/amigahw.h>
37
38#include <asm/pgtable.h>
39
40/* The controller registers can be found in the Z2 config area at these
41 * offsets:
42 */
43#define CYBERII_ESP_ADDR 0x1ff03
44#define CYBERII_DMA_ADDR 0x1ff43
45
46
47/* The CyberStorm II DMA interface */
48struct cyberII_dma_registers {
49 volatile unsigned char cond_reg; /* DMA cond (ro) [0x000] */
50#define ctrl_reg cond_reg /* DMA control (wo) [0x000] */
51 unsigned char dmapad4[0x3f];
52 volatile unsigned char dma_addr0; /* DMA address (MSB) [0x040] */
53 unsigned char dmapad1[3];
54 volatile unsigned char dma_addr1; /* DMA address [0x044] */
55 unsigned char dmapad2[3];
56 volatile unsigned char dma_addr2; /* DMA address [0x048] */
57 unsigned char dmapad3[3];
58 volatile unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */
59};
60
61/* DMA control bits */
62#define CYBERII_DMA_LED 0x02 /* HD led control 1 = on */
63
64static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
65static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
66static void dma_dump_state(struct NCR_ESP *esp);
67static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
68static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
69static void dma_ints_off(struct NCR_ESP *esp);
70static void dma_ints_on(struct NCR_ESP *esp);
71static int dma_irq_p(struct NCR_ESP *esp);
72static void dma_led_off(struct NCR_ESP *esp);
73static void dma_led_on(struct NCR_ESP *esp);
74static int dma_ports_p(struct NCR_ESP *esp);
75static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
76
77static volatile unsigned char cmd_buffer[16];
78 /* This is where all commands are put
79 * before they are transferred to the ESP chip
80 * via PIO.
81 */
82
83/***************************************************************** Detection */
84int __init cyberII_esp_detect(struct scsi_host_template *tpnt)
85{
86 struct NCR_ESP *esp;
87 struct zorro_dev *z = NULL;
88 unsigned long address;
89 struct ESP_regs *eregs;
90
91 if ((z = zorro_find_device(ZORRO_PROD_PHASE5_CYBERSTORM_MK_II, z))) {
92 unsigned long board = z->resource.start;
93 if (request_mem_region(board+CYBERII_ESP_ADDR,
94 sizeof(struct ESP_regs), "NCR53C9x")) {
 95			/* Check whether this CyberStorm Mk II actually has a SCSI
 96			 * controller: write a test value to ESP config1 and verify it reads back
 97			 */
98 address = (unsigned long)ZTWO_VADDR(board);
99 eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR);
100
101 esp = esp_allocate(tpnt, (void *)board + CYBERII_ESP_ADDR, 0);
102
103 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
104 udelay(5);
105 if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)) {
106 esp_deallocate(esp);
107 scsi_unregister(esp->ehost);
108 release_mem_region(board+CYBERII_ESP_ADDR,
109 sizeof(struct ESP_regs));
110 return 0; /* Bail out if address did not hold data */
111 }
112
113 /* Do command transfer with programmed I/O */
114 esp->do_pio_cmds = 1;
115
116 /* Required functions */
117 esp->dma_bytes_sent = &dma_bytes_sent;
118 esp->dma_can_transfer = &dma_can_transfer;
119 esp->dma_dump_state = &dma_dump_state;
120 esp->dma_init_read = &dma_init_read;
121 esp->dma_init_write = &dma_init_write;
122 esp->dma_ints_off = &dma_ints_off;
123 esp->dma_ints_on = &dma_ints_on;
124 esp->dma_irq_p = &dma_irq_p;
125 esp->dma_ports_p = &dma_ports_p;
126 esp->dma_setup = &dma_setup;
127
128 /* Optional functions */
129 esp->dma_barrier = 0;
130 esp->dma_drain = 0;
131 esp->dma_invalidate = 0;
132 esp->dma_irq_entry = 0;
133 esp->dma_irq_exit = 0;
134 esp->dma_led_on = &dma_led_on;
135 esp->dma_led_off = &dma_led_off;
136 esp->dma_poll = 0;
137 esp->dma_reset = 0;
138
139 /* SCSI chip speed */
140 esp->cfreq = 40000000;
141
142 /* The DMA registers on the CyberStorm are mapped
143 * relative to the device (i.e. in the same Zorro
144 * I/O block).
145 */
146 esp->dregs = (void *)(address + CYBERII_DMA_ADDR);
147
148 /* ESP register base */
149 esp->eregs = eregs;
150
151 /* Set the command buffer */
152 esp->esp_command = cmd_buffer;
153 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
154
155 esp->irq = IRQ_AMIGA_PORTS;
156 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
157 "CyberStorm SCSI Mk II", esp->ehost);
158
159 /* Figure out our scsi ID on the bus */
160 esp->scsi_id = 7;
161
162 /* We don't have a differential SCSI-bus. */
163 esp->diff = 0;
164
165 esp_initialize(esp);
166
167 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
168 esps_running = esps_in_use;
169 return esps_in_use;
170 }
171 }
172 return 0;
173}
174
175/************************************************************* DMA Functions */
176static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
177{
178 /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
179 * the number of bytes sent (to the ESP chip) equals the number
180 * of bytes in the FIFO - there is no buffering in the DMA controller.
181 * XXXX Do I read this right? It is from host to ESP, right?
182 */
183 return fifo_count;
184}
185
186static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
187{
188 /* I don't think there's any limit on the CyberDMA. So we use what
189 * the ESP chip can handle (24 bit).
190 */
191 unsigned long sz = sp->SCp.this_residual;
192 if(sz > 0x1000000)
193 sz = 0x1000000;
194 return sz;
195}
196
197static void dma_dump_state(struct NCR_ESP *esp)
198{
199 ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
200 esp->esp_id, ((struct cyberII_dma_registers *)
201 (esp->dregs))->cond_reg));
202 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
203 amiga_custom.intreqr, amiga_custom.intenar));
204}
205
206static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
207{
208 struct cyberII_dma_registers *dregs =
209 (struct cyberII_dma_registers *) esp->dregs;
210
211 cache_clear(addr, length);
212
213 addr &= ~(1);
214 dregs->dma_addr0 = (addr >> 24) & 0xff;
215 dregs->dma_addr1 = (addr >> 16) & 0xff;
216 dregs->dma_addr2 = (addr >> 8) & 0xff;
217 dregs->dma_addr3 = (addr ) & 0xff;
218}
219
220static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
221{
222 struct cyberII_dma_registers *dregs =
223 (struct cyberII_dma_registers *) esp->dregs;
224
225 cache_push(addr, length);
226
227 addr |= 1;
228 dregs->dma_addr0 = (addr >> 24) & 0xff;
229 dregs->dma_addr1 = (addr >> 16) & 0xff;
230 dregs->dma_addr2 = (addr >> 8) & 0xff;
231 dregs->dma_addr3 = (addr ) & 0xff;
232}
233
234static void dma_ints_off(struct NCR_ESP *esp)
235{
236 disable_irq(esp->irq);
237}
238
239static void dma_ints_on(struct NCR_ESP *esp)
240{
241 enable_irq(esp->irq);
242}
243
244static int dma_irq_p(struct NCR_ESP *esp)
245{
246 /* It's important to check the DMA IRQ bit in the correct way! */
247 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
248}
249
250static void dma_led_off(struct NCR_ESP *esp)
251{
252 ((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg &= ~CYBERII_DMA_LED;
253}
254
255static void dma_led_on(struct NCR_ESP *esp)
256{
257 ((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg |= CYBERII_DMA_LED;
258}
259
260static int dma_ports_p(struct NCR_ESP *esp)
261{
262 return ((amiga_custom.intenar) & IF_PORTS);
263}
264
265static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
266{
267 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
268 * so when (write) is true, it actually means READ!
269 */
270 if(write){
271 dma_init_read(esp, addr, count);
272 } else {
273 dma_init_write(esp, addr, count);
274 }
275}
276
277#define HOSTS_C
278
279int cyberII_esp_release(struct Scsi_Host *instance)
280{
281#ifdef MODULE
282 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
283
284 esp_deallocate((struct NCR_ESP *)instance->hostdata);
285 esp_release();
286 release_mem_region(address, sizeof(struct ESP_regs));
287 free_irq(IRQ_AMIGA_PORTS, esp_intr);
288#endif
289 return 1;
290}
291
292
293static struct scsi_host_template driver_template = {
294 .proc_name = "esp-cyberstormII",
295 .proc_info = esp_proc_info,
296 .name = "CyberStorm Mk II SCSI",
297 .detect = cyberII_esp_detect,
298 .slave_alloc = esp_slave_alloc,
299 .slave_destroy = esp_slave_destroy,
300 .release = cyberII_esp_release,
301 .queuecommand = esp_queue,
302 .eh_abort_handler = esp_abort,
303 .eh_bus_reset_handler = esp_reset,
304 .can_queue = 7,
305 .this_id = 7,
306 .sg_tablesize = SG_ALL,
307 .cmd_per_lun = 1,
308 .use_clustering = ENABLE_CLUSTERING
309};
310
311
312#include "scsi_module.c"
313
314MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 22ef3716e786..e351db6c0077 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4267,7 +4267,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
 4267	const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
 4268	int srb_idx = 0;
 4269	unsigned i = 0;
 4270	-struct SGentry *ptr;
 4270	+struct SGentry *uninitialized_var(ptr);
 4271
 4272	for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
 4273		acb->srb_array[i].segment_x = NULL;
diff --git a/drivers/scsi/dec_esp.c b/drivers/scsi/dec_esp.c
deleted file mode 100644
index d42ad663ffee..000000000000
--- a/drivers/scsi/dec_esp.c
+++ /dev/null
@@ -1,687 +0,0 @@
1/*
2 * dec_esp.c: Driver for SCSI chips on IOASIC based TURBOchannel DECstations
3 * and TURBOchannel PMAZ-A cards
4 *
5 * TURBOchannel changes by Harald Koerfgen
6 * PMAZ-A support by David Airlie
7 *
8 * based on jazz_esp.c:
9 * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
10 *
11 * jazz_esp is based on David S. Miller's ESP driver and cyber_esp
12 *
13 * 20000819 - Small PMAZ-AA fixes by Florian Lohoff <flo@rfc822.org>
 14 * 	Be warned: the PMAZ-AA currently works only as a single card.
 15 * 	Don't try to put multiple cards in one machine - they are
 16 * 	both detected but it may crash under high load, garbling your
 17 * 	data.
18 * 20001005 - Initialization fixes for 2.4.0-test9
19 * Florian Lohoff <flo@rfc822.org>
20 *
21 * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
22 */
23
24#include <linux/kernel.h>
25#include <linux/delay.h>
26#include <linux/types.h>
27#include <linux/string.h>
28#include <linux/slab.h>
29#include <linux/blkdev.h>
30#include <linux/proc_fs.h>
31#include <linux/spinlock.h>
32#include <linux/stat.h>
33#include <linux/tc.h>
34
35#include <asm/dma.h>
36#include <asm/irq.h>
37#include <asm/pgtable.h>
38#include <asm/system.h>
39
40#include <asm/dec/interrupts.h>
41#include <asm/dec/ioasic.h>
42#include <asm/dec/ioasic_addrs.h>
43#include <asm/dec/ioasic_ints.h>
44#include <asm/dec/machtype.h>
45#include <asm/dec/system.h>
46
47#define DEC_SCSI_SREG 0
48#define DEC_SCSI_DMAREG 0x40000
49#define DEC_SCSI_SRAM 0x80000
50#define DEC_SCSI_DIAG 0xC0000
51
52#include "scsi.h"
53#include <scsi/scsi_host.h>
54#include "NCR53C9x.h"
55
56static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
57static void dma_drain(struct NCR_ESP *esp);
58static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
59static void dma_dump_state(struct NCR_ESP *esp);
60static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
61static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
62static void dma_ints_off(struct NCR_ESP *esp);
63static void dma_ints_on(struct NCR_ESP *esp);
64static int dma_irq_p(struct NCR_ESP *esp);
65static int dma_ports_p(struct NCR_ESP *esp);
66static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
67static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
68static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp);
69static void dma_advance_sg(struct scsi_cmnd * sp);
70
71static void pmaz_dma_drain(struct NCR_ESP *esp);
72static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
73static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
74static void pmaz_dma_ints_off(struct NCR_ESP *esp);
75static void pmaz_dma_ints_on(struct NCR_ESP *esp);
76static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
77static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
78
79#define TC_ESP_RAM_SIZE 0x20000
80#define ESP_TGT_DMA_SIZE ((TC_ESP_RAM_SIZE/7) & ~(sizeof(int)-1))
81#define ESP_NCMD 7
82
83#define TC_ESP_DMAR_MASK 0x1ffff
84#define TC_ESP_DMAR_WRITE 0x80000000
85#define TC_ESP_DMA_ADDR(x) ((unsigned)(x) & TC_ESP_DMAR_MASK)
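/* Editor's note: the on-card DMA register takes an offset into the 128 KB
 * SRAM (masked by TC_ESP_DMAR_MASK); TC_ESP_DMAR_WRITE is set for outgoing
 * transfers and left clear for incoming ones, e.g. (sketch only):
 *	*dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
 * as pmaz_dma_init_write() below does.
 */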
86
87u32 esp_virt_buffer;
88int scsi_current_length;
89
90volatile unsigned char cmd_buffer[16];
91volatile unsigned char pmaz_cmd_buffer[16];
92 /* This is where all commands are put
 93					/* before they are transferred to the ESP chip
94 * via PIO.
95 */
96
97static irqreturn_t scsi_dma_merr_int(int, void *);
98static irqreturn_t scsi_dma_err_int(int, void *);
99static irqreturn_t scsi_dma_int(int, void *);
100
101static struct scsi_host_template dec_esp_template = {
102 .module = THIS_MODULE,
103 .name = "NCR53C94",
104 .info = esp_info,
105 .queuecommand = esp_queue,
106 .eh_abort_handler = esp_abort,
107 .eh_bus_reset_handler = esp_reset,
108 .slave_alloc = esp_slave_alloc,
109 .slave_destroy = esp_slave_destroy,
110 .proc_info = esp_proc_info,
111 .proc_name = "dec_esp",
112 .can_queue = 7,
113 .sg_tablesize = SG_ALL,
114 .cmd_per_lun = 1,
115 .use_clustering = DISABLE_CLUSTERING,
116};
117
118static struct NCR_ESP *dec_esp_platform;
119
120/***************************************************************** Detection */
121static int dec_esp_platform_probe(void)
122{
123 struct NCR_ESP *esp;
124 int err = 0;
125
126 if (IOASIC) {
127 esp = esp_allocate(&dec_esp_template, NULL, 1);
128
129 /* Do command transfer with programmed I/O */
130 esp->do_pio_cmds = 1;
131
132 /* Required functions */
133 esp->dma_bytes_sent = &dma_bytes_sent;
134 esp->dma_can_transfer = &dma_can_transfer;
135 esp->dma_dump_state = &dma_dump_state;
136 esp->dma_init_read = &dma_init_read;
137 esp->dma_init_write = &dma_init_write;
138 esp->dma_ints_off = &dma_ints_off;
139 esp->dma_ints_on = &dma_ints_on;
140 esp->dma_irq_p = &dma_irq_p;
141 esp->dma_ports_p = &dma_ports_p;
142 esp->dma_setup = &dma_setup;
143
144 /* Optional functions */
145 esp->dma_barrier = 0;
146 esp->dma_drain = &dma_drain;
147 esp->dma_invalidate = 0;
148 esp->dma_irq_entry = 0;
149 esp->dma_irq_exit = 0;
150 esp->dma_poll = 0;
151 esp->dma_reset = 0;
152 esp->dma_led_off = 0;
153 esp->dma_led_on = 0;
154
155 /* virtual DMA functions */
156 esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
157 esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
158 esp->dma_mmu_release_scsi_one = 0;
159 esp->dma_mmu_release_scsi_sgl = 0;
160 esp->dma_advance_sg = &dma_advance_sg;
161
162
163 /* SCSI chip speed */
164 esp->cfreq = 25000000;
165
166 esp->dregs = 0;
167
168 /* ESP register base */
169 esp->eregs = (void *)CKSEG1ADDR(dec_kn_slot_base +
170 IOASIC_SCSI);
171
172 /* Set the command buffer */
173 esp->esp_command = (volatile unsigned char *) cmd_buffer;
174
175 /* get virtual dma address for command buffer */
176 esp->esp_command_dvma = virt_to_phys(cmd_buffer);
177
178 esp->irq = dec_interrupt[DEC_IRQ_ASC];
179
180 esp->scsi_id = 7;
181
182 /* Check for differential SCSI-bus */
183 esp->diff = 0;
184
185 err = request_irq(esp->irq, esp_intr, IRQF_DISABLED,
186 "ncr53c94", esp->ehost);
187 if (err)
188 goto err_alloc;
189 err = request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
190 scsi_dma_merr_int, IRQF_DISABLED,
191 "ncr53c94 error", esp->ehost);
192 if (err)
193 goto err_irq;
194 err = request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
195 scsi_dma_err_int, IRQF_DISABLED,
196 "ncr53c94 overrun", esp->ehost);
197 if (err)
198 goto err_irq_merr;
199 err = request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], scsi_dma_int,
200 IRQF_DISABLED, "ncr53c94 dma", esp->ehost);
201 if (err)
202 goto err_irq_err;
203
204 esp_initialize(esp);
205
206 err = scsi_add_host(esp->ehost, NULL);
207 if (err) {
208 printk(KERN_ERR "ESP: Unable to register adapter\n");
209 goto err_irq_dma;
210 }
211
212 scsi_scan_host(esp->ehost);
213
214 dec_esp_platform = esp;
215 }
216
217 return 0;
218
219err_irq_dma:
220 free_irq(dec_interrupt[DEC_IRQ_ASC_DMA], esp->ehost);
221err_irq_err:
222 free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost);
223err_irq_merr:
224 free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost);
225err_irq:
226 free_irq(esp->irq, esp->ehost);
227err_alloc:
228 esp_deallocate(esp);
229 scsi_host_put(esp->ehost);
230 return err;
231}
232
233static int __init dec_esp_probe(struct device *dev)
234{
235 struct NCR_ESP *esp;
236 resource_size_t start, len;
237 int err;
238
239 esp = esp_allocate(&dec_esp_template, NULL, 1);
240
241 dev_set_drvdata(dev, esp);
242
243 start = to_tc_dev(dev)->resource.start;
244 len = to_tc_dev(dev)->resource.end - start + 1;
245
246 if (!request_mem_region(start, len, dev->bus_id)) {
247 printk(KERN_ERR "%s: Unable to reserve MMIO resource\n",
248 dev->bus_id);
249 err = -EBUSY;
250 goto err_alloc;
251 }
252
253 /* Store base addr into esp struct. */
254 esp->slot = start;
255
256 esp->dregs = 0;
257 esp->eregs = (void *)CKSEG1ADDR(start + DEC_SCSI_SREG);
258 esp->do_pio_cmds = 1;
259
260 /* Set the command buffer. */
261 esp->esp_command = (volatile unsigned char *)pmaz_cmd_buffer;
262
263 /* Get virtual dma address for command buffer. */
264 esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer);
265
266 esp->cfreq = tc_get_speed(to_tc_dev(dev)->bus);
267
268 esp->irq = to_tc_dev(dev)->interrupt;
269
270 /* Required functions. */
271 esp->dma_bytes_sent = &dma_bytes_sent;
272 esp->dma_can_transfer = &dma_can_transfer;
273 esp->dma_dump_state = &dma_dump_state;
274 esp->dma_init_read = &pmaz_dma_init_read;
275 esp->dma_init_write = &pmaz_dma_init_write;
276 esp->dma_ints_off = &pmaz_dma_ints_off;
277 esp->dma_ints_on = &pmaz_dma_ints_on;
278 esp->dma_irq_p = &dma_irq_p;
279 esp->dma_ports_p = &dma_ports_p;
280 esp->dma_setup = &pmaz_dma_setup;
281
282 /* Optional functions. */
283 esp->dma_barrier = 0;
284 esp->dma_drain = &pmaz_dma_drain;
285 esp->dma_invalidate = 0;
286 esp->dma_irq_entry = 0;
287 esp->dma_irq_exit = 0;
288 esp->dma_poll = 0;
289 esp->dma_reset = 0;
290 esp->dma_led_off = 0;
291 esp->dma_led_on = 0;
292
293 esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one;
294 esp->dma_mmu_get_scsi_sgl = 0;
295 esp->dma_mmu_release_scsi_one = 0;
296 esp->dma_mmu_release_scsi_sgl = 0;
297 esp->dma_advance_sg = 0;
298
299 err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, "PMAZ_AA",
300 esp->ehost);
301 if (err) {
302 printk(KERN_ERR "%s: Unable to get IRQ %d\n",
303 dev->bus_id, esp->irq);
304 goto err_resource;
305 }
306
307 esp->scsi_id = 7;
308 esp->diff = 0;
309 esp_initialize(esp);
310
311 err = scsi_add_host(esp->ehost, dev);
312 if (err) {
313 printk(KERN_ERR "%s: Unable to register adapter\n",
314 dev->bus_id);
315 goto err_irq;
316 }
317
318 scsi_scan_host(esp->ehost);
319
320 return 0;
321
322err_irq:
323 free_irq(esp->irq, esp->ehost);
324
325err_resource:
326 release_mem_region(start, len);
327
328err_alloc:
329 esp_deallocate(esp);
330 scsi_host_put(esp->ehost);
331 return err;
332}
333
334static void __exit dec_esp_platform_remove(void)
335{
336 struct NCR_ESP *esp = dec_esp_platform;
337
338 free_irq(esp->irq, esp->ehost);
339 esp_deallocate(esp);
340 scsi_host_put(esp->ehost);
341 dec_esp_platform = NULL;
342}
343
344static void __exit dec_esp_remove(struct device *dev)
345{
346 struct NCR_ESP *esp = dev_get_drvdata(dev);
347
348 free_irq(esp->irq, esp->ehost);
349 esp_deallocate(esp);
350 scsi_host_put(esp->ehost);
351}
352
353
354/************************************************************* DMA Functions */
355static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id)
356{
357 printk("Got unexpected SCSI DMA Interrupt! < ");
358 printk("SCSI_DMA_MEMRDERR ");
359 printk(">\n");
360
361 return IRQ_HANDLED;
362}
363
364static irqreturn_t scsi_dma_err_int(int irq, void *dev_id)
365{
366 /* empty */
367
368 return IRQ_HANDLED;
369}
370
371static irqreturn_t scsi_dma_int(int irq, void *dev_id)
372{
373 u32 scsi_next_ptr;
374
375 scsi_next_ptr = ioasic_read(IO_REG_SCSI_DMA_P);
376
377 /* next page */
378 scsi_next_ptr = (((scsi_next_ptr >> 3) + PAGE_SIZE) & PAGE_MASK) << 3;
379 ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
380 fast_iob();
381
382 return IRQ_HANDLED;
383}
384
385static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
386{
387 return fifo_count;
388}
389
390static void dma_drain(struct NCR_ESP *esp)
391{
392 u32 nw, data0, data1, scsi_data_ptr;
393 u16 *p;
394
395 nw = ioasic_read(IO_REG_SCSI_SCR);
396
397 /*
398 * Is there something in the dma buffers left?
399 */
400 if (nw) {
401 scsi_data_ptr = ioasic_read(IO_REG_SCSI_DMA_P) >> 3;
402 p = phys_to_virt(scsi_data_ptr);
403 switch (nw) {
404 case 1:
405 data0 = ioasic_read(IO_REG_SCSI_SDR0);
406 p[0] = data0 & 0xffff;
407 break;
408 case 2:
409 data0 = ioasic_read(IO_REG_SCSI_SDR0);
410 p[0] = data0 & 0xffff;
411 p[1] = (data0 >> 16) & 0xffff;
412 break;
413 case 3:
414 data0 = ioasic_read(IO_REG_SCSI_SDR0);
415 data1 = ioasic_read(IO_REG_SCSI_SDR1);
416 p[0] = data0 & 0xffff;
417 p[1] = (data0 >> 16) & 0xffff;
418 p[2] = data1 & 0xffff;
419 break;
420 default:
421 printk("Strange: %d words in dma buffer left\n", nw);
422 break;
423 }
424 }
425}
426
427static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd * sp)
428{
429 return sp->SCp.this_residual;
430}
431
432static void dma_dump_state(struct NCR_ESP *esp)
433{
434}
435
436static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
437{
438 u32 scsi_next_ptr, ioasic_ssr;
439 unsigned long flags;
440
441 if (vaddress & 3)
442 panic("dec_esp.c: unable to handle partial word transfers, yet...");
443
444 dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);
445
446 spin_lock_irqsave(&ioasic_ssr_lock, flags);
447
448 fast_mb();
449 ioasic_ssr = ioasic_read(IO_REG_SSR);
450
451 ioasic_ssr &= ~IO_SSR_SCSI_DMA_EN;
452 ioasic_write(IO_REG_SSR, ioasic_ssr);
453
454 fast_wmb();
455 ioasic_write(IO_REG_SCSI_SCR, 0);
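	/* Editor's note: the IOASIC SCSI DMA pointer registers appear to hold
	 * the physical address shifted left by 3 bits, hence the << 3 here and
	 * the matching >> 3 when the pointer is read back in scsi_dma_int()
	 * and dma_drain().
	 */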
456 ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);
457
458 /* prepare for next page */
459 scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
460 ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
461
462 ioasic_ssr |= (IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
463 fast_wmb();
464 ioasic_write(IO_REG_SSR, ioasic_ssr);
465
466 fast_iob();
467 spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
468}
469
470static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
471{
472 u32 scsi_next_ptr, ioasic_ssr;
473 unsigned long flags;
474
475 if (vaddress & 3)
476 panic("dec_esp.c: unable to handle partial word transfers, yet...");
477
478 dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);
479
480 spin_lock_irqsave(&ioasic_ssr_lock, flags);
481
482 fast_mb();
483 ioasic_ssr = ioasic_read(IO_REG_SSR);
484
485 ioasic_ssr &= ~(IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
486 ioasic_write(IO_REG_SSR, ioasic_ssr);
487
488 fast_wmb();
489 ioasic_write(IO_REG_SCSI_SCR, 0);
490 ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);
491
492 /* prepare for next page */
493 scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
494 ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
495
496 ioasic_ssr |= IO_SSR_SCSI_DMA_EN;
497 fast_wmb();
498 ioasic_write(IO_REG_SSR, ioasic_ssr);
499
500 fast_iob();
501 spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
502}
503
504static void dma_ints_off(struct NCR_ESP *esp)
505{
506 disable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
507}
508
509static void dma_ints_on(struct NCR_ESP *esp)
510{
511 enable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
512}
513
514static int dma_irq_p(struct NCR_ESP *esp)
515{
516 return (esp->eregs->esp_status & ESP_STAT_INTR);
517}
518
519static int dma_ports_p(struct NCR_ESP *esp)
520{
521 /*
522 * FIXME: what's this good for?
523 */
524 return 1;
525}
526
527static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
528{
529 /*
530 * DMA_ST_WRITE means "move data from device to memory"
531 * so when (write) is true, it actually means READ!
532 */
533 if (write)
534 dma_init_read(esp, addr, count);
535 else
536 dma_init_write(esp, addr, count);
537}
538
539static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
540{
541 sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
542}
543
544static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp)
545{
546 int sz = sp->SCp.buffers_residual;
547 struct scatterlist *sg = sp->SCp.buffer;
548
549 while (sz >= 0) {
550 sg[sz].dma_address = page_to_phys(sg[sz].page) + sg[sz].offset;
551 sz--;
552 }
553 sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
554}
555
556static void dma_advance_sg(struct scsi_cmnd * sp)
557{
558 sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
559}
560
561static void pmaz_dma_drain(struct NCR_ESP *esp)
562{
563 memcpy(phys_to_virt(esp_virt_buffer),
564 (void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
565 ESP_TGT_DMA_SIZE),
566 scsi_current_length);
567}
568
569static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
570{
571 volatile u32 *dmareg =
572 (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);
573
574 if (length > ESP_TGT_DMA_SIZE)
575 length = ESP_TGT_DMA_SIZE;
576
577 *dmareg = TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
578
579 iob();
580
581 esp_virt_buffer = vaddress;
582 scsi_current_length = length;
583}
584
585static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
586{
587 volatile u32 *dmareg =
588 (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);
589
590 memcpy((void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
591 ESP_TGT_DMA_SIZE),
592 phys_to_virt(vaddress), length);
593
594 wmb();
595 *dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
596
597 iob();
598}
599
600static void pmaz_dma_ints_off(struct NCR_ESP *esp)
601{
602}
603
604static void pmaz_dma_ints_on(struct NCR_ESP *esp)
605{
606}
607
608static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
609{
610 /*
611 * DMA_ST_WRITE means "move data from device to memory"
612 * so when (write) is true, it actually means READ!
613 */
614 if (write)
615 pmaz_dma_init_read(esp, addr, count);
616 else
617 pmaz_dma_init_write(esp, addr, count);
618}
619
620static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
621{
622 sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
623}
624
625
626#ifdef CONFIG_TC
627static int __init dec_esp_tc_probe(struct device *dev);
628static int __exit dec_esp_tc_remove(struct device *dev);
629
630static const struct tc_device_id dec_esp_tc_table[] = {
631 { "DEC ", "PMAZ-AA " },
632 { }
633};
634MODULE_DEVICE_TABLE(tc, dec_esp_tc_table);
635
636static struct tc_driver dec_esp_tc_driver = {
637 .id_table = dec_esp_tc_table,
638 .driver = {
639 .name = "dec_esp",
640 .bus = &tc_bus_type,
641 .probe = dec_esp_tc_probe,
642 .remove = __exit_p(dec_esp_tc_remove),
643 },
644};
645
646static int __init dec_esp_tc_probe(struct device *dev)
647{
648 int status = dec_esp_probe(dev);
649 if (!status)
650 get_device(dev);
651 return status;
652}
653
654static int __exit dec_esp_tc_remove(struct device *dev)
655{
656 put_device(dev);
657 dec_esp_remove(dev);
658 return 0;
659}
660#endif
661
662static int __init dec_esp_init(void)
663{
664 int status;
665
666 status = tc_register_driver(&dec_esp_tc_driver);
667 if (!status)
668 dec_esp_platform_probe();
669
670 if (nesps) {
671 pr_info("ESP: Total of %d ESP hosts found, "
672 "%d actually in use.\n", nesps, esps_in_use);
673 esps_running = esps_in_use;
674 }
675
676 return status;
677}
678
679static void __exit dec_esp_exit(void)
680{
681 dec_esp_platform_remove();
682 tc_unregister_driver(&dec_esp_tc_driver);
683}
684
685
686module_init(dec_esp_init);
687module_exit(dec_esp_exit);
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c
deleted file mode 100644
index 4266a2139b5f..000000000000
--- a/drivers/scsi/fastlane.c
+++ /dev/null
@@ -1,421 +0,0 @@
1/* fastlane.c: Driver for Phase5's Fastlane SCSI Controller.
2 *
3 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
4 *
5 * This driver is based on the CyberStorm driver, hence the occasional
6 * reference to CyberStorm.
7 *
8 * Betatesting & crucial adjustments by
9 * Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
10 *
11 */
12
13/* TODO:
14 *
15 * o According to the doc from laire, it is required to reset the DMA when
16 * the transfer is done. ATM we reset DMA just before every new
17 * dma_init_(read|write).
18 *
19 * 1) Figure out how to make a cleaner merge with the sparc driver with regard
20 * to the caches and the Sparc MMU mapping.
 21 * 2) Keep the number of routines required outside the generic driver to a
 22 *    minimum. A lot of the routines in this file used to be inline!
23 */
24
25#include <linux/module.h>
26
27#include <linux/init.h>
28#include <linux/kernel.h>
29#include <linux/delay.h>
30#include <linux/types.h>
31#include <linux/string.h>
32#include <linux/slab.h>
33#include <linux/blkdev.h>
34#include <linux/proc_fs.h>
35#include <linux/stat.h>
36#include <linux/interrupt.h>
37
38#include "scsi.h"
39#include <scsi/scsi_host.h>
40#include "NCR53C9x.h"
41
42#include <linux/zorro.h>
43#include <asm/irq.h>
44
45#include <asm/amigaints.h>
46#include <asm/amigahw.h>
47
48#include <asm/pgtable.h>
49
50/* Such day has just come... */
51#if 0
52/* Leave this defined unless you really need to enable the DMA IRQ one day */
53#define NODMAIRQ
54#endif
55
56/* The controller registers can be found in the Z2 config area at these
57 * offsets:
58 */
59#define FASTLANE_ESP_ADDR 0x1000001
60#define FASTLANE_DMA_ADDR 0x1000041
61
62
63/* The Fastlane DMA interface */
64struct fastlane_dma_registers {
65 volatile unsigned char cond_reg; /* DMA status (ro) [0x0000] */
66#define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */
67 unsigned char dmapad1[0x3f];
68 volatile unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */
69};
70
71
72/* DMA status bits */
73#define FASTLANE_DMA_MINT 0x80
74#define FASTLANE_DMA_IACT 0x40
75#define FASTLANE_DMA_CREQ 0x20
76
77/* DMA control bits */
78#define FASTLANE_DMA_FCODE 0xa0
79#define FASTLANE_DMA_MASK 0xf3
80#define FASTLANE_DMA_LED 0x10 /* HD led control 1 = on */
81#define FASTLANE_DMA_WRITE 0x08 /* 1 = write */
82#define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */
83#define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */
84#define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */
85
86static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
87static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
88static void dma_dump_state(struct NCR_ESP *esp);
89static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
90static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
91static void dma_ints_off(struct NCR_ESP *esp);
92static void dma_ints_on(struct NCR_ESP *esp);
93static int dma_irq_p(struct NCR_ESP *esp);
94static void dma_irq_exit(struct NCR_ESP *esp);
95static void dma_led_off(struct NCR_ESP *esp);
96static void dma_led_on(struct NCR_ESP *esp);
97static int dma_ports_p(struct NCR_ESP *esp);
98static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
99
100static unsigned char ctrl_data = 0; /* Keep a backup of the value written
101 * to ctrl_reg. Always update this
102 * copy when writing to the
103 * hardware register!
104 */
105
106static volatile unsigned char cmd_buffer[16];
107 /* This is where all commands are put
108 * before they are transferred to the ESP chip
109 * via PIO.
110 */
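
The ctrl_data shadow above exists because cond_reg and ctrl_reg share one address and the control register is write-only, so the current control value can never be read back from the hardware; every write goes through the software copy first, as dma_clear(), dma_led_on() and dma_led_off() below all do. A minimal sketch of that pattern, using a hypothetical helper name that is not part of the original driver:

	/* Hypothetical helper illustrating the shadow-register pattern:
	 * update the software copy, then push it to the write-only
	 * hardware register.
	 */
	static void fastlane_set_ctrl(struct fastlane_dma_registers *dregs,
				      unsigned char set, unsigned char clear)
	{
		ctrl_data = (ctrl_data & ~clear) | set;
		dregs->ctrl_reg = ctrl_data;
	}
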
111
112static inline void dma_clear(struct NCR_ESP *esp)
113{
114 struct fastlane_dma_registers *dregs =
115 (struct fastlane_dma_registers *) (esp->dregs);
116 unsigned long *t;
117
118 ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
119 dregs->ctrl_reg = ctrl_data;
120
121 t = (unsigned long *)(esp->edev);
122
123 dregs->clear_strobe = 0;
124 *t = 0;
125}
126
127/***************************************************************** Detection */
128int __init fastlane_esp_detect(struct scsi_host_template *tpnt)
129{
130 struct NCR_ESP *esp;
131 struct zorro_dev *z = NULL;
132 unsigned long address;
133
134 if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060, z))) {
135 unsigned long board = z->resource.start;
136 if (request_mem_region(board+FASTLANE_ESP_ADDR,
137 sizeof(struct ESP_regs), "NCR53C9x")) {
138 /* Check if this is really a Fastlane controller. The problem
139 * is that the CyberStorm and Blizzard controllers also use
140 * this ID value. Fortunately, only the Fastlane maps into Z3 space.
141 */
142 if (board < 0x1000000) {
143 goto err_release;
144 }
145 esp = esp_allocate(tpnt, (void *)board + FASTLANE_ESP_ADDR, 0);
146
147 /* Do command transfer with programmed I/O */
148 esp->do_pio_cmds = 1;
149
150 /* Required functions */
151 esp->dma_bytes_sent = &dma_bytes_sent;
152 esp->dma_can_transfer = &dma_can_transfer;
153 esp->dma_dump_state = &dma_dump_state;
154 esp->dma_init_read = &dma_init_read;
155 esp->dma_init_write = &dma_init_write;
156 esp->dma_ints_off = &dma_ints_off;
157 esp->dma_ints_on = &dma_ints_on;
158 esp->dma_irq_p = &dma_irq_p;
159 esp->dma_ports_p = &dma_ports_p;
160 esp->dma_setup = &dma_setup;
161
162 /* Optional functions */
163 esp->dma_barrier = 0;
164 esp->dma_drain = 0;
165 esp->dma_invalidate = 0;
166 esp->dma_irq_entry = 0;
167 esp->dma_irq_exit = &dma_irq_exit;
168 esp->dma_led_on = &dma_led_on;
169 esp->dma_led_off = &dma_led_off;
170 esp->dma_poll = 0;
171 esp->dma_reset = 0;
172
173 /* Initialize the portBits (enable IRQs) */
174 ctrl_data = (FASTLANE_DMA_FCODE |
175#ifndef NODMAIRQ
176 FASTLANE_DMA_EDI |
177#endif
178 FASTLANE_DMA_ESI);
179
180
181 /* SCSI chip clock */
182 esp->cfreq = 40000000;
183
184
185 /* Map the physical address space into virtual kernel space */
186 address = (unsigned long)
187 z_ioremap(board, z->resource.end-board+1);
188
189 if(!address){
190 printk("Could not remap Fastlane controller memory!");
191 goto err_unregister;
192 }
193
194
195 /* The DMA registers on the Fastlane are mapped
196 * relative to the device (i.e. in the same Zorro
197 * I/O block).
198 */
199 esp->dregs = (void *)(address + FASTLANE_DMA_ADDR);
200
201 /* ESP register base */
202 esp->eregs = (struct ESP_regs *)(address + FASTLANE_ESP_ADDR);
203
204 /* Board base */
205 esp->edev = (void *) address;
206
207 /* Set the command buffer */
208 esp->esp_command = cmd_buffer;
209 esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
210
211 esp->irq = IRQ_AMIGA_PORTS;
212 esp->slot = board+FASTLANE_ESP_ADDR;
213 if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
214 "Fastlane SCSI", esp->ehost)) {
215 printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
216 goto err_unmap;
217 }
218
219 /* Controller ID */
220 esp->scsi_id = 7;
221
222 /* We don't have a differential SCSI-bus. */
223 esp->diff = 0;
224
225 dma_clear(esp);
226 esp_initialize(esp);
227
228 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
229 esps_running = esps_in_use;
230 return esps_in_use;
231 }
232 }
233 return 0;
234
235 err_unmap:
236 z_iounmap((void *)address);
237 err_unregister:
238 scsi_unregister (esp->ehost);
239 err_release:
240 release_mem_region(z->resource.start+FASTLANE_ESP_ADDR,
241 sizeof(struct ESP_regs));
242 return 0;
243}
244
245
246/************************************************************* DMA Functions */
247static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
248{
249 /* Since the Fastlane DMA is fully dedicated to the ESP chip,
250 * the number of bytes sent (to the ESP chip) equals the number
251 * of bytes in the FIFO - there is no buffering in the DMA controller.
252 * XXXX Do I read this right? It is from host to ESP, right?
253 */
254 return fifo_count;
255}
256
257static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
258{
259 unsigned long sz = sp->SCp.this_residual;
260 if(sz > 0xfffc)
261 sz = 0xfffc;
262 return sz;
263}
264
265static void dma_dump_state(struct NCR_ESP *esp)
266{
267 ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
268 esp->esp_id, ((struct fastlane_dma_registers *)
269 (esp->dregs))->cond_reg));
270 ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
271 amiga_custom.intreqr, amiga_custom.intenar));
272}
273
274static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
275{
276 struct fastlane_dma_registers *dregs =
277 (struct fastlane_dma_registers *) (esp->dregs);
278 unsigned long *t;
279
280 cache_clear(addr, length);
281
282 dma_clear(esp);
283
284 t = (unsigned long *)((addr & 0x00ffffff) + esp->edev);
285
286 dregs->clear_strobe = 0;
287 *t = addr;
288
289 ctrl_data = (ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE;
290 dregs->ctrl_reg = ctrl_data;
291}
292
293static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
294{
295 struct fastlane_dma_registers *dregs =
296 (struct fastlane_dma_registers *) (esp->dregs);
297 unsigned long *t;
298
299 cache_push(addr, length);
300
301 dma_clear(esp);
302
303 t = (unsigned long *)((addr & 0x00ffffff) + (esp->edev));
304
305 dregs->clear_strobe = 0;
306 *t = addr;
307
308 ctrl_data = ((ctrl_data & FASTLANE_DMA_MASK) |
309 FASTLANE_DMA_ENABLE |
310 FASTLANE_DMA_WRITE);
311 dregs->ctrl_reg = ctrl_data;
312}
313
314
315static void dma_ints_off(struct NCR_ESP *esp)
316{
317 disable_irq(esp->irq);
318}
319
320static void dma_ints_on(struct NCR_ESP *esp)
321{
322 enable_irq(esp->irq);
323}
324
325static void dma_irq_exit(struct NCR_ESP *esp)
326{
327 struct fastlane_dma_registers *dregs =
328 (struct fastlane_dma_registers *) (esp->dregs);
329
330 dregs->ctrl_reg = ctrl_data & ~(FASTLANE_DMA_EDI|FASTLANE_DMA_ESI);
331#ifdef __mc68000__
332 nop();
333#endif
334 dregs->ctrl_reg = ctrl_data;
335}
336
337static int dma_irq_p(struct NCR_ESP *esp)
338{
339 struct fastlane_dma_registers *dregs =
340 (struct fastlane_dma_registers *) (esp->dregs);
341 unsigned char dma_status;
342
343 dma_status = dregs->cond_reg;
344
345 if(dma_status & FASTLANE_DMA_IACT)
346 return 0; /* not our IRQ */
347
348 /* Return non-zero if ESP requested IRQ */
349 return (
350#ifndef NODMAIRQ
351 (dma_status & FASTLANE_DMA_CREQ) &&
352#endif
353 (!(dma_status & FASTLANE_DMA_MINT)) &&
354 (esp_read(((struct ESP_regs *) (esp->eregs))->esp_status) & ESP_STAT_INTR));
355}
356
357static void dma_led_off(struct NCR_ESP *esp)
358{
359 ctrl_data &= ~FASTLANE_DMA_LED;
360 ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
361}
362
363static void dma_led_on(struct NCR_ESP *esp)
364{
365 ctrl_data |= FASTLANE_DMA_LED;
366 ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
367}
368
369static int dma_ports_p(struct NCR_ESP *esp)
370{
371 return ((amiga_custom.intenar) & IF_PORTS);
372}
373
374static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
375{
376 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
377 * so when (write) is true, it actually means READ!
378 */
379 if(write){
380 dma_init_read(esp, addr, count);
381 } else {
382 dma_init_write(esp, addr, count);
383 }
384}
385
386#define HOSTS_C
387
388int fastlane_esp_release(struct Scsi_Host *instance)
389{
390#ifdef MODULE
391 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
392 esp_deallocate((struct NCR_ESP *)instance->hostdata);
393 esp_release();
394 release_mem_region(address, sizeof(struct ESP_regs));
395 free_irq(IRQ_AMIGA_PORTS, esp_intr);
396#endif
397 return 1;
398}
399
400
401static struct scsi_host_template driver_template = {
402 .proc_name = "esp-fastlane",
403 .proc_info = esp_proc_info,
404 .name = "Fastlane SCSI",
405 .detect = fastlane_esp_detect,
406 .slave_alloc = esp_slave_alloc,
407 .slave_destroy = esp_slave_destroy,
408 .release = fastlane_esp_release,
409 .queuecommand = esp_queue,
410 .eh_abort_handler = esp_abort,
411 .eh_bus_reset_handler = esp_reset,
412 .can_queue = 7,
413 .this_id = 7,
414 .sg_tablesize = SG_ALL,
415 .cmd_per_lun = 1,
416 .use_clustering = ENABLE_CLUSTERING
417};
418
419#include "scsi_module.c"
420
421MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b6f99dfbb038..8a178674cb18 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -629,8 +629,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
629 int rc; 629 int rc;
630 630
631 if (tcp_conn->in.datalen) { 631 if (tcp_conn->in.datalen) {
632 printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n", 632 iscsi_conn_printk(KERN_ERR, conn,
633 tcp_conn->in.datalen); 633 "invalid R2t with datalen %d\n",
634 tcp_conn->in.datalen);
634 return ISCSI_ERR_DATALEN; 635 return ISCSI_ERR_DATALEN;
635 } 636 }
636 637
@@ -644,8 +645,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
644 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr); 645 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
645 646
646 if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) { 647 if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
647 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in " 648 iscsi_conn_printk(KERN_INFO, conn,
648 "recovery...\n", ctask->itt); 649 "dropping R2T itt %d in recovery.\n",
650 ctask->itt);
649 return 0; 651 return 0;
650 } 652 }
651 653
@@ -655,7 +657,8 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
655 r2t->exp_statsn = rhdr->statsn; 657 r2t->exp_statsn = rhdr->statsn;
656 r2t->data_length = be32_to_cpu(rhdr->data_length); 658 r2t->data_length = be32_to_cpu(rhdr->data_length);
657 if (r2t->data_length == 0) { 659 if (r2t->data_length == 0) {
658 printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n"); 660 iscsi_conn_printk(KERN_ERR, conn,
661 "invalid R2T with zero data len\n");
659 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 662 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
660 sizeof(void*)); 663 sizeof(void*));
661 return ISCSI_ERR_DATALEN; 664 return ISCSI_ERR_DATALEN;
@@ -668,9 +671,10 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
668 671
669 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 672 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
670 if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) { 673 if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
671 printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at " 674 iscsi_conn_printk(KERN_ERR, conn,
672 "offset %u and total length %d\n", r2t->data_length, 675 "invalid R2T with data len %u at offset %u "
673 r2t->data_offset, scsi_bufflen(ctask->sc)); 676 "and total length %d\n", r2t->data_length,
677 r2t->data_offset, scsi_bufflen(ctask->sc));
674 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t, 678 __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
675 sizeof(void*)); 679 sizeof(void*));
676 return ISCSI_ERR_DATALEN; 680 return ISCSI_ERR_DATALEN;
@@ -736,8 +740,9 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
736 /* verify PDU length */ 740 /* verify PDU length */
737 tcp_conn->in.datalen = ntoh24(hdr->dlength); 741 tcp_conn->in.datalen = ntoh24(hdr->dlength);
738 if (tcp_conn->in.datalen > conn->max_recv_dlength) { 742 if (tcp_conn->in.datalen > conn->max_recv_dlength) {
739 printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n", 743 iscsi_conn_printk(KERN_ERR, conn,
740 tcp_conn->in.datalen, conn->max_recv_dlength); 744 "iscsi_tcp: datalen %d > %d\n",
745 tcp_conn->in.datalen, conn->max_recv_dlength);
741 return ISCSI_ERR_DATALEN; 746 return ISCSI_ERR_DATALEN;
742 } 747 }
743 748
@@ -819,10 +824,12 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
819 * For now we fail until we find a vendor that needs it 824 * For now we fail until we find a vendor that needs it
820 */ 825 */
821 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) { 826 if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
822 printk(KERN_ERR "iscsi_tcp: received buffer of len %u " 827 iscsi_conn_printk(KERN_ERR, conn,
823 "but conn buffer is only %u (opcode %0x)\n", 828 "iscsi_tcp: received buffer of "
824 tcp_conn->in.datalen, 829 "len %u but conn buffer is only %u "
825 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode); 830 "(opcode %0x)\n",
831 tcp_conn->in.datalen,
832 ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
826 rc = ISCSI_ERR_PROTO; 833 rc = ISCSI_ERR_PROTO;
827 break; 834 break;
828 } 835 }
@@ -1496,30 +1503,25 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1496 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1503 tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1497 CRYPTO_ALG_ASYNC); 1504 CRYPTO_ALG_ASYNC);
1498 tcp_conn->tx_hash.flags = 0; 1505 tcp_conn->tx_hash.flags = 0;
1499 if (IS_ERR(tcp_conn->tx_hash.tfm)) { 1506 if (IS_ERR(tcp_conn->tx_hash.tfm))
1500 printk(KERN_ERR "Could not create connection due to crc32c "
1501 "loading error %ld. Make sure the crc32c module is "
1502 "built as a module or into the kernel\n",
1503 PTR_ERR(tcp_conn->tx_hash.tfm));
1504 goto free_tcp_conn; 1507 goto free_tcp_conn;
1505 }
1506 1508
1507 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, 1509 tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
1508 CRYPTO_ALG_ASYNC); 1510 CRYPTO_ALG_ASYNC);
1509 tcp_conn->rx_hash.flags = 0; 1511 tcp_conn->rx_hash.flags = 0;
1510 if (IS_ERR(tcp_conn->rx_hash.tfm)) { 1512 if (IS_ERR(tcp_conn->rx_hash.tfm))
1511 printk(KERN_ERR "Could not create connection due to crc32c "
1512 "loading error %ld. Make sure the crc32c module is "
1513 "built as a module or into the kernel\n",
1514 PTR_ERR(tcp_conn->rx_hash.tfm));
1515 goto free_tx_tfm; 1513 goto free_tx_tfm;
1516 }
1517 1514
1518 return cls_conn; 1515 return cls_conn;
1519 1516
1520free_tx_tfm: 1517free_tx_tfm:
1521 crypto_free_hash(tcp_conn->tx_hash.tfm); 1518 crypto_free_hash(tcp_conn->tx_hash.tfm);
1522free_tcp_conn: 1519free_tcp_conn:
1520 iscsi_conn_printk(KERN_ERR, conn,
1521 "Could not create connection due to crc32c "
1522 "loading error. Make sure the crc32c "
1523 "module is built as a module or into the "
1524 "kernel\n");
1523 kfree(tcp_conn); 1525 kfree(tcp_conn);
1524tcp_conn_alloc_fail: 1526tcp_conn_alloc_fail:
1525 iscsi_conn_teardown(cls_conn); 1527 iscsi_conn_teardown(cls_conn);
@@ -1627,7 +1629,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1627 /* lookup for existing socket */ 1629 /* lookup for existing socket */
1628 sock = sockfd_lookup((int)transport_eph, &err); 1630 sock = sockfd_lookup((int)transport_eph, &err);
1629 if (!sock) { 1631 if (!sock) {
1630 printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err); 1632 iscsi_conn_printk(KERN_ERR, conn,
1633 "sockfd_lookup failed %d\n", err);
1631 return -EEXIST; 1634 return -EEXIST;
1632 } 1635 }
1633 /* 1636 /*
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 553168ae44f1..59f8445eab0d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -160,7 +160,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
160 hdr->opcode = ISCSI_OP_SCSI_CMD; 160 hdr->opcode = ISCSI_OP_SCSI_CMD;
161 hdr->flags = ISCSI_ATTR_SIMPLE; 161 hdr->flags = ISCSI_ATTR_SIMPLE;
162 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 162 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
163 hdr->itt = build_itt(ctask->itt, conn->id, session->age); 163 hdr->itt = build_itt(ctask->itt, session->age);
164 hdr->data_length = cpu_to_be32(scsi_bufflen(sc)); 164 hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
165 hdr->cmdsn = cpu_to_be32(session->cmdsn); 165 hdr->cmdsn = cpu_to_be32(session->cmdsn);
166 session->cmdsn++; 166 session->cmdsn++;
@@ -416,8 +416,9 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
416 416
417 if (datalen < 2) { 417 if (datalen < 2) {
418invalid_datalen: 418invalid_datalen:
419 printk(KERN_ERR "iscsi: Got CHECK_CONDITION but " 419 iscsi_conn_printk(KERN_ERR, conn,
420 "invalid data buffer size of %d\n", datalen); 420 "Got CHECK_CONDITION but invalid data "
421 "buffer size of %d\n", datalen);
421 sc->result = DID_BAD_TARGET << 16; 422 sc->result = DID_BAD_TARGET << 16;
422 goto out; 423 goto out;
423 } 424 }
@@ -494,7 +495,7 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
494 495
495 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 496 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
496 if (!mtask) { 497 if (!mtask) {
497 printk(KERN_ERR "Could not send nopout\n"); 498 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
498 return; 499 return;
499 } 500 }
500 501
@@ -522,9 +523,10 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
522 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { 523 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
523 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); 524 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
524 itt = get_itt(rejected_pdu.itt); 525 itt = get_itt(rejected_pdu.itt);
525 printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected " 526 iscsi_conn_printk(KERN_ERR, conn,
526 "due to DataDigest error.\n", itt, 527 "itt 0x%x had pdu (op 0x%x) rejected "
527 rejected_pdu.opcode); 528 "due to DataDigest error.\n", itt,
529 rejected_pdu.opcode);
528 } 530 }
529 } 531 }
530 return 0; 532 return 0;
@@ -541,8 +543,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
541 * queuecommand or send generic. session lock must be held and verify 543 * queuecommand or send generic. session lock must be held and verify
542 * itt must have been called. 544 * itt must have been called.
543 */ 545 */
544int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 546static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
545 char *data, int datalen) 547 char *data, int datalen)
546{ 548{
547 struct iscsi_session *session = conn->session; 549 struct iscsi_session *session = conn->session;
548 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0; 550 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
@@ -672,7 +674,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
672 674
673 return rc; 675 return rc;
674} 676}
675EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
676 677
677int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 678int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
678 char *data, int datalen) 679 char *data, int datalen)
@@ -697,18 +698,13 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
697 if (hdr->itt != RESERVED_ITT) { 698 if (hdr->itt != RESERVED_ITT) {
698 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) != 699 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
699 (session->age << ISCSI_AGE_SHIFT)) { 700 (session->age << ISCSI_AGE_SHIFT)) {
700 printk(KERN_ERR "iscsi: received itt %x expected " 701 iscsi_conn_printk(KERN_ERR, conn,
701 "session age (%x)\n", (__force u32)hdr->itt, 702 "received itt %x expected session "
702 session->age & ISCSI_AGE_MASK); 703 "age (%x)\n", (__force u32)hdr->itt,
704 session->age & ISCSI_AGE_MASK);
703 return ISCSI_ERR_BAD_ITT; 705 return ISCSI_ERR_BAD_ITT;
704 } 706 }
705 707
706 if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
707 (conn->id << ISCSI_CID_SHIFT)) {
708 printk(KERN_ERR "iscsi: received itt %x, expected "
709 "CID (%x)\n", (__force u32)hdr->itt, conn->id);
710 return ISCSI_ERR_BAD_ITT;
711 }
712 itt = get_itt(hdr->itt); 708 itt = get_itt(hdr->itt);
713 } else 709 } else
714 itt = ~0U; 710 itt = ~0U;
@@ -717,16 +713,17 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
717 ctask = session->cmds[itt]; 713 ctask = session->cmds[itt];
718 714
719 if (!ctask->sc) { 715 if (!ctask->sc) {
720 printk(KERN_INFO "iscsi: dropping ctask with " 716 iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
721 "itt 0x%x\n", ctask->itt); 717 "with itt 0x%x\n", ctask->itt);
722 /* force drop */ 718 /* force drop */
723 return ISCSI_ERR_NO_SCSI_CMD; 719 return ISCSI_ERR_NO_SCSI_CMD;
724 } 720 }
725 721
726 if (ctask->sc->SCp.phase != session->age) { 722 if (ctask->sc->SCp.phase != session->age) {
727 printk(KERN_ERR "iscsi: ctask's session age %d, " 723 iscsi_conn_printk(KERN_ERR, conn,
728 "expected %d\n", ctask->sc->SCp.phase, 724 "iscsi: ctask's session age %d, "
729 session->age); 725 "expected %d\n", ctask->sc->SCp.phase,
726 session->age);
730 return ISCSI_ERR_SESSION_FAILED; 727 return ISCSI_ERR_SESSION_FAILED;
731 } 728 }
732 } 729 }
@@ -771,7 +768,7 @@ static void iscsi_prep_mtask(struct iscsi_conn *conn,
771 */ 768 */
772 nop->cmdsn = cpu_to_be32(session->cmdsn); 769 nop->cmdsn = cpu_to_be32(session->cmdsn);
773 if (hdr->itt != RESERVED_ITT) { 770 if (hdr->itt != RESERVED_ITT) {
774 hdr->itt = build_itt(mtask->itt, conn->id, session->age); 771 hdr->itt = build_itt(mtask->itt, session->age);
775 /* 772 /*
776 * TODO: We always use immediate, so we never hit this. 773 * TODO: We always use immediate, so we never hit this.
777 * If we start to send tmfs or nops as non-immediate then 774 * If we start to send tmfs or nops as non-immediate then
@@ -997,6 +994,7 @@ enum {
997 FAILURE_SESSION_IN_RECOVERY, 994 FAILURE_SESSION_IN_RECOVERY,
998 FAILURE_SESSION_RECOVERY_TIMEOUT, 995 FAILURE_SESSION_RECOVERY_TIMEOUT,
999 FAILURE_SESSION_LOGGING_OUT, 996 FAILURE_SESSION_LOGGING_OUT,
997 FAILURE_SESSION_NOT_READY,
1000}; 998};
1001 999
1002int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) 1000int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
@@ -1017,6 +1015,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1017 session = iscsi_hostdata(host->hostdata); 1015 session = iscsi_hostdata(host->hostdata);
1018 spin_lock(&session->lock); 1016 spin_lock(&session->lock);
1019 1017
1018 reason = iscsi_session_chkready(session_to_cls(session));
1019 if (reason) {
1020 sc->result = reason;
1021 goto fault;
1022 }
1023
1020 /* 1024 /*
1021 * ISCSI_STATE_FAILED is a temp. state. The recovery 1025 * ISCSI_STATE_FAILED is a temp. state. The recovery
1022 * code will decide what is best to do with command queued 1026 * code will decide what is best to do with command queued
@@ -1033,18 +1037,23 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1033 switch (session->state) { 1037 switch (session->state) {
1034 case ISCSI_STATE_IN_RECOVERY: 1038 case ISCSI_STATE_IN_RECOVERY:
1035 reason = FAILURE_SESSION_IN_RECOVERY; 1039 reason = FAILURE_SESSION_IN_RECOVERY;
1036 goto reject; 1040 sc->result = DID_IMM_RETRY << 16;
1041 break;
1037 case ISCSI_STATE_LOGGING_OUT: 1042 case ISCSI_STATE_LOGGING_OUT:
1038 reason = FAILURE_SESSION_LOGGING_OUT; 1043 reason = FAILURE_SESSION_LOGGING_OUT;
1039 goto reject; 1044 sc->result = DID_IMM_RETRY << 16;
1045 break;
1040 case ISCSI_STATE_RECOVERY_FAILED: 1046 case ISCSI_STATE_RECOVERY_FAILED:
1041 reason = FAILURE_SESSION_RECOVERY_TIMEOUT; 1047 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1048 sc->result = DID_NO_CONNECT << 16;
1042 break; 1049 break;
1043 case ISCSI_STATE_TERMINATE: 1050 case ISCSI_STATE_TERMINATE:
1044 reason = FAILURE_SESSION_TERMINATE; 1051 reason = FAILURE_SESSION_TERMINATE;
1052 sc->result = DID_NO_CONNECT << 16;
1045 break; 1053 break;
1046 default: 1054 default:
1047 reason = FAILURE_SESSION_FREED; 1055 reason = FAILURE_SESSION_FREED;
1056 sc->result = DID_NO_CONNECT << 16;
1048 } 1057 }
1049 goto fault; 1058 goto fault;
1050 } 1059 }
@@ -1052,6 +1061,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1052 conn = session->leadconn; 1061 conn = session->leadconn;
1053 if (!conn) { 1062 if (!conn) {
1054 reason = FAILURE_SESSION_FREED; 1063 reason = FAILURE_SESSION_FREED;
1064 sc->result = DID_NO_CONNECT << 16;
1055 goto fault; 1065 goto fault;
1056 } 1066 }
1057 1067
@@ -1091,9 +1101,7 @@ reject:
1091 1101
1092fault: 1102fault:
1093 spin_unlock(&session->lock); 1103 spin_unlock(&session->lock);
1094 printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n", 1104 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
1095 sc->cmnd[0], reason);
1096 sc->result = (DID_NO_CONNECT << 16);
1097 scsi_set_resid(sc, scsi_bufflen(sc)); 1105 scsi_set_resid(sc, scsi_bufflen(sc));
1098 sc->scsi_done(sc); 1106 sc->scsi_done(sc);
1099 spin_lock(host->host_lock); 1107 spin_lock(host->host_lock);
@@ -1160,7 +1168,8 @@ failed:
1160 mutex_lock(&session->eh_mutex); 1168 mutex_lock(&session->eh_mutex);
1161 spin_lock_bh(&session->lock); 1169 spin_lock_bh(&session->lock);
1162 if (session->state == ISCSI_STATE_LOGGED_IN) 1170 if (session->state == ISCSI_STATE_LOGGED_IN)
1163 printk(KERN_INFO "iscsi: host reset succeeded\n"); 1171 iscsi_session_printk(KERN_INFO, session,
1172 "host reset succeeded\n");
1164 else 1173 else
1165 goto failed; 1174 goto failed;
1166 spin_unlock_bh(&session->lock); 1175 spin_unlock_bh(&session->lock);
@@ -1239,7 +1248,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1239 * Fail commands. session lock held and recv side suspended and xmit 1248 * Fail commands. session lock held and recv side suspended and xmit
1240 * thread flushed 1249 * thread flushed
1241 */ 1250 */
1242static void fail_all_commands(struct iscsi_conn *conn, unsigned lun) 1251static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1252 int error)
1243{ 1253{
1244 struct iscsi_cmd_task *ctask, *tmp; 1254 struct iscsi_cmd_task *ctask, *tmp;
1245 1255
@@ -1251,7 +1261,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
1251 if (lun == ctask->sc->device->lun || lun == -1) { 1261 if (lun == ctask->sc->device->lun || lun == -1) {
1252 debug_scsi("failing pending sc %p itt 0x%x\n", 1262 debug_scsi("failing pending sc %p itt 0x%x\n",
1253 ctask->sc, ctask->itt); 1263 ctask->sc, ctask->itt);
1254 fail_command(conn, ctask, DID_BUS_BUSY << 16); 1264 fail_command(conn, ctask, error << 16);
1255 } 1265 }
1256 } 1266 }
1257 1267
@@ -1259,7 +1269,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
1259 if (lun == ctask->sc->device->lun || lun == -1) { 1269 if (lun == ctask->sc->device->lun || lun == -1) {
1260 debug_scsi("failing requeued sc %p itt 0x%x\n", 1270 debug_scsi("failing requeued sc %p itt 0x%x\n",
1261 ctask->sc, ctask->itt); 1271 ctask->sc, ctask->itt);
1262 fail_command(conn, ctask, DID_BUS_BUSY << 16); 1272 fail_command(conn, ctask, error << 16);
1263 } 1273 }
1264 } 1274 }
1265 1275
@@ -1357,10 +1367,10 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1357 last_recv = conn->last_recv; 1367 last_recv = conn->last_recv;
1358 if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ), 1368 if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ),
1359 jiffies)) { 1369 jiffies)) {
1360 printk(KERN_ERR "ping timeout of %d secs expired, " 1370 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1361 "last rx %lu, last ping %lu, now %lu\n", 1371 "expired, last rx %lu, last ping %lu, "
1362 conn->ping_timeout, last_recv, 1372 "now %lu\n", conn->ping_timeout, last_recv,
1363 conn->last_ping, jiffies); 1373 conn->last_ping, jiffies);
1364 spin_unlock(&session->lock); 1374 spin_unlock(&session->lock);
1365 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1375 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1366 return; 1376 return;
@@ -1373,14 +1383,11 @@ static void iscsi_check_transport_timeouts(unsigned long data)
1373 iscsi_send_nopout(conn, NULL); 1383 iscsi_send_nopout(conn, NULL);
1374 } 1384 }
1375 next_timeout = last_recv + timeout + (conn->ping_timeout * HZ); 1385 next_timeout = last_recv + timeout + (conn->ping_timeout * HZ);
1376 } else { 1386 } else
1377 next_timeout = last_recv + timeout; 1387 next_timeout = last_recv + timeout;
1378 }
1379 1388
1380 if (next_timeout) { 1389 debug_scsi("Setting next tmo %lu\n", next_timeout);
1381 debug_scsi("Setting next tmo %lu\n", next_timeout); 1390 mod_timer(&conn->transport_timer, next_timeout);
1382 mod_timer(&conn->transport_timer, next_timeout);
1383 }
1384done: 1391done:
1385 spin_unlock(&session->lock); 1392 spin_unlock(&session->lock);
1386} 1393}
@@ -1573,7 +1580,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1573 /* need to grab the recv lock then session lock */ 1580 /* need to grab the recv lock then session lock */
1574 write_lock_bh(conn->recv_lock); 1581 write_lock_bh(conn->recv_lock);
1575 spin_lock(&session->lock); 1582 spin_lock(&session->lock);
1576 fail_all_commands(conn, sc->device->lun); 1583 fail_all_commands(conn, sc->device->lun, DID_ERROR);
1577 conn->tmf_state = TMF_INITIAL; 1584 conn->tmf_state = TMF_INITIAL;
1578 spin_unlock(&session->lock); 1585 spin_unlock(&session->lock);
1579 write_unlock_bh(conn->recv_lock); 1586 write_unlock_bh(conn->recv_lock);
@@ -1944,9 +1951,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1944 } 1951 }
1945 spin_unlock_irqrestore(session->host->host_lock, flags); 1952 spin_unlock_irqrestore(session->host->host_lock, flags);
1946 msleep_interruptible(500); 1953 msleep_interruptible(500);
1947 printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d " 1954 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
1948 "host_failed %d\n", session->host->host_busy, 1955 "host_busy %d host_failed %d\n",
1949 session->host->host_failed); 1956 session->host->host_busy,
1957 session->host->host_failed);
1950 /* 1958 /*
1951 * force eh_abort() to unblock 1959 * force eh_abort() to unblock
1952 */ 1960 */
@@ -1975,27 +1983,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
1975 struct iscsi_session *session = conn->session; 1983 struct iscsi_session *session = conn->session;
1976 1984
1977 if (!session) { 1985 if (!session) {
1978 printk(KERN_ERR "iscsi: can't start unbound connection\n"); 1986 iscsi_conn_printk(KERN_ERR, conn,
1987 "can't start unbound connection\n");
1979 return -EPERM; 1988 return -EPERM;
1980 } 1989 }
1981 1990
1982 if ((session->imm_data_en || !session->initial_r2t_en) && 1991 if ((session->imm_data_en || !session->initial_r2t_en) &&
1983 session->first_burst > session->max_burst) { 1992 session->first_burst > session->max_burst) {
1984 printk("iscsi: invalid burst lengths: " 1993 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
1985 "first_burst %d max_burst %d\n", 1994 "first_burst %d max_burst %d\n",
1986 session->first_burst, session->max_burst); 1995 session->first_burst, session->max_burst);
1987 return -EINVAL; 1996 return -EINVAL;
1988 } 1997 }
1989 1998
1990 if (conn->ping_timeout && !conn->recv_timeout) { 1999 if (conn->ping_timeout && !conn->recv_timeout) {
1991 printk(KERN_ERR "iscsi: invalid recv timeout of zero " 2000 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
1992 "Using 5 seconds\n."); 2001 "zero. Using 5 seconds\n.");
1993 conn->recv_timeout = 5; 2002 conn->recv_timeout = 5;
1994 } 2003 }
1995 2004
1996 if (conn->recv_timeout && !conn->ping_timeout) { 2005 if (conn->recv_timeout && !conn->ping_timeout) {
1997 printk(KERN_ERR "iscsi: invalid ping timeout of zero " 2006 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
1998 "Using 5 seconds.\n"); 2007 "zero. Using 5 seconds.\n");
1999 conn->ping_timeout = 5; 2008 conn->ping_timeout = 5;
2000 } 2009 }
2001 2010
@@ -2019,11 +2028,9 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2019 conn->stop_stage = 0; 2028 conn->stop_stage = 0;
2020 conn->tmf_state = TMF_INITIAL; 2029 conn->tmf_state = TMF_INITIAL;
2021 session->age++; 2030 session->age++;
2022 spin_unlock_bh(&session->lock); 2031 if (session->age == 16)
2023 2032 session->age = 0;
2024 iscsi_unblock_session(session_to_cls(session)); 2033 break;
2025 wake_up(&conn->ehwait);
2026 return 0;
2027 case STOP_CONN_TERM: 2034 case STOP_CONN_TERM:
2028 conn->stop_stage = 0; 2035 conn->stop_stage = 0;
2029 break; 2036 break;
@@ -2032,6 +2039,8 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2032 } 2039 }
2033 spin_unlock_bh(&session->lock); 2040 spin_unlock_bh(&session->lock);
2034 2041
2042 iscsi_unblock_session(session_to_cls(session));
2043 wake_up(&conn->ehwait);
2035 return 0; 2044 return 0;
2036} 2045}
2037EXPORT_SYMBOL_GPL(iscsi_conn_start); 2046EXPORT_SYMBOL_GPL(iscsi_conn_start);
@@ -2123,7 +2132,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
2123 * flush queues. 2132 * flush queues.
2124 */ 2133 */
2125 spin_lock_bh(&session->lock); 2134 spin_lock_bh(&session->lock);
2126 fail_all_commands(conn, -1); 2135 fail_all_commands(conn, -1,
2136 STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
2127 flush_control_queues(session, conn); 2137 flush_control_queues(session, conn);
2128 spin_unlock_bh(&session->lock); 2138 spin_unlock_bh(&session->lock);
2129 mutex_unlock(&session->eh_mutex); 2139 mutex_unlock(&session->eh_mutex);
@@ -2140,7 +2150,8 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
2140 iscsi_start_session_recovery(session, conn, flag); 2150 iscsi_start_session_recovery(session, conn, flag);
2141 break; 2151 break;
2142 default: 2152 default:
2143 printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag); 2153 iscsi_conn_printk(KERN_ERR, conn,
2154 "invalid stop flag %d\n", flag);
2144 } 2155 }
2145} 2156}
2146EXPORT_SYMBOL_GPL(iscsi_conn_stop); 2157EXPORT_SYMBOL_GPL(iscsi_conn_stop);
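
Most of the iscsi_tcp.c and libiscsi.c hunks above swap bare printk() calls for iscsi_conn_printk() (and iscsi_session_printk()), so each message is emitted against the connection's or session's transport-class device and identifies which iSCSI object it concerns. Conceptually it is a dev_printk()-style wrapper along the following lines; this is an illustrative sketch only, and the cls_conn/dev field names are assumptions rather than the verbatim definition:

	/* Illustrative sketch only -- see include/scsi/scsi_transport_iscsi.h
	 * for the real definition; the field names used here are assumed.
	 */
	#define iscsi_conn_printk(prefix, conn, fmt, args...)			\
		dev_printk(prefix, &(conn)->cls_conn->dev, fmt, ##args)
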
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
deleted file mode 100644
index bcb49021b7e2..000000000000
--- a/drivers/scsi/mac_esp.c
+++ /dev/null
@@ -1,751 +0,0 @@
1/*
2 * 68k mac 53c9[46] scsi driver
3 *
4 * copyright (c) 1998, David Weis weisd3458@uni.edu
5 *
6 * debugging on Quadra 800 and 660AV Michael Schmitz, Dave Kilzer 7/98
7 *
8 * based loosely on cyber_esp.c
9 */
10
11/* these are unused for now */
12#define myreadl(addr) (*(volatile unsigned int *) (addr))
13#define mywritel(b, addr) ((*(volatile unsigned int *) (addr)) = (b))
14
15
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/types.h>
19#include <linux/ctype.h>
20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/blkdev.h>
23#include <linux/proc_fs.h>
24#include <linux/stat.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27
28#include "scsi.h"
29#include <scsi/scsi_host.h>
30#include "NCR53C9x.h"
31
32#include <asm/io.h>
33
34#include <asm/setup.h>
35#include <asm/irq.h>
36#include <asm/macints.h>
37#include <asm/machw.h>
38#include <asm/mac_via.h>
39
40#include <asm/pgtable.h>
41
42#include <asm/macintosh.h>
43
44/* #define DEBUG_MAC_ESP */
45
46extern void esp_handle(struct NCR_ESP *esp);
47extern void mac_esp_intr(int irq, void *dev_id);
48
49static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count);
50static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd *sp);
51static void dma_dump_state(struct NCR_ESP * esp);
52static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length);
53static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length);
54static void dma_ints_off(struct NCR_ESP * esp);
55static void dma_ints_on(struct NCR_ESP * esp);
56static int dma_irq_p(struct NCR_ESP * esp);
57static int dma_irq_p_quick(struct NCR_ESP * esp);
58static void dma_led_off(struct NCR_ESP * esp);
59static void dma_led_on(struct NCR_ESP *esp);
60static int dma_ports_p(struct NCR_ESP *esp);
61static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write);
62static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write);
63
64static int esp_dafb_dma_irq_p(struct NCR_ESP * espdev);
65static int esp_iosb_dma_irq_p(struct NCR_ESP * espdev);
66
67static volatile unsigned char cmd_buffer[16];
68 /* This is where all commands are put
69 * before they are transferred to the ESP chip
70 * via PIO.
71 */
72
73static int esp_initialized = 0;
74
75static int setup_num_esps = -1;
76static int setup_disconnect = -1;
77static int setup_nosync = -1;
78static int setup_can_queue = -1;
79static int setup_cmd_per_lun = -1;
80static int setup_sg_tablesize = -1;
81#ifdef SUPPORT_TAGS
82static int setup_use_tagged_queuing = -1;
83#endif
84static int setup_hostid = -1;
85
86/*
87 * Experimental ESP inthandler; check macints.c to make sure dev_id is
88 * set up properly!
89 */
90
91void mac_esp_intr(int irq, void *dev_id)
92{
93 struct NCR_ESP *esp = (struct NCR_ESP *) dev_id;
94 int irq_p = 0;
95
96 /* Handle the one ESP interrupt showing at this IRQ level. */
97 if(((esp)->irq & 0xff) == irq) {
98 /*
99 * Debug ..
100 */
101 irq_p = esp->dma_irq_p(esp);
102 printk("mac_esp: irq_p %x current %p disconnected %p\n",
103 irq_p, esp->current_SC, esp->disconnected_SC);
104
105 /*
106 * Mac: if we're here, it's an ESP interrupt for sure!
107 */
108 if((esp->current_SC || esp->disconnected_SC)) {
109 esp->dma_ints_off(esp);
110
111 ESPIRQ(("I%d(", esp->esp_id));
112 esp_handle(esp);
113 ESPIRQ((")"));
114
115 esp->dma_ints_on(esp);
116 }
117 }
118}
119
120/*
121 * Debug hooks; use for playing with the interrupt flag testing and interrupt
122 * acknowledge on the various machines
123 */
124
125void scsi_esp_polled(int irq, void *dev_id)
126{
127 if (esp_initialized == 0)
128 return;
129
130 mac_esp_intr(irq, dev_id);
131}
132
133void fake_intr(int irq, void *dev_id)
134{
135#ifdef DEBUG_MAC_ESP
136 printk("mac_esp: got irq\n");
137#endif
138
139 mac_esp_intr(irq, dev_id);
140}
141
142irqreturn_t fake_drq(int irq, void *dev_id)
143{
144 printk("mac_esp: got drq\n");
145 return IRQ_HANDLED;
146}
147
148#define DRIVER_SETUP
149
150/*
151 * Function : mac_esp_setup(char *str)
152 *
153 * Purpose : booter command line initialization of the overrides array,
154 *
155 * Inputs : str - parameters, separated by commas.
156 *
157 * Currently unused in the new driver; need to add settable parameters to the
158 * detect function.
159 *
160 */
161
162static int __init mac_esp_setup(char *str) {
163#ifdef DRIVER_SETUP
164 /* Format of mac53c9x parameter is:
165 * mac53c9x=<num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
166 * Negative values mean don't change.
167 */
168
169 char *this_opt;
170 long opt;
171
172 this_opt = strsep (&str, ",");
173 if(this_opt) {
174 opt = simple_strtol( this_opt, NULL, 0 );
175
176 if (opt >= 0 && opt <= 2)
177 setup_num_esps = opt;
178 else if (opt > 2)
179 printk( "mac_esp_setup: invalid number of hosts %ld !\n", opt );
180
181 this_opt = strsep (&str, ",");
182 }
183 if(this_opt) {
184 opt = simple_strtol( this_opt, NULL, 0 );
185
186 if (opt > 0)
187 setup_disconnect = opt;
188
189 this_opt = strsep (&str, ",");
190 }
191 if(this_opt) {
192 opt = simple_strtol( this_opt, NULL, 0 );
193
194 if (opt >= 0)
195 setup_nosync = opt;
196
197 this_opt = strsep (&str, ",");
198 }
199 if(this_opt) {
200 opt = simple_strtol( this_opt, NULL, 0 );
201
202 if (opt > 0)
203 setup_can_queue = opt;
204
205 this_opt = strsep (&str, ",");
206 }
207 if(this_opt) {
208 opt = simple_strtol( this_opt, NULL, 0 );
209
210 if (opt > 0)
211 setup_cmd_per_lun = opt;
212
213 this_opt = strsep (&str, ",");
214 }
215 if(this_opt) {
216 opt = simple_strtol( this_opt, NULL, 0 );
217
218 if (opt >= 0) {
219 setup_sg_tablesize = opt;
220 /* Must be <= SG_ALL (255) */
221 if (setup_sg_tablesize > SG_ALL)
222 setup_sg_tablesize = SG_ALL;
223 }
224
225 this_opt = strsep (&str, ",");
226 }
227 if(this_opt) {
228 opt = simple_strtol( this_opt, NULL, 0 );
229
230 /* Must be between 0 and 7 */
231 if (opt >= 0 && opt <= 7)
232 setup_hostid = opt;
233 else if (opt > 7)
234 printk( "mac_esp_setup: invalid host ID %ld !\n", opt);
235
236 this_opt = strsep (&str, ",");
237 }
238#ifdef SUPPORT_TAGS
239 if(this_opt) {
240 opt = simple_strtol( this_opt, NULL, 0 );
241 if (opt >= 0)
242 setup_use_tagged_queuing = !!opt;
243 }
244#endif
245#endif
246 return 1;
247}
248
249__setup("mac53c9x=", mac_esp_setup);
250
251
252/*
253 * ESP address 'detection'
254 */
255
256unsigned long get_base(int chip_num)
257{
258 /*
259 * using the chip_num and mac model, figure out where the
260 * chips are mapped
261 */
262
263 unsigned long io_base = 0x50f00000;
264 unsigned int second_offset = 0x402;
265 unsigned long scsi_loc = 0;
266
267 switch (macintosh_config->scsi_type) {
268
269 /* 950, 900, 700 */
270 case MAC_SCSI_QUADRA2:
271 scsi_loc = io_base + 0xf000 + ((chip_num == 0) ? 0 : second_offset);
272 break;
273
274 /* av's */
275 case MAC_SCSI_QUADRA3:
276 scsi_loc = io_base + 0x18000 + ((chip_num == 0) ? 0 : second_offset);
277 break;
278
279 /* most quadra/centris models are like this */
280 case MAC_SCSI_QUADRA:
281 scsi_loc = io_base + 0x10000;
282 break;
283
284 default:
285 printk("mac_esp: get_base: hit default!\n");
286 scsi_loc = io_base + 0x10000;
287 break;
288
289 } /* switch */
290
291 printk("mac_esp: io base at 0x%lx\n", scsi_loc);
292
293 return scsi_loc;
294}
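
For example, on the Quadra 950/900/700 class (MAC_SCSI_QUADRA2) this puts chip 0 at 0x50f00000 + 0xf000 = 0x50f0f000 and chip 1 at 0x50f0f000 + 0x402 = 0x50f0f402, while the common MAC_SCSI_QUADRA case has a single chip at 0x50f10000.
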
295
296/*
297 * Model dependent ESP setup
298 */
299
300int mac_esp_detect(struct scsi_host_template * tpnt)
301{
302 int quick = 0;
303 int chipnum, chipspresent = 0;
304#if 0
305 unsigned long timeout;
306#endif
307
308 if (esp_initialized > 0)
309 return -ENODEV;
310
311 /* what do we have in this machine... */
312 if (MACHW_PRESENT(MAC_SCSI_96)) {
313 chipspresent ++;
314 }
315
316 if (MACHW_PRESENT(MAC_SCSI_96_2)) {
317 chipspresent ++;
318 }
319
320 /* number of ESPs present ? */
321 if (setup_num_esps >= 0) {
322 if (chipspresent >= setup_num_esps)
323 chipspresent = setup_num_esps;
324 else
325 printk("mac_esp_detect: num_hosts detected %d setup %d \n",
326 chipspresent, setup_num_esps);
327 }
328
329 /* TODO: add disconnect / nosync flags */
330
331 /* setup variables */
332 tpnt->can_queue =
333 (setup_can_queue > 0) ? setup_can_queue : 7;
334 tpnt->cmd_per_lun =
335 (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : 1;
336 tpnt->sg_tablesize =
337 (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_ALL;
338
339 if (setup_hostid >= 0)
340 tpnt->this_id = setup_hostid;
341 else {
342 /* use 7 as default */
343 tpnt->this_id = 7;
344 }
345
346#ifdef SUPPORT_TAGS
347 if (setup_use_tagged_queuing < 0)
348 setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING;
349#endif
350
351 for (chipnum = 0; chipnum < chipspresent; chipnum ++) {
352 struct NCR_ESP * esp;
353
354 esp = esp_allocate(tpnt, NULL, 0);
355 esp->eregs = (struct ESP_regs *) get_base(chipnum);
356
357 esp->dma_irq_p = &esp_dafb_dma_irq_p;
358 if (chipnum == 0) {
359
360 if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
361 /* most machines except those below :-) */
362 quick = 1;
363 esp->dma_irq_p = &esp_iosb_dma_irq_p;
364 } else if (macintosh_config->scsi_type == MAC_SCSI_QUADRA3) {
365 /* mostly av's */
366 quick = 0;
367 } else {
368 /* q950, 900, 700 */
369 quick = 1;
370 out_be32(0xf9800024, 0x1d1);
371 esp->dregs = (void *) 0xf9800024;
372 }
373
374 } else { /* chipnum */
375
376 quick = 1;
377 out_be32(0xf9800028, 0x1d1);
378 esp->dregs = (void *) 0xf9800028;
379
380 } /* chipnum == 0 */
381
382 /* use pio for command bytes; pio for message/data: TBI */
383 esp->do_pio_cmds = 1;
384
385 /* Set the command buffer */
386 esp->esp_command = (volatile unsigned char*) cmd_buffer;
387 esp->esp_command_dvma = (__u32) cmd_buffer;
388
389 /* various functions */
390 esp->dma_bytes_sent = &dma_bytes_sent;
391 esp->dma_can_transfer = &dma_can_transfer;
392 esp->dma_dump_state = &dma_dump_state;
393 esp->dma_init_read = NULL;
394 esp->dma_init_write = NULL;
395 esp->dma_ints_off = &dma_ints_off;
396 esp->dma_ints_on = &dma_ints_on;
397
398 esp->dma_ports_p = &dma_ports_p;
399
400
401 /* Optional functions */
402 esp->dma_barrier = NULL;
403 esp->dma_drain = NULL;
404 esp->dma_invalidate = NULL;
405 esp->dma_irq_entry = NULL;
406 esp->dma_irq_exit = NULL;
407 esp->dma_led_on = NULL;
408 esp->dma_led_off = NULL;
409 esp->dma_poll = NULL;
410 esp->dma_reset = NULL;
411
412 /* SCSI chip speed */
413 /* below esp->cfreq = 40000000; */
414
415
416 if (quick) {
417 /* 'quick' means there's handshake glue logic like in the 5380 case */
418 esp->dma_setup = &dma_setup_quick;
419 } else {
420 esp->dma_setup = &dma_setup;
421 }
422
423 if (chipnum == 0) {
424
425 esp->irq = IRQ_MAC_SCSI;
426
427 request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp->ehost);
428#if 0 /* conflicts with IOP ADB */
429 request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp->ehost);
430#endif
431
432 if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
433 esp->cfreq = 16500000;
434 } else {
435 esp->cfreq = 25000000;
436 }
437
438
439 } else { /* chipnum == 1 */
440
441 esp->irq = IRQ_MAC_SCSIDRQ;
442#if 0 /* conflicts with IOP ADB */
443 request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp->ehost);
444#endif
445
446 esp->cfreq = 25000000;
447
448 }
449
450 if (quick) {
451 printk("esp: using quick version\n");
452 }
453
454 printk("esp: addr at 0x%p\n", esp->eregs);
455
456 esp->scsi_id = 7;
457 esp->diff = 0;
458
459 esp_initialize(esp);
460
461 } /* for chipnum */
462
463 if (chipspresent)
464 printk("\nmac_esp: %d esp controllers found\n", chipspresent);
465
466 esp_initialized = chipspresent;
467
468 return chipspresent;
469}
470
471static int mac_esp_release(struct Scsi_Host *shost)
472{
473 if (shost->irq)
474 free_irq(shost->irq, NULL);
475 if (shost->io_port && shost->n_io_port)
476 release_region(shost->io_port, shost->n_io_port);
477 scsi_unregister(shost);
478 return 0;
479}
480
481/*
482 * I've been wondering what this is supposed to do, for some time. Talking
483 * to Allen Briggs: These machines have an extra register someplace where the
484 * DRQ pin of the ESP can be monitored. That isn't useful for determining
485 * anything else (such as reselect interrupt or other magic) though.
486 * Maybe the semantics should be changed to something like
487 * if (esp->current_SC)
488 * ... check DRQ flag ...
489 * else
490 * ... disconnected, check pending VIA interrupt ...
491 *
492 * There's a problem with using the dabf flag or mac_irq_pending() here: both
493 * seem to return 1 even though no interrupt is currently pending, resulting
494 * in esp_exec_cmd() holding off the next command, and possibly infinite loops
495 * in esp_intr().
496 * Short term fix: just use esp_status & ESP_STAT_INTR here, as long as we
497 * use simple PIO. The DRQ status will be important when implementing pseudo
498 * DMA mode (set up ESP transfer count, return, do a batch of bytes in PIO or
499 * 'hardware handshake' mode upon DRQ).
500 * If you plan on changing this (i.e. to save the esp_status register access in
501 * favor of a VIA register access or a shadow register for the IFR), make sure
502 * to try a debug version of this first to monitor what registers would be a good
503 * indicator of the ESP interrupt.
504 */
505
506static int esp_dafb_dma_irq_p(struct NCR_ESP * esp)
507{
508 unsigned int ret;
509 int sreg = esp_read(esp->eregs->esp_status);
510
511#ifdef DEBUG_MAC_ESP
512 printk("mac_esp: esp_dafb_dma_irq_p dafb %d irq %d\n",
513 readl(esp->dregs), mac_irq_pending(IRQ_MAC_SCSI));
514#endif
515
516 sreg &= ESP_STAT_INTR;
517
518 /*
519 * maybe working; this is essentially what's used for iosb_dma_irq_p
520 */
521 if (sreg)
522 return 1;
523 else
524 return 0;
525
526 /*
527 * didn't work ...
528 */
529#if 0
530 if (esp->current_SC)
531 ret = readl(esp->dregs) & 0x200;
532 else if (esp->disconnected_SC)
533 ret = 1; /* sreg ?? */
534 else
535 ret = mac_irq_pending(IRQ_MAC_SCSI);
536
537 return(ret);
538#endif
539
540}
541
542/*
543 * See above: testing mac_irq_pending always returned 8 (SCSI IRQ) regardless
544 * of the actual ESP status.
545 */
546
547static int esp_iosb_dma_irq_p(struct NCR_ESP * esp)
548{
549 int ret = mac_irq_pending(IRQ_MAC_SCSI) || mac_irq_pending(IRQ_MAC_SCSIDRQ);
550 int sreg = esp_read(esp->eregs->esp_status);
551
552#ifdef DEBUG_MAC_ESP
553 printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n",
554 mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI),
555 sreg, esp->current_SC, esp->disconnected_SC);
556#endif
557
558 sreg &= ESP_STAT_INTR;
559
560 if (sreg)
561 return (sreg);
562 else
563 return 0;
564}
565
566/*
567 * This seems to be OK for PIO at least ... usually 0 after PIO.
568 */
569
570static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count)
571{
572
573#ifdef DEBUG_MAC_ESP
574 printk("mac_esp: dma bytes sent = %x\n", fifo_count);
575#endif
576
577 return fifo_count;
578}
579
580/*
581 * dma_can_transfer is used to switch between DMA and PIO, if DMA (pseudo)
582 * is ever implemented. Returning 0 here will use PIO.
583 */
584
585static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd * sp)
586{
587 unsigned long sz = sp->SCp.this_residual;
588#if 0 /* no DMA yet; make conditional */
589 if (sz > 0x10000000) {
590 sz = 0x10000000;
591 }
592 printk("mac_esp: dma can transfer = 0lx%x\n", sz);
593#else
594
595#ifdef DEBUG_MAC_ESP
596 printk("mac_esp: pio to transfer = %ld\n", sz);
597#endif
598
599 sz = 0;
600#endif
601 return sz;
602}
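
With the #if 0 branch compiled out this always returns 0, so the NCR53C9x core falls back to PIO for every transfer, as the comment above intends. If pseudo-DMA were ever implemented, the routine would instead clamp the residual to what the engine can move in one go, much like the Fastlane version earlier in this patch; a hypothetical sketch:

	/* Hypothetical pseudo-DMA variant; the 0xfffc limit is an assumption
	 * borrowed from the Fastlane driver, not a Mac hardware value.
	 */
	static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
	{
		unsigned long sz = sp->SCp.this_residual;

		if (sz > 0xfffc)
			sz = 0xfffc;
		return sz;
	}
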
603
604/*
605 * Not yet ...
606 */
607
608static void dma_dump_state(struct NCR_ESP * esp)
609{
610#ifdef DEBUG_MAC_ESP
611 printk("mac_esp: dma_dump_state: called\n");
612#endif
613#if 0
614 ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
615 esp->esp_id, ((struct mac_dma_registers *)
616 (esp->dregs))->cond_reg));
617#endif
618}
619
620/*
621 * DMA setup: should be used to set up the ESP transfer count for pseudo
622 * DMA transfers; need a DRQ transfer function to do the actual transfer
623 */
624
625static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length)
626{
627 printk("mac_esp: dma_init_read\n");
628}
629
630
631static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length)
632{
633 printk("mac_esp: dma_init_write\n");
634}
635
636
637static void dma_ints_off(struct NCR_ESP * esp)
638{
639 disable_irq(esp->irq);
640}
641
642
643static void dma_ints_on(struct NCR_ESP * esp)
644{
645 enable_irq(esp->irq);
646}
647
648/*
649 * generic dma_irq_p(), unused
650 */
651
652static int dma_irq_p(struct NCR_ESP * esp)
653{
654 int i = esp_read(esp->eregs->esp_status);
655
656#ifdef DEBUG_MAC_ESP
657 printk("mac_esp: dma_irq_p status %d\n", i);
658#endif
659
660 return (i & ESP_STAT_INTR);
661}
662
663static int dma_irq_p_quick(struct NCR_ESP * esp)
664{
665 /*
666 * Copied from iosb_dma_irq_p()
667 */
668 int ret = mac_irq_pending(IRQ_MAC_SCSI) || mac_irq_pending(IRQ_MAC_SCSIDRQ);
669 int sreg = esp_read(esp->eregs->esp_status);
670
671#ifdef DEBUG_MAC_ESP
672 printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n",
673 mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI),
674 sreg, esp->current_SC, esp->disconnected_SC);
675#endif
676
677 sreg &= ESP_STAT_INTR;
678
679 if (sreg)
680 return (sreg);
681 else
682 return 0;
683
684}
685
686static void dma_led_off(struct NCR_ESP * esp)
687{
688#ifdef DEBUG_MAC_ESP
689 printk("mac_esp: dma_led_off: called\n");
690#endif
691}
692
693
694static void dma_led_on(struct NCR_ESP * esp)
695{
696#ifdef DEBUG_MAC_ESP
697 printk("mac_esp: dma_led_on: called\n");
698#endif
699}
700
701
702static int dma_ports_p(struct NCR_ESP * esp)
703{
704 return 0;
705}
706
707
708static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write)
709{
710
711#ifdef DEBUG_MAC_ESP
712 printk("mac_esp: dma_setup\n");
713#endif
714
715 if (write) {
716 dma_init_read(esp, (char *) addr, count);
717 } else {
718 dma_init_write(esp, (char *) addr, count);
719 }
720}
721
722
723static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write)
724{
725#ifdef DEBUG_MAC_ESP
726 printk("mac_esp: dma_setup_quick\n");
727#endif
728}
729
730static struct scsi_host_template driver_template = {
731 .proc_name = "mac_esp",
732 .name = "Mac 53C9x SCSI",
733 .detect = mac_esp_detect,
734 .slave_alloc = esp_slave_alloc,
735 .slave_destroy = esp_slave_destroy,
736 .release = mac_esp_release,
737 .info = esp_info,
738 .queuecommand = esp_queue,
739 .eh_abort_handler = esp_abort,
740 .eh_bus_reset_handler = esp_reset,
741 .can_queue = 7,
742 .this_id = 7,
743 .sg_tablesize = SG_ALL,
744 .cmd_per_lun = 1,
745 .use_clustering = DISABLE_CLUSTERING
746};
747
748
749#include "scsi_module.c"
750
751MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/mca_53c9x.c b/drivers/scsi/mca_53c9x.c
deleted file mode 100644
index d693d0f21395..000000000000
--- a/drivers/scsi/mca_53c9x.c
+++ /dev/null
@@ -1,520 +0,0 @@
1/* mca_53c9x.c: Driver for the SCSI adapter found on NCR 35xx
2 * (and maybe some other) Microchannel machines
3 *
4 * Code taken mostly from Cyberstorm SCSI drivers
5 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
6 *
7 * Hacked to work with the NCR MCA stuff by Tymm Twillman (tymm@computer.org)
8 *
9 * The CyberStorm SCSI driver (and this driver) is based on David S. Miller's
10 * ESP driver for the Sparc computers.
11 *
12 * Special thanks to Ken Stewart at Symbios (LSI) for helping with info on
13 * the 86C01. I was on the brink of going ga-ga...
14 *
15 * Also thanks to Jesper Skov for helping me with info on how the Amiga
16 * does things...
17 */
18
19/*
20 * This is currently only set up to use one 53c9x card at a time; it could be
21 * changed fairly easily to detect/use more than one, but I'm not too sure how
22 * many cards that use the 53c9x on MCA systems there are (if, in fact, there
23 * are cards that use them, other than the one built into some NCR systems)...
24 * If anyone requests this, I'll throw it in, otherwise it's not worth the
25 * effort.
26 */
27
28/*
29 * Info on the 86C01 MCA interface chip at the bottom, if you care enough to
30 * look.
31 */
32
33#include <linux/delay.h>
34#include <linux/interrupt.h>
35#include <linux/kernel.h>
36#include <linux/mca.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/slab.h>
40#include <linux/blkdev.h>
41#include <linux/proc_fs.h>
42#include <linux/stat.h>
43#include <linux/mca-legacy.h>
44
45#include "scsi.h"
46#include <scsi/scsi_host.h>
47#include "NCR53C9x.h"
48
49#include <asm/dma.h>
50#include <asm/irq.h>
51#include <asm/mca_dma.h>
52#include <asm/pgtable.h>
53
54/*
55 * From ibmmca.c (IBM scsi controller card driver) -- used for turning PS2 disk
56 * activity LED on and off
57 */
58
59#define PS2_SYS_CTR 0x92
60
61/* Ports the ncr's 53c94 can be put at; indexed by pos register value */
62
63#define MCA_53C9X_IO_PORTS { \
64 0x0000, 0x0240, 0x0340, 0x0400, \
65 0x0420, 0x3240, 0x8240, 0xA240, \
66 }
67
68/*
69 * Supposedly there were some cards put together with the 'c9x and 86c01. If
70 * they have different ID's from the ones on the 3500 series machines,
71 * you can add them here and hopefully things will work out.
72 */
73
74#define MCA_53C9X_IDS { \
75 0x7F4C, \
76 0x0000, \
77 }
78
79static int dma_bytes_sent(struct NCR_ESP *, int);
80static int dma_can_transfer(struct NCR_ESP *, Scsi_Cmnd *);
81static void dma_dump_state(struct NCR_ESP *);
82static void dma_init_read(struct NCR_ESP *, __u32, int);
83static void dma_init_write(struct NCR_ESP *, __u32, int);
84static void dma_ints_off(struct NCR_ESP *);
85static void dma_ints_on(struct NCR_ESP *);
86static int dma_irq_p(struct NCR_ESP *);
87static int dma_ports_p(struct NCR_ESP *);
88static void dma_setup(struct NCR_ESP *, __u32, int, int);
89static void dma_led_on(struct NCR_ESP *);
90static void dma_led_off(struct NCR_ESP *);
91
92/* This is where all commands are put before they are transferred to the
93 * 53c9x via PIO.
94 */
95
96static volatile unsigned char cmd_buffer[16];
97
98/*
99 * We keep the structure that is used to access the registers on the 53c9x
100 * here.
101 */
102
103static struct ESP_regs eregs;
104
105/***************************************************************** Detection */
106static int mca_esp_detect(struct scsi_host_template *tpnt)
107{
108 struct NCR_ESP *esp;
109 static int io_port_by_pos[] = MCA_53C9X_IO_PORTS;
110 int mca_53c9x_ids[] = MCA_53C9X_IDS;
111 int *id_to_check = mca_53c9x_ids;
112 int slot;
113 int pos[3];
114 unsigned int tmp_io_addr;
115 unsigned char tmp_byte;
116
117
118 if (!MCA_bus)
119 return 0;
120
121 while (*id_to_check) {
122 if ((slot = mca_find_adapter(*id_to_check, 0)) !=
123 MCA_NOTFOUND)
124 {
125 esp = esp_allocate(tpnt, NULL, 0);
126
127 pos[0] = mca_read_stored_pos(slot, 2);
128 pos[1] = mca_read_stored_pos(slot, 3);
129 pos[2] = mca_read_stored_pos(slot, 4);
130
131 esp->eregs = &eregs;
132
133 /*
134 * IO port base is given in the first (non-ID) pos
135 * register, like so:
136 *
137 * Bits 3 2 1 IO base
138 * ----------------------------
139 * 0 0 0 <disabled>
140 * 0 0 1 0x0240
141 * 0 1 0 0x0340
142 * 0 1 1 0x0400
143 * 1 0 0 0x0420
144 * 1 0 1 0x3240
145 * 1 1 0 0x8240
146 * 1 1 1 0xA240
147 */
148
149 tmp_io_addr =
150 io_port_by_pos[(pos[0] & 0x0E) >> 1];
151
152 esp->eregs->io_addr = tmp_io_addr + 0x10;
153
154 if (esp->eregs->io_addr == 0x0000) {
155 printk("Adapter is disabled.\n");
156 break;
157 }
158
159 /*
160 * IRQ is specified in bits 4 and 5:
161 *
162 * Bits 5 4 IRQ
163 * -----------------------
164 * 0 0 3
165 * 0 1 5
166 * 1 0 7
167 * 1 1 9
168 */
169
170 esp->irq = ((pos[0] & 0x30) >> 3) + 3;
171
172 /*
173 * DMA channel is in the low 3 bits of the second
174 * POS register
175 */
176
177 esp->dma = pos[1] & 7;
178 esp->slot = slot;
179
180 if (request_irq(esp->irq, esp_intr, 0,
181 "NCR 53c9x SCSI", esp->ehost))
182 {
183 printk("Unable to request IRQ %d.\n", esp->irq);
184 esp_deallocate(esp);
185 scsi_unregister(esp->ehost);
186 return 0;
187 }
188
189 if (request_dma(esp->dma, "NCR 53c9x SCSI")) {
190 printk("Unable to request DMA channel %d.\n",
191 esp->dma);
192 free_irq(esp->irq, esp_intr);
193 esp_deallocate(esp);
194 scsi_unregister(esp->ehost);
195 return 0;
196 }
197
198 request_region(tmp_io_addr, 32, "NCR 53c9x SCSI");
199
200 /*
201 * 86C01 handles DMA, IO mode, from address
202 * (base + 0x0a)
203 */
204
205 mca_disable_dma(esp->dma);
206 mca_set_dma_io(esp->dma, tmp_io_addr + 0x0a);
207 mca_enable_dma(esp->dma);
208
209 /* Tell the 86C01 to give us interrupts */
210
211 tmp_byte = inb(tmp_io_addr + 0x02) | 0x40;
212 outb(tmp_byte, tmp_io_addr + 0x02);
213
214 /*
215 * Scsi ID -- general purpose register, hi
216 * 2 bits; add 4 to this number to get the
217 * ID
218 */
219
220 esp->scsi_id = ((pos[2] & 0xC0) >> 6) + 4;
221
222 /* Do command transfer with programmed I/O */
223
224 esp->do_pio_cmds = 1;
225
226 /* Required functions */
227
228 esp->dma_bytes_sent = &dma_bytes_sent;
229 esp->dma_can_transfer = &dma_can_transfer;
230 esp->dma_dump_state = &dma_dump_state;
231 esp->dma_init_read = &dma_init_read;
232 esp->dma_init_write = &dma_init_write;
233 esp->dma_ints_off = &dma_ints_off;
234 esp->dma_ints_on = &dma_ints_on;
235 esp->dma_irq_p = &dma_irq_p;
236 esp->dma_ports_p = &dma_ports_p;
237 esp->dma_setup = &dma_setup;
238
239 /* Optional functions */
240
241 esp->dma_barrier = NULL;
242 esp->dma_drain = NULL;
243 esp->dma_invalidate = NULL;
244 esp->dma_irq_entry = NULL;
245 esp->dma_irq_exit = NULL;
246 esp->dma_led_on = dma_led_on;
247 esp->dma_led_off = dma_led_off;
248 esp->dma_poll = NULL;
249 esp->dma_reset = NULL;
250
251 /* Set the command buffer */
252
253 esp->esp_command = (volatile unsigned char*)
254 cmd_buffer;
255 esp->esp_command_dvma = isa_virt_to_bus(cmd_buffer);
256
257 /* SCSI chip speed */
258
259 esp->cfreq = 25000000;
260
261 /* Differential SCSI? I think not. */
262
263 esp->diff = 0;
264
265 esp_initialize(esp);
266
267 printk(" Adapter found in slot %2d: io port 0x%x "
268 "irq %d dma channel %d\n", slot + 1, tmp_io_addr,
269 esp->irq, esp->dma);
270
271 mca_set_adapter_name(slot, "NCR 53C9X SCSI Adapter");
272 mca_mark_as_used(slot);
273
274 break;
275 }
276
277 id_to_check++;
278 }
279
280 return esps_in_use;
281}
282
283
284/******************************************************************* Release */
285
286static int mca_esp_release(struct Scsi_Host *host)
287{
288 struct NCR_ESP *esp = (struct NCR_ESP *)host->hostdata;
289 unsigned char tmp_byte;
290
291 esp_deallocate(esp);
292 /*
293 * Tell the 86C01 to stop sending interrupts
294 */
295
296 tmp_byte = inb(esp->eregs->io_addr - 0x0E);
297 tmp_byte &= ~0x40;
298 outb(tmp_byte, esp->eregs->io_addr - 0x0E);
299
300 free_irq(esp->irq, esp_intr);
301 free_dma(esp->dma);
302
303 mca_mark_as_unused(esp->slot);
304
305 return 0;
306}
307
308/************************************************************* DMA Functions */
309static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
310{
311 /* Ask the 53c9x. It knows. */
312
313 return fifo_count;
314}
315
316static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
317{
318 /*
319 * The MCA dma channels can only do up to 128K bytes at a time.
320 * (16 bit mode)
321 */
322
323 unsigned long sz = sp->SCp.this_residual;
324 if(sz > 0x20000)
325 sz = 0x20000;
326 return sz;
327}
328
329static void dma_dump_state(struct NCR_ESP *esp)
330{
331 /*
332 * Doesn't quite match up to the other drivers, but we do what we
333 * can.
334 */
335
336 ESPLOG(("esp%d: dma channel <%d>\n", esp->esp_id, esp->dma));
337 ESPLOG(("bytes left to dma: %d\n", mca_get_dma_residue(esp->dma)));
338}
339
340static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
341{
342 unsigned long flags;
343
344
345 save_flags(flags);
346 cli();
347
348 mca_disable_dma(esp->dma);
349 mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_16 |
350 MCA_DMA_MODE_IO);
351 mca_set_dma_addr(esp->dma, addr);
352 mca_set_dma_count(esp->dma, length / 2); /* !!! */
353 mca_enable_dma(esp->dma);
354
355 restore_flags(flags);
356}
357
358static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
359{
360 unsigned long flags;
361
362
363 save_flags(flags);
364 cli();
365
366 mca_disable_dma(esp->dma);
367 mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_WRITE |
368 MCA_DMA_MODE_16 | MCA_DMA_MODE_IO);
369 mca_set_dma_addr(esp->dma, addr);
370 mca_set_dma_count(esp->dma, length / 2); /* !!! */
371 mca_enable_dma(esp->dma);
372
373 restore_flags(flags);
374}
375
376static void dma_ints_off(struct NCR_ESP *esp)
377{
378 /*
379 * Tell the 'C01 to shut up. All interrupts are routed through it.
380 */
381
382 outb(inb(esp->eregs->io_addr - 0x0E) & ~0x40,
383 esp->eregs->io_addr - 0x0E);
384}
385
386static void dma_ints_on(struct NCR_ESP *esp)
387{
388 /*
389 * Ok. You can speak again.
390 */
391
392 outb(inb(esp->eregs->io_addr - 0x0E) | 0x40,
393 esp->eregs->io_addr - 0x0E);
394}
395
396static int dma_irq_p(struct NCR_ESP *esp)
397{
398 /*
399 * DaveM says that this should return a "yes" if there is an interrupt
400 * or a DMA error has occurred. I copied the Amiga driver's semantics,
401 * though, because it seems to work and we can't really tell if
402 * a DMA error happened. This gives the "yes" if the scsi chip
403 * is sending an interrupt and no DMA activity is taking place
404 */
405
406 return (!(inb(esp->eregs->io_addr - 0x04) & 1) &&
407 !(inb(esp->eregs->io_addr - 0x04) & 2) );
408}
409
410static int dma_ports_p(struct NCR_ESP *esp)
411{
412 /*
413 * Check to see if interrupts are enabled on the 'C01 (in case abort
414 * is entered multiple times, so we only do the abort once)
415 */
416
417 return (inb(esp->eregs->io_addr - 0x0E) & 0x40) ? 1:0;
418}
419
420static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
421{
422 if(write){
423 dma_init_write(esp, addr, count);
424 } else {
425 dma_init_read(esp, addr, count);
426 }
427}
428
429/*
430 * These will not play nicely with other disk controllers that try to use the
431 * disk active LED... but what can you do? Don't answer that.
432 *
433 * Stolen shamelessly from ibmmca.c -- IBM Microchannel SCSI adapter driver
434 *
435 */
436
437static void dma_led_on(struct NCR_ESP *esp)
438{
439 outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR);
440}
441
442static void dma_led_off(struct NCR_ESP *esp)
443{
444 outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR);
445}
446
447static struct scsi_host_template driver_template = {
448 .proc_name = "mca_53c9x",
449 .name = "NCR 53c9x SCSI",
450 .detect = mca_esp_detect,
451 .slave_alloc = esp_slave_alloc,
452 .slave_destroy = esp_slave_destroy,
453 .release = mca_esp_release,
454 .queuecommand = esp_queue,
455 .eh_abort_handler = esp_abort,
456 .eh_bus_reset_handler = esp_reset,
457 .can_queue = 7,
458 .sg_tablesize = SG_ALL,
459 .cmd_per_lun = 1,
460 .unchecked_isa_dma = 1,
461 .use_clustering = DISABLE_CLUSTERING
462};
463
464
465#include "scsi_module.c"
466
467/*
468 * OK, here's the goods I promised. The NCR 86C01 is an MCA interface chip
469 * that handles enabling/disabling IRQ, dma interfacing, IO port selection
470 * and other fun stuff. It takes up 16 addresses, and the chip it is
471 * connected to gets the following 16. Registers are as follows:
472 *
473 * Offsets 0-1 : Card ID
474 *
475 * Offset 2 : Mode enable register --
476 * Bit 7 : Data Word width (1 = 16, 0 = 8)
477 * Bit 6 : IRQ enable (1 = enabled)
478 * Bits 5,4 : IRQ select
479 * 0 0 : IRQ 3
480 * 0 1 : IRQ 5
481 * 1 0 : IRQ 7
482 * 1 1 : IRQ 9
483 * Bits 3-1 : Base Address
484 * 0 0 0 : <disabled>
485 * 0 0 1 : 0x0240
486 * 0 1 0 : 0x0340
487 * 0 1 1 : 0x0400
488 * 1 0 0 : 0x0420
489 * 1 0 1 : 0x3240
490 * 1 1 0 : 0x8240
491 * 1 1 1 : 0xA240
492 * Bit 0 : Card enable (1 = enabled)
493 *
494 * Offset 3 : DMA control register --
495 * Bit 7 : DMA enable (1 = enabled)
496 * Bits 6,5 : Preempt Count Select (transfers to complete after
497 * 'C01 has been preempted on MCA bus)
498 * 0 0 : 0
499 * 0 1 : 1
500 * 1 0 : 3
501 * 1 1 : 7
502 * (all these wacky numbers; I'm sure there's a reason somewhere)
503 * Bit 4 : Fairness enable (1 = fair bus priority)
504 * Bits 3-0 : Arbitration level (0-15 consecutive)
505 *
506 * Offset 4 : General purpose register
507 * Bits 7-3 : User definable (here, 7,6 are SCSI ID)
508 * Bits 2-0 : reserved
509 *
510 * Offset 10 : DMA decode register (used for IO based DMA; also can do
511 * PIO through this port)
512 *
513 * Offset 12 : Status
514 * Bits 7-2 : reserved
515 * Bit 1 : DMA pending (1 = pending)
516 * Bit 0 : IRQ pending (0 = pending)
517 *
518 * Exciting, huh?
519 *
520 */
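For readers following the POS decoding in mca_esp_detect() and the mode enable register layout documented just above, here is a small standalone C sketch of how those bit fields map to an IRQ line and IO base. It is only an illustration of the tables above, not part of the driver, and the table and helper names are made up for the example.

#include <stdio.h>

/* IO base indexed by bits 3-1 of the 86C01 mode enable register */
static const unsigned int c01_io_base[8] = {
	0x0000, 0x0240, 0x0340, 0x0400,
	0x0420, 0x3240, 0x8240, 0xA240,
};

/* IRQ line indexed by bits 5,4 (bit 5 is the high bit of the index) */
static const int c01_irq[4] = { 3, 5, 7, 9 };

static void c01_decode_mode(unsigned char mode)
{
	unsigned int base = c01_io_base[(mode >> 1) & 0x07];	/* bits 3-1 */
	int irq = c01_irq[(mode >> 4) & 0x03];			/* bits 5,4 */

	printf("width=%d-bit irq_enable=%d irq=%d io_base=0x%04X card=%s\n",
	       (mode & 0x80) ? 16 : 8, (mode >> 6) & 1, irq, base,
	       (mode & 0x01) ? "enabled" : "disabled");
}

int main(void)
{
	/* 0x53: IRQ enabled, IRQ 5, IO base 0x0240, card enabled, 8-bit */
	c01_decode_mode(0x53);
	return 0;
}

For the IRQ field this is equivalent to the driver's ((pos[0] & 0x30) >> 3) + 3, which also yields 3, 5, 7 or 9 for the four bit combinations.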
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
deleted file mode 100644
index 8e5eadbd5c51..000000000000
--- a/drivers/scsi/oktagon_esp.c
+++ /dev/null
@@ -1,606 +0,0 @@
1/*
2 * Oktagon_esp.c -- Driver for bsc Oktagon
3 *
4 * Written by Carsten Pluntke 1998
5 *
6 * Based on cyber_esp.c
7 */
8
9
10#if defined(CONFIG_AMIGA) || defined(CONFIG_APUS)
11#define USE_BOTTOM_HALF
12#endif
13
14#include <linux/module.h>
15
16#include <linux/kernel.h>
17#include <linux/delay.h>
18#include <linux/types.h>
19#include <linux/string.h>
20#include <linux/slab.h>
21#include <linux/blkdev.h>
22#include <linux/proc_fs.h>
23#include <linux/stat.h>
24#include <linux/reboot.h>
25#include <asm/system.h>
26#include <asm/ptrace.h>
27#include <asm/pgtable.h>
28
29
30#include "scsi.h"
31#include <scsi/scsi_host.h>
32#include "NCR53C9x.h"
33
34#include <linux/zorro.h>
35#include <asm/irq.h>
36#include <asm/amigaints.h>
37#include <asm/amigahw.h>
38
39#ifdef USE_BOTTOM_HALF
40#include <linux/workqueue.h>
41#include <linux/interrupt.h>
42#endif
43
44/* The controller registers can be found in the Z2 config area at these
45 * offsets:
46 */
47#define OKTAGON_ESP_ADDR 0x03000
48#define OKTAGON_DMA_ADDR 0x01000
49
50
51static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
52static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
53static void dma_dump_state(struct NCR_ESP *esp);
54static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
55static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
56static void dma_ints_off(struct NCR_ESP *esp);
57static void dma_ints_on(struct NCR_ESP *esp);
58static int dma_irq_p(struct NCR_ESP *esp);
59static void dma_led_off(struct NCR_ESP *esp);
60static void dma_led_on(struct NCR_ESP *esp);
61static int dma_ports_p(struct NCR_ESP *esp);
62static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
63
64static void dma_irq_exit(struct NCR_ESP *esp);
65static void dma_invalidate(struct NCR_ESP *esp);
66
67static void dma_mmu_get_scsi_one(struct NCR_ESP *,Scsi_Cmnd *);
68static void dma_mmu_get_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
69static void dma_mmu_release_scsi_one(struct NCR_ESP *,Scsi_Cmnd *);
70static void dma_mmu_release_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
71static void dma_advance_sg(Scsi_Cmnd *);
72static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
73
74#ifdef USE_BOTTOM_HALF
75static void dma_commit(struct work_struct *unused);
76
77long oktag_to_io(long *paddr, long *addr, long len);
78long oktag_from_io(long *addr, long *paddr, long len);
79
80static DECLARE_WORK(tq_fake_dma, dma_commit);
81
82#define DMA_MAXTRANSFER 0x8000
83
84#else
85
86/*
87 * No bottom half. Use transfer directly from IRQ. Find a narrow path
88 * between too much IRQ overhead and clogging the IRQ for too long.
89 */
90
91#define DMA_MAXTRANSFER 0x1000
92
93#endif
94
95static struct notifier_block oktagon_notifier = {
96 oktagon_notify_reboot,
97 NULL,
98 0
99};
100
101static long *paddress;
102static long *address;
103static long len;
104static long dma_on;
105static int direction;
106static struct NCR_ESP *current_esp;
107
108
109static volatile unsigned char cmd_buffer[16];
110 /* This is where all commands are put
111 * before they are transferred to the ESP chip
112 * via PIO.
113 */
114
115/***************************************************************** Detection */
116int oktagon_esp_detect(struct scsi_host_template *tpnt)
117{
118 struct NCR_ESP *esp;
119 struct zorro_dev *z = NULL;
120 unsigned long address;
121 struct ESP_regs *eregs;
122
123 while ((z = zorro_find_device(ZORRO_PROD_BSC_OKTAGON_2008, z))) {
124 unsigned long board = z->resource.start;
125 if (request_mem_region(board+OKTAGON_ESP_ADDR,
126 sizeof(struct ESP_regs), "NCR53C9x")) {
127 /*
128 * It is a SCSI controller.
129 * Hardwire Host adapter to SCSI ID 7
130 */
131
132 address = (unsigned long)ZTWO_VADDR(board);
133 eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR);
134
135 /* This line was 5 lines lower */
136 esp = esp_allocate(tpnt, (void *)board + OKTAGON_ESP_ADDR, 0);
137
138 /* we have to shift the registers only one bit for oktagon */
139 esp->shift = 1;
140
141 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
142 udelay(5);
143 if (esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
144 return 0; /* Bail out if address did not hold data */
145
146 /* Do command transfer with programmed I/O */
147 esp->do_pio_cmds = 1;
148
149 /* Required functions */
150 esp->dma_bytes_sent = &dma_bytes_sent;
151 esp->dma_can_transfer = &dma_can_transfer;
152 esp->dma_dump_state = &dma_dump_state;
153 esp->dma_init_read = &dma_init_read;
154 esp->dma_init_write = &dma_init_write;
155 esp->dma_ints_off = &dma_ints_off;
156 esp->dma_ints_on = &dma_ints_on;
157 esp->dma_irq_p = &dma_irq_p;
158 esp->dma_ports_p = &dma_ports_p;
159 esp->dma_setup = &dma_setup;
160
161 /* Optional functions */
162 esp->dma_barrier = 0;
163 esp->dma_drain = 0;
164 esp->dma_invalidate = &dma_invalidate;
165 esp->dma_irq_entry = 0;
166 esp->dma_irq_exit = &dma_irq_exit;
167 esp->dma_led_on = &dma_led_on;
168 esp->dma_led_off = &dma_led_off;
169 esp->dma_poll = 0;
170 esp->dma_reset = 0;
171
172 esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
173 esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
174 esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
175 esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
176 esp->dma_advance_sg = &dma_advance_sg;
177
178 /* SCSI chip speed */
179 /* Looking at the quartz of the SCSI board... */
180 esp->cfreq = 25000000;
181
182 /* The DMA registers on the CyberStorm are mapped
183 * relative to the device (i.e. in the same Zorro
184 * I/O block).
185 */
186 esp->dregs = (void *)(address + OKTAGON_DMA_ADDR);
187
188 paddress = (long *) esp->dregs;
189
190 /* ESP register base */
191 esp->eregs = eregs;
192
193 /* Set the command buffer */
194 esp->esp_command = (volatile unsigned char*) cmd_buffer;
195
196 /* Yes, the virtual address. See below. */
197 esp->esp_command_dvma = (__u32) cmd_buffer;
198
199 esp->irq = IRQ_AMIGA_PORTS;
200 request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
201 "BSC Oktagon SCSI", esp->ehost);
202
203 /* Figure out our scsi ID on the bus */
204 esp->scsi_id = 7;
205
206 /* We don't have a differential SCSI-bus. */
207 esp->diff = 0;
208
209 esp_initialize(esp);
210
211 printk("ESP_Oktagon Driver 1.1"
212#ifdef USE_BOTTOM_HALF
213 " [BOTTOM_HALF]"
214#else
215 " [IRQ]"
216#endif
217 " registered.\n");
218 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,esps_in_use);
219 esps_running = esps_in_use;
220 current_esp = esp;
221 register_reboot_notifier(&oktagon_notifier);
222 return esps_in_use;
223 }
224 }
225 return 0;
226}
227
228
229/*
230 * On certain configurations the SCSI equipment gets confused on reboot,
231 * so we have to reset it then.
232 */
233
234static int
235oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
236{
237 struct NCR_ESP *esp;
238
239 if((code == SYS_DOWN || code == SYS_HALT) && (esp = current_esp))
240 {
241 esp_bootup_reset(esp,esp->eregs);
242 udelay(500); /* Settle time. Maybe unnecessary. */
243 }
244 return NOTIFY_DONE;
245}
246
247
248
249#ifdef USE_BOTTOM_HALF
250
251
252/*
253 * The bsc Oktagon controller has no real DMA, so we have to do the 'DMA
254 * transfer' in the interrupt (Yikes!) or use a bottom half so as not to clutter
255 * IRQs for longer than is good.
256 *
257 * FIXME
258 * BIG PROBLEM: 'len' is usually the buffer length, not the expected length
259 * of the data. So DMA may finish prematurely, further reads lead to
260 * 'machine check' on APUS systems (don't know about m68k systems, AmigaOS
261 * deliberately ignores the bus faults) and a normal copy-loop can't
262 * be exited prematurely just at the right moment by the dma_invalidate IRQ.
263 * So do it the hard way, write our own copier in assembler and
264 * catch the exception.
265 * -- Carsten
266 */
267
268
269static void dma_commit(struct work_struct *unused)
270{
271 long wait,len2,pos;
272 struct NCR_ESP *esp;
273
274 ESPDATA(("Transfer: %ld bytes, Address 0x%08lX, Direction: %d\n",
275 len,(long) address,direction));
276 dma_ints_off(current_esp);
277
278 pos = 0;
279 wait = 1;
280 if(direction) /* write? (memory to device) */
281 {
282 while(len > 0)
283 {
284 len2 = oktag_to_io(paddress, address+pos, len);
285 if(!len2)
286 {
287 if(wait > 1000)
288 {
289 printk("Expedited DMA exit (writing) %ld\n",len);
290 break;
291 }
292 mdelay(wait);
293 wait *= 2;
294 }
295 pos += len2;
296 len -= len2*sizeof(long);
297 }
298 } else {
299 while(len > 0)
300 {
301 len2 = oktag_from_io(address+pos, paddress, len);
302 if(!len2)
303 {
304 if(wait > 1000)
305 {
306 printk("Expedited DMA exit (reading) %ld\n",len);
307 break;
308 }
309 mdelay(wait);
310 wait *= 2;
311 }
312 pos += len2;
313 len -= len2*sizeof(long);
314 }
315 }
316
317 /* to make esp->shift work */
318 esp=current_esp;
319
320#if 0
321 len2 = (esp_read(current_esp->eregs->esp_tclow) & 0xff) |
322 ((esp_read(current_esp->eregs->esp_tcmed) & 0xff) << 8);
323
324 /*
325 * Uh uh. If you see this, len and transfer count registers were out of
326 * sync. That means really serious trouble.
327 */
328
329 if(len2)
330 printk("Eeeek!! Transfer count still %ld!\n",len2);
331#endif
332
333 /*
334 * Normally we just need to exit and wait for the interrupt to come.
335 * But at least one device (my Microtek ScanMaker 630) regularly mis-
336 * calculates the bytes it should send which is really ugly because
337 * it locks up the SCSI bus if not accounted for.
338 */
339
340 if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
341 {
342 long len = 100;
343 long trash[10];
344
345 /*
346 * Interrupt bit was not set. Either the device is just plain lazy
347 * so we give it a 10 ms chance or...
348 */
349 while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)))
350 udelay(100);
351
352
353 if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
354 {
355 /*
356 * So we think that the transfer count is out of sync. Since we
357 * have all we want we are happy and can ditch the trash.
358 */
359
360 len = DMA_MAXTRANSFER;
361
362 while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)))
363 oktag_from_io(trash,paddress,2);
364
365 if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
366 {
367 /*
368 * Things really have gone wrong. If we leave the system in that
369 * state, the SCSI bus is locked forever. I hope that this will
370 * turn the system into a more or less running state.
371 */
372 printk("Device is bolixed, trying bus reset...\n");
373 esp_bootup_reset(current_esp,current_esp->eregs);
374 }
375 }
376 }
377
378 ESPDATA(("Transfer_finale: do_data_finale should come\n"));
379
380 len = 0;
381 dma_on = 0;
382 dma_ints_on(current_esp);
383}
384
385#endif
386
387/************************************************************* DMA Functions */
388static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
389{
390 /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
391 * the number of bytes sent (to the ESP chip) equals the number
392 * of bytes in the FIFO - there is no buffering in the DMA controller.
393 * XXXX Do I read this right? It is from host to ESP, right?
394 */
395 return fifo_count;
396}
397
398static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
399{
400 unsigned long sz = sp->SCp.this_residual;
401 if(sz > DMA_MAXTRANSFER)
402 sz = DMA_MAXTRANSFER;
403 return sz;
404}
405
406static void dma_dump_state(struct NCR_ESP *esp)
407{
408}
409
410/*
411 * What the f$@& is this?
412 *
413 * Some SCSI devices (like my Microtek ScanMaker 630 scanner) want to transfer
414 * more data than requested. How much? Dunno. So ditch the bogus data into
415 * the sink, hoping the device will advance to the next phase sooner or later.
416 *
417 * -- Carsten
418 */
419
420static long oktag_eva_buffer[16]; /* The data sink */
421
422static void oktag_check_dma(void)
423{
424 struct NCR_ESP *esp;
425
426 esp=current_esp;
427 if(!len)
428 {
429 address = oktag_eva_buffer;
430 len = 2;
431 /* esp_do_data sets them to zero like len */
432 esp_write(current_esp->eregs->esp_tclow,2);
433 esp_write(current_esp->eregs->esp_tcmed,0);
434 }
435}
436
437static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
438{
439 /* Zorro is noncached, everything else done using processor. */
440 /* cache_clear(addr, length); */
441
442 if(dma_on)
443 panic("dma_init_read while dma process is initialized/running!\n");
444 direction = 0;
445 address = (long *) vaddress;
446 current_esp = esp;
447 len = length;
448 oktag_check_dma();
449 dma_on = 1;
450}
451
452static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
453{
454 /* cache_push(addr, length); */
455
456 if(dma_on)
457 panic("dma_init_write while dma process is initialized/running!\n");
458 direction = 1;
459 address = (long *) vaddress;
460 current_esp = esp;
461 len = length;
462 oktag_check_dma();
463 dma_on = 1;
464}
465
466static void dma_ints_off(struct NCR_ESP *esp)
467{
468 disable_irq(esp->irq);
469}
470
471static void dma_ints_on(struct NCR_ESP *esp)
472{
473 enable_irq(esp->irq);
474}
475
476static int dma_irq_p(struct NCR_ESP *esp)
477{
478 /* It's important to check the DMA IRQ bit in the correct way! */
479 return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
480}
481
482static void dma_led_off(struct NCR_ESP *esp)
483{
484}
485
486static void dma_led_on(struct NCR_ESP *esp)
487{
488}
489
490static int dma_ports_p(struct NCR_ESP *esp)
491{
492 return ((amiga_custom.intenar) & IF_PORTS);
493}
494
495static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
496{
497 /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
498 * so when (write) is true, it actually means READ!
499 */
500 if(write){
501 dma_init_read(esp, addr, count);
502 } else {
503 dma_init_write(esp, addr, count);
504 }
505}
506
507/*
508 * IRQ entry when DMA transfer is ready to be started
509 */
510
511static void dma_irq_exit(struct NCR_ESP *esp)
512{
513#ifdef USE_BOTTOM_HALF
514 if(dma_on)
515 {
516 schedule_work(&tq_fake_dma);
517 }
518#else
519 while(len && !dma_irq_p(esp))
520 {
521 if(direction)
522 *paddress = *address++;
523 else
524 *address++ = *paddress;
525 len -= (sizeof(long));
526 }
527 len = 0;
528 dma_on = 0;
529#endif
530}
531
532/*
533 * IRQ entry when DMA has just finished
534 */
535
536static void dma_invalidate(struct NCR_ESP *esp)
537{
538}
539
540/*
541 * Since the processor does the data transfer we have to use the custom
542 * mmu interface to pass the virtual address, not the physical.
543 */
544
545void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
546{
547 sp->SCp.ptr =
548 sp->request_buffer;
549}
550
551void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
552{
553 sp->SCp.ptr = sg_virt(sp->SCp.buffer);
554}
555
556void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
557{
558}
559
560void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
561{
562}
563
564void dma_advance_sg(Scsi_Cmnd *sp)
565{
566 sp->SCp.ptr = sg_virt(sp->SCp.buffer);
567}
568
569
570#define HOSTS_C
571
572int oktagon_esp_release(struct Scsi_Host *instance)
573{
574#ifdef MODULE
575 unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
576 esp_release();
577 release_mem_region(address, sizeof(struct ESP_regs));
578 free_irq(IRQ_AMIGA_PORTS, esp_intr);
579 unregister_reboot_notifier(&oktagon_notifier);
580#endif
581 return 1;
582}
583
584
585static struct scsi_host_template driver_template = {
586 .proc_name = "esp-oktagon",
587 .proc_info = &esp_proc_info,
588 .name = "BSC Oktagon SCSI",
589 .detect = oktagon_esp_detect,
590 .slave_alloc = esp_slave_alloc,
591 .slave_destroy = esp_slave_destroy,
592 .release = oktagon_esp_release,
593 .queuecommand = esp_queue,
594 .eh_abort_handler = esp_abort,
595 .eh_bus_reset_handler = esp_reset,
596 .can_queue = 7,
597 .this_id = 7,
598 .sg_tablesize = SG_ALL,
599 .cmd_per_lun = 1,
600 .use_clustering = ENABLE_CLUSTERING
601};
602
603
604#include "scsi_module.c"
605
606MODULE_LICENSE("GPL");
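Because the bsc Oktagon has no real DMA engine, dma_commit() above moves the data itself and backs off with doubling delays when the device stops accepting words, bailing out once the wait exceeds roughly a second. The following is a minimal userspace sketch of that loop, offered only as an illustration under those assumptions; reg_push() is a hypothetical stand-in for the oktag_to_io() assembly helper (which additionally catches bus faults).

#include <unistd.h>

static volatile long fake_io_reg;	/* stands in for the Oktagon data port */

/* Copy 'len' bytes (rounded up to longwords) into the port; return words moved. */
static long reg_push(volatile long *port, const long *buf, long len)
{
	long words = (len + (long)sizeof(long) - 1) / (long)sizeof(long);
	long i;

	for (i = 0; i < words; i++)
		*port = buf[i];
	return words;
}

/* Returns the number of bytes left untransferred (0 on a full transfer). */
static long fake_dma_write(const long *buf, long len)
{
	long wait_ms = 1, pos = 0, done;

	while (len > 0) {
		done = reg_push(&fake_io_reg, buf + pos, len);
		if (!done) {
			if (wait_ms > 1000)
				break;			/* device stalled for good */
			usleep(wait_ms * 1000);		/* mdelay(wait) in the driver */
			wait_ms *= 2;
		}
		pos += done;
		len -= done * (long)sizeof(long);
	}
	return len > 0 ? len : 0;
}

The 1000 ms cap mirrors the driver's "Expedited DMA exit" bail-out above.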
diff --git a/drivers/scsi/oktagon_io.S b/drivers/scsi/oktagon_io.S
deleted file mode 100644
index 8a7340b02707..000000000000
--- a/drivers/scsi/oktagon_io.S
+++ /dev/null
@@ -1,194 +0,0 @@
1/* -*- mode: asm -*-
2 * Due to problems while transferring data I've written these routines in
3 * assembly.
4 * Since I'm no PPC assembler guru, the code is just the assembler version of
5
6int oktag_to_io(long *paddr,long *addr,long len)
7{
8 long *addr2 = addr;
9 for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
10 *paddr = *addr2++;
11 return addr2 - addr;
12}
13
14int oktag_from_io(long *addr,long *paddr,long len)
15{
16 long *addr2 = addr;
17 for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
18 *addr2++ = *paddr;
19 return addr2 - addr;
20}
21
22 * assembled using gcc -O2 -S, with two exception catch points where data
23 * is moved to/from the IO register.
24 */
25
26
27#ifdef CONFIG_APUS
28
29 .file "oktagon_io.c"
30
31gcc2_compiled.:
32/*
33 .section ".text"
34*/
35 .align 2
36 .globl oktag_to_io
37 .type oktag_to_io,@function
38oktag_to_io:
39 addi 5,5,3
40 srwi 5,5,2
41 cmpwi 1,5,0
42 mr 9,3
43 mr 3,4
44 addi 5,5,-1
45 bc 12,6,.L3
46.L5:
47 cmpwi 1,5,0
48 lwz 0,0(3)
49 addi 3,3,4
50 addi 5,5,-1
51exp1: stw 0,0(9)
52 bc 4,6,.L5
53.L3:
54ret1: subf 3,4,3
55 srawi 3,3,2
56 blr
57.Lfe1:
58 .size oktag_to_io,.Lfe1-oktag_to_io
59 .align 2
60 .globl oktag_from_io
61 .type oktag_from_io,@function
62oktag_from_io:
63 addi 5,5,3
64 srwi 5,5,2
65 cmpwi 1,5,0
66 mr 9,3
67 addi 5,5,-1
68 bc 12,6,.L9
69.L11:
70 cmpwi 1,5,0
71exp2: lwz 0,0(4)
72 addi 5,5,-1
73 stw 0,0(3)
74 addi 3,3,4
75 bc 4,6,.L11
76.L9:
77ret2: subf 3,9,3
78 srawi 3,3,2
79 blr
80.Lfe2:
81 .size oktag_from_io,.Lfe2-oktag_from_io
82 .ident "GCC: (GNU) egcs-2.90.29 980515 (egcs-1.0.3 release)"
83
84/*
85 * Exception table.
86 * Second longword shows where to jump when an exception at the addr the first
87 * longword is pointing to is caught.
88 */
89
90.section __ex_table,"a"
91 .align 2
92oktagon_except:
93 .long exp1,ret1
94 .long exp2,ret2
95
96#else
97
98/*
99The code which follows is for 680x0 based assembler and is meant for
100Linux/m68k. It was created by cross compiling the code using the
101instructions given above. I then added the four labels used in the
102exception handler table at the bottom of this file.
103- Kevin <kcozens@interlog.com>
104*/
105
106#ifdef CONFIG_AMIGA
107
108 .file "oktagon_io.c"
109 .version "01.01"
110gcc2_compiled.:
111.text
112 .align 2
113.globl oktag_to_io
114 .type oktag_to_io,@function
115oktag_to_io:
116 link.w %a6,#0
117 move.l %d2,-(%sp)
118 move.l 8(%a6),%a1
119 move.l 12(%a6),%d1
120 move.l %d1,%a0
121 move.l 16(%a6),%d0
122 addq.l #3,%d0
123 lsr.l #2,%d0
124 subq.l #1,%d0
125 moveq.l #-1,%d2
126 cmp.l %d0,%d2
127 jbeq .L3
128.L5:
129exp1:
130 move.l (%a0)+,(%a1)
131 dbra %d0,.L5
132 clr.w %d0
133 subq.l #1,%d0
134 jbcc .L5
135.L3:
136ret1:
137 move.l %a0,%d0
138 sub.l %d1,%d0
139 asr.l #2,%d0
140 move.l -4(%a6),%d2
141 unlk %a6
142 rts
143
144.Lfe1:
145 .size oktag_to_io,.Lfe1-oktag_to_io
146 .align 2
147.globl oktag_from_io
148 .type oktag_from_io,@function
149oktag_from_io:
150 link.w %a6,#0
151 move.l %d2,-(%sp)
152 move.l 8(%a6),%d1
153 move.l 12(%a6),%a1
154 move.l %d1,%a0
155 move.l 16(%a6),%d0
156 addq.l #3,%d0
157 lsr.l #2,%d0
158 subq.l #1,%d0
159 moveq.l #-1,%d2
160 cmp.l %d0,%d2
161 jbeq .L9
162.L11:
163exp2:
164 move.l (%a1),(%a0)+
165 dbra %d0,.L11
166 clr.w %d0
167 subq.l #1,%d0
168 jbcc .L11
169.L9:
170ret2:
171 move.l %a0,%d0
172 sub.l %d1,%d0
173 asr.l #2,%d0
174 move.l -4(%a6),%d2
175 unlk %a6
176 rts
177.Lfe2:
178 .size oktag_from_io,.Lfe2-oktag_from_io
179 .ident "GCC: (GNU) 2.7.2.1"
180
181/*
182 * Exception table.
183 * Second longword shows where to jump when an exception at the addr the first
184 * longword is pointing to is caught.
185 */
186
187.section __ex_table,"a"
188 .align 2
189oktagon_except:
190 .long exp1,ret1
191 .long exp2,ret2
192
193#endif
194#endif
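The __ex_table entries above give the kernel a fixup address to jump to when the marked load or store faults, so the copy loop can stop cleanly and report how much it moved instead of oopsing. As a rough userspace analogue only (this is not how kernel exception tables are implemented), the sketch below catches the fault with a signal handler and returns the partial word count; every name in it is invented for the illustration.

#include <setjmp.h>
#include <signal.h>

static sigjmp_buf fault_jmp;

static void fault_handler(int sig)
{
	(void)sig;
	siglongjmp(fault_jmp, 1);
}

/* Copy nwords longwords; on a faulting access, return how many were copied. */
static long copy_words_catching_faults(long *dst, const long *src, long nwords)
{
	struct sigaction sa;
	volatile long i = 0;	/* volatile: value must survive siglongjmp() */

	sa.sa_handler = fault_handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	sigaction(SIGBUS, &sa, NULL);

	if (sigsetjmp(fault_jmp, 1) == 0)
		for (i = 0; i < nwords; i++)
			dst[i] = src[i];

	return i;	/* like ret1/ret2 above: words moved before the fault */
}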
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 17b4a7c4618c..0cd614a0fa73 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -35,7 +35,7 @@
35 35
36#define BOUNCE_SIZE (64*1024) 36#define BOUNCE_SIZE (64*1024)
37 37
38#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE / CD_FRAMESIZE) 38#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9)
39 39
40 40
41struct ps3rom_private { 41struct ps3rom_private {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index adf97320574b..4894dc886b62 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -428,6 +428,19 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
428 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2) 428 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
429 return 0; 429 return 0;
430 430
431 if (ha->sfp_data)
432 goto do_read;
433
434 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
435 &ha->sfp_data_dma);
436 if (!ha->sfp_data) {
437 qla_printk(KERN_WARNING, ha,
438 "Unable to allocate memory for SFP read-data.\n");
439 return 0;
440 }
441
442do_read:
443 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
431 addr = 0xa0; 444 addr = 0xa0;
432 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE; 445 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
433 iter++, offset += SFP_BLOCK_SIZE) { 446 iter++, offset += SFP_BLOCK_SIZE) {
@@ -835,7 +848,7 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
835static void 848static void
836qla2x00_get_host_speed(struct Scsi_Host *shost) 849qla2x00_get_host_speed(struct Scsi_Host *shost)
837{ 850{
838 scsi_qla_host_t *ha = shost_priv(shost); 851 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
839 uint32_t speed = 0; 852 uint32_t speed = 0;
840 853
841 switch (ha->link_data_rate) { 854 switch (ha->link_data_rate) {
@@ -848,6 +861,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
848 case PORT_SPEED_4GB: 861 case PORT_SPEED_4GB:
849 speed = 4; 862 speed = 4;
850 break; 863 break;
864 case PORT_SPEED_8GB:
865 speed = 8;
866 break;
851 } 867 }
852 fc_host_speed(shost) = speed; 868 fc_host_speed(shost) = speed;
853} 869}
@@ -855,7 +871,7 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
855static void 871static void
856qla2x00_get_host_port_type(struct Scsi_Host *shost) 872qla2x00_get_host_port_type(struct Scsi_Host *shost)
857{ 873{
858 scsi_qla_host_t *ha = shost_priv(shost); 874 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
859 uint32_t port_type = FC_PORTTYPE_UNKNOWN; 875 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
860 876
861 switch (ha->current_topology) { 877 switch (ha->current_topology) {
@@ -965,7 +981,7 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
965static struct fc_host_statistics * 981static struct fc_host_statistics *
966qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 982qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
967{ 983{
968 scsi_qla_host_t *ha = shost_priv(shost); 984 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
969 int rval; 985 int rval;
970 struct link_statistics *stats; 986 struct link_statistics *stats;
971 dma_addr_t stats_dma; 987 dma_addr_t stats_dma;
@@ -1049,7 +1065,7 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1049static void 1065static void
1050qla2x00_get_host_port_state(struct Scsi_Host *shost) 1066qla2x00_get_host_port_state(struct Scsi_Host *shost)
1051{ 1067{
1052 scsi_qla_host_t *ha = shost_priv(shost); 1068 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
1053 1069
1054 if (!ha->flags.online) 1070 if (!ha->flags.online)
1055 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1071 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b72c7f170854..3750319f4968 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2041,8 +2041,6 @@ typedef struct vport_params {
2041#define VP_RET_CODE_NO_MEM 5 2041#define VP_RET_CODE_NO_MEM 5
2042#define VP_RET_CODE_NOT_FOUND 6 2042#define VP_RET_CODE_NOT_FOUND 6
2043 2043
2044#define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x))
2045
2046/* 2044/*
2047 * ISP operations 2045 * ISP operations
2048 */ 2046 */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ba35fc26ce6b..193f688ec3d7 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern int ql2xqfullrampup;
66extern int num_hosts; 66extern int num_hosts;
67 67
68extern int qla2x00_loop_reset(scsi_qla_host_t *); 68extern int qla2x00_loop_reset(scsi_qla_host_t *);
69extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
69 70
70/* 71/*
71 * Global Functions in qla_mid.c source file. 72 * Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d0633ca894be..d5c7853e7eba 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -925,6 +925,16 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
925{ 925{
926 int rval; 926 int rval;
927 uint32_t srisc_address = 0; 927 uint32_t srisc_address = 0;
928 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
929 unsigned long flags;
930
931 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
932 /* Disable SRAM, Instruction RAM and GP RAM parity. */
933 spin_lock_irqsave(&ha->hardware_lock, flags);
934 WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
935 RD_REG_WORD(&reg->hccr);
936 spin_unlock_irqrestore(&ha->hardware_lock, flags);
937 }
928 938
929 /* Load firmware sequences */ 939 /* Load firmware sequences */
930 rval = ha->isp_ops->load_risc(ha, &srisc_address); 940 rval = ha->isp_ops->load_risc(ha, &srisc_address);
@@ -968,6 +978,19 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
968 } 978 }
969 } 979 }
970 980
981 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
982 /* Enable proper parity. */
983 spin_lock_irqsave(&ha->hardware_lock, flags);
984 if (IS_QLA2300(ha))
985 /* SRAM parity */
986 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
987 else
988 /* SRAM, Instruction RAM and GP RAM parity */
989 WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
990 RD_REG_WORD(&reg->hccr);
991 spin_unlock_irqrestore(&ha->hardware_lock, flags);
992 }
993
971 if (rval) { 994 if (rval) {
972 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 995 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
973 ha->host_no)); 996 ha->host_no));
@@ -3213,9 +3236,6 @@ int
3213qla2x00_abort_isp(scsi_qla_host_t *ha) 3236qla2x00_abort_isp(scsi_qla_host_t *ha)
3214{ 3237{
3215 int rval; 3238 int rval;
3216 unsigned long flags = 0;
3217 uint16_t cnt;
3218 srb_t *sp;
3219 uint8_t status = 0; 3239 uint8_t status = 0;
3220 3240
3221 if (ha->flags.online) { 3241 if (ha->flags.online) {
@@ -3236,19 +3256,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3236 LOOP_DOWN_TIME); 3256 LOOP_DOWN_TIME);
3237 } 3257 }
3238 3258
3239 spin_lock_irqsave(&ha->hardware_lock, flags);
3240 /* Requeue all commands in outstanding command list. */ 3259 /* Requeue all commands in outstanding command list. */
3241 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 3260 qla2x00_abort_all_cmds(ha, DID_RESET << 16);
3242 sp = ha->outstanding_cmds[cnt];
3243 if (sp) {
3244 ha->outstanding_cmds[cnt] = NULL;
3245 sp->flags = 0;
3246 sp->cmd->result = DID_RESET << 16;
3247 sp->cmd->host_scribble = (unsigned char *)NULL;
3248 qla2x00_sp_compl(ha, sp);
3249 }
3250 }
3251 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3252 3261
3253 ha->isp_ops->get_flash_version(ha, ha->request_ring); 3262 ha->isp_ops->get_flash_version(ha, ha->request_ring);
3254 3263
@@ -3273,6 +3282,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3273 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3282 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3274 3283
3275 if (ha->eft) { 3284 if (ha->eft) {
3285 memset(ha->eft, 0, EFT_SIZE);
3276 rval = qla2x00_enable_eft_trace(ha, 3286 rval = qla2x00_enable_eft_trace(ha,
3277 ha->eft_dma, EFT_NUM_BUFFERS); 3287 ha->eft_dma, EFT_NUM_BUFFERS);
3278 if (rval) { 3288 if (rval) {
@@ -3357,60 +3367,15 @@ static int
3357qla2x00_restart_isp(scsi_qla_host_t *ha) 3367qla2x00_restart_isp(scsi_qla_host_t *ha)
3358{ 3368{
3359 uint8_t status = 0; 3369 uint8_t status = 0;
3360 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3361 unsigned long flags = 0;
3362 uint32_t wait_time; 3370 uint32_t wait_time;
3363 3371
3364 /* If firmware needs to be loaded */ 3372 /* If firmware needs to be loaded */
3365 if (qla2x00_isp_firmware(ha)) { 3373 if (qla2x00_isp_firmware(ha)) {
3366 ha->flags.online = 0; 3374 ha->flags.online = 0;
3367 if (!(status = ha->isp_ops->chip_diag(ha))) { 3375 if (!(status = ha->isp_ops->chip_diag(ha)))
3368 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3369 status = qla2x00_setup_chip(ha);
3370 goto done;
3371 }
3372
3373 spin_lock_irqsave(&ha->hardware_lock, flags);
3374
3375 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) &&
3376 !IS_QLA25XX(ha)) {
3377 /*
3378 * Disable SRAM, Instruction RAM and GP RAM
3379 * parity.
3380 */
3381 WRT_REG_WORD(&reg->hccr,
3382 (HCCR_ENABLE_PARITY + 0x0));
3383 RD_REG_WORD(&reg->hccr);
3384 }
3385
3386 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3387
3388 status = qla2x00_setup_chip(ha); 3376 status = qla2x00_setup_chip(ha);
3389
3390 spin_lock_irqsave(&ha->hardware_lock, flags);
3391
3392 if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) &&
3393 !IS_QLA25XX(ha)) {
3394 /* Enable proper parity */
3395 if (IS_QLA2300(ha))
3396 /* SRAM parity */
3397 WRT_REG_WORD(&reg->hccr,
3398 (HCCR_ENABLE_PARITY + 0x1));
3399 else
3400 /*
3401 * SRAM, Instruction RAM and GP RAM
3402 * parity.
3403 */
3404 WRT_REG_WORD(&reg->hccr,
3405 (HCCR_ENABLE_PARITY + 0x7));
3406 RD_REG_WORD(&reg->hccr);
3407 }
3408
3409 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3410 }
3411 } 3377 }
3412 3378
3413 done:
3414 if (!status && !(status = qla2x00_init_rings(ha))) { 3379 if (!status && !(status = qla2x00_init_rings(ha))) {
3415 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3380 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
3416 if (!(status = qla2x00_fw_ready(ha))) { 3381 if (!(status = qla2x00_fw_ready(ha))) {
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 8e3b04464cff..5d1a3f7c408f 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -119,6 +119,13 @@ static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *ha)
119 qla2x00_get_firmware_state(ha, &fw_state); 119 qla2x00_get_firmware_state(ha, &fw_state);
120} 120}
121 121
122static __inline__ scsi_qla_host_t * to_qla_parent(scsi_qla_host_t *);
123static __inline__ scsi_qla_host_t *
124to_qla_parent(scsi_qla_host_t *ha)
125{
126 return ha->parent ? ha->parent : ha;
127}
128
122/** 129/**
123 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary. 130 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
124 * @ha: HA context 131 * @ha: HA context
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 642a0c3f09c6..14e6f22944b7 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1815,6 +1815,8 @@ int
1815qla2x00_request_irqs(scsi_qla_host_t *ha) 1815qla2x00_request_irqs(scsi_qla_host_t *ha)
1816{ 1816{
1817 int ret; 1817 int ret;
1818 device_reg_t __iomem *reg = ha->iobase;
1819 unsigned long flags;
1818 1820
1819 /* If possible, enable MSI-X. */ 1821 /* If possible, enable MSI-X. */
1820 if (!IS_QLA2432(ha) && !IS_QLA2532(ha)) 1822 if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
@@ -1846,7 +1848,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1846 DEBUG2(qla_printk(KERN_INFO, ha, 1848 DEBUG2(qla_printk(KERN_INFO, ha,
1847 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 1849 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1848 ha->fw_attributes)); 1850 ha->fw_attributes));
1849 return ret; 1851 goto clear_risc_ints;
1850 } 1852 }
1851 qla_printk(KERN_WARNING, ha, 1853 qla_printk(KERN_WARNING, ha,
1852 "MSI-X: Falling back-to INTa mode -- %d.\n", ret); 1854 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
@@ -1864,15 +1866,30 @@ skip_msi:
1864 1866
1865 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1867 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1866 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1868 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1867 if (!ret) { 1869 if (ret) {
1868 ha->flags.inta_enabled = 1;
1869 ha->host->irq = ha->pdev->irq;
1870 } else {
1871 qla_printk(KERN_WARNING, ha, 1870 qla_printk(KERN_WARNING, ha,
1872 "Failed to reserve interrupt %d already in use.\n", 1871 "Failed to reserve interrupt %d already in use.\n",
1873 ha->pdev->irq); 1872 ha->pdev->irq);
1873 goto fail;
1874 }
1875 ha->flags.inta_enabled = 1;
1876 ha->host->irq = ha->pdev->irq;
1877clear_risc_ints:
1878
1879 ha->isp_ops->disable_intrs(ha);
1880 spin_lock_irqsave(&ha->hardware_lock, flags);
1881 if (IS_FWI2_CAPABLE(ha)) {
1882 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1883 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1884 } else {
1885 WRT_REG_WORD(&reg->isp.semaphore, 0);
1886 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1887 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1874 } 1888 }
1889 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1890 ha->isp_ops->enable_intrs(ha);
1875 1891
1892fail:
1876 return ret; 1893 return ret;
1877} 1894}
1878 1895
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 0c10c0b0fb73..99d29fff836d 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -980,7 +980,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
980 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 980 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
981 ha->host_no)); 981 ha->host_no));
982 982
983 if (ha->fw_attributes & BIT_2) 983 if (ha->flags.npiv_supported)
984 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 984 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
985 else 985 else
986 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 986 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8f69caf83272..3c1b43356adb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -204,10 +204,8 @@ static int qla2x00_do_dpc(void *data);
204 204
205static void qla2x00_rst_aen(scsi_qla_host_t *); 205static void qla2x00_rst_aen(scsi_qla_host_t *);
206 206
207static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *); 207static int qla2x00_mem_alloc(scsi_qla_host_t *);
208static void qla2x00_mem_free(scsi_qla_host_t *ha); 208static void qla2x00_mem_free(scsi_qla_host_t *ha);
209static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
210static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
211static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 209static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
212 210
213/* -------------------------------------------------------------------------- */ 211/* -------------------------------------------------------------------------- */
@@ -1117,6 +1115,27 @@ qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
1117 return ha->isp_ops->abort_target(reset_fcport); 1115 return ha->isp_ops->abort_target(reset_fcport);
1118} 1116}
1119 1117
1118void
1119qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
1120{
1121 int cnt;
1122 unsigned long flags;
1123 srb_t *sp;
1124
1125 spin_lock_irqsave(&ha->hardware_lock, flags);
1126 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1127 sp = ha->outstanding_cmds[cnt];
1128 if (sp) {
1129 ha->outstanding_cmds[cnt] = NULL;
1130 sp->flags = 0;
1131 sp->cmd->result = res;
1132 sp->cmd->host_scribble = (unsigned char *)NULL;
1133 qla2x00_sp_compl(ha, sp);
1134 }
1135 }
1136 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1137}
1138
1120static int 1139static int
1121qla2xxx_slave_alloc(struct scsi_device *sdev) 1140qla2xxx_slave_alloc(struct scsi_device *sdev)
1122{ 1141{
@@ -1557,10 +1576,8 @@ static int __devinit
1557qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 1576qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1558{ 1577{
1559 int ret = -ENODEV; 1578 int ret = -ENODEV;
1560 device_reg_t __iomem *reg;
1561 struct Scsi_Host *host; 1579 struct Scsi_Host *host;
1562 scsi_qla_host_t *ha; 1580 scsi_qla_host_t *ha;
1563 unsigned long flags = 0;
1564 char pci_info[30]; 1581 char pci_info[30];
1565 char fw_str[30]; 1582 char fw_str[30];
1566 struct scsi_host_template *sht; 1583 struct scsi_host_template *sht;
@@ -1608,6 +1625,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1608 ha->parent = NULL; 1625 ha->parent = NULL;
1609 ha->bars = bars; 1626 ha->bars = bars;
1610 ha->mem_only = mem_only; 1627 ha->mem_only = mem_only;
1628 spin_lock_init(&ha->hardware_lock);
1611 1629
1612 /* Set ISP-type information. */ 1630 /* Set ISP-type information. */
1613 qla2x00_set_isp_flags(ha); 1631 qla2x00_set_isp_flags(ha);
@@ -1621,8 +1639,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1621 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1639 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
1622 ha->iobase); 1640 ha->iobase);
1623 1641
1624 spin_lock_init(&ha->hardware_lock);
1625
1626 ha->prev_topology = 0; 1642 ha->prev_topology = 0;
1627 ha->init_cb_size = sizeof(init_cb_t); 1643 ha->init_cb_size = sizeof(init_cb_t);
1628 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx; 1644 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
@@ -1751,34 +1767,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1751 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1767 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1752 ha->host_no, ha)); 1768 ha->host_no, ha));
1753 1769
1754 ha->isp_ops->disable_intrs(ha);
1755
1756 spin_lock_irqsave(&ha->hardware_lock, flags);
1757 reg = ha->iobase;
1758 if (IS_FWI2_CAPABLE(ha)) {
1759 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1760 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1761 } else {
1762 WRT_REG_WORD(&reg->isp.semaphore, 0);
1763 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1764 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1765
1766 /* Enable proper parity */
1767 if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) {
1768 if (IS_QLA2300(ha))
1769 /* SRAM parity */
1770 WRT_REG_WORD(&reg->isp.hccr,
1771 (HCCR_ENABLE_PARITY + 0x1));
1772 else
1773 /* SRAM, Instruction RAM and GP RAM parity */
1774 WRT_REG_WORD(&reg->isp.hccr,
1775 (HCCR_ENABLE_PARITY + 0x7));
1776 }
1777 }
1778 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1779
1780 ha->isp_ops->enable_intrs(ha);
1781
1782 pci_set_drvdata(pdev, ha); 1770 pci_set_drvdata(pdev, ha);
1783 1771
1784 ha->flags.init_done = 1; 1772 ha->flags.init_done = 1;
@@ -1848,10 +1836,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
1848static void 1836static void
1849qla2x00_free_device(scsi_qla_host_t *ha) 1837qla2x00_free_device(scsi_qla_host_t *ha)
1850{ 1838{
1839 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16);
1840
1851 /* Disable timer */ 1841 /* Disable timer */
1852 if (ha->timer_active) 1842 if (ha->timer_active)
1853 qla2x00_stop_timer(ha); 1843 qla2x00_stop_timer(ha);
1854 1844
1845 ha->flags.online = 0;
1846
1855 /* Kill the kernel thread for this host */ 1847 /* Kill the kernel thread for this host */
1856 if (ha->dpc_thread) { 1848 if (ha->dpc_thread) {
1857 struct task_struct *t = ha->dpc_thread; 1849 struct task_struct *t = ha->dpc_thread;
@@ -1870,8 +1862,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1870 if (ha->eft) 1862 if (ha->eft)
1871 qla2x00_disable_eft_trace(ha); 1863 qla2x00_disable_eft_trace(ha);
1872 1864
1873 ha->flags.online = 0;
1874
1875 /* Stop currently executing firmware. */ 1865 /* Stop currently executing firmware. */
1876 qla2x00_try_to_stop_firmware(ha); 1866 qla2x00_try_to_stop_firmware(ha);
1877 1867
@@ -2010,196 +2000,109 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
2010* 2000*
2011* Returns: 2001* Returns:
2012* 0 = success. 2002* 0 = success.
2013* 1 = failure. 2003* !0 = failure.
2014*/ 2004*/
2015static uint8_t 2005static int
2016qla2x00_mem_alloc(scsi_qla_host_t *ha) 2006qla2x00_mem_alloc(scsi_qla_host_t *ha)
2017{ 2007{
2018 char name[16]; 2008 char name[16];
2019 uint8_t status = 1;
2020 int retry= 10;
2021
2022 do {
2023 /*
2024 * This will loop only once if everything goes well, else some
2025 * number of retries will be performed to get around a kernel
2026 * bug where available mem is not allocated until after a
2027 * little delay and a retry.
2028 */
2029 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
2030 (ha->request_q_length + 1) * sizeof(request_t),
2031 &ha->request_dma, GFP_KERNEL);
2032 if (ha->request_ring == NULL) {
2033 qla_printk(KERN_WARNING, ha,
2034 "Memory Allocation failed - request_ring\n");
2035
2036 qla2x00_mem_free(ha);
2037 msleep(100);
2038
2039 continue;
2040 }
2041
2042 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
2043 (ha->response_q_length + 1) * sizeof(response_t),
2044 &ha->response_dma, GFP_KERNEL);
2045 if (ha->response_ring == NULL) {
2046 qla_printk(KERN_WARNING, ha,
2047 "Memory Allocation failed - response_ring\n");
2048
2049 qla2x00_mem_free(ha);
2050 msleep(100);
2051
2052 continue;
2053 }
2054
2055 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2056 &ha->gid_list_dma, GFP_KERNEL);
2057 if (ha->gid_list == NULL) {
2058 qla_printk(KERN_WARNING, ha,
2059 "Memory Allocation failed - gid_list\n");
2060
2061 qla2x00_mem_free(ha);
2062 msleep(100);
2063
2064 continue;
2065 }
2066
2067 /* get consistent memory allocated for init control block */
2068 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev,
2069 ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL);
2070 if (ha->init_cb == NULL) {
2071 qla_printk(KERN_WARNING, ha,
2072 "Memory Allocation failed - init_cb\n");
2073
2074 qla2x00_mem_free(ha);
2075 msleep(100);
2076
2077 continue;
2078 }
2079 memset(ha->init_cb, 0, ha->init_cb_size);
2080
2081 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
2082 ha->host_no);
2083 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2084 DMA_POOL_SIZE, 8, 0);
2085 if (ha->s_dma_pool == NULL) {
2086 qla_printk(KERN_WARNING, ha,
2087 "Memory Allocation failed - s_dma_pool\n");
2088
2089 qla2x00_mem_free(ha);
2090 msleep(100);
2091
2092 continue;
2093 }
2094
2095 if (qla2x00_allocate_sp_pool(ha)) {
2096 qla_printk(KERN_WARNING, ha,
2097 "Memory Allocation failed - "
2098 "qla2x00_allocate_sp_pool()\n");
2099
2100 qla2x00_mem_free(ha);
2101 msleep(100);
2102
2103 continue;
2104 }
2105
2106 /* Allocate memory for SNS commands */
2107 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2108 /* Get consistent memory allocated for SNS commands */
2109 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2110 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma,
2111 GFP_KERNEL);
2112 if (ha->sns_cmd == NULL) {
2113 /* error */
2114 qla_printk(KERN_WARNING, ha,
2115 "Memory Allocation failed - sns_cmd\n");
2116
2117 qla2x00_mem_free(ha);
2118 msleep(100);
2119
2120 continue;
2121 }
2122 memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt));
2123 } else {
2124 /* Get consistent memory allocated for MS IOCB */
2125 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2126 &ha->ms_iocb_dma);
2127 if (ha->ms_iocb == NULL) {
2128 /* error */
2129 qla_printk(KERN_WARNING, ha,
2130 "Memory Allocation failed - ms_iocb\n");
2131
2132 qla2x00_mem_free(ha);
2133 msleep(100);
2134
2135 continue;
2136 }
2137 memset(ha->ms_iocb, 0, sizeof(ms_iocb_entry_t));
2138
2139 /*
2140 * Get consistent memory allocated for CT SNS
2141 * commands
2142 */
2143 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2144 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma,
2145 GFP_KERNEL);
2146 if (ha->ct_sns == NULL) {
2147 /* error */
2148 qla_printk(KERN_WARNING, ha,
2149 "Memory Allocation failed - ct_sns\n");
2150
2151 qla2x00_mem_free(ha);
2152 msleep(100);
2153
2154 continue;
2155 }
2156 memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
2157
2158 if (IS_FWI2_CAPABLE(ha)) {
2159 /*
2160 * Get consistent memory allocated for SFP
2161 * block.
2162 */
2163 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool,
2164 GFP_KERNEL, &ha->sfp_data_dma);
2165 if (ha->sfp_data == NULL) {
2166 qla_printk(KERN_WARNING, ha,
2167 "Memory Allocation failed - "
2168 "sfp_data\n");
2169
2170 qla2x00_mem_free(ha);
2171 msleep(100);
2172
2173 continue;
2174 }
2175 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
2176 }
2177 }
2178
2179 /* Get memory for cached NVRAM */
2180 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2181 if (ha->nvram == NULL) {
2182 /* error */
2183 qla_printk(KERN_WARNING, ha,
2184 "Memory Allocation failed - nvram cache\n");
2185
2186 qla2x00_mem_free(ha);
2187 msleep(100);
2188
2189 continue;
2190 }
2191
2192 /* Done all allocations without any error. */
2193 status = 0;
2194
2195 } while (retry-- && status != 0);
2196
2197 if (status) {
2198 printk(KERN_WARNING
2199 "%s(): **** FAILED ****\n", __func__);
2200 }
2201
2202 return(status);
2203}
2204
2009
2010 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
2011 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma,
2012 GFP_KERNEL);
2013 if (!ha->request_ring)
2014 goto fail;
2015
2016 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
2017 (ha->response_q_length + 1) * sizeof(response_t),
2018 &ha->response_dma, GFP_KERNEL);
2019 if (!ha->response_ring)
2020 goto fail_free_request_ring;
2021
2022 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
2023 &ha->gid_list_dma, GFP_KERNEL);
2024 if (!ha->gid_list)
2025 goto fail_free_response_ring;
2026
2027 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
2028 &ha->init_cb_dma, GFP_KERNEL);
2029 if (!ha->init_cb)
2030 goto fail_free_gid_list;
2031
2032 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
2033 ha->host_no);
2034 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2035 DMA_POOL_SIZE, 8, 0);
2036 if (!ha->s_dma_pool)
2037 goto fail_free_init_cb;
2038
2039 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2040 if (!ha->srb_mempool)
2041 goto fail_free_s_dma_pool;
2042
2043 /* Get memory for cached NVRAM */
2044 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2045 if (!ha->nvram)
2046 goto fail_free_srb_mempool;
2047
2048 /* Allocate memory for SNS commands */
2049 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2050 /* Get consistent memory allocated for SNS commands */
2051 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2052 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2053 if (!ha->sns_cmd)
2054 goto fail_free_nvram;
2055 } else {
2056 /* Get consistent memory allocated for MS IOCB */
2057 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2058 &ha->ms_iocb_dma);
2059 if (!ha->ms_iocb)
2060 goto fail_free_nvram;
2061
2062 /* Get consistent memory allocated for CT SNS commands */
2063 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2064 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2065 if (!ha->ct_sns)
2066 goto fail_free_ms_iocb;
2067 }
2068
2069 return 0;
2070
2071fail_free_ms_iocb:
2072 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2073 ha->ms_iocb = NULL;
2074 ha->ms_iocb_dma = 0;
2075fail_free_nvram:
2076 kfree(ha->nvram);
2077 ha->nvram = NULL;
2078fail_free_srb_mempool:
2079 mempool_destroy(ha->srb_mempool);
2080 ha->srb_mempool = NULL;
2081fail_free_s_dma_pool:
2082 dma_pool_destroy(ha->s_dma_pool);
2083 ha->s_dma_pool = NULL;
2084fail_free_init_cb:
2085 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2086 ha->init_cb_dma);
2087 ha->init_cb = NULL;
2088 ha->init_cb_dma = 0;
2089fail_free_gid_list:
2090 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2091 ha->gid_list_dma);
2092 ha->gid_list = NULL;
2093 ha->gid_list_dma = 0;
2094fail_free_response_ring:
2095 dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) *
2096 sizeof(response_t), ha->response_ring, ha->response_dma);
2097 ha->response_ring = NULL;
2098 ha->response_dma = 0;
2099fail_free_request_ring:
2100 dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
2101 sizeof(request_t), ha->request_ring, ha->request_dma);
2102 ha->request_ring = NULL;
2103 ha->request_dma = 0;
2104fail:
2105 return -ENOMEM;
2106}
2107
2205/* 2108/*
@@ -2215,14 +2118,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2215 struct list_head *fcpl, *fcptemp; 2118 struct list_head *fcpl, *fcptemp;
2216 fc_port_t *fcport; 2119 fc_port_t *fcport;
2217 2120
2218 if (ha == NULL) {
2219 /* error */
2220 DEBUG2(printk("%s(): ERROR invalid ha pointer.\n", __func__));
2221 return;
2222 }
2223
2224 /* free sp pool */
2225 qla2x00_free_sp_pool(ha);
2226
2121 if (ha->srb_mempool)
2122 mempool_destroy(ha->srb_mempool);
2123
2227 if (ha->fce) 2124 if (ha->fce)
2228 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2125 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -2270,6 +2167,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2270 (ha->request_q_length + 1) * sizeof(request_t), 2167 (ha->request_q_length + 1) * sizeof(request_t),
2271 ha->request_ring, ha->request_dma); 2168 ha->request_ring, ha->request_dma);
2272 2169
2170 ha->srb_mempool = NULL;
2273 ha->eft = NULL; 2171 ha->eft = NULL;
2274 ha->eft_dma = 0; 2172 ha->eft_dma = 0;
2275 ha->sns_cmd = NULL; 2173 ha->sns_cmd = NULL;
@@ -2308,44 +2206,6 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2308 kfree(ha->nvram); 2206 kfree(ha->nvram);
2309} 2207}
2310 2208
2311/*
2312 * qla2x00_allocate_sp_pool
2313 * This routine is called during initialization to allocate
2314 * memory for local srb_t.
2315 *
2316 * Input:
2317 * ha = adapter block pointer.
2318 *
2319 * Context:
2320 * Kernel context.
2321 */
2322static int
2323qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
2324{
2325 int rval;
2326
2327 rval = QLA_SUCCESS;
2328 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2329 if (ha->srb_mempool == NULL) {
2330 qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
2331 rval = QLA_FUNCTION_FAILED;
2332 }
2333 return (rval);
2334}
2335
2336/*
2337 * This routine frees all adapter allocated memory.
2338 *
2339 */
2340static void
2341qla2x00_free_sp_pool( scsi_qla_host_t *ha)
2342{
2343 if (ha->srb_mempool) {
2344 mempool_destroy(ha->srb_mempool);
2345 ha->srb_mempool = NULL;
2346 }
2347}
2348
2349/************************************************************************** 2209/**************************************************************************
2350* qla2x00_do_dpc 2210* qla2x00_do_dpc
2351* This kernel thread is a task that is schedule by the interrupt handler 2211* This kernel thread is a task that is schedule by the interrupt handler
@@ -2367,6 +2227,9 @@ qla2x00_do_dpc(void *data)
2367 fc_port_t *fcport; 2227 fc_port_t *fcport;
2368 uint8_t status; 2228 uint8_t status;
2369 uint16_t next_loopid; 2229 uint16_t next_loopid;
2230 struct scsi_qla_host *vha;
2231 int i;
2232
2370 2233
2371 ha = (scsi_qla_host_t *)data; 2234 ha = (scsi_qla_host_t *)data;
2372 2235
@@ -2409,6 +2272,18 @@ qla2x00_do_dpc(void *data)
2409 } 2272 }
2410 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 2273 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
2411 } 2274 }
2275
2276 for_each_mapped_vp_idx(ha, i) {
2277 list_for_each_entry(vha, &ha->vp_list,
2278 vp_list) {
2279 if (i == vha->vp_idx) {
2280 set_bit(ISP_ABORT_NEEDED,
2281 &vha->dpc_flags);
2282 break;
2283 }
2284 }
2285 }
2286
2412 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 2287 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2413 ha->host_no)); 2288 ha->host_no));
2414 } 2289 }
@@ -3029,3 +2904,4 @@ MODULE_FIRMWARE(FW_FILE_ISP22XX);
3029MODULE_FIRMWARE(FW_FILE_ISP2300); 2904MODULE_FIRMWARE(FW_FILE_ISP2300);
3030MODULE_FIRMWARE(FW_FILE_ISP2322); 2905MODULE_FIRMWARE(FW_FILE_ISP2322);
3031MODULE_FIRMWARE(FW_FILE_ISP24XX); 2906MODULE_FIRMWARE(FW_FILE_ISP24XX);
2907MODULE_FIRMWARE(FW_FILE_ISP25XX);
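
The qla_os.c hunks above replace qla2x00_mem_alloc()'s retry-and-free-everything loop with a single pass that backs out through goto labels on failure, folding the old sp-pool helpers into the same path. A minimal standalone sketch of that error-unwind pattern, using hypothetical demo_* names rather than the driver's own structures (only dma_alloc_coherent()/dma_free_coherent() are the real kernel API):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical container for two coherent DMA buffers. */
struct demo_rings {
	void *req;
	dma_addr_t req_dma;
	void *rsp;
	dma_addr_t rsp_dma;
};

static int demo_alloc_rings(struct device *dev, struct demo_rings *r,
			    size_t req_len, size_t rsp_len)
{
	r->req = dma_alloc_coherent(dev, req_len, &r->req_dma, GFP_KERNEL);
	if (!r->req)
		goto fail;

	r->rsp = dma_alloc_coherent(dev, rsp_len, &r->rsp_dma, GFP_KERNEL);
	if (!r->rsp)
		goto fail_free_req;

	return 0;

fail_free_req:
	/* Undo only what already succeeded, in reverse order. */
	dma_free_coherent(dev, req_len, r->req, r->req_dma);
	r->req = NULL;
fail:
	return -ENOMEM;
}

Each additional allocation only costs one more label, which is why the patch can drop the qla2x00_mem_free()/msleep()/retry dance from every failure branch.
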
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b68fb73613ed..26822c8807ee 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -893,6 +893,8 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
893 } 893 }
894} 894}
895 895
896#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
897
896void 898void
897qla2x00_beacon_blink(struct scsi_qla_host *ha) 899qla2x00_beacon_blink(struct scsi_qla_host *ha)
898{ 900{
@@ -902,15 +904,12 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
902 unsigned long flags; 904 unsigned long flags;
903 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 905 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
904 906
905 if (ha->pio_address)
906 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
907
908 spin_lock_irqsave(&ha->hardware_lock, flags); 907 spin_lock_irqsave(&ha->hardware_lock, flags);
909 908
910 /* Save the Original GPIOE. */ 909 /* Save the Original GPIOE. */
911 if (ha->pio_address) { 910 if (ha->pio_address) {
912 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 911 gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
913 gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 912 gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
914 } else { 913 } else {
915 gpio_enable = RD_REG_WORD(&reg->gpioe); 914 gpio_enable = RD_REG_WORD(&reg->gpioe);
916 gpio_data = RD_REG_WORD(&reg->gpiod); 915 gpio_data = RD_REG_WORD(&reg->gpiod);
@@ -920,7 +919,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
920 gpio_enable |= GPIO_LED_MASK; 919 gpio_enable |= GPIO_LED_MASK;
921 920
922 if (ha->pio_address) { 921 if (ha->pio_address) {
923 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 922 WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
924 } else { 923 } else {
925 WRT_REG_WORD(&reg->gpioe, gpio_enable); 924 WRT_REG_WORD(&reg->gpioe, gpio_enable);
926 RD_REG_WORD(&reg->gpioe); 925 RD_REG_WORD(&reg->gpioe);
@@ -936,7 +935,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
936 935
937 /* Set the modified gpio_data values */ 936 /* Set the modified gpio_data values */
938 if (ha->pio_address) { 937 if (ha->pio_address) {
939 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 938 WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
940 } else { 939 } else {
941 WRT_REG_WORD(&reg->gpiod, gpio_data); 940 WRT_REG_WORD(&reg->gpiod, gpio_data);
942 RD_REG_WORD(&reg->gpiod); 941 RD_REG_WORD(&reg->gpiod);
@@ -962,14 +961,11 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
962 return QLA_FUNCTION_FAILED; 961 return QLA_FUNCTION_FAILED;
963 } 962 }
964 963
965 if (ha->pio_address)
966 reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
967
968 /* Turn off LEDs. */ 964 /* Turn off LEDs. */
969 spin_lock_irqsave(&ha->hardware_lock, flags); 965 spin_lock_irqsave(&ha->hardware_lock, flags);
970 if (ha->pio_address) { 966 if (ha->pio_address) {
971 gpio_enable = RD_REG_WORD_PIO(&reg->gpioe); 967 gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
972 gpio_data = RD_REG_WORD_PIO(&reg->gpiod); 968 gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
973 } else { 969 } else {
974 gpio_enable = RD_REG_WORD(&reg->gpioe); 970 gpio_enable = RD_REG_WORD(&reg->gpioe);
975 gpio_data = RD_REG_WORD(&reg->gpiod); 971 gpio_data = RD_REG_WORD(&reg->gpiod);
@@ -978,7 +974,7 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
978 974
979 /* Set the modified gpio_enable values. */ 975 /* Set the modified gpio_enable values. */
980 if (ha->pio_address) { 976 if (ha->pio_address) {
981 WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable); 977 WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
982 } else { 978 } else {
983 WRT_REG_WORD(&reg->gpioe, gpio_enable); 979 WRT_REG_WORD(&reg->gpioe, gpio_enable);
984 RD_REG_WORD(&reg->gpioe); 980 RD_REG_WORD(&reg->gpioe);
@@ -987,7 +983,7 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
987 /* Clear out previously set LED colour. */ 983 /* Clear out previously set LED colour. */
988 gpio_data &= ~GPIO_LED_MASK; 984 gpio_data &= ~GPIO_LED_MASK;
989 if (ha->pio_address) { 985 if (ha->pio_address) {
990 WRT_REG_WORD_PIO(&reg->gpiod, gpio_data); 986 WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
991 } else { 987 } else {
992 WRT_REG_WORD(&reg->gpiod, gpio_data); 988 WRT_REG_WORD(&reg->gpiod, gpio_data);
993 RD_REG_WORD(&reg->gpiod); 989 RD_REG_WORD(&reg->gpiod);
@@ -1244,13 +1240,12 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
1244 if (ha->pio_address) { 1240 if (ha->pio_address) {
1245 uint16_t data2; 1241 uint16_t data2;
1246 1242
1247 reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1243 WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
1248 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
1249 do { 1244 do {
1250 data = RD_REG_WORD_PIO(&reg->flash_data); 1245 data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
1251 barrier(); 1246 barrier();
1252 cpu_relax(); 1247 cpu_relax();
1253 data2 = RD_REG_WORD_PIO(&reg->flash_data); 1248 data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
1254 } while (data != data2); 1249 } while (data != data2);
1255 } else { 1250 } else {
1256 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1251 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
@@ -1304,9 +1299,8 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
1304 1299
1305 /* Always perform IO mapped accesses to the FLASH registers. */ 1300 /* Always perform IO mapped accesses to the FLASH registers. */
1306 if (ha->pio_address) { 1301 if (ha->pio_address) {
1307 reg = (struct device_reg_2xxx __iomem *)ha->pio_address; 1302 WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
1308 WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr); 1303 WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
1309 WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data);
1310 } else { 1304 } else {
1311 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr); 1305 WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
1312 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */ 1306 RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
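
The qla_sup.c changes above stop casting ha->pio_address to the MMIO register struct and instead compute each port address as base plus offsetof() via the new PIO_REG() macro. A small sketch of that offsetof() idiom under assumed names (demo_regs stands in for struct device_reg_2xxx, and inw()/outw() stand in for the RD_REG_WORD_PIO/WRT_REG_WORD_PIO wrappers):

#include <linux/io.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical register block mirroring the layout idea of device_reg_2xxx. */
struct demo_regs {
	u16 flash_address;
	u16 flash_data;
	u16 ctrl_status;
	u16 gpiod;
	u16 gpioe;
};

/* Same shape as PIO_REG(): I/O-port base plus the register's struct offset. */
#define DEMO_PIO_REG(base, r)	((base) + offsetof(struct demo_regs, r))

static u16 demo_read_gpiod(unsigned long pio_base)
{
	/* Port I/O read of gpiod, analogous to RD_REG_WORD_PIO(PIO_REG(ha, gpiod)). */
	return inw(DEMO_PIO_REG(pio_base, gpiod));
}

static void demo_write_gpioe(unsigned long pio_base, u16 val)
{
	/* Port I/O write of gpioe, analogous to WRT_REG_WORD_PIO(). */
	outw(val, DEMO_PIO_REG(pio_base, gpioe));
}

Tying the port offsets to the struct layout avoids the old pattern of reusing an __iomem pointer type to carry a plain port number.
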
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 2c2f6b4697c7..c5742cc15abb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.00-k7" 10#define QLA2XXX_VERSION "8.02.00-k8"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 49925f92555e..10b3b9a620f3 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1306,6 +1306,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1306 atomic_set(&ddb_entry->relogin_timer, 0); 1306 atomic_set(&ddb_entry->relogin_timer, 0);
1307 clear_bit(DF_RELOGIN, &ddb_entry->flags); 1307 clear_bit(DF_RELOGIN, &ddb_entry->flags);
1308 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); 1308 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
1309 iscsi_unblock_session(ddb_entry->sess);
1309 iscsi_session_event(ddb_entry->sess, 1310 iscsi_session_event(ddb_entry->sess,
1310 ISCSI_KEVENT_CREATE_SESSION); 1311 ISCSI_KEVENT_CREATE_SESSION);
1311 /* 1312 /*
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2e2b9fedffcc..c3c59d763037 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -63,8 +63,6 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
63 enum iscsi_param param, char *buf); 63 enum iscsi_param param, char *buf);
64static int qla4xxx_host_get_param(struct Scsi_Host *shost, 64static int qla4xxx_host_get_param(struct Scsi_Host *shost,
65 enum iscsi_host_param param, char *buf); 65 enum iscsi_host_param param, char *buf);
66static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
67static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
68static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); 66static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
69 67
70/* 68/*
@@ -91,6 +89,8 @@ static struct scsi_host_template qla4xxx_driver_template = {
91 .slave_alloc = qla4xxx_slave_alloc, 89 .slave_alloc = qla4xxx_slave_alloc,
92 .slave_destroy = qla4xxx_slave_destroy, 90 .slave_destroy = qla4xxx_slave_destroy,
93 91
92 .scan_finished = iscsi_scan_finished,
93
94 .this_id = -1, 94 .this_id = -1,
95 .cmd_per_lun = 3, 95 .cmd_per_lun = 3,
96 .use_clustering = ENABLE_CLUSTERING, 96 .use_clustering = ENABLE_CLUSTERING,
@@ -116,8 +116,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
116 .get_conn_param = qla4xxx_conn_get_param, 116 .get_conn_param = qla4xxx_conn_get_param,
117 .get_session_param = qla4xxx_sess_get_param, 117 .get_session_param = qla4xxx_sess_get_param,
118 .get_host_param = qla4xxx_host_get_param, 118 .get_host_param = qla4xxx_host_get_param,
119 .start_conn = qla4xxx_conn_start,
120 .stop_conn = qla4xxx_conn_stop,
121 .session_recovery_timedout = qla4xxx_recovery_timedout, 119 .session_recovery_timedout = qla4xxx_recovery_timedout,
122}; 120};
123 121
@@ -128,48 +126,19 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
128 struct ddb_entry *ddb_entry = session->dd_data; 126 struct ddb_entry *ddb_entry = session->dd_data;
129 struct scsi_qla_host *ha = ddb_entry->ha; 127 struct scsi_qla_host *ha = ddb_entry->ha;
130 128
131 DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count of (%d) "
132 "secs exhausted, marking device DEAD.\n", ha->host_no,
133 __func__, ddb_entry->fw_ddb_index,
134 ha->port_down_retry_count));
135
136 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
137
138 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = "
139 "0x%lx\n", ha->host_no, __func__, ha->dpc_flags));
140 queue_work(ha->dpc_thread, &ha->dpc_work);
141}
142
143static int qla4xxx_conn_start(struct iscsi_cls_conn *conn)
144{
145 struct iscsi_cls_session *session;
146 struct ddb_entry *ddb_entry;
147
148 session = iscsi_dev_to_session(conn->dev.parent);
149 ddb_entry = session->dd_data;
150
151 DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n",
152 ddb_entry->ha->host_no, __func__,
153 ddb_entry->fw_ddb_index));
154 iscsi_unblock_session(session);
155 return 0;
156}
157
158static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
159{
160 struct iscsi_cls_session *session;
161 struct ddb_entry *ddb_entry;
162
163 session = iscsi_dev_to_session(conn->dev.parent);
164 ddb_entry = session->dd_data;
165
166 DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n",
167 ddb_entry->ha->host_no, __func__,
168 ddb_entry->fw_ddb_index));
169 if (flag == STOP_CONN_RECOVER)
170 iscsi_block_session(session);
171 else
172 printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
173}
129 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
130 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
131
132 DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count "
133 "of (%d) secs exhausted, marking device DEAD.\n",
134 ha->host_no, __func__, ddb_entry->fw_ddb_index,
135 ha->port_down_retry_count));
136
137 DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc "
138 "flags = 0x%lx\n",
139 ha->host_no, __func__, ha->dpc_flags));
140 queue_work(ha->dpc_thread, &ha->dpc_work);
141 }
142}
174 143
175static int qla4xxx_host_get_param(struct Scsi_Host *shost, 144static int qla4xxx_host_get_param(struct Scsi_Host *shost,
@@ -308,6 +277,9 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
308 DEBUG2(printk(KERN_ERR "Could not add connection.\n")); 277 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
309 return -ENOMEM; 278 return -ENOMEM;
310 } 279 }
280
281 /* finally ready to go */
282 iscsi_unblock_session(ddb_entry->sess);
311 return 0; 283 return 0;
312} 284}
313 285
@@ -364,6 +336,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
364 DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n", 336 DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n",
365 ha->host_no, ddb_entry->bus, ddb_entry->target, 337 ha->host_no, ddb_entry->bus, ddb_entry->target,
366 ddb_entry->fw_ddb_index)); 338 ddb_entry->fw_ddb_index));
339 iscsi_block_session(ddb_entry->sess);
367 iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); 340 iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
368} 341}
369 342
@@ -430,9 +403,21 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
430{ 403{
431 struct scsi_qla_host *ha = to_qla_host(cmd->device->host); 404 struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
432 struct ddb_entry *ddb_entry = cmd->device->hostdata; 405 struct ddb_entry *ddb_entry = cmd->device->hostdata;
406 struct iscsi_cls_session *sess = ddb_entry->sess;
433 struct srb *srb; 407 struct srb *srb;
434 int rval; 408 int rval;
435 409
410 if (!sess) {
411 cmd->result = DID_IMM_RETRY << 16;
412 goto qc_fail_command;
413 }
414
415 rval = iscsi_session_chkready(sess);
416 if (rval) {
417 cmd->result = rval;
418 goto qc_fail_command;
419 }
420
436 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { 421 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
437 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) { 422 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
438 cmd->result = DID_NO_CONNECT << 16; 423 cmd->result = DID_NO_CONNECT << 16;
@@ -1323,7 +1308,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1323 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 1308 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
1324 ha->host_no, ha->firmware_version[0], ha->firmware_version[1], 1309 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
1325 ha->patch_number, ha->build_number); 1310 ha->patch_number, ha->build_number);
1326 1311 scsi_scan_host(host);
1327 return 0; 1312 return 0;
1328 1313
1329remove_host: 1314remove_host:
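
With conn start/stop moved out of the driver, qla4xxx now relies on the transport class to gate I/O: queuecommand checks iscsi_session_chkready() and fails or retries commands according to the session state. A hedged sketch of that gating (demo_queuecommand() and its simplified prototype are hypothetical, not the real queuecommand signature; iscsi_session_chkready() and the DID_* result codes are the actual interface added by this patch):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_iscsi.h>

static int demo_queuecommand(struct scsi_cmnd *cmd,
			     void (*done)(struct scsi_cmnd *),
			     struct iscsi_cls_session *sess)
{
	int rval;

	if (!sess) {
		/* Session object not built yet: ask the midlayer to retry. */
		cmd->result = DID_IMM_RETRY << 16;
		done(cmd);
		return 0;
	}

	rval = iscsi_session_chkready(sess);
	if (rval) {
		/* FAILED maps to DID_IMM_RETRY, FREE to DID_NO_CONNECT. */
		cmd->result = rval;
		done(cmd);
		return 0;
	}

	/* ... session is LOGGED_IN: build an IOCB and hand it to firmware ... */
	return 0;
}
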
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b35d19472caa..fecba05b4e77 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -969,9 +969,10 @@ void starget_for_each_device(struct scsi_target *starget, void *data,
969EXPORT_SYMBOL(starget_for_each_device); 969EXPORT_SYMBOL(starget_for_each_device);
970 970
971/** 971/**
972 * __starget_for_each_device - helper to walk all devices of a target 972 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
973 * (UNLOCKED)
974 * @starget: target whose devices we want to iterate over. 973 * @starget: target whose devices we want to iterate over.
974 * @data: parameter for callback @fn()
975 * @fn: callback function that is invoked for each device
975 * 976 *
976 * This traverses over each device of @starget. It does _not_ 977 * This traverses over each device of @starget. It does _not_
977 * take a reference on the scsi_device, so the whole loop must be 978 * take a reference on the scsi_device, so the whole loop must be
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f243fc30c908..135c1d054701 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -301,7 +301,6 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
301 page = sg_page(sg); 301 page = sg_page(sg);
302 off = sg->offset; 302 off = sg->offset;
303 len = sg->length; 303 len = sg->length;
304 data_len += len;
305 304
306 while (len > 0 && data_len > 0) { 305 while (len > 0 && data_len > 0) {
307 /* 306 /*
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0d7b4e79415c..fac7534f3ec4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,10 +30,10 @@
30#include <scsi/scsi_transport_iscsi.h> 30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/iscsi_if.h> 31#include <scsi/iscsi_if.h>
32 32
33#define ISCSI_SESSION_ATTRS 18 33#define ISCSI_SESSION_ATTRS 19
34#define ISCSI_CONN_ATTRS 11 34#define ISCSI_CONN_ATTRS 13
35#define ISCSI_HOST_ATTRS 4 35#define ISCSI_HOST_ATTRS 4
36#define ISCSI_TRANSPORT_VERSION "2.0-867" 36#define ISCSI_TRANSPORT_VERSION "2.0-868"
37 37
38struct iscsi_internal { 38struct iscsi_internal {
39 int daemon_pid; 39 int daemon_pid;
@@ -127,12 +127,13 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
127 memset(ihost, 0, sizeof(*ihost)); 127 memset(ihost, 0, sizeof(*ihost));
128 INIT_LIST_HEAD(&ihost->sessions); 128 INIT_LIST_HEAD(&ihost->sessions);
129 mutex_init(&ihost->mutex); 129 mutex_init(&ihost->mutex);
130 atomic_set(&ihost->nr_scans, 0);
130 131
131 snprintf(ihost->unbind_workq_name, KOBJ_NAME_LEN, "iscsi_unbind_%d", 132 snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
132 shost->host_no); 133 shost->host_no);
133 ihost->unbind_workq = create_singlethread_workqueue( 134 ihost->scan_workq = create_singlethread_workqueue(
134 ihost->unbind_workq_name); 135 ihost->scan_workq_name);
135 if (!ihost->unbind_workq) 136 if (!ihost->scan_workq)
136 return -ENOMEM; 137 return -ENOMEM;
137 return 0; 138 return 0;
138} 139}
@@ -143,7 +144,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
143 struct Scsi_Host *shost = dev_to_shost(dev); 144 struct Scsi_Host *shost = dev_to_shost(dev);
144 struct iscsi_host *ihost = shost->shost_data; 145 struct iscsi_host *ihost = shost->shost_data;
145 146
146 destroy_workqueue(ihost->unbind_workq); 147 destroy_workqueue(ihost->scan_workq);
147 return 0; 148 return 0;
148} 149}
149 150
@@ -221,6 +222,54 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
221 * The following functions can be used by LLDs that allocate 222 * The following functions can be used by LLDs that allocate
222 * their own scsi_hosts or by software iscsi LLDs 223 * their own scsi_hosts or by software iscsi LLDs
223 */ 224 */
225static struct {
226 int value;
227 char *name;
228} iscsi_session_state_names[] = {
229 { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
230 { ISCSI_SESSION_FAILED, "FAILED" },
231 { ISCSI_SESSION_FREE, "FREE" },
232};
233
234const char *iscsi_session_state_name(int state)
235{
236 int i;
237 char *name = NULL;
238
239 for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
240 if (iscsi_session_state_names[i].value == state) {
241 name = iscsi_session_state_names[i].name;
242 break;
243 }
244 }
245 return name;
246}
247
248int iscsi_session_chkready(struct iscsi_cls_session *session)
249{
250 unsigned long flags;
251 int err;
252
253 spin_lock_irqsave(&session->lock, flags);
254 switch (session->state) {
255 case ISCSI_SESSION_LOGGED_IN:
256 err = 0;
257 break;
258 case ISCSI_SESSION_FAILED:
259 err = DID_IMM_RETRY << 16;
260 break;
261 case ISCSI_SESSION_FREE:
262 err = DID_NO_CONNECT << 16;
263 break;
264 default:
265 err = DID_NO_CONNECT << 16;
266 break;
267 }
268 spin_unlock_irqrestore(&session->lock, flags);
269 return err;
270}
271EXPORT_SYMBOL_GPL(iscsi_session_chkready);
272
224static void iscsi_session_release(struct device *dev) 273static void iscsi_session_release(struct device *dev)
225{ 274{
226 struct iscsi_cls_session *session = iscsi_dev_to_session(dev); 275 struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
@@ -236,6 +285,25 @@ static int iscsi_is_session_dev(const struct device *dev)
236 return dev->release == iscsi_session_release; 285 return dev->release == iscsi_session_release;
237} 286}
238 287
288/**
289 * iscsi_scan_finished - helper to report when running scans are done
290 * @shost: scsi host
291 * @time: scan run time
292 *
293 * This function can be used by drivers like qla4xxx to report to the scsi
294 * layer when the scans it kicked off at module load time are done.
295 */
296int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
297{
298 struct iscsi_host *ihost = shost->shost_data;
299 /*
300 * qla4xxx will have kicked off some session unblocks before calling
301 * scsi_scan_host, so just wait for them to complete.
302 */
303 return !atomic_read(&ihost->nr_scans);
304}
305EXPORT_SYMBOL_GPL(iscsi_scan_finished);
306
239static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, 307static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
240 uint id, uint lun) 308 uint id, uint lun)
241{ 309{
@@ -254,14 +322,50 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
254 return 0; 322 return 0;
255} 323}
256 324
325static void iscsi_scan_session(struct work_struct *work)
326{
327 struct iscsi_cls_session *session =
328 container_of(work, struct iscsi_cls_session, scan_work);
329 struct Scsi_Host *shost = iscsi_session_to_shost(session);
330 struct iscsi_host *ihost = shost->shost_data;
331 unsigned long flags;
332
333 spin_lock_irqsave(&session->lock, flags);
334 if (session->state != ISCSI_SESSION_LOGGED_IN) {
335 spin_unlock_irqrestore(&session->lock, flags);
336 goto done;
337 }
338 spin_unlock_irqrestore(&session->lock, flags);
339
340 scsi_scan_target(&session->dev, 0, session->target_id,
341 SCAN_WILD_CARD, 1);
342done:
343 atomic_dec(&ihost->nr_scans);
344}
345
257static void session_recovery_timedout(struct work_struct *work) 346static void session_recovery_timedout(struct work_struct *work)
258{ 347{
259 struct iscsi_cls_session *session = 348 struct iscsi_cls_session *session =
260 container_of(work, struct iscsi_cls_session, 349 container_of(work, struct iscsi_cls_session,
261 recovery_work.work); 350 recovery_work.work);
262
263 dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
264 "out after %d secs\n", session->recovery_tmo);
265
351 unsigned long flags;
352
353 iscsi_cls_session_printk(KERN_INFO, session,
354 "session recovery timed out after %d secs\n",
355 session->recovery_tmo);
356
357 spin_lock_irqsave(&session->lock, flags);
358 switch (session->state) {
359 case ISCSI_SESSION_FAILED:
360 session->state = ISCSI_SESSION_FREE;
361 break;
362 case ISCSI_SESSION_LOGGED_IN:
363 case ISCSI_SESSION_FREE:
364 /* we raced with the unblock's flush */
365 spin_unlock_irqrestore(&session->lock, flags);
366 return;
367 }
368 spin_unlock_irqrestore(&session->lock, flags);
369
266 if (session->transport->session_recovery_timedout) 370 if (session->transport->session_recovery_timedout)
267 session->transport->session_recovery_timedout(session); 371 session->transport->session_recovery_timedout(session);
@@ -269,16 +373,44 @@ static void session_recovery_timedout(struct work_struct *work)
269 scsi_target_unblock(&session->dev); 373 scsi_target_unblock(&session->dev);
270} 374}
271 375
272void iscsi_unblock_session(struct iscsi_cls_session *session) 376void __iscsi_unblock_session(struct iscsi_cls_session *session)
273{ 377{
274 if (!cancel_delayed_work(&session->recovery_work)) 378 if (!cancel_delayed_work(&session->recovery_work))
275 flush_workqueue(iscsi_eh_timer_workq); 379 flush_workqueue(iscsi_eh_timer_workq);
276 scsi_target_unblock(&session->dev); 380 scsi_target_unblock(&session->dev);
277} 381}
382
383void iscsi_unblock_session(struct iscsi_cls_session *session)
384{
385 struct Scsi_Host *shost = iscsi_session_to_shost(session);
386 struct iscsi_host *ihost = shost->shost_data;
387 unsigned long flags;
388
389 spin_lock_irqsave(&session->lock, flags);
390 session->state = ISCSI_SESSION_LOGGED_IN;
391 spin_unlock_irqrestore(&session->lock, flags);
392
393 __iscsi_unblock_session(session);
394 /*
395 * Only do kernel scanning if the driver is properly hooked into
396 * the async scanning code (drivers like iscsi_tcp do login and
397 * scanning from userspace).
398 */
399 if (shost->hostt->scan_finished) {
400 if (queue_work(ihost->scan_workq, &session->scan_work))
401 atomic_inc(&ihost->nr_scans);
402 }
403}
278EXPORT_SYMBOL_GPL(iscsi_unblock_session); 404EXPORT_SYMBOL_GPL(iscsi_unblock_session);
279 405
280void iscsi_block_session(struct iscsi_cls_session *session) 406void iscsi_block_session(struct iscsi_cls_session *session)
281{ 407{
408 unsigned long flags;
409
410 spin_lock_irqsave(&session->lock, flags);
411 session->state = ISCSI_SESSION_FAILED;
412 spin_unlock_irqrestore(&session->lock, flags);
413
282 scsi_target_block(&session->dev); 414 scsi_target_block(&session->dev);
283 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, 415 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
284 session->recovery_tmo * HZ); 416 session->recovery_tmo * HZ);
@@ -311,7 +443,7 @@ static int iscsi_unbind_session(struct iscsi_cls_session *session)
311 struct Scsi_Host *shost = iscsi_session_to_shost(session); 443 struct Scsi_Host *shost = iscsi_session_to_shost(session);
312 struct iscsi_host *ihost = shost->shost_data; 444 struct iscsi_host *ihost = shost->shost_data;
313 445
314 return queue_work(ihost->unbind_workq, &session->unbind_work); 446 return queue_work(ihost->scan_workq, &session->unbind_work);
315} 447}
316 448
317struct iscsi_cls_session * 449struct iscsi_cls_session *
@@ -327,10 +459,13 @@ iscsi_alloc_session(struct Scsi_Host *shost,
327 459
328 session->transport = transport; 460 session->transport = transport;
329 session->recovery_tmo = 120; 461 session->recovery_tmo = 120;
462 session->state = ISCSI_SESSION_FREE;
330 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); 463 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
331 INIT_LIST_HEAD(&session->host_list); 464 INIT_LIST_HEAD(&session->host_list);
332 INIT_LIST_HEAD(&session->sess_list); 465 INIT_LIST_HEAD(&session->sess_list);
333 INIT_WORK(&session->unbind_work, __iscsi_unbind_session); 466 INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
467 INIT_WORK(&session->scan_work, iscsi_scan_session);
468 spin_lock_init(&session->lock);
334 469
335 /* this is released in the dev's release function */ 470 /* this is released in the dev's release function */
336 scsi_host_get(shost); 471 scsi_host_get(shost);
@@ -358,8 +493,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
358 session->sid); 493 session->sid);
359 err = device_add(&session->dev); 494 err = device_add(&session->dev);
360 if (err) { 495 if (err) {
361 dev_printk(KERN_ERR, &session->dev, "iscsi: could not " 496 iscsi_cls_session_printk(KERN_ERR, session,
362 "register session's dev\n"); 497 "could not register session's dev\n");
363 goto release_host; 498 goto release_host;
364 } 499 }
365 transport_register_device(&session->dev); 500 transport_register_device(&session->dev);
@@ -444,22 +579,28 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
444 * If we are blocked let commands flow again. The lld or iscsi 579 * If we are blocked let commands flow again. The lld or iscsi
445 * layer should set up the queuecommand to fail commands. 580 * layer should set up the queuecommand to fail commands.
446 */ 581 */
447 iscsi_unblock_session(session);
448 iscsi_unbind_session(session);
582 spin_lock_irqsave(&session->lock, flags);
583 session->state = ISCSI_SESSION_FREE;
584 spin_unlock_irqrestore(&session->lock, flags);
585 __iscsi_unblock_session(session);
586 __iscsi_unbind_session(&session->unbind_work);
587
588 /* flush running scans */
589 flush_workqueue(ihost->scan_workq);
449 /* 590 /*
450 * If the session dropped while removing devices then we need to make 591 * If the session dropped while removing devices then we need to make
451 * sure it is not blocked 592 * sure it is not blocked
452 */ 593 */
453 if (!cancel_delayed_work(&session->recovery_work)) 594 if (!cancel_delayed_work(&session->recovery_work))
454 flush_workqueue(iscsi_eh_timer_workq); 595 flush_workqueue(iscsi_eh_timer_workq);
455 flush_workqueue(ihost->unbind_workq);
456 596
457 /* hw iscsi may not have removed all connections from session */ 597 /* hw iscsi may not have removed all connections from session */
458 err = device_for_each_child(&session->dev, NULL, 598 err = device_for_each_child(&session->dev, NULL,
459 iscsi_iter_destroy_conn_fn); 599 iscsi_iter_destroy_conn_fn);
460 if (err) 600 if (err)
461 dev_printk(KERN_ERR, &session->dev, "iscsi: Could not delete " 601 iscsi_cls_session_printk(KERN_ERR, session,
462 "all connections for session. Error %d.\n", err); 602 "Could not delete all connections "
603 "for session. Error %d.\n", err);
463 604
464 transport_unregister_device(&session->dev); 605 transport_unregister_device(&session->dev);
465 device_del(&session->dev); 606 device_del(&session->dev);
@@ -531,8 +672,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
531 conn->dev.release = iscsi_conn_release; 672 conn->dev.release = iscsi_conn_release;
532 err = device_register(&conn->dev); 673 err = device_register(&conn->dev);
533 if (err) { 674 if (err) {
534 dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register " 675 iscsi_cls_session_printk(KERN_ERR, session, "could not "
535 "connection's dev\n"); 676 "register connection's dev\n");
536 goto release_parent_ref; 677 goto release_parent_ref;
537 } 678 }
538 transport_register_device(&conn->dev); 679 transport_register_device(&conn->dev);
@@ -639,8 +780,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
639 skb = alloc_skb(len, GFP_ATOMIC); 780 skb = alloc_skb(len, GFP_ATOMIC);
640 if (!skb) { 781 if (!skb) {
641 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED); 782 iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
642 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver " 783 iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
643 "control PDU: OOM\n"); 784 "control PDU: OOM\n");
644 return -ENOMEM; 785 return -ENOMEM;
645 } 786 }
646 787
@@ -661,20 +802,27 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
661 802
662void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) 803void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
663{ 804{
805 struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
664 struct nlmsghdr *nlh; 806 struct nlmsghdr *nlh;
665 struct sk_buff *skb; 807 struct sk_buff *skb;
666 struct iscsi_uevent *ev; 808 struct iscsi_uevent *ev;
667 struct iscsi_internal *priv; 809 struct iscsi_internal *priv;
668 int len = NLMSG_SPACE(sizeof(*ev)); 810 int len = NLMSG_SPACE(sizeof(*ev));
811 unsigned long flags;
669 812
670 priv = iscsi_if_transport_lookup(conn->transport); 813 priv = iscsi_if_transport_lookup(conn->transport);
671 if (!priv) 814 if (!priv)
672 return; 815 return;
673 816
817 spin_lock_irqsave(&session->lock, flags);
818 if (session->state == ISCSI_SESSION_LOGGED_IN)
819 session->state = ISCSI_SESSION_FAILED;
820 spin_unlock_irqrestore(&session->lock, flags);
821
674 skb = alloc_skb(len, GFP_ATOMIC); 822 skb = alloc_skb(len, GFP_ATOMIC);
675 if (!skb) { 823 if (!skb) {
676 dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored " 824 iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
677 "conn error (%d)\n", error); 825 "conn error (%d)\n", error);
678 return; 826 return;
679 } 827 }
680 828
@@ -688,8 +836,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
688 836
689 iscsi_broadcast_skb(skb, GFP_ATOMIC); 837 iscsi_broadcast_skb(skb, GFP_ATOMIC);
690 838
691 dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n", 839 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
692 error); 840 error);
693} 841}
694EXPORT_SYMBOL_GPL(iscsi_conn_error); 842EXPORT_SYMBOL_GPL(iscsi_conn_error);
695 843
@@ -744,8 +892,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
744 892
745 skbstat = alloc_skb(len, GFP_ATOMIC); 893 skbstat = alloc_skb(len, GFP_ATOMIC);
746 if (!skbstat) { 894 if (!skbstat) {
747 dev_printk(KERN_ERR, &conn->dev, "iscsi: can not " 895 iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
748 "deliver stats: OOM\n"); 896 "deliver stats: OOM\n");
749 return -ENOMEM; 897 return -ENOMEM;
750 } 898 }
751 899
@@ -801,8 +949,9 @@ int iscsi_session_event(struct iscsi_cls_session *session,
801 949
802 skb = alloc_skb(len, GFP_KERNEL); 950 skb = alloc_skb(len, GFP_KERNEL);
803 if (!skb) { 951 if (!skb) {
804 dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace " 952 iscsi_cls_session_printk(KERN_ERR, session,
805 "of session event %u\n", event); 953 "Cannot notify userspace of session "
954 "event %u\n", event);
806 return -ENOMEM; 955 return -ENOMEM;
807 } 956 }
808 957
@@ -825,8 +974,8 @@ int iscsi_session_event(struct iscsi_cls_session *session,
825 ev->r.unbind_session.sid = session->sid; 974 ev->r.unbind_session.sid = session->sid;
826 break; 975 break;
827 default: 976 default:
828 dev_printk(KERN_ERR, &session->dev, "Invalid event %u.\n", 977 iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
829 event); 978 "%u.\n", event);
830 kfree_skb(skb); 979 kfree_skb(skb);
831 return -EINVAL; 980 return -EINVAL;
832 } 981 }
@@ -837,8 +986,10 @@ int iscsi_session_event(struct iscsi_cls_session *session,
837 */ 986 */
838 rc = iscsi_broadcast_skb(skb, GFP_KERNEL); 987 rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
839 if (rc < 0) 988 if (rc < 0)
840 dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace " 989 iscsi_cls_session_printk(KERN_ERR, session,
841 "of session event %u. Check iscsi daemon\n", event); 990 "Cannot notify userspace of session "
991 "event %u. Check iscsi daemon\n",
992 event);
842 return rc; 993 return rc;
843} 994}
844EXPORT_SYMBOL_GPL(iscsi_session_event); 995EXPORT_SYMBOL_GPL(iscsi_session_event);
@@ -871,16 +1022,15 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
871 1022
872 session = iscsi_session_lookup(ev->u.c_conn.sid); 1023 session = iscsi_session_lookup(ev->u.c_conn.sid);
873 if (!session) { 1024 if (!session) {
874 printk(KERN_ERR "iscsi: invalid session %d\n", 1025 printk(KERN_ERR "iscsi: invalid session %d.\n",
875 ev->u.c_conn.sid); 1026 ev->u.c_conn.sid);
876 return -EINVAL; 1027 return -EINVAL;
877 } 1028 }
878 1029
879 conn = transport->create_conn(session, ev->u.c_conn.cid); 1030 conn = transport->create_conn(session, ev->u.c_conn.cid);
880 if (!conn) { 1031 if (!conn) {
881 printk(KERN_ERR "iscsi: couldn't create a new " 1032 iscsi_cls_session_printk(KERN_ERR, session,
882 "connection for session %d\n", 1033 "couldn't create a new connection.");
883 session->sid);
884 return -ENOMEM; 1034 return -ENOMEM;
885 } 1035 }
886 1036
@@ -1246,6 +1396,15 @@ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
1246iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0); 1396iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
1247iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0); 1397iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
1248 1398
1399static ssize_t
1400show_priv_session_state(struct class_device *cdev, char *buf)
1401{
1402 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
1403 return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
1404}
1405static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
1406 NULL);
1407
1249#define iscsi_priv_session_attr_show(field, format) \ 1408#define iscsi_priv_session_attr_show(field, format) \
1250static ssize_t \ 1409static ssize_t \
1251show_priv_session_##field(struct class_device *cdev, char *buf) \ 1410show_priv_session_##field(struct class_device *cdev, char *buf) \
@@ -1472,6 +1631,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
1472 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); 1631 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
1473 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); 1632 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
1474 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 1633 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
1634 SETUP_PRIV_SESSION_RD_ATTR(state);
1475 1635
1476 BUG_ON(count > ISCSI_SESSION_ATTRS); 1636 BUG_ON(count > ISCSI_SESSION_ATTRS);
1477 priv->session_attrs[count] = NULL; 1637 priv->session_attrs[count] = NULL;
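
Taken together, the scsi_transport_iscsi.c changes above give each session a small LOGGED_IN/FAILED/FREE state machine plus an asynchronous scan hook. A hedged sketch of how an offload LLD such as qla4xxx is expected to drive it (the demo_* names are hypothetical; iscsi_block_session(), iscsi_unblock_session(), iscsi_conn_error() and iscsi_scan_finished() are the exported interface shown in this patch):

#include <scsi/iscsi_if.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_iscsi.h>

static struct scsi_host_template demo_sht = {
	.name		= "demo-iscsi-offload",
	/* Lets the transport's queued session scans complete the async scan. */
	.scan_finished	= iscsi_scan_finished,
};

static void demo_link_down(struct iscsi_cls_session *session,
			   struct iscsi_cls_conn *conn)
{
	/* Session goes FAILED, the target is blocked, recovery timer is armed. */
	iscsi_block_session(session);
	/* Also flips a LOGGED_IN session to FAILED and notifies userspace. */
	iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
}

static void demo_logged_back_in(struct iscsi_cls_session *session)
{
	/* Session goes LOGGED_IN, the target unblocks, and a scan is queued. */
	iscsi_unblock_session(session);
}

If recovery_tmo expires before demo_logged_back_in() runs, session_recovery_timedout() moves the session to FREE and outstanding commands start failing with DID_NO_CONNECT instead of retrying forever.
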
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 51a5557f42dd..37df8bbe7f46 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -929,6 +929,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
929 unsigned int xfer_size = scsi_bufflen(SCpnt); 929 unsigned int xfer_size = scsi_bufflen(SCpnt);
930 unsigned int good_bytes = result ? 0 : xfer_size; 930 unsigned int good_bytes = result ? 0 : xfer_size;
931 u64 start_lba = SCpnt->request->sector; 931 u64 start_lba = SCpnt->request->sector;
932 u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
932 u64 bad_lba; 933 u64 bad_lba;
933 struct scsi_sense_hdr sshdr; 934 struct scsi_sense_hdr sshdr;
934 int sense_valid = 0; 935 int sense_valid = 0;
@@ -967,26 +968,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
967 goto out; 968 goto out;
968 if (xfer_size <= SCpnt->device->sector_size) 969 if (xfer_size <= SCpnt->device->sector_size)
969 goto out; 970 goto out;
970 switch (SCpnt->device->sector_size) {
971 case 256:
972 start_lba <<= 1;
973 break;
974 case 512:
975 break;
976 case 1024:
977 start_lba >>= 1;
978 break;
979 case 2048:
980 start_lba >>= 2;
981 break;
982 case 4096:
983 start_lba >>= 3;
984 break;
985 default:
986 /* Print something here with limiting frequency. */
987 goto out;
988 break;
989 }
971 if (SCpnt->device->sector_size < 512) {
972 /* only legitimate sector_size here is 256 */
973 start_lba <<= 1;
974 end_lba <<= 1;
975 } else {
976 /* be careful ... don't want any overflows */
977 u64 factor = SCpnt->device->sector_size / 512;
978 do_div(start_lba, factor);
979 do_div(end_lba, factor);
980 }
981
982 if (bad_lba < start_lba || bad_lba >= end_lba)
983 /* the bad lba was reported incorrectly, we have
984 * no idea where the error is
985 */
986 goto out;
987
990 /* This computation should always be done in terms of 988 /* This computation should always be done in terms of
991 * the resolution of the device's medium. 989 * the resolution of the device's medium.
992 */ 990 */
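
The sd_done() change above replaces the per-sector-size switch with a range check: the start and end of the transfer are converted from 512-byte units to device sectors, and a reported bad_lba outside that window is ignored. A standalone sketch of the same arithmetic (demo_bad_lba_in_range() is a hypothetical helper; do_div() is the real 64-bit division primitive and modifies its first argument in place):

#include <asm/div64.h>
#include <linux/types.h>

static bool demo_bad_lba_in_range(u64 start_512, unsigned int xfer_bytes,
				  unsigned int sector_size, u64 bad_lba)
{
	u64 start_lba = start_512;			/* request start, 512-byte units */
	u64 end_lba = start_512 + xfer_bytes / 512;	/* one past the last 512-byte block */

	if (sector_size < 512) {
		/* Only legitimate small size is 256 bytes: twice as many LBAs. */
		start_lba <<= 1;
		end_lba <<= 1;
	} else {
		u32 factor = sector_size / 512;

		/* Scale down to device sectors without 64-bit division on 32-bit. */
		do_div(start_lba, factor);
		do_div(end_lba, factor);
	}

	/* A bad_lba outside [start_lba, end_lba) cannot belong to this command. */
	return bad_lba >= start_lba && bad_lba < end_lba;
}
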
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
new file mode 100644
index 000000000000..2a6e4f472eaa
--- /dev/null
+++ b/drivers/scsi/ses.c
@@ -0,0 +1,689 @@
1/*
2 * SCSI Enclosure Services
3 *
4 * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
5 *
6**-----------------------------------------------------------------------------
7**
8** This program is free software; you can redistribute it and/or
9** modify it under the terms of the GNU General Public License
10** version 2 as published by the Free Software Foundation.
11**
12** This program is distributed in the hope that it will be useful,
13** but WITHOUT ANY WARRANTY; without even the implied warranty of
14** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15** GNU General Public License for more details.
16**
17** You should have received a copy of the GNU General Public License
18** along with this program; if not, write to the Free Software
19** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20**
21**-----------------------------------------------------------------------------
22*/
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/enclosure.h>
27
28#include <scsi/scsi.h>
29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_dbg.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_driver.h>
33#include <scsi/scsi_host.h>
34
35struct ses_device {
36 char *page1;
37 char *page2;
38 char *page10;
39 short page1_len;
40 short page2_len;
41 short page10_len;
42};
43
44struct ses_component {
45 u64 addr;
46 unsigned char *desc;
47};
48
49static int ses_probe(struct device *dev)
50{
51 struct scsi_device *sdev = to_scsi_device(dev);
52 int err = -ENODEV;
53
54 if (sdev->type != TYPE_ENCLOSURE)
55 goto out;
56
57 err = 0;
58 sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n");
59
60 out:
61 return err;
62}
63
64#define SES_TIMEOUT 30
65#define SES_RETRIES 3
66
67static int ses_recv_diag(struct scsi_device *sdev, int page_code,
68 void *buf, int bufflen)
69{
70 char cmd[] = {
71 RECEIVE_DIAGNOSTIC,
72 1, /* Set PCV bit */
73 page_code,
74 bufflen >> 8,
75 bufflen & 0xff,
76 0
77 };
78
79 return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
80 NULL, SES_TIMEOUT, SES_RETRIES);
81}
82
83static int ses_send_diag(struct scsi_device *sdev, int page_code,
84 void *buf, int bufflen)
85{
86 u32 result;
87
88 char cmd[] = {
89 SEND_DIAGNOSTIC,
90 0x10, /* Set PF bit */
91 0,
92 bufflen >> 8,
93 bufflen & 0xff,
94 0
95 };
96
97 result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
98 NULL, SES_TIMEOUT, SES_RETRIES);
99 if (result)
100 sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
101 result);
102 return result;
103}
104
105static int ses_set_page2_descriptor(struct enclosure_device *edev,
106 struct enclosure_component *ecomp,
107 char *desc)
108{
109 int i, j, count = 0, descriptor = ecomp->number;
110 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
111 struct ses_device *ses_dev = edev->scratch;
112 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
113 char *desc_ptr = ses_dev->page2 + 8;
114
115 /* Clear everything */
116 memset(desc_ptr, 0, ses_dev->page2_len - 8);
117 for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
118 for (j = 0; j < type_ptr[1]; j++) {
119 desc_ptr += 4;
120 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
121 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
122 continue;
123 if (count++ == descriptor) {
124 memcpy(desc_ptr, desc, 4);
125 /* set select */
126 desc_ptr[0] |= 0x80;
127 /* clear reserved, just in case */
128 desc_ptr[0] &= 0xf0;
129 }
130 }
131 }
132
133 return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
134}
135
136static char *ses_get_page2_descriptor(struct enclosure_device *edev,
137 struct enclosure_component *ecomp)
138{
139 int i, j, count = 0, descriptor = ecomp->number;
140 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
141 struct ses_device *ses_dev = edev->scratch;
142 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
143 char *desc_ptr = ses_dev->page2 + 8;
144
145 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
146
147 for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
148 for (j = 0; j < type_ptr[1]; j++) {
149 desc_ptr += 4;
150 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
151 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
152 continue;
153 if (count++ == descriptor)
154 return desc_ptr;
155 }
156 }
157 return NULL;
158}
159
160static void ses_get_fault(struct enclosure_device *edev,
161 struct enclosure_component *ecomp)
162{
163 char *desc;
164
165 desc = ses_get_page2_descriptor(edev, ecomp);
166 ecomp->fault = (desc[3] & 0x60) >> 4;
167}
168
169static int ses_set_fault(struct enclosure_device *edev,
170 struct enclosure_component *ecomp,
171 enum enclosure_component_setting val)
172{
173 char desc[4] = {0 };
174
175 switch (val) {
176 case ENCLOSURE_SETTING_DISABLED:
177 /* zero is disabled */
178 break;
179 case ENCLOSURE_SETTING_ENABLED:
180 desc[2] = 0x02;
181 break;
182 default:
183 /* SES doesn't do the SGPIO blink settings */
184 return -EINVAL;
185 }
186
187 return ses_set_page2_descriptor(edev, ecomp, desc);
188}
189
190static void ses_get_status(struct enclosure_device *edev,
191 struct enclosure_component *ecomp)
192{
193 char *desc;
194
195 desc = ses_get_page2_descriptor(edev, ecomp);
196 ecomp->status = (desc[0] & 0x0f);
197}
198
199static void ses_get_locate(struct enclosure_device *edev,
200 struct enclosure_component *ecomp)
201{
202 char *desc;
203
204 desc = ses_get_page2_descriptor(edev, ecomp);
205 ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
206}
207
208static int ses_set_locate(struct enclosure_device *edev,
209 struct enclosure_component *ecomp,
210 enum enclosure_component_setting val)
211{
212 char desc[4] = {0 };
213
214 switch (val) {
215 case ENCLOSURE_SETTING_DISABLED:
216 /* zero is disabled */
217 break;
218 case ENCLOSURE_SETTING_ENABLED:
219 desc[2] = 0x02;
220 break;
221 default:
222 /* SES doesn't do the SGPIO blink settings */
223 return -EINVAL;
224 }
225 return ses_set_page2_descriptor(edev, ecomp, desc);
226}
227
228static int ses_set_active(struct enclosure_device *edev,
229 struct enclosure_component *ecomp,
230 enum enclosure_component_setting val)
231{
232 char desc[4] = {0 };
233
234 switch (val) {
235 case ENCLOSURE_SETTING_DISABLED:
236 /* zero is disabled */
237 ecomp->active = 0;
238 break;
239 case ENCLOSURE_SETTING_ENABLED:
240 desc[2] = 0x80;
241 ecomp->active = 1;
242 break;
243 default:
244 /* SES doesn't do the SGPIO blink settings */
245 return -EINVAL;
246 }
247 return ses_set_page2_descriptor(edev, ecomp, desc);
248}
249
250static struct enclosure_component_callbacks ses_enclosure_callbacks = {
251 .get_fault = ses_get_fault,
252 .set_fault = ses_set_fault,
253 .get_status = ses_get_status,
254 .get_locate = ses_get_locate,
255 .set_locate = ses_set_locate,
256 .set_active = ses_set_active,
257};
258
259struct ses_host_edev {
260 struct Scsi_Host *shost;
261 struct enclosure_device *edev;
262};
263
264int ses_match_host(struct enclosure_device *edev, void *data)
265{
266 struct ses_host_edev *sed = data;
267 struct scsi_device *sdev;
268
269 if (!scsi_is_sdev_device(edev->cdev.dev))
270 return 0;
271
272 sdev = to_scsi_device(edev->cdev.dev);
273
274 if (sdev->host != sed->shost)
275 return 0;
276
277 sed->edev = edev;
278 return 1;
279}
280
281static void ses_process_descriptor(struct enclosure_component *ecomp,
282 unsigned char *desc)
283{
284 int eip = desc[0] & 0x10;
285 int invalid = desc[0] & 0x80;
286 enum scsi_protocol proto = desc[0] & 0x0f;
287 u64 addr = 0;
288 struct ses_component *scomp = ecomp->scratch;
289 unsigned char *d;
290
291 scomp->desc = desc;
292
293 if (invalid)
294 return;
295
296 switch (proto) {
297 case SCSI_PROTOCOL_SAS:
298 if (eip)
299 d = desc + 8;
300 else
301 d = desc + 4;
302 /* only take the phy0 addr */
303 addr = (u64)d[12] << 56 |
304 (u64)d[13] << 48 |
305 (u64)d[14] << 40 |
306 (u64)d[15] << 32 |
307 (u64)d[16] << 24 |
308 (u64)d[17] << 16 |
309 (u64)d[18] << 8 |
310 (u64)d[19];
311 break;
312 default:
313 /* FIXME: Need to add more protocols than just SAS */
314 break;
315 }
316 scomp->addr = addr;
317}
318
319struct efd {
320 u64 addr;
321 struct device *dev;
322};
323
324static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
325 void *data)
326{
327 struct efd *efd = data;
328 int i;
329 struct ses_component *scomp;
330
331 if (!edev->component[0].scratch)
332 return 0;
333
334 for (i = 0; i < edev->components; i++) {
335 scomp = edev->component[i].scratch;
336 if (scomp->addr != efd->addr)
337 continue;
338
339 enclosure_add_device(edev, i, efd->dev);
340 return 1;
341 }
342 return 0;
343}
344
345#define VPD_INQUIRY_SIZE 512
346
347static void ses_match_to_enclosure(struct enclosure_device *edev,
348 struct scsi_device *sdev)
349{
350 unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
351 unsigned char *desc;
352 int len;
353 struct efd efd = {
354 .addr = 0,
355 };
356 unsigned char cmd[] = {
357 INQUIRY,
358 1,
359 0x83,
360 VPD_INQUIRY_SIZE >> 8,
361 VPD_INQUIRY_SIZE & 0xff,
362 0
363 };
364
365 if (!buf)
366 return;
367
368 if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
369 VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
370 goto free;
371
372 len = (buf[2] << 8) + buf[3];
373 desc = buf + 4;
374 while (desc < buf + len) {
375 enum scsi_protocol proto = desc[0] >> 4;
376 u8 code_set = desc[0] & 0x0f;
377 u8 piv = desc[1] & 0x80;
378 u8 assoc = (desc[1] & 0x30) >> 4;
379 u8 type = desc[1] & 0x0f;
380 u8 len = desc[3];
381
382 if (piv && code_set == 1 && assoc == 1 && code_set == 1
383 && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
384 efd.addr = (u64)desc[4] << 56 |
385 (u64)desc[5] << 48 |
386 (u64)desc[6] << 40 |
387 (u64)desc[7] << 32 |
388 (u64)desc[8] << 24 |
389 (u64)desc[9] << 16 |
390 (u64)desc[10] << 8 |
391 (u64)desc[11];
392
393 desc += len + 4;
394 }
395 if (!efd.addr)
396 goto free;
397
398 efd.dev = &sdev->sdev_gendev;
399
400 enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
401 free:
402 kfree(buf);
403}
404
405#define INIT_ALLOC_SIZE 32
406
407static int ses_intf_add(struct class_device *cdev,
408 struct class_interface *intf)
409{
410 struct scsi_device *sdev = to_scsi_device(cdev->dev);
411 struct scsi_device *tmp_sdev;
412 unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr,
413 *addl_desc_ptr;
414 struct ses_device *ses_dev;
415 u32 result;
416 int i, j, types, len, components = 0;
417 int err = -ENOMEM;
418 struct enclosure_device *edev;
419 struct ses_component *scomp;
420
421 if (!scsi_device_enclosure(sdev)) {
422 /* not an enclosure, but might be in one */
423 edev = enclosure_find(&sdev->host->shost_gendev);
424 if (edev) {
425 ses_match_to_enclosure(edev, sdev);
426 class_device_put(&edev->cdev);
427 }
428 return -ENODEV;
429 }
430
431 /* TYPE_ENCLOSURE prints a message in probe */
432 if (sdev->type != TYPE_ENCLOSURE)
433 sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n");
434
435 ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL);
436 hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
437 if (!hdr_buf || !ses_dev)
438 goto err_init_free;
439
440 result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE);
441 if (result)
442 goto recv_failed;
443
444 if (hdr_buf[1] != 0) {
445 /* FIXME: need subenclosure support; I've just never
446 * seen a device with subenclosures and it makes the
447 * traversal routines more complex */
448 sdev_printk(KERN_ERR, sdev,
449 "FIXME driver has no support for subenclosures (%d)\n",
450 hdr_buf[1]);
451 goto err_free;
452 }
453
454 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
455 buf = kzalloc(len, GFP_KERNEL);
456 if (!buf)
457 goto err_free;
458
459 ses_dev->page1 = buf;
460 ses_dev->page1_len = len;
461
462 result = ses_recv_diag(sdev, 1, buf, len);
463 if (result)
464 goto recv_failed;
465
466 types = buf[10];
467 len = buf[11];
468
469 type_ptr = buf + 12 + len;
470
471 for (i = 0; i < types; i++, type_ptr += 4) {
472 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
473 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
474 components += type_ptr[1];
475 }
476
477 result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
478 if (result)
479 goto recv_failed;
480
481 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
482 buf = kzalloc(len, GFP_KERNEL);
483 if (!buf)
484 goto err_free;
485
486 /* make sure getting page 2 actually works */
487 result = ses_recv_diag(sdev, 2, buf, len);
488 if (result)
489 goto recv_failed;
490 ses_dev->page2 = buf;
491 ses_dev->page2_len = len;
492
493 /* The additional information page --- allows us
494 * to match up the devices */
495 result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
496 if (result)
497 goto no_page10;
498
499 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
500 buf = kzalloc(len, GFP_KERNEL);
501 if (!buf)
502 goto err_free;
503
504 result = ses_recv_diag(sdev, 10, buf, len);
505 if (result)
506 goto recv_failed;
507 ses_dev->page10 = buf;
508 ses_dev->page10_len = len;
509
510 no_page10:
511 scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
512 if (!scomp)
513 goto err_free;
514
515 edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id,
516 components, &ses_enclosure_callbacks);
517 if (IS_ERR(edev)) {
518 err = PTR_ERR(edev);
519 goto err_free;
520 }
521
522 edev->scratch = ses_dev;
523 for (i = 0; i < components; i++)
524 edev->component[i].scratch = scomp++;
525
526 /* Page 7 for the descriptors is optional */
527 buf = NULL;
528 result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
529 if (result)
530 goto simple_populate;
531
532 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
533 /* add 1 for trailing '\0' we'll use */
534 buf = kzalloc(len + 1, GFP_KERNEL);
535 result = ses_recv_diag(sdev, 7, buf, len);
536 if (result) {
537 simple_populate:
538 kfree(buf);
539 buf = NULL;
540 desc_ptr = NULL;
541 addl_desc_ptr = NULL;
542 } else {
543 desc_ptr = buf + 8;
544 len = (desc_ptr[2] << 8) + desc_ptr[3];
545 /* skip past overall descriptor */
546 desc_ptr += len + 4;
547 addl_desc_ptr = ses_dev->page10 + 8;
548 }
549 type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
550 components = 0;
551 for (i = 0; i < types; i++, type_ptr += 4) {
552 for (j = 0; j < type_ptr[1]; j++) {
553 char *name = NULL;
554 struct enclosure_component *ecomp;
555
556 if (desc_ptr) {
557 len = (desc_ptr[2] << 8) + desc_ptr[3];
558 desc_ptr += 4;
559 /* Add trailing zero - pushes into
560 * reserved space */
561 desc_ptr[len] = '\0';
562 name = desc_ptr;
563 }
564 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
565 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
566 continue;
567 ecomp = enclosure_component_register(edev,
568 components++,
569 type_ptr[0],
570 name);
571 if (desc_ptr) {
572 desc_ptr += len;
573 if (!IS_ERR(ecomp))
574 ses_process_descriptor(ecomp,
575 addl_desc_ptr);
576
577 if (addl_desc_ptr)
578 addl_desc_ptr += addl_desc_ptr[1] + 2;
579 }
580 }
581 }
582 kfree(buf);
583 kfree(hdr_buf);
584
585 /* see if there are any matching devices that were
586 * added before we found the enclosure */
587 shost_for_each_device(tmp_sdev, sdev->host) {
588 if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
589 continue;
590 ses_match_to_enclosure(edev, tmp_sdev);
591 }
592
593 return 0;
594
595 recv_failed:
596 sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n",
597 result);
598 err = -ENODEV;
599 err_free:
600 kfree(buf);
601 kfree(ses_dev->page10);
602 kfree(ses_dev->page2);
603 kfree(ses_dev->page1);
604 err_init_free:
605 kfree(ses_dev);
606 kfree(hdr_buf);
607 sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err);
608 return err;
609}
610
611static int ses_remove(struct device *dev)
612{
613 return 0;
614}
615
616static void ses_intf_remove(struct class_device *cdev,
617 struct class_interface *intf)
618{
619 struct scsi_device *sdev = to_scsi_device(cdev->dev);
620 struct enclosure_device *edev;
621 struct ses_device *ses_dev;
622
623 if (!scsi_device_enclosure(sdev))
624 return;
625
626 edev = enclosure_find(cdev->dev);
627 if (!edev)
628 return;
629
630 ses_dev = edev->scratch;
631 edev->scratch = NULL;
632
633 kfree(ses_dev->page1);
634 kfree(ses_dev->page2);
635 kfree(ses_dev);
636
637 kfree(edev->component[0].scratch);
638
639 class_device_put(&edev->cdev);
640 enclosure_unregister(edev);
641}
642
643static struct class_interface ses_interface = {
644 .add = ses_intf_add,
645 .remove = ses_intf_remove,
646};
647
648static struct scsi_driver ses_template = {
649 .owner = THIS_MODULE,
650 .gendrv = {
651 .name = "ses",
652 .probe = ses_probe,
653 .remove = ses_remove,
654 },
655};
656
657static int __init ses_init(void)
658{
659 int err;
660
661 err = scsi_register_interface(&ses_interface);
662 if (err)
663 return err;
664
665 err = scsi_register_driver(&ses_template.gendrv);
666 if (err)
667 goto out_unreg;
668
669 return 0;
670
671 out_unreg:
672 scsi_unregister_interface(&ses_interface);
673 return err;
674}
675
676static void __exit ses_exit(void)
677{
678 scsi_unregister_driver(&ses_template.gendrv);
679 scsi_unregister_interface(&ses_interface);
680}
681
682module_init(ses_init);
683module_exit(ses_exit);
684
685MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE);
686
687MODULE_AUTHOR("James Bottomley");
688MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver");
689MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 50ba49250203..208565bdbe8e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -163,6 +163,29 @@ static void scsi_cd_put(struct scsi_cd *cd)
163 mutex_unlock(&sr_ref_mutex); 163 mutex_unlock(&sr_ref_mutex);
164} 164}
165 165
166/* identical to scsi_test_unit_ready except that it doesn't
167 * eat the NOT_READY returns for removable media */
168int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
169{
170 int retries = MAX_RETRIES;
171 int the_result;
172 u8 cmd[] = {TEST_UNIT_READY, 0, 0, 0, 0, 0 };
173
174 /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
175 * conditions are gone, or a timeout happens
176 */
177 do {
178 the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
179 0, sshdr, SR_TIMEOUT,
180 retries--);
181
182 } while (retries > 0 &&
183 (!scsi_status_is_good(the_result) ||
184 (scsi_sense_valid(sshdr) &&
185 sshdr->sense_key == UNIT_ATTENTION)));
186 return the_result;
187}
188
166/* 189/*
167 * This function checks to see if the media has been changed in the 190 * This function checks to see if the media has been changed in the
168 * CDROM drive. It is possible that we have already sensed a change, 191 * CDROM drive. It is possible that we have already sensed a change,
@@ -185,8 +208,7 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
185 } 208 }
186 209
187 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 210 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
188 retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, 211 retval = sr_test_unit_ready(cd->device, sshdr);
189 sshdr);
190 if (retval || (scsi_sense_valid(sshdr) && 212 if (retval || (scsi_sense_valid(sshdr) &&
191 /* 0x3a is medium not present */ 213 /* 0x3a is medium not present */
192 sshdr->asc == 0x3a)) { 214 sshdr->asc == 0x3a)) {
@@ -733,10 +755,8 @@ static void get_capabilities(struct scsi_cd *cd)
733{ 755{
734 unsigned char *buffer; 756 unsigned char *buffer;
735 struct scsi_mode_data data; 757 struct scsi_mode_data data;
736 unsigned char cmd[MAX_COMMAND_SIZE];
737 struct scsi_sense_hdr sshdr; 758 struct scsi_sense_hdr sshdr;
738 unsigned int the_result; 759 int rc, n;
739 int retries, rc, n;
740 760
741 static const char *loadmech[] = 761 static const char *loadmech[] =
742 { 762 {
@@ -758,23 +778,8 @@ static void get_capabilities(struct scsi_cd *cd)
758 return; 778 return;
759 } 779 }
760 780
761 /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION 781 /* eat unit attentions */
762 * conditions are gone, or a timeout happens 782 sr_test_unit_ready(cd->device, &sshdr);
763 */
764 retries = 0;
765 do {
766 memset((void *)cmd, 0, MAX_COMMAND_SIZE);
767 cmd[0] = TEST_UNIT_READY;
768
769 the_result = scsi_execute_req (cd->device, cmd, DMA_NONE, NULL,
770 0, &sshdr, SR_TIMEOUT,
771 MAX_RETRIES);
772
773 retries++;
774 } while (retries < 5 &&
775 (!scsi_status_is_good(the_result) ||
776 (scsi_sense_valid(&sshdr) &&
777 sshdr.sense_key == UNIT_ATTENTION)));
778 783
779 /* ask for mode page 0x2a */ 784 /* ask for mode page 0x2a */
780 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, 785 rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
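Because sr_test_unit_ready() no longer swallows NOT_READY for removable media, callers can inspect the sense data themselves, as sr_media_change() above now does. A rough usage sketch (illustrative only, mirroring that caller):

	struct scsi_sense_hdr sshdr;
	int retval;

	retval = sr_test_unit_ready(cd->device, &sshdr);
	if (retval || (scsi_sense_valid(&sshdr) && sshdr.asc == 0x3a))
		/* 0x3a: medium not present */
		return 0;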
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 81fbc0b78a52..1e144dfdbd4b 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -61,6 +61,7 @@ int sr_select_speed(struct cdrom_device_info *cdi, int speed);
61int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); 61int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
62 62
63int sr_is_xa(Scsi_CD *); 63int sr_is_xa(Scsi_CD *);
64int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr);
64 65
65/* sr_vendor.c */ 66/* sr_vendor.c */
66void sr_vendor_init(Scsi_CD *); 67void sr_vendor_init(Scsi_CD *);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index d5cebff1d646..ae87d08df588 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -306,8 +306,7 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
306 /* we have no changer support */ 306 /* we have no changer support */
307 return -EINVAL; 307 return -EINVAL;
308 } 308 }
309 if (0 == scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, 309 if (0 == sr_test_unit_ready(cd->device, &sshdr))
310 &sshdr))
311 return CDS_DISC_OK; 310 return CDS_DISC_OK;
312 311
313 if (!cdrom_get_media_event(cdi, &med)) { 312 if (!cdrom_get_media_event(cdi, &med)) {
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 1bc41907a038..06152c7fa689 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -1,392 +1,316 @@
1/* sun3x_esp.c: EnhancedScsiProcessor Sun3x SCSI driver code. 1/* sun3x_esp.c: ESP front-end for Sun3x systems.
2 * 2 *
3 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de) 3 * Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
4 *
5 * Based on David S. Miller's esp driver
6 */ 4 */
7 5
8#include <linux/kernel.h> 6#include <linux/kernel.h>
9#include <linux/types.h> 7#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/slab.h>
12#include <linux/blkdev.h>
13#include <linux/proc_fs.h>
14#include <linux/stat.h>
15#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h>
16#include <linux/interrupt.h> 13#include <linux/interrupt.h>
17 14
18#include "scsi.h"
19#include <scsi/scsi_host.h>
20#include "NCR53C9x.h"
21
22#include <asm/sun3x.h> 15#include <asm/sun3x.h>
16#include <asm/io.h>
17#include <asm/dma.h>
23#include <asm/dvma.h> 18#include <asm/dvma.h>
24#include <asm/irq.h>
25
26static void dma_barrier(struct NCR_ESP *esp);
27static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
28static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
29static void dma_drain(struct NCR_ESP *esp);
30static void dma_invalidate(struct NCR_ESP *esp);
31static void dma_dump_state(struct NCR_ESP *esp);
32static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
33static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
34static void dma_ints_off(struct NCR_ESP *esp);
35static void dma_ints_on(struct NCR_ESP *esp);
36static int dma_irq_p(struct NCR_ESP *esp);
37static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr);
38static int dma_ports_p(struct NCR_ESP *esp);
39static void dma_reset(struct NCR_ESP *esp);
40static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
41static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
42static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
43static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
44static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
45static void dma_advance_sg (Scsi_Cmnd *sp);
46
47/* Detecting ESP chips on the machine. This is the simple and easy
48 * version.
49 */
50int sun3x_esp_detect(struct scsi_host_template *tpnt)
51{
52 struct NCR_ESP *esp;
53 struct ConfigDev *esp_dev;
54
55 esp_dev = 0;
56 esp = esp_allocate(tpnt, esp_dev, 0);
57
58 /* Do command transfer with DMA */
59 esp->do_pio_cmds = 0;
60
61 /* Required functions */
62 esp->dma_bytes_sent = &dma_bytes_sent;
63 esp->dma_can_transfer = &dma_can_transfer;
64 esp->dma_dump_state = &dma_dump_state;
65 esp->dma_init_read = &dma_init_read;
66 esp->dma_init_write = &dma_init_write;
67 esp->dma_ints_off = &dma_ints_off;
68 esp->dma_ints_on = &dma_ints_on;
69 esp->dma_irq_p = &dma_irq_p;
70 esp->dma_ports_p = &dma_ports_p;
71 esp->dma_setup = &dma_setup;
72
73 /* Optional functions */
74 esp->dma_barrier = &dma_barrier;
75 esp->dma_invalidate = &dma_invalidate;
76 esp->dma_drain = &dma_drain;
77 esp->dma_irq_entry = 0;
78 esp->dma_irq_exit = 0;
79 esp->dma_led_on = 0;
80 esp->dma_led_off = 0;
81 esp->dma_poll = &dma_poll;
82 esp->dma_reset = &dma_reset;
83
84 /* virtual DMA functions */
85 esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
86 esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
87 esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
88 esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
89 esp->dma_advance_sg = &dma_advance_sg;
90
91 /* SCSI chip speed */
92 esp->cfreq = 20000000;
93 esp->eregs = (struct ESP_regs *)(SUN3X_ESP_BASE);
94 esp->dregs = (void *)SUN3X_ESP_DMA;
95 19
96 esp->esp_command = (volatile unsigned char *)dvma_malloc(DVMA_PAGE_SIZE); 20/* DMA controller reg offsets */
97 esp->esp_command_dvma = dvma_vtob((unsigned long)esp->esp_command); 21#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
98 22#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
99 esp->irq = 2; 23#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
100 if (request_irq(esp->irq, esp_intr, IRQF_DISABLED, 24#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
101 "SUN3X SCSI", esp->ehost)) {
102 esp_deallocate(esp);
103 return 0;
104 }
105 25
106 esp->scsi_id = 7; 26#include <scsi/scsi_host.h>
107 esp->diff = 0;
108 27
109 esp_initialize(esp); 28#include "esp_scsi.h"
110 29
111 /* for reasons beyond my knowledge (and which should likely be fixed) 30#define DRV_MODULE_NAME "sun3x_esp"
112 sync mode doesn't work on a 3/80 at 5mhz. but it does at 4. */ 31#define PFX DRV_MODULE_NAME ": "
113 esp->sync_defp = 0x3f; 32#define DRV_VERSION "1.000"
33#define DRV_MODULE_RELDATE "Nov 1, 2007"
114 34
115 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, 35/*
116 esps_in_use); 36 * m68k always assumes readl/writel operate on little endian
117 esps_running = esps_in_use; 37 * mmio space; this is wrong at least for Sun3x, so we
118 return esps_in_use; 38 * need to workaround this until a proper way is found
39 */
40#if 0
41#define dma_read32(REG) \
42 readl(esp->dma_regs + (REG))
43#define dma_write32(VAL, REG) \
44 writel((VAL), esp->dma_regs + (REG))
45#else
46#define dma_read32(REG) \
47 *(volatile u32 *)(esp->dma_regs + (REG))
48#define dma_write32(VAL, REG) \
49 do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0)
50#endif
51
52static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg)
53{
54 writeb(val, esp->regs + (reg * 4UL));
119} 55}
120 56
121static void dma_do_drain(struct NCR_ESP *esp) 57static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
122{ 58{
123 struct sparc_dma_registers *dregs = 59 return readb(esp->regs + (reg * 4UL));
124 (struct sparc_dma_registers *) esp->dregs;
125
126 int count = 500000;
127
128 while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0))
129 udelay(1);
130
131 if(!count) {
132 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
133 }
134
135 dregs->cond_reg |= DMA_FIFO_STDRAIN;
136
137 count = 500000;
138
139 while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0))
140 udelay(1);
141
142 if(!count) {
143 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
144 }
145
146} 60}
147 61
148static void dma_barrier(struct NCR_ESP *esp) 62static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
63 size_t sz, int dir)
149{ 64{
150 struct sparc_dma_registers *dregs = 65 return dma_map_single(esp->dev, buf, sz, dir);
151 (struct sparc_dma_registers *) esp->dregs;
152 int count = 500000;
153
154 while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0))
155 udelay(1);
156
157 if(!count) {
158 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
159 }
160
161 dregs->cond_reg &= ~(DMA_ENABLE);
162} 66}
163 67
164/* This uses various DMA csr fields and the fifo flags count value to 68static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
165 * determine how many bytes were successfully sent/received by the ESP. 69 int num_sg, int dir)
166 */
167static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
168{ 70{
169 struct sparc_dma_registers *dregs = 71 return dma_map_sg(esp->dev, sg, num_sg, dir);
170 (struct sparc_dma_registers *) esp->dregs;
171
172 int rval = dregs->st_addr - esp->esp_command_dvma;
173
174 return rval - fifo_count;
175} 72}
176 73
177static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp) 74static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
75 size_t sz, int dir)
178{ 76{
179 return sp->SCp.this_residual; 77 dma_unmap_single(esp->dev, addr, sz, dir);
180} 78}
181 79
182static void dma_drain(struct NCR_ESP *esp) 80static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
81 int num_sg, int dir)
183{ 82{
184 struct sparc_dma_registers *dregs = 83 dma_unmap_sg(esp->dev, sg, num_sg, dir);
185 (struct sparc_dma_registers *) esp->dregs;
186 int count = 500000;
187
188 if(dregs->cond_reg & DMA_FIFO_ISDRAIN) {
189 dregs->cond_reg |= DMA_FIFO_STDRAIN;
190 while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0))
191 udelay(1);
192 if(!count) {
193 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
194 }
195
196 }
197} 84}
198 85
199static void dma_invalidate(struct NCR_ESP *esp) 86static int sun3x_esp_irq_pending(struct esp *esp)
200{ 87{
201 struct sparc_dma_registers *dregs = 88 if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
202 (struct sparc_dma_registers *) esp->dregs; 89 return 1;
203 90 return 0;
204 __u32 tmp; 91}
205 int count = 500000;
206
207 while(((tmp = dregs->cond_reg) & DMA_PEND_READ) && (--count > 0))
208 udelay(1);
209 92
210 if(!count) { 93static void sun3x_esp_reset_dma(struct esp *esp)
211 printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg); 94{
212 } 95 u32 val;
213 96
214 dregs->cond_reg = tmp | DMA_FIFO_INV; 97 val = dma_read32(DMA_CSR);
215 dregs->cond_reg &= ~DMA_FIFO_INV; 98 dma_write32(val | DMA_RST_SCSI, DMA_CSR);
99 dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
216 100
101 /* Enable interrupts. */
102 val = dma_read32(DMA_CSR);
103 dma_write32(val | DMA_INT_ENAB, DMA_CSR);
217} 104}
218 105
219static void dma_dump_state(struct NCR_ESP *esp) 106static void sun3x_esp_dma_drain(struct esp *esp)
220{ 107{
221 struct sparc_dma_registers *dregs = 108 u32 csr;
222 (struct sparc_dma_registers *) esp->dregs; 109 int lim;
223 110
224 ESPLOG(("esp%d: dma -- cond_reg<%08lx> addr<%08lx>\n", 111 csr = dma_read32(DMA_CSR);
225 esp->esp_id, dregs->cond_reg, dregs->st_addr)); 112 if (!(csr & DMA_FIFO_ISDRAIN))
226} 113 return;
227 114
228static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length) 115 dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
229{
230 struct sparc_dma_registers *dregs =
231 (struct sparc_dma_registers *) esp->dregs;
232 116
233 dregs->st_addr = vaddress; 117 lim = 1000;
234 dregs->cond_reg |= (DMA_ST_WRITE | DMA_ENABLE); 118 while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
119 if (--lim == 0) {
120 printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
121 esp->host->unique_id);
122 break;
123 }
124 udelay(1);
125 }
235} 126}
236 127
237static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length) 128static void sun3x_esp_dma_invalidate(struct esp *esp)
238{ 129{
239 struct sparc_dma_registers *dregs = 130 u32 val;
240 (struct sparc_dma_registers *) esp->dregs; 131 int lim;
241 132
242 /* Set up the DMA counters */ 133 lim = 1000;
134 while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
135 if (--lim == 0) {
136 printk(KERN_ALERT PFX "esp%d: DMA will not "
137 "invalidate!\n", esp->host->unique_id);
138 break;
139 }
140 udelay(1);
141 }
243 142
244 dregs->st_addr = vaddress; 143 val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
245 dregs->cond_reg = ((dregs->cond_reg & ~(DMA_ST_WRITE)) | DMA_ENABLE); 144 val |= DMA_FIFO_INV;
145 dma_write32(val, DMA_CSR);
146 val &= ~DMA_FIFO_INV;
147 dma_write32(val, DMA_CSR);
246} 148}
247 149
248static void dma_ints_off(struct NCR_ESP *esp) 150static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
151 u32 dma_count, int write, u8 cmd)
249{ 152{
250 DMA_INTSOFF((struct sparc_dma_registers *) esp->dregs); 153 u32 csr;
154
155 BUG_ON(!(cmd & ESP_CMD_DMA));
156
157 sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
158 sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
159 csr = dma_read32(DMA_CSR);
160 csr |= DMA_ENABLE;
161 if (write)
162 csr |= DMA_ST_WRITE;
163 else
164 csr &= ~DMA_ST_WRITE;
165 dma_write32(csr, DMA_CSR);
166 dma_write32(addr, DMA_ADDR);
167
168 scsi_esp_cmd(esp, cmd);
251} 169}
252 170
253static void dma_ints_on(struct NCR_ESP *esp) 171static int sun3x_esp_dma_error(struct esp *esp)
254{ 172{
255 DMA_INTSON((struct sparc_dma_registers *) esp->dregs); 173 u32 csr = dma_read32(DMA_CSR);
256}
257 174
258static int dma_irq_p(struct NCR_ESP *esp) 175 if (csr & DMA_HNDL_ERROR)
259{ 176 return 1;
260 return DMA_IRQ_P((struct sparc_dma_registers *) esp->dregs); 177
178 return 0;
261} 179}
262 180
263static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr) 181static const struct esp_driver_ops sun3x_esp_ops = {
182 .esp_write8 = sun3x_esp_write8,
183 .esp_read8 = sun3x_esp_read8,
184 .map_single = sun3x_esp_map_single,
185 .map_sg = sun3x_esp_map_sg,
186 .unmap_single = sun3x_esp_unmap_single,
187 .unmap_sg = sun3x_esp_unmap_sg,
188 .irq_pending = sun3x_esp_irq_pending,
189 .reset_dma = sun3x_esp_reset_dma,
190 .dma_drain = sun3x_esp_dma_drain,
191 .dma_invalidate = sun3x_esp_dma_invalidate,
192 .send_dma_cmd = sun3x_esp_send_dma_cmd,
193 .dma_error = sun3x_esp_dma_error,
194};
195
196static int __devinit esp_sun3x_probe(struct platform_device *dev)
264{ 197{
265 int count = 50; 198 struct scsi_host_template *tpnt = &scsi_esp_template;
266 dma_do_drain(esp); 199 struct Scsi_Host *host;
200 struct esp *esp;
201 struct resource *res;
202 int err = -ENOMEM;
267 203
268 /* Wait till the first bits settle. */ 204 host = scsi_host_alloc(tpnt, sizeof(struct esp));
269 while((*(volatile unsigned char *)vaddr == 0xff) && (--count > 0)) 205 if (!host)
270 udelay(1); 206 goto fail;
271 207
272 if(!count) { 208 host->max_id = 8;
273// printk("%s:%d timeout expire (data %02x)\n", __FILE__, __LINE__, 209 esp = shost_priv(host);
274// esp_read(esp->eregs->esp_fdata));
275 //mach_halt();
276 vaddr[0] = esp_read(esp->eregs->esp_fdata);
277 vaddr[1] = esp_read(esp->eregs->esp_fdata);
278 }
279 210
280} 211 esp->host = host;
212 esp->dev = dev;
213 esp->ops = &sun3x_esp_ops;
281 214
282static int dma_ports_p(struct NCR_ESP *esp) 215 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
283{ 216 if (!res && !res->start)
284 return (((struct sparc_dma_registers *) esp->dregs)->cond_reg 217 goto fail_unlink;
285 & DMA_INT_ENAB);
286}
287 218
288/* Resetting various pieces of the ESP scsi driver chipset/buses. */ 219 esp->regs = ioremap_nocache(res->start, 0x20);
289static void dma_reset(struct NCR_ESP *esp) 220 if (!esp->regs)
290{ 221 goto fail_unmap_regs;
291 struct sparc_dma_registers *dregs =
292 (struct sparc_dma_registers *)esp->dregs;
293 222
294 /* Punt the DVMA into a known state. */ 223 res = platform_get_resource(dev, IORESOURCE_MEM, 1);
295 dregs->cond_reg |= DMA_RST_SCSI; 224 if (!res || !res->start)
296 dregs->cond_reg &= ~(DMA_RST_SCSI); 225 goto fail_unmap_regs;
297 DMA_INTSON(dregs);
298}
299 226
300static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write) 227 esp->dma_regs = ioremap_nocache(res->start, 0x10);
301{
302 struct sparc_dma_registers *dregs =
303 (struct sparc_dma_registers *) esp->dregs;
304 unsigned long nreg = dregs->cond_reg;
305 228
306// printk("dma_setup %c addr %08x cnt %08x\n", 229 esp->command_block = dma_alloc_coherent(esp->dev, 16,
307// write ? 'W' : 'R', addr, count); 230 &esp->command_block_dma,
231 GFP_KERNEL);
232 if (!esp->command_block)
233 goto fail_unmap_regs_dma;
308 234
309 dma_do_drain(esp); 235 host->irq = platform_get_irq(dev, 0);
236 err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
237 "SUN3X ESP", esp);
238 if (err < 0)
239 goto fail_unmap_command_block;
310 240
311 if(write) 241 esp->scsi_id = 7;
312 nreg |= DMA_ST_WRITE; 242 esp->host->this_id = esp->scsi_id;
313 else { 243 esp->scsi_id_mask = (1 << esp->scsi_id);
314 nreg &= ~(DMA_ST_WRITE); 244 esp->cfreq = 20000000;
315 }
316
317 nreg |= DMA_ENABLE;
318 dregs->cond_reg = nreg;
319 dregs->st_addr = addr;
320}
321 245
322static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) 246 dev_set_drvdata(&dev->dev, esp);
323{ 247
324 sp->SCp.have_data_in = dvma_map((unsigned long)sp->SCp.buffer, 248 err = scsi_esp_register(esp, &dev->dev);
325 sp->SCp.this_residual); 249 if (err)
326 sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in); 250 goto fail_free_irq;
251
252 return 0;
253
254fail_free_irq:
255 free_irq(host->irq, esp);
256fail_unmap_command_block:
257 dma_free_coherent(esp->dev, 16,
258 esp->command_block,
259 esp->command_block_dma);
260fail_unmap_regs_dma:
261 iounmap(esp->dma_regs);
262fail_unmap_regs:
263 iounmap(esp->regs);
264fail_unlink:
265 scsi_host_put(host);
266fail:
267 return err;
327} 268}
328 269
329static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 270static int __devexit esp_sun3x_remove(struct platform_device *dev)
330{ 271{
331 int sz = sp->SCp.buffers_residual; 272 struct esp *esp = dev_get_drvdata(&dev->dev);
332 struct scatterlist *sg = sp->SCp.buffer; 273 unsigned int irq = esp->host->irq;
333 274 u32 val;
334 while (sz >= 0) {
335 sg[sz].dma_address = dvma_map((unsigned long)sg_virt(&sg[sz]),
336 sg[sz].length);
337 sz--;
338 }
339 sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address);
340}
341 275
342static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) 276 scsi_esp_unregister(esp);
343{
344 dvma_unmap((char *)sp->SCp.have_data_in);
345}
346 277
347static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp) 278 /* Disable interrupts. */
348{ 279 val = dma_read32(DMA_CSR);
349 int sz = sp->use_sg - 1; 280 dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
350 struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
351
352 while(sz >= 0) {
353 dvma_unmap((char *)sg[sz].dma_address);
354 sz--;
355 }
356}
357 281
358static void dma_advance_sg (Scsi_Cmnd *sp) 282 free_irq(irq, esp);
359{ 283 dma_free_coherent(esp->dev, 16,
360 sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dma_address); 284 esp->command_block,
361} 285 esp->command_block_dma);
362 286
363static int sun3x_esp_release(struct Scsi_Host *instance) 287 scsi_host_put(esp->host);
364{
365 /* this code does not support being compiled as a module */
366 return 1;
367 288
289 return 0;
368} 290}
369 291
370static struct scsi_host_template driver_template = { 292static struct platform_driver esp_sun3x_driver = {
371 .proc_name = "sun3x_esp", 293 .probe = esp_sun3x_probe,
372 .proc_info = &esp_proc_info, 294 .remove = __devexit_p(esp_sun3x_remove),
373 .name = "Sun ESP 100/100a/200", 295 .driver = {
374 .detect = sun3x_esp_detect, 296 .name = "sun3x_esp",
375 .release = sun3x_esp_release, 297 },
376 .slave_alloc = esp_slave_alloc,
377 .slave_destroy = esp_slave_destroy,
378 .info = esp_info,
379 .queuecommand = esp_queue,
380 .eh_abort_handler = esp_abort,
381 .eh_bus_reset_handler = esp_reset,
382 .can_queue = 7,
383 .this_id = 7,
384 .sg_tablesize = SG_ALL,
385 .cmd_per_lun = 1,
386 .use_clustering = DISABLE_CLUSTERING,
387}; 298};
388 299
300static int __init sun3x_esp_init(void)
301{
302 return platform_driver_register(&esp_sun3x_driver);
303}
389 304
390#include "scsi_module.c" 305static void __exit sun3x_esp_exit(void)
306{
307 platform_driver_unregister(&esp_sun3x_driver);
308}
391 309
310MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
311MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
392MODULE_LICENSE("GPL"); 312MODULE_LICENSE("GPL");
313MODULE_VERSION(DRV_VERSION);
314
315module_init(sun3x_esp_init);
316module_exit(sun3x_esp_exit);
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 254bdaeb35ff..35142b5341b5 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3842,7 +3842,7 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
3842 if (cp->startp == cp->phys.head.lastp || 3842 if (cp->startp == cp->phys.head.lastp ||
3843 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), 3843 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
3844 &dp_ofs) < 0) { 3844 &dp_ofs) < 0) {
3845 return cp->data_len; 3845 return cp->data_len - cp->odd_byte_adjustment;
3846 } 3846 }
3847 3847
3848 /* 3848 /*
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 662c00451be4..58d7eee4fe81 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1216,7 +1216,7 @@ static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
1216 cpp->xdir = DTD_IN; 1216 cpp->xdir = DTD_IN;
1217 return; 1217 return;
1218 } 1218 }
1219 else if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) { 1219 else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
1220 cpp->xdir = DTD_OUT; 1220 cpp->xdir = DTD_OUT;
1221 return; 1221 return;
1222 } 1222 }
diff --git a/fs/Kconfig b/fs/Kconfig
index 3bf6ace1720c..d7312825592b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1778,12 +1778,9 @@ config SUNRPC_GSS
1778 tristate 1778 tristate
1779 1779
1780config SUNRPC_XPRT_RDMA 1780config SUNRPC_XPRT_RDMA
1781 tristate "RDMA transport for sunrpc (EXPERIMENTAL)" 1781 tristate
1782 depends on SUNRPC && INFINIBAND && EXPERIMENTAL 1782 depends on SUNRPC && INFINIBAND && EXPERIMENTAL
1783 default m 1783 default SUNRPC && INFINIBAND
1784 help
1785 Adds a client RPC transport for supporting kernel NFS over RDMA
1786 mounts, including Infiniband and iWARP. Experimental.
1787 1784
1788config SUNRPC_BIND34 1785config SUNRPC_BIND34
1789 bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)" 1786 bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)"
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 87eb93694af7..7f6063acaa3b 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -112,5 +112,8 @@ const struct file_operations jfs_file_operations = {
112 .splice_write = generic_file_splice_write, 112 .splice_write = generic_file_splice_write,
113 .fsync = jfs_fsync, 113 .fsync = jfs_fsync,
114 .release = jfs_release, 114 .release = jfs_release,
115 .ioctl = jfs_ioctl, 115 .unlocked_ioctl = jfs_ioctl,
116#ifdef CONFIG_COMPAT
117 .compat_ioctl = jfs_compat_ioctl,
118#endif
116}; 119};
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index dfda12a073e1..a1f8e375ad21 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -51,9 +51,9 @@ static long jfs_map_ext2(unsigned long flags, int from)
51} 51}
52 52
53 53
54int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, 54long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
55 unsigned long arg)
56{ 55{
56 struct inode *inode = filp->f_dentry->d_inode;
57 struct jfs_inode_info *jfs_inode = JFS_IP(inode); 57 struct jfs_inode_info *jfs_inode = JFS_IP(inode);
58 unsigned int flags; 58 unsigned int flags;
59 59
@@ -82,6 +82,10 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
82 /* Is it quota file? Do not allow user to mess with it */ 82 /* Is it quota file? Do not allow user to mess with it */
83 if (IS_NOQUOTA(inode)) 83 if (IS_NOQUOTA(inode))
84 return -EPERM; 84 return -EPERM;
85
86 /* Lock against other parallel changes of flags */
87 mutex_lock(&inode->i_mutex);
88
85 jfs_get_inode_flags(jfs_inode); 89 jfs_get_inode_flags(jfs_inode);
86 oldflags = jfs_inode->mode2; 90 oldflags = jfs_inode->mode2;
87 91
@@ -92,8 +96,10 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
92 if ((oldflags & JFS_IMMUTABLE_FL) || 96 if ((oldflags & JFS_IMMUTABLE_FL) ||
93 ((flags ^ oldflags) & 97 ((flags ^ oldflags) &
94 (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) { 98 (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
95 if (!capable(CAP_LINUX_IMMUTABLE)) 99 if (!capable(CAP_LINUX_IMMUTABLE)) {
100 mutex_unlock(&inode->i_mutex);
96 return -EPERM; 101 return -EPERM;
102 }
97 } 103 }
98 104
99 flags = flags & JFS_FL_USER_MODIFIABLE; 105 flags = flags & JFS_FL_USER_MODIFIABLE;
@@ -101,6 +107,7 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
101 jfs_inode->mode2 = flags; 107 jfs_inode->mode2 = flags;
102 108
103 jfs_set_inode_flags(inode); 109 jfs_set_inode_flags(inode);
110 mutex_unlock(&inode->i_mutex);
104 inode->i_ctime = CURRENT_TIME_SEC; 111 inode->i_ctime = CURRENT_TIME_SEC;
105 mark_inode_dirty(inode); 112 mark_inode_dirty(inode);
106 return 0; 113 return 0;
@@ -110,3 +117,21 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
110 } 117 }
111} 118}
112 119
120#ifdef CONFIG_COMPAT
121long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
122{
123 /* While these ioctl numbers are defined with 'long' and have different
124 * numbers than the 64bit ABI,
125 * the actual implementation only deals with ints and is compatible.
126 */
127 switch (cmd) {
128 case JFS_IOC_GETFLAGS32:
129 cmd = JFS_IOC_GETFLAGS;
130 break;
131 case JFS_IOC_SETFLAGS32:
132 cmd = JFS_IOC_SETFLAGS;
133 break;
134 }
135 return jfs_ioctl(filp, cmd, arg);
136}
137#endif
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index c387540d3425..395c4c0d0f06 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -170,5 +170,7 @@ struct dinode {
170#define JFS_IOC_GETFLAGS _IOR('f', 1, long) 170#define JFS_IOC_GETFLAGS _IOR('f', 1, long)
171#define JFS_IOC_SETFLAGS _IOW('f', 2, long) 171#define JFS_IOC_SETFLAGS _IOW('f', 2, long)
172 172
173#define JFS_IOC_GETFLAGS32 _IOR('f', 1, int)
174#define JFS_IOC_SETFLAGS32 _IOW('f', 2, int)
173 175
174#endif /*_H_JFS_DINODE */ 176#endif /*_H_JFS_DINODE */
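The *32 variants exist because _IOR()/_IOW() fold sizeof(type) into the command number: a 32-bit task has sizeof(long) == 4, so the value it actually issues matches the 'int' encoding rather than the native 64-bit one, which is what jfs_compat_ioctl() translates. As a sanity check one could assert (illustrative only, not part of the patch):

	BUILD_BUG_ON(_IOC_NR(JFS_IOC_GETFLAGS) != _IOC_NR(JFS_IOC_GETFLAGS32));
	BUILD_BUG_ON(_IOC_SIZE(JFS_IOC_GETFLAGS32) != sizeof(int));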
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 95a6a11425e5..adb2fafcc544 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -22,8 +22,8 @@ struct fid;
22 22
23extern struct inode *ialloc(struct inode *, umode_t); 23extern struct inode *ialloc(struct inode *, umode_t);
24extern int jfs_fsync(struct file *, struct dentry *, int); 24extern int jfs_fsync(struct file *, struct dentry *, int);
25extern int jfs_ioctl(struct inode *, struct file *, 25extern long jfs_ioctl(struct file *, unsigned int, unsigned long);
26 unsigned int, unsigned long); 26extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long);
27extern struct inode *jfs_iget(struct super_block *, unsigned long); 27extern struct inode *jfs_iget(struct super_block *, unsigned long);
28extern int jfs_commit_inode(struct inode *, int); 28extern int jfs_commit_inode(struct inode *, int);
29extern int jfs_write_inode(struct inode*, int); 29extern int jfs_write_inode(struct inode*, int);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 403cfc24c6fe..0ba6778edaa2 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1556,7 +1556,10 @@ const struct file_operations jfs_dir_operations = {
1556 .read = generic_read_dir, 1556 .read = generic_read_dir,
1557 .readdir = jfs_readdir, 1557 .readdir = jfs_readdir,
1558 .fsync = jfs_fsync, 1558 .fsync = jfs_fsync,
1559 .ioctl = jfs_ioctl, 1559 .unlocked_ioctl = jfs_ioctl,
1560#ifdef CONFIG_COMPAT
1561 .compat_ioctl = jfs_compat_ioctl,
1562#endif
1560}; 1563};
1561 1564
1562static int jfs_ci_hash(struct dentry *dir, struct qstr *this) 1565static int jfs_ci_hash(struct dentry *dir, struct qstr *this)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b144b1957dd9..f55c437124a2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -697,6 +697,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
697} 697}
698 698
699/* 699/*
700 * If the page cache is marked as unsafe or invalid, then we can't rely on
701 * the PageUptodate() flag. In this case, we will need to turn off
702 * write optimisations that depend on the page contents being correct.
703 */
704static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
705{
706 return PageUptodate(page) &&
707 !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
708}
709
710/*
700 * Update and possibly write a cached page of an NFS file. 711 * Update and possibly write a cached page of an NFS file.
701 * 712 *
702 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad 713 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
@@ -717,10 +728,13 @@ int nfs_updatepage(struct file *file, struct page *page,
717 (long long)(page_offset(page) +offset)); 728 (long long)(page_offset(page) +offset));
718 729
719 /* If we're not using byte range locks, and we know the page 730 /* If we're not using byte range locks, and we know the page
720 * is entirely in cache, it may be more efficient to avoid 731 * is up to date, it may be more efficient to extend the write
721 * fragmenting write requests. 732 * to cover the entire page in order to avoid fragmentation
733 * inefficiencies.
722 */ 734 */
723 if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { 735 if (nfs_write_pageuptodate(page, inode) &&
736 inode->i_flock == NULL &&
737 !(file->f_mode & O_SYNC)) {
724 count = max(count + offset, nfs_page_length(page)); 738 count = max(count + offset, nfs_page_length(page));
725 offset = 0; 739 offset = 0;
726 } 740 }
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index b2e832aca567..d25b9af28500 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -38,6 +38,15 @@
38 * locking semantics of the file system using the protocol. It should 38 * locking semantics of the file system using the protocol. It should
39 * be somewhere else, I'm sure, but right now it isn't. 39 * be somewhere else, I'm sure, but right now it isn't.
40 * 40 *
41 * With version 11, we separate out the filesystem locking portion. The
42 * filesystem now has a major.minor version it negotiates. Version 11
43 * introduces this negotiation to the o2dlm protocol, and as such the
44 * version here in tcp_internal.h should not need to be bumped for
45 * filesystem locking changes.
46 *
47 * New in version 11
48 * - Negotiation of filesystem locking in the dlm join.
49 *
41 * New in version 10: 50 * New in version 10:
42 * - Meta/data locks combined 51 * - Meta/data locks combined
43 * 52 *
@@ -66,7 +75,7 @@
66 * - full 64 bit i_size in the metadata lock lvbs 75 * - full 64 bit i_size in the metadata lock lvbs
67 * - introduction of "rw" lock and pushing meta/data locking down 76 * - introduction of "rw" lock and pushing meta/data locking down
68 */ 77 */
69#define O2NET_PROTOCOL_VERSION 10ULL 78#define O2NET_PROTOCOL_VERSION 11ULL
70struct o2net_handshake { 79struct o2net_handshake {
71 __be64 protocol_version; 80 __be64 protocol_version;
72 __be64 connector_id; 81 __be64 connector_id;
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index cfd5cb65cab0..b5786a787fab 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -193,7 +193,12 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm,
193 dlm_astunlockfunc_t *unlockast, 193 dlm_astunlockfunc_t *unlockast,
194 void *data); 194 void *data);
195 195
196struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key); 196struct dlm_protocol_version {
197 u8 pv_major;
198 u8 pv_minor;
199};
200struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key,
201 struct dlm_protocol_version *fs_proto);
197 202
198void dlm_unregister_domain(struct dlm_ctxt *dlm); 203void dlm_unregister_domain(struct dlm_ctxt *dlm);
199 204
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index e90b92f9ece1..9843ee17ea27 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -142,6 +142,12 @@ struct dlm_ctxt
142 spinlock_t work_lock; 142 spinlock_t work_lock;
143 struct list_head dlm_domain_handlers; 143 struct list_head dlm_domain_handlers;
144 struct list_head dlm_eviction_callbacks; 144 struct list_head dlm_eviction_callbacks;
145
146 /* The filesystem specifies this at domain registration. We
147 * cache it here to know what to tell other nodes. */
148 struct dlm_protocol_version fs_locking_proto;
149 /* This is the inter-dlm communication version */
150 struct dlm_protocol_version dlm_locking_proto;
145}; 151};
146 152
147static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i) 153static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
@@ -589,10 +595,24 @@ struct dlm_proxy_ast
589#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN) 595#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
590 596
591#define DLM_MOD_KEY (0x666c6172) 597#define DLM_MOD_KEY (0x666c6172)
592enum dlm_query_join_response { 598enum dlm_query_join_response_code {
593 JOIN_DISALLOW = 0, 599 JOIN_DISALLOW = 0,
594 JOIN_OK, 600 JOIN_OK,
595 JOIN_OK_NO_MAP, 601 JOIN_OK_NO_MAP,
602 JOIN_PROTOCOL_MISMATCH,
603};
604
605union dlm_query_join_response {
606 u32 intval;
607 struct {
608 u8 code; /* Response code. dlm_minor and fs_minor
609 are only valid if this is JOIN_OK */
610 u8 dlm_minor; /* The minor version of the protocol the
611 dlm is speaking. */
612 u8 fs_minor; /* The minor version of the protocol the
613 filesystem is speaking. */
614 u8 reserved;
615 } packet;
596}; 616};
597 617
598struct dlm_lock_request 618struct dlm_lock_request
@@ -633,6 +653,8 @@ struct dlm_query_join_request
633 u8 node_idx; 653 u8 node_idx;
634 u8 pad1[2]; 654 u8 pad1[2];
635 u8 name_len; 655 u8 name_len;
656 struct dlm_protocol_version dlm_proto;
657 struct dlm_protocol_version fs_proto;
636 u8 domain[O2NM_MAX_NAME_LEN]; 658 u8 domain[O2NM_MAX_NAME_LEN];
637 u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)]; 659 u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
638}; 660};
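The union dlm_query_join_response introduced above keeps the join reply inside the single 32-bit status word that o2net already hands back to the requesting node; the packet view just gives that word structure. In dlm_query_join_handler() this amounts to roughly (illustrative only):

	union dlm_query_join_response resp;

	resp.packet.code      = JOIN_OK;
	resp.packet.dlm_minor = query->dlm_proto.pv_minor;
	resp.packet.fs_minor  = query->fs_proto.pv_minor;
	resp.packet.reserved  = 0;
	return resp.intval;	/* handler return value is the wire status */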
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6954565b8ccb..638d2ebb892b 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -123,6 +123,17 @@ DEFINE_SPINLOCK(dlm_domain_lock);
123LIST_HEAD(dlm_domains); 123LIST_HEAD(dlm_domains);
124static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); 124static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
125 125
126/*
127 * The supported protocol version for DLM communication. Running domains
128 * will have a negotiated version with the same major number and a minor
129 * number equal or smaller. The dlm_ctxt->dlm_locking_proto field should
130 * be used to determine what a running domain is actually using.
131 */
132static const struct dlm_protocol_version dlm_protocol = {
133 .pv_major = 1,
134 .pv_minor = 0,
135};
136
126#define DLM_DOMAIN_BACKOFF_MS 200 137#define DLM_DOMAIN_BACKOFF_MS 200
127 138
128static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, 139static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -133,6 +144,8 @@ static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
133 void **ret_data); 144 void **ret_data);
134static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, 145static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
135 void **ret_data); 146 void **ret_data);
147static int dlm_protocol_compare(struct dlm_protocol_version *existing,
148 struct dlm_protocol_version *request);
136 149
137static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); 150static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
138 151
@@ -668,11 +681,45 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
668} 681}
669EXPORT_SYMBOL_GPL(dlm_unregister_domain); 682EXPORT_SYMBOL_GPL(dlm_unregister_domain);
670 683
684static int dlm_query_join_proto_check(char *proto_type, int node,
685 struct dlm_protocol_version *ours,
686 struct dlm_protocol_version *request)
687{
688 int rc;
689 struct dlm_protocol_version proto = *request;
690
691 if (!dlm_protocol_compare(ours, &proto)) {
692 mlog(0,
693 "node %u wanted to join with %s locking protocol "
694 "%u.%u, we respond with %u.%u\n",
695 node, proto_type,
696 request->pv_major,
697 request->pv_minor,
698 proto.pv_major, proto.pv_minor);
699 request->pv_minor = proto.pv_minor;
700 rc = 0;
701 } else {
702 mlog(ML_NOTICE,
703 "Node %u wanted to join with %s locking "
704 "protocol %u.%u, but we have %u.%u, disallowing\n",
705 node, proto_type,
706 request->pv_major,
707 request->pv_minor,
708 ours->pv_major,
709 ours->pv_minor);
710 rc = 1;
711 }
712
713 return rc;
714}
715
671static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data, 716static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
672 void **ret_data) 717 void **ret_data)
673{ 718{
674 struct dlm_query_join_request *query; 719 struct dlm_query_join_request *query;
675 enum dlm_query_join_response response; 720 union dlm_query_join_response response = {
721 .packet.code = JOIN_DISALLOW,
722 };
676 struct dlm_ctxt *dlm = NULL; 723 struct dlm_ctxt *dlm = NULL;
677 u8 nodenum; 724 u8 nodenum;
678 725
@@ -690,11 +737,11 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
690 mlog(0, "node %u is not in our live map yet\n", 737 mlog(0, "node %u is not in our live map yet\n",
691 query->node_idx); 738 query->node_idx);
692 739
693 response = JOIN_DISALLOW; 740 response.packet.code = JOIN_DISALLOW;
694 goto respond; 741 goto respond;
695 } 742 }
696 743
697 response = JOIN_OK_NO_MAP; 744 response.packet.code = JOIN_OK_NO_MAP;
698 745
699 spin_lock(&dlm_domain_lock); 746 spin_lock(&dlm_domain_lock);
700 dlm = __dlm_lookup_domain_full(query->domain, query->name_len); 747 dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
@@ -713,7 +760,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
713 mlog(0, "disallow join as node %u does not " 760 mlog(0, "disallow join as node %u does not "
714 "have node %u in its nodemap\n", 761 "have node %u in its nodemap\n",
715 query->node_idx, nodenum); 762 query->node_idx, nodenum);
716 response = JOIN_DISALLOW; 763 response.packet.code = JOIN_DISALLOW;
717 goto unlock_respond; 764 goto unlock_respond;
718 } 765 }
719 } 766 }
@@ -733,30 +780,48 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
733 /*If this is a brand new context and we 780 /*If this is a brand new context and we
734 * haven't started our join process yet, then 781 * haven't started our join process yet, then
735 * the other node won the race. */ 782 * the other node won the race. */
736 response = JOIN_OK_NO_MAP; 783 response.packet.code = JOIN_OK_NO_MAP;
737 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { 784 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
738 /* Disallow parallel joins. */ 785 /* Disallow parallel joins. */
739 response = JOIN_DISALLOW; 786 response.packet.code = JOIN_DISALLOW;
740 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) { 787 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
741 mlog(0, "node %u trying to join, but recovery " 788 mlog(0, "node %u trying to join, but recovery "
742 "is ongoing.\n", bit); 789 "is ongoing.\n", bit);
743 response = JOIN_DISALLOW; 790 response.packet.code = JOIN_DISALLOW;
744 } else if (test_bit(bit, dlm->recovery_map)) { 791 } else if (test_bit(bit, dlm->recovery_map)) {
745 mlog(0, "node %u trying to join, but it " 792 mlog(0, "node %u trying to join, but it "
746 "still needs recovery.\n", bit); 793 "still needs recovery.\n", bit);
747 response = JOIN_DISALLOW; 794 response.packet.code = JOIN_DISALLOW;
748 } else if (test_bit(bit, dlm->domain_map)) { 795 } else if (test_bit(bit, dlm->domain_map)) {
749 mlog(0, "node %u trying to join, but it " 796 mlog(0, "node %u trying to join, but it "
750 "is still in the domain! needs recovery?\n", 797 "is still in the domain! needs recovery?\n",
751 bit); 798 bit);
752 response = JOIN_DISALLOW; 799 response.packet.code = JOIN_DISALLOW;
753 } else { 800 } else {
754 /* Alright we're fully a part of this domain 801 /* Alright we're fully a part of this domain
755 * so we keep some state as to who's joining 802 * so we keep some state as to who's joining
756 * and indicate to him that needs to be fixed 803 * and indicate to him that needs to be fixed
757 * up. */ 804 * up. */
758 response = JOIN_OK; 805
759 __dlm_set_joining_node(dlm, query->node_idx); 806 /* Make sure we speak compatible locking protocols. */
807 if (dlm_query_join_proto_check("DLM", bit,
808 &dlm->dlm_locking_proto,
809 &query->dlm_proto)) {
810 response.packet.code =
811 JOIN_PROTOCOL_MISMATCH;
812 } else if (dlm_query_join_proto_check("fs", bit,
813 &dlm->fs_locking_proto,
814 &query->fs_proto)) {
815 response.packet.code =
816 JOIN_PROTOCOL_MISMATCH;
817 } else {
818 response.packet.dlm_minor =
819 query->dlm_proto.pv_minor;
820 response.packet.fs_minor =
821 query->fs_proto.pv_minor;
822 response.packet.code = JOIN_OK;
823 __dlm_set_joining_node(dlm, query->node_idx);
824 }
760 } 825 }
761 826
762 spin_unlock(&dlm->spinlock); 827 spin_unlock(&dlm->spinlock);
@@ -765,9 +830,9 @@ unlock_respond:
765 spin_unlock(&dlm_domain_lock); 830 spin_unlock(&dlm_domain_lock);
766 831
767respond: 832respond:
768 mlog(0, "We respond with %u\n", response); 833 mlog(0, "We respond with %u\n", response.packet.code);
769 834
770 return response; 835 return response.intval;
771} 836}
772 837
773static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, 838static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -899,10 +964,11 @@ static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
899 964
900static int dlm_request_join(struct dlm_ctxt *dlm, 965static int dlm_request_join(struct dlm_ctxt *dlm,
901 int node, 966 int node,
902 enum dlm_query_join_response *response) 967 enum dlm_query_join_response_code *response)
903{ 968{
904 int status, retval; 969 int status;
905 struct dlm_query_join_request join_msg; 970 struct dlm_query_join_request join_msg;
971 union dlm_query_join_response join_resp;
906 972
907 mlog(0, "querying node %d\n", node); 973 mlog(0, "querying node %d\n", node);
908 974
@@ -910,12 +976,15 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
910 join_msg.node_idx = dlm->node_num; 976 join_msg.node_idx = dlm->node_num;
911 join_msg.name_len = strlen(dlm->name); 977 join_msg.name_len = strlen(dlm->name);
912 memcpy(join_msg.domain, dlm->name, join_msg.name_len); 978 memcpy(join_msg.domain, dlm->name, join_msg.name_len);
979 join_msg.dlm_proto = dlm->dlm_locking_proto;
980 join_msg.fs_proto = dlm->fs_locking_proto;
913 981
914 /* copy live node map to join message */ 982 /* copy live node map to join message */
915 byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES); 983 byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
916 984
917 status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, 985 status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
918 sizeof(join_msg), node, &retval); 986 sizeof(join_msg), node,
987 &join_resp.intval);
919 if (status < 0 && status != -ENOPROTOOPT) { 988 if (status < 0 && status != -ENOPROTOOPT) {
920 mlog_errno(status); 989 mlog_errno(status);
921 goto bail; 990 goto bail;
@@ -928,14 +997,41 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
928 if (status == -ENOPROTOOPT) { 997 if (status == -ENOPROTOOPT) {
929 status = 0; 998 status = 0;
930 *response = JOIN_OK_NO_MAP; 999 *response = JOIN_OK_NO_MAP;
931 } else if (retval == JOIN_DISALLOW || 1000 } else if (join_resp.packet.code == JOIN_DISALLOW ||
932 retval == JOIN_OK || 1001 join_resp.packet.code == JOIN_OK_NO_MAP) {
933 retval == JOIN_OK_NO_MAP) { 1002 *response = join_resp.packet.code;
934 *response = retval; 1003 } else if (join_resp.packet.code == JOIN_PROTOCOL_MISMATCH) {
1004 mlog(ML_NOTICE,
1005 "This node requested DLM locking protocol %u.%u and "
1006 "filesystem locking protocol %u.%u. At least one of "
1007 "the protocol versions on node %d is not compatible, "
1008 "disconnecting\n",
1009 dlm->dlm_locking_proto.pv_major,
1010 dlm->dlm_locking_proto.pv_minor,
1011 dlm->fs_locking_proto.pv_major,
1012 dlm->fs_locking_proto.pv_minor,
1013 node);
1014 status = -EPROTO;
1015 *response = join_resp.packet.code;
1016 } else if (join_resp.packet.code == JOIN_OK) {
1017 *response = join_resp.packet.code;
1018 /* Use the same locking protocol as the remote node */
1019 dlm->dlm_locking_proto.pv_minor =
1020 join_resp.packet.dlm_minor;
1021 dlm->fs_locking_proto.pv_minor =
1022 join_resp.packet.fs_minor;
1023 mlog(0,
1024 "Node %d responds JOIN_OK with DLM locking protocol "
1025 "%u.%u and fs locking protocol %u.%u\n",
1026 node,
1027 dlm->dlm_locking_proto.pv_major,
1028 dlm->dlm_locking_proto.pv_minor,
1029 dlm->fs_locking_proto.pv_major,
1030 dlm->fs_locking_proto.pv_minor);
935 } else { 1031 } else {
936 status = -EINVAL; 1032 status = -EINVAL;
937 mlog(ML_ERROR, "invalid response %d from node %u\n", retval, 1033 mlog(ML_ERROR, "invalid response %d from node %u\n",
938 node); 1034 join_resp.packet.code, node);
939 } 1035 }
940 1036
941 mlog(0, "status %d, node %d response is %d\n", status, node, 1037 mlog(0, "status %d, node %d response is %d\n", status, node,
@@ -1008,7 +1104,7 @@ struct domain_join_ctxt {
1008 1104
1009static int dlm_should_restart_join(struct dlm_ctxt *dlm, 1105static int dlm_should_restart_join(struct dlm_ctxt *dlm,
1010 struct domain_join_ctxt *ctxt, 1106 struct domain_join_ctxt *ctxt,
1011 enum dlm_query_join_response response) 1107 enum dlm_query_join_response_code response)
1012{ 1108{
1013 int ret; 1109 int ret;
1014 1110
@@ -1034,7 +1130,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1034{ 1130{
1035 int status = 0, tmpstat, node; 1131 int status = 0, tmpstat, node;
1036 struct domain_join_ctxt *ctxt; 1132 struct domain_join_ctxt *ctxt;
1037 enum dlm_query_join_response response = JOIN_DISALLOW; 1133 enum dlm_query_join_response_code response = JOIN_DISALLOW;
1038 1134
1039 mlog_entry("%p", dlm); 1135 mlog_entry("%p", dlm);
1040 1136
@@ -1450,10 +1546,38 @@ leave:
1450} 1546}
1451 1547
1452/* 1548/*
1453 * dlm_register_domain: one-time setup per "domain" 1549 * Compare a requested locking protocol version against the current one.
1550 *
1551 * If the major numbers are different, they are incompatible.
1552 * If the current minor is greater than the request, they are incompatible.
1553 * If the current minor is less than or equal to the request, they are
1554 * compatible, and the requester should run at the current minor version.
1555 */
1556static int dlm_protocol_compare(struct dlm_protocol_version *existing,
1557 struct dlm_protocol_version *request)
1558{
1559 if (existing->pv_major != request->pv_major)
1560 return 1;
1561
1562 if (existing->pv_minor > request->pv_minor)
1563 return 1;
1564
1565 if (existing->pv_minor < request->pv_minor)
1566 request->pv_minor = existing->pv_minor;
1567
1568 return 0;
1569}
1570
1571/*
1572 * dlm_register_domain: one-time setup per "domain".
1573 *
1574 * The filesystem passes in the requested locking version via proto.
1575 * If registration was successful, proto will contain the negotiated
1576 * locking protocol.
1454 */ 1577 */
1455struct dlm_ctxt * dlm_register_domain(const char *domain, 1578struct dlm_ctxt * dlm_register_domain(const char *domain,
1456 u32 key) 1579 u32 key,
1580 struct dlm_protocol_version *fs_proto)
1457{ 1581{
1458 int ret; 1582 int ret;
1459 struct dlm_ctxt *dlm = NULL; 1583 struct dlm_ctxt *dlm = NULL;
@@ -1496,6 +1620,15 @@ retry:
1496 goto retry; 1620 goto retry;
1497 } 1621 }
1498 1622
1623 if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
1624 mlog(ML_ERROR,
1625 "Requested locking protocol version is not "
1626 "compatible with already registered domain "
1627 "\"%s\"\n", domain);
1628 ret = -EPROTO;
1629 goto leave;
1630 }
1631
1499 __dlm_get(dlm); 1632 __dlm_get(dlm);
1500 dlm->num_joins++; 1633 dlm->num_joins++;
1501 1634
@@ -1526,6 +1659,13 @@ retry:
1526 list_add_tail(&dlm->list, &dlm_domains); 1659 list_add_tail(&dlm->list, &dlm_domains);
1527 spin_unlock(&dlm_domain_lock); 1660 spin_unlock(&dlm_domain_lock);
1528 1661
1662 /*
1663 * Pass the locking protocol version into the join. If the join
1664 * succeeds, it will have the negotiated protocol set.
1665 */
1666 dlm->dlm_locking_proto = dlm_protocol;
1667 dlm->fs_locking_proto = *fs_proto;
1668
1529 ret = dlm_join_domain(dlm); 1669 ret = dlm_join_domain(dlm);
1530 if (ret) { 1670 if (ret) {
1531 mlog_errno(ret); 1671 mlog_errno(ret);
@@ -1533,6 +1673,9 @@ retry:
1533 goto leave; 1673 goto leave;
1534 } 1674 }
1535 1675
1676 /* Tell the caller what locking protocol we negotiated */
1677 *fs_proto = dlm->fs_locking_proto;
1678
1536 ret = 0; 1679 ret = 0;
1537leave: 1680leave:
1538 if (new_ctxt) 1681 if (new_ctxt)
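The join handling and dlm_protocol_compare() above encode one negotiation rule: major numbers must match exactly, and a node asking for a newer minor is pulled down to the minor already in use by the domain. A standalone sketch of that rule, using illustrative types and values that are not part of the patch:

#include <stdio.h>

/* Userspace model of the rule in dlm_protocol_compare(): equal majors are
 * required, and the requester runs at the existing (older) minor. */
struct proto_version { unsigned major, minor; };

static int proto_compare(struct proto_version *existing,
			 struct proto_version *request)
{
	if (existing->major != request->major)
		return 1;			/* incompatible */
	if (existing->minor > request->minor)
		return 1;			/* requester is too old */
	if (existing->minor < request->minor)
		request->minor = existing->minor; /* run at the older minor */
	return 0;
}

int main(void)
{
	struct proto_version domain = { 1, 0 };	/* already-joined nodes */
	struct proto_version req    = { 1, 2 };	/* this node's request */

	if (proto_compare(&domain, &req))
		printf("incompatible\n");
	else
		printf("negotiated %u.%u\n", req.major, req.minor); /* 1.0 */
	return 0;
}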
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 6639baab0798..61a000f8524c 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -60,6 +60,8 @@
60#define MLOG_MASK_PREFIX ML_DLMFS 60#define MLOG_MASK_PREFIX ML_DLMFS
61#include "cluster/masklog.h" 61#include "cluster/masklog.h"
62 62
63#include "ocfs2_lockingver.h"
64
63static const struct super_operations dlmfs_ops; 65static const struct super_operations dlmfs_ops;
64static const struct file_operations dlmfs_file_operations; 66static const struct file_operations dlmfs_file_operations;
65static const struct inode_operations dlmfs_dir_inode_operations; 67static const struct inode_operations dlmfs_dir_inode_operations;
@@ -70,6 +72,16 @@ static struct kmem_cache *dlmfs_inode_cache;
70struct workqueue_struct *user_dlm_worker; 72struct workqueue_struct *user_dlm_worker;
71 73
72/* 74/*
75 * This is the userdlmfs locking protocol version.
76 *
77 * See fs/ocfs2/dlmglue.c for more details on locking versions.
78 */
79static const struct dlm_protocol_version user_locking_protocol = {
80 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
81 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
82};
83
84/*
73 * decodes a set of open flags into a valid lock level and a set of flags. 85 * decodes a set of open flags into a valid lock level and a set of flags.
74 * returns < 0 if we have invalid flags 86 * returns < 0 if we have invalid flags
75 * flags which mean something to us: 87 * flags which mean something to us:
@@ -416,6 +428,7 @@ static int dlmfs_mkdir(struct inode * dir,
416 struct qstr *domain = &dentry->d_name; 428 struct qstr *domain = &dentry->d_name;
417 struct dlmfs_inode_private *ip; 429 struct dlmfs_inode_private *ip;
418 struct dlm_ctxt *dlm; 430 struct dlm_ctxt *dlm;
431 struct dlm_protocol_version proto = user_locking_protocol;
419 432
420 mlog(0, "mkdir %.*s\n", domain->len, domain->name); 433 mlog(0, "mkdir %.*s\n", domain->len, domain->name);
421 434
@@ -435,7 +448,7 @@ static int dlmfs_mkdir(struct inode * dir,
435 448
436 ip = DLMFS_I(inode); 449 ip = DLMFS_I(inode);
437 450
438 dlm = user_dlm_register_context(domain); 451 dlm = user_dlm_register_context(domain, &proto);
439 if (IS_ERR(dlm)) { 452 if (IS_ERR(dlm)) {
440 status = PTR_ERR(dlm); 453 status = PTR_ERR(dlm);
441 mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", 454 mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
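Note that dlmfs_mkdir() hands user_dlm_register_context() a stack copy of user_locking_protocol rather than the constant itself, because registration writes the negotiated minor back through the pointer. A minimal sketch of that calling pattern (the wrapper name is hypothetical):

static struct dlm_ctxt *my_register_sketch(struct qstr *domain)
{
	/* writable copy: the join may lower pv_minor to match older nodes,
	 * and the const template must never be modified */
	struct dlm_protocol_version proto = user_locking_protocol;

	return user_dlm_register_context(domain, &proto);
}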
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index 7d2f578b267d..4cb1d3dae250 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -645,7 +645,8 @@ bail:
645 return status; 645 return status;
646} 646}
647 647
648struct dlm_ctxt *user_dlm_register_context(struct qstr *name) 648struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
649 struct dlm_protocol_version *proto)
649{ 650{
650 struct dlm_ctxt *dlm; 651 struct dlm_ctxt *dlm;
651 u32 dlm_key; 652 u32 dlm_key;
@@ -661,7 +662,7 @@ struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
661 662
662 snprintf(domain, name->len + 1, "%.*s", name->len, name->name); 663 snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
663 664
664 dlm = dlm_register_domain(domain, dlm_key); 665 dlm = dlm_register_domain(domain, dlm_key, proto);
665 if (IS_ERR(dlm)) 666 if (IS_ERR(dlm))
666 mlog_errno(PTR_ERR(dlm)); 667 mlog_errno(PTR_ERR(dlm));
667 668
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h
index c400e93bbf79..39ec27738499 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlm/userdlm.h
@@ -83,7 +83,8 @@ void user_dlm_write_lvb(struct inode *inode,
83void user_dlm_read_lvb(struct inode *inode, 83void user_dlm_read_lvb(struct inode *inode,
84 char *val, 84 char *val,
85 unsigned int len); 85 unsigned int len);
86struct dlm_ctxt *user_dlm_register_context(struct qstr *name); 86struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
87 struct dlm_protocol_version *proto);
87void user_dlm_unregister_context(struct dlm_ctxt *dlm); 88void user_dlm_unregister_context(struct dlm_ctxt *dlm);
88 89
89struct dlmfs_inode_private { 90struct dlmfs_inode_private {
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3867244fb144..351130c9b734 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -43,6 +43,7 @@
43#include <cluster/masklog.h> 43#include <cluster/masklog.h>
44 44
45#include "ocfs2.h" 45#include "ocfs2.h"
46#include "ocfs2_lockingver.h"
46 47
47#include "alloc.h" 48#include "alloc.h"
48#include "dcache.h" 49#include "dcache.h"
@@ -258,6 +259,31 @@ static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
258 .flags = 0, 259 .flags = 0,
259}; 260};
260 261
262/*
263 * This is the filesystem locking protocol version.
264 *
265 * Whenever the filesystem does new things with locks (adds or removes a
266 * lock, orders them differently, does different things underneath a lock),
267 * the version must be changed. The protocol is negotiated when joining
268 * the dlm domain. A node may join the domain if its major version is
269 * identical to all other nodes and its minor version is greater than
270 * or equal to all other nodes. When its minor version is greater than
271 * the other nodes, it will run at the minor version specified by the
272 * other nodes.
273 *
274 * If a locking change is made that will not be compatible with older
275 * versions, the major number must be increased and the minor version set
276 * to zero. If a change merely adds a behavior that can be disabled when
277 * speaking to older versions, the minor version must be increased. If a
278 * change adds a fully backwards compatible change (eg, LVB changes that
279 * are just ignored by older versions), the version does not need to be
280 * updated.
281 */
282const struct dlm_protocol_version ocfs2_locking_protocol = {
283 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
284 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
285};
286
261static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) 287static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
262{ 288{
263 return lockres->l_type == OCFS2_LOCK_TYPE_META || 289 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
@@ -2506,7 +2532,8 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
2506 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str)); 2532 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
2507 2533
2508 /* for now, uuid == domain */ 2534 /* for now, uuid == domain */
2509 dlm = dlm_register_domain(osb->uuid_str, dlm_key); 2535 dlm = dlm_register_domain(osb->uuid_str, dlm_key,
2536 &osb->osb_locking_proto);
2510 if (IS_ERR(dlm)) { 2537 if (IS_ERR(dlm)) {
2511 status = PTR_ERR(dlm); 2538 status = PTR_ERR(dlm);
2512 mlog_errno(status); 2539 mlog_errno(status);
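Putting the dlmglue pieces together, the mount path seeds osb->osb_locking_proto from ocfs2_locking_protocol (see the super.c hunk below) and then lets dlm_register_domain() negotiate it down if needed. A condensed sketch of that flow with a hypothetical wrapper name; the real code is split across ocfs2_initialize_super() and ocfs2_dlm_init():

static int my_dlm_init_sketch(struct ocfs2_super *osb, u32 dlm_key)
{
	struct dlm_ctxt *dlm;

	osb->osb_locking_proto = ocfs2_locking_protocol;	/* request 1.0 */

	dlm = dlm_register_domain(osb->uuid_str, dlm_key,
				  &osb->osb_locking_proto);
	if (IS_ERR(dlm))
		return PTR_ERR(dlm);

	/* pv_minor may now be lower than requested, matching older nodes */
	mlog(0, "running fs locking protocol %u.%u\n",
	     osb->osb_locking_proto.pv_major,
	     osb->osb_locking_proto.pv_minor);
	return 0;
}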
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 5f17243ba501..1d5b0699d0a9 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -116,4 +116,5 @@ void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb);
116struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void); 116struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void);
117void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug); 117void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
118 118
119extern const struct dlm_protocol_version ocfs2_locking_protocol;
119#endif /* DLMGLUE_H */ 120#endif /* DLMGLUE_H */
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index d08480580470..e8b7292e0152 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -251,6 +251,7 @@ struct ocfs2_super
251 struct ocfs2_lock_res osb_rename_lockres; 251 struct ocfs2_lock_res osb_rename_lockres;
252 struct dlm_eviction_cb osb_eviction_cb; 252 struct dlm_eviction_cb osb_eviction_cb;
253 struct ocfs2_dlm_debug *osb_dlm_debug; 253 struct ocfs2_dlm_debug *osb_dlm_debug;
254 struct dlm_protocol_version osb_locking_proto;
254 255
255 struct dentry *osb_debug_root; 256 struct dentry *osb_debug_root;
256 257
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h
new file mode 100644
index 000000000000..82d5eeac0fff
--- /dev/null
+++ b/fs/ocfs2/ocfs2_lockingver.h
@@ -0,0 +1,30 @@
1/* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * ocfs2_lockingver.h
5 *
6 * Defines OCFS2 Locking version values.
7 *
8 * Copyright (C) 2008 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License, version 2, as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19
20#ifndef OCFS2_LOCKINGVER_H
21#define OCFS2_LOCKINGVER_H
22
23/*
24 * The protocol version for ocfs2 cluster locking. See dlmglue.c for
25 * more details.
26 */
27#define OCFS2_LOCKING_PROTOCOL_MAJOR 1
28#define OCFS2_LOCKING_PROTOCOL_MINOR 0
29
30#endif /* OCFS2_LOCKINGVER_H */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 01fe40ee5ea9..bec75aff3d9f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1355,6 +1355,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1355 sb->s_fs_info = osb; 1355 sb->s_fs_info = osb;
1356 sb->s_op = &ocfs2_sops; 1356 sb->s_op = &ocfs2_sops;
1357 sb->s_export_op = &ocfs2_export_ops; 1357 sb->s_export_op = &ocfs2_export_ops;
1358 osb->osb_locking_proto = ocfs2_locking_protocol;
1358 sb->s_time_gran = 1; 1359 sb->s_time_gran = 1;
1359 sb->s_flags |= MS_NOATIME; 1360 sb->s_flags |= MS_NOATIME;
1360 /* this is needed to support O_LARGEFILE */ 1361 /* this is needed to support O_LARGEFILE */
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 739da701ae7b..9a64045ff845 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -319,6 +319,14 @@ void delete_partition(struct gendisk *disk, int part)
319 put_device(&p->dev); 319 put_device(&p->dev);
320} 320}
321 321
322static ssize_t whole_disk_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 return 0;
326}
327static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
328 whole_disk_show, NULL);
329
322void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags) 330void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags)
323{ 331{
324 struct hd_struct *p; 332 struct hd_struct *p;
@@ -352,13 +360,8 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len,
352 device_add(&p->dev); 360 device_add(&p->dev);
353 partition_sysfs_add_subdir(p); 361 partition_sysfs_add_subdir(p);
354 p->dev.uevent_suppress = 0; 362 p->dev.uevent_suppress = 0;
355 if (flags & ADDPART_FLAG_WHOLEDISK) { 363 if (flags & ADDPART_FLAG_WHOLEDISK)
356 static struct attribute addpartattr = { 364 err = device_create_file(&p->dev, &dev_attr_whole_disk);
357 .name = "whole_disk",
358 .mode = S_IRUSR | S_IRGRP | S_IROTH,
359 };
360 err = sysfs_create_file(&p->dev.kobj, &addpartattr);
361 }
362 365
 363        /* suppress uevent if the disk suppresses it */               366        /* suppress uevent if the disk suppresses it */
364 if (!disk->dev.uevent_suppress) 367 if (!disk->dev.uevent_suppress)
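The change above replaces an open-coded sysfs attribute with the stock DEVICE_ATTR()/device_create_file() pattern. The same pattern in a self-contained form, using a hypothetical read-only "ready" attribute:

static ssize_t ready_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR(ready, S_IRUGO, ready_show, NULL);

static int my_add_ready_file(struct device *dev)
{
	return device_create_file(dev, &dev_attr_ready);
}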
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 0871c3dadce1..477904915032 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -77,7 +77,12 @@ void sysfs_remove_group(struct kobject * kobj,
77 77
78 if (grp->name) { 78 if (grp->name) {
79 sd = sysfs_get_dirent(dir_sd, grp->name); 79 sd = sysfs_get_dirent(dir_sd, grp->name);
80 BUG_ON(!sd); 80 if (!sd) {
81 printk(KERN_WARNING "sysfs group %p not found for "
82 "kobject '%s'\n", grp, kobject_name(kobj));
83 WARN_ON(!sd);
84 return;
85 }
81 } else 86 } else
82 sd = sysfs_get(dir_sd); 87 sd = sysfs_get(dir_sd);
83 88
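The grp->name branch that now warns instead of BUG()ing only applies to named groups, i.e. groups whose attributes live in a subdirectory of the kobject. A minimal illustration of such a group (names are hypothetical, not from the patch):

static struct attribute my_state_attr = {
	.name = "state",
	.mode = S_IRUGO,
};

static struct attribute *my_attrs[] = {
	&my_state_attr,
	NULL,
};

static struct attribute_group my_group = {
	.name  = "status",		/* creates <kobj>/status/state */
	.attrs = my_attrs,
};

static int my_add_status_group(struct kobject *kobj)
{
	return sysfs_create_group(kobj, &my_group);
}
/* sysfs_remove_group(kobj, &my_group) now warns and returns, rather than
 * oopsing, if the "status" directory has already gone away. */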
diff --git a/include/asm-arm/arch-pxa/pxa27x_keyboard.h b/include/asm-arm/arch-pxa/pxa27x_keyboard.h
deleted file mode 100644
index 3aaff923b2ca..000000000000
--- a/include/asm-arm/arch-pxa/pxa27x_keyboard.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#define PXAKBD_MAXROW 8
2#define PXAKBD_MAXCOL 8
3
4struct pxa27x_keyboard_platform_data {
5 int nr_rows, nr_cols;
6 int keycodes[PXAKBD_MAXROW][PXAKBD_MAXCOL];
7 int gpio_modes[PXAKBD_MAXROW + PXAKBD_MAXCOL];
8
9#ifdef CONFIG_PM
10 u32 reg_kpc;
11 u32 reg_kprec;
12#endif
13};
diff --git a/include/asm-arm/arch-pxa/pxa27x_keypad.h b/include/asm-arm/arch-pxa/pxa27x_keypad.h
new file mode 100644
index 000000000000..644f7609b523
--- /dev/null
+++ b/include/asm-arm/arch-pxa/pxa27x_keypad.h
@@ -0,0 +1,56 @@
1#ifndef __ASM_ARCH_PXA27x_KEYPAD_H
2#define __ASM_ARCH_PXA27x_KEYPAD_H
3
4#include <linux/input.h>
5
6#define MAX_MATRIX_KEY_ROWS (8)
7#define MAX_MATRIX_KEY_COLS (8)
8
9/* pxa3xx keypad platform specific parameters
10 *
11 * NOTE:
12 * 1. direct_key_num indicates the number of keys in the direct keypad
13 * _plus_ the number of rotary-encoder sensor inputs, this can be
14 * left as 0 if only rotary encoders are enabled, the driver will
15 * automatically calculate this
16 *
17 * 2. direct_key_map is the key code map for the direct keys, if rotary
18 * encoder(s) are enabled, direct key 0/1(2/3) will be ignored
19 *
20 * 3. rotary can be either interpreted as a relative input event (e.g.
21 * REL_WHEEL/REL_HWHEEL) or specific keys (e.g. UP/DOWN/LEFT/RIGHT)
22 *
23 * 4. matrix key and direct key will use the same debounce_interval by
24 * default, which should be sufficient in most cases
25 */
26struct pxa27x_keypad_platform_data {
27
28 /* code map for the matrix keys */
29 unsigned int matrix_key_rows;
30 unsigned int matrix_key_cols;
31 unsigned int *matrix_key_map;
32 int matrix_key_map_size;
33
34 /* direct keys */
35 int direct_key_num;
36 unsigned int direct_key_map[8];
37
38 /* rotary encoders 0 */
39 int enable_rotary0;
40 int rotary0_rel_code;
41 int rotary0_up_key;
42 int rotary0_down_key;
43
44 /* rotary encoders 1 */
45 int enable_rotary1;
46 int rotary1_rel_code;
47 int rotary1_up_key;
48 int rotary1_down_key;
49
50 /* key debounce interval */
51 unsigned int debounce_interval;
52};
53
54#define KEY(row, col, val) (((row) << 28) | ((col) << 24) | (val))
55
56#endif /* __ASM_ARCH_PXA27x_KEYPAD_H */
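The notes in the header above describe how a board supplies its matrix layout and debounce setting. A hedged sketch of the platform data a board file might pass to the pxa27x_keypad driver; the key choices and sizes are invented for illustration:

static unsigned int my_board_matrix_keys[] = {
	KEY(0, 0, KEY_A),
	KEY(0, 1, KEY_B),
	KEY(1, 0, KEY_ENTER),
	KEY(1, 1, KEY_ESC),
};

static struct pxa27x_keypad_platform_data my_board_keypad_info = {
	.matrix_key_rows	= 2,
	.matrix_key_cols	= 2,
	.matrix_key_map		= my_board_matrix_keys,
	.matrix_key_map_size	= ARRAY_SIZE(my_board_matrix_keys),
	.debounce_interval	= 30,
};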
diff --git a/include/asm-arm/arch-pxa/tosa.h b/include/asm-arm/arch-pxa/tosa.h
index c3364a2c4758..c05e4faf85a6 100644
--- a/include/asm-arm/arch-pxa/tosa.h
+++ b/include/asm-arm/arch-pxa/tosa.h
@@ -163,4 +163,34 @@
163 163
164extern struct platform_device tosascoop_jc_device; 164extern struct platform_device tosascoop_jc_device;
165extern struct platform_device tosascoop_device; 165extern struct platform_device tosascoop_device;
166
167#define TOSA_KEY_SYNC KEY_102ND /* ??? */
168
169
170#ifndef CONFIG_KEYBOARD_TOSA_USE_EXT_KEYCODES
171#define TOSA_KEY_RECORD KEY_YEN
172#define TOSA_KEY_ADDRESSBOOK KEY_KATAKANA
173#define TOSA_KEY_CANCEL KEY_ESC
174#define TOSA_KEY_CENTER KEY_HIRAGANA
175#define TOSA_KEY_OK KEY_HENKAN
176#define TOSA_KEY_CALENDAR KEY_KATAKANAHIRAGANA
177#define TOSA_KEY_HOMEPAGE KEY_HANGEUL
178#define TOSA_KEY_LIGHT KEY_MUHENKAN
179#define TOSA_KEY_MENU KEY_HANJA
180#define TOSA_KEY_FN KEY_RIGHTALT
181#define TOSA_KEY_MAIL KEY_ZENKAKUHANKAKU
182#else
183#define TOSA_KEY_RECORD KEY_RECORD
184#define TOSA_KEY_ADDRESSBOOK KEY_ADDRESSBOOK
185#define TOSA_KEY_CANCEL KEY_CANCEL
186#define TOSA_KEY_CENTER KEY_SELECT /* ??? */
187#define TOSA_KEY_OK KEY_OK
188#define TOSA_KEY_CALENDAR KEY_CALENDAR
189#define TOSA_KEY_HOMEPAGE KEY_HOMEPAGE
190#define TOSA_KEY_LIGHT KEY_KBDILLUMTOGGLE
191#define TOSA_KEY_MENU KEY_MENU
192#define TOSA_KEY_FN KEY_FN
193#define TOSA_KEY_MAIL KEY_MAIL
194#endif
195
166#endif /* _ASM_ARCH_TOSA_H_ */ 196#endif /* _ASM_ARCH_TOSA_H_ */
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
new file mode 100644
index 000000000000..a5978f18ca40
--- /dev/null
+++ b/include/linux/enclosure.h
@@ -0,0 +1,129 @@
1/*
2 * Enclosure Services
3 *
4 * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
5 *
6**-----------------------------------------------------------------------------
7**
8** This program is free software; you can redistribute it and/or
9** modify it under the terms of the GNU General Public License
10** version 2 as published by the Free Software Foundation.
11**
12** This program is distributed in the hope that it will be useful,
13** but WITHOUT ANY WARRANTY; without even the implied warranty of
14** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15** GNU General Public License for more details.
16**
17** You should have received a copy of the GNU General Public License
18** along with this program; if not, write to the Free Software
19** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20**
21**-----------------------------------------------------------------------------
22*/
23#ifndef _LINUX_ENCLOSURE_H_
24#define _LINUX_ENCLOSURE_H_
25
26#include <linux/device.h>
27#include <linux/list.h>
28
29/* A few generic types ... taken from ses-2 */
30enum enclosure_component_type {
31 ENCLOSURE_COMPONENT_DEVICE = 0x01,
32 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
33};
34
35/* ses-2 common element status */
36enum enclosure_status {
37 ENCLOSURE_STATUS_UNSUPPORTED = 0,
38 ENCLOSURE_STATUS_OK,
39 ENCLOSURE_STATUS_CRITICAL,
40 ENCLOSURE_STATUS_NON_CRITICAL,
41 ENCLOSURE_STATUS_UNRECOVERABLE,
42 ENCLOSURE_STATUS_NOT_INSTALLED,
43 ENCLOSURE_STATUS_UNKNOWN,
44 ENCLOSURE_STATUS_UNAVAILABLE,
45};
46
47/* SFF-8485 activity light settings */
48enum enclosure_component_setting {
49 ENCLOSURE_SETTING_DISABLED = 0,
50 ENCLOSURE_SETTING_ENABLED = 1,
51 ENCLOSURE_SETTING_BLINK_A_ON_OFF = 2,
52 ENCLOSURE_SETTING_BLINK_A_OFF_ON = 3,
53 ENCLOSURE_SETTING_BLINK_B_ON_OFF = 6,
54 ENCLOSURE_SETTING_BLINK_B_OFF_ON = 7,
55};
56
57struct enclosure_device;
58struct enclosure_component;
59struct enclosure_component_callbacks {
60 void (*get_status)(struct enclosure_device *,
61 struct enclosure_component *);
62 int (*set_status)(struct enclosure_device *,
63 struct enclosure_component *,
64 enum enclosure_status);
65 void (*get_fault)(struct enclosure_device *,
66 struct enclosure_component *);
67 int (*set_fault)(struct enclosure_device *,
68 struct enclosure_component *,
69 enum enclosure_component_setting);
70 void (*get_active)(struct enclosure_device *,
71 struct enclosure_component *);
72 int (*set_active)(struct enclosure_device *,
73 struct enclosure_component *,
74 enum enclosure_component_setting);
75 void (*get_locate)(struct enclosure_device *,
76 struct enclosure_component *);
77 int (*set_locate)(struct enclosure_device *,
78 struct enclosure_component *,
79 enum enclosure_component_setting);
80};
81
82
83struct enclosure_component {
84 void *scratch;
85 struct class_device cdev;
86 enum enclosure_component_type type;
87 int number;
88 int fault;
89 int active;
90 int locate;
91 enum enclosure_status status;
92};
93
94struct enclosure_device {
95 void *scratch;
96 struct list_head node;
97 struct class_device cdev;
98 struct enclosure_component_callbacks *cb;
99 int components;
100 struct enclosure_component component[0];
101};
102
103static inline struct enclosure_device *
104to_enclosure_device(struct class_device *dev)
105{
106 return container_of(dev, struct enclosure_device, cdev);
107}
108
109static inline struct enclosure_component *
110to_enclosure_component(struct class_device *dev)
111{
112 return container_of(dev, struct enclosure_component, cdev);
113}
114
115struct enclosure_device *
116enclosure_register(struct device *, const char *, int,
117 struct enclosure_component_callbacks *);
118void enclosure_unregister(struct enclosure_device *);
119struct enclosure_component *
120enclosure_component_register(struct enclosure_device *, unsigned int,
121 enum enclosure_component_type, const char *);
122int enclosure_add_device(struct enclosure_device *enclosure, int component,
123 struct device *dev);
124int enclosure_remove_device(struct enclosure_device *enclosure, int component);
125struct enclosure_device *enclosure_find(struct device *dev);
126int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
127 void *data);
128
129#endif /* _LINUX_ENCLOSURE_H_ */
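The header above defines the whole enclosure services API: an enclosure device, its components, and a callback table the low-level driver fills in. A hedged sketch of how a driver might use it; everything prefixed my_ is hypothetical, and error handling is assumed to follow the usual ERR_PTR convention:

static void my_get_fault(struct enclosure_device *edev,
			 struct enclosure_component *comp)
{
	comp->fault = 0;	/* would query the enclosure hardware here */
}

static int my_set_fault(struct enclosure_device *edev,
			struct enclosure_component *comp,
			enum enclosure_component_setting val)
{
	return 0;		/* would program the fault LED here */
}

static struct enclosure_component_callbacks my_enclosure_cb = {
	.get_fault = my_get_fault,
	.set_fault = my_set_fault,
};

static int my_enclosure_attach(struct device *parent, struct device *disk)
{
	struct enclosure_device *edev;

	edev = enclosure_register(parent, "my_enclosure", 4, &my_enclosure_cb);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	enclosure_component_register(edev, 0,
				     ENCLOSURE_COMPONENT_ARRAY_DEVICE, "slot0");
	return enclosure_add_device(edev, 0, disk);
}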
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 8371b664b41f..203591e23210 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -225,11 +225,14 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
225 * idea of the (in)accuracy of timers. Timer values are rounded up to 225 * idea of the (in)accuracy of timers. Timer values are rounded up to
 226 * these resolution values. 226 * these resolution values.
227 */ 227 */
228# define KTIME_HIGH_RES (ktime_t) { .tv64 = 1 } 228# define HIGH_RES_NSEC 1
229# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC }
230# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
229# define KTIME_MONOTONIC_RES KTIME_HIGH_RES 231# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
230 232
231#else 233#else
232 234
235# define MONOTONIC_RES_NSEC LOW_RES_NSEC
233# define KTIME_MONOTONIC_RES KTIME_LOW_RES 236# define KTIME_MONOTONIC_RES KTIME_LOW_RES
234 237
235/* 238/*
diff --git a/include/linux/input.h b/include/linux/input.h
index 056a17a4f34f..1bdc39a8c76c 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1020,7 +1020,6 @@ struct ff_effect {
1020 * @going_away: marks devices that are in a middle of unregistering and 1020 * @going_away: marks devices that are in a middle of unregistering and
1021 * causes input_open_device*() fail with -ENODEV. 1021 * causes input_open_device*() fail with -ENODEV.
1022 * @dev: driver model's view of this device 1022 * @dev: driver model's view of this device
1023 * @cdev: union for struct device pointer
1024 * @h_list: list of input handles associated with the device. When 1023 * @h_list: list of input handles associated with the device. When
1025 * accessing the list dev->mutex must be held 1024 * accessing the list dev->mutex must be held
1026 * @node: used to place the device onto input_dev_list 1025 * @node: used to place the device onto input_dev_list
@@ -1085,9 +1084,6 @@ struct input_dev {
1085 int going_away; 1084 int going_away;
1086 1085
1087 struct device dev; 1086 struct device dev;
1088 union { /* temporarily so while we switching to struct device */
1089 struct device *dev;
1090 } cdev;
1091 1087
1092 struct list_head h_list; 1088 struct list_head h_list;
1093 struct list_head node; 1089 struct list_head node;
@@ -1311,6 +1307,9 @@ static inline void input_set_abs_params(struct input_dev *dev, int axis, int min
1311 dev->absbit[BIT_WORD(axis)] |= BIT_MASK(axis); 1307 dev->absbit[BIT_WORD(axis)] |= BIT_MASK(axis);
1312} 1308}
1313 1309
1310int input_get_keycode(struct input_dev *dev, int scancode, int *keycode);
1311int input_set_keycode(struct input_dev *dev, int scancode, int keycode);
1312
1314extern struct class input_class; 1313extern struct class input_class;
1315 1314
1316/** 1315/**
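The two new accessors declared above give drivers and handlers a sanctioned way to read and change scancode-to-keycode mappings instead of poking input_dev internals. A hedged sketch (the scancode and key are arbitrary):

static int my_remap_example(struct input_dev *dev)
{
	int old_key;
	int error;

	error = input_get_keycode(dev, 0x1e, &old_key);
	if (error)
		return error;

	printk(KERN_INFO "scancode 0x1e was key %d\n", old_key);
	return input_set_keycode(dev, 0x1e, KEY_B);
}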
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index a6ddec141f96..36c542b70c6d 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -316,7 +316,8 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
316 * idea of the (in)accuracy of timers. Timer values are rounded up to 316 * idea of the (in)accuracy of timers. Timer values are rounded up to
 317 * these resolution values. 317 * these resolution values.
318 */ 318 */
319#define KTIME_LOW_RES (ktime_t){ .tv64 = TICK_NSEC } 319#define LOW_RES_NSEC TICK_NSEC
320#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
320 321
321/* Get the monotonic time in timespec format: */ 322/* Get the monotonic time in timespec format: */
322extern void ktime_get_ts(struct timespec *ts); 323extern void ktime_get_ts(struct timespec *ts);
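Together with the hrtimer.h hunk above, this factors the timer resolution out into plain nanosecond constants (HIGH_RES_NSEC, LOW_RES_NSEC, MONOTONIC_RES_NSEC), so callers that need a raw nsec value, presumably resolution reporting along the lines of hrtimer_get_res(), no longer have to unpack a ktime_t. A trivial sketch of such a caller (the helper name is made up):

static void my_get_monotonic_res(struct timespec *tp)
{
	tp->tv_sec  = 0;
	tp->tv_nsec = MONOTONIC_RES_NSEC;   /* HIGH_RES_NSEC or LOW_RES_NSEC */
}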
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index 318a909e7ae1..5ffec8ad6964 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -45,8 +45,8 @@
45/* initiator tags; opaque for target */ 45/* initiator tags; opaque for target */
46typedef uint32_t __bitwise__ itt_t; 46typedef uint32_t __bitwise__ itt_t;
47/* below makes sense only for initiator that created this tag */ 47/* below makes sense only for initiator that created this tag */
48#define build_itt(itt, id, age) ((__force itt_t)\ 48#define build_itt(itt, age) ((__force itt_t)\
49 ((itt) | ((id) << ISCSI_CID_SHIFT) | ((age) << ISCSI_AGE_SHIFT))) 49 ((itt) | ((age) << ISCSI_AGE_SHIFT)))
50#define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK) 50#define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK)
51#define RESERVED_ITT ((__force itt_t)0xffffffff) 51#define RESERVED_ITT ((__force itt_t)0xffffffff)
52 52
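With the connection id dropped from build_itt(), an initiator task tag is now just the task index in the low ISCSI_ITT_MASK bits plus a 4-bit age at ISCSI_AGE_SHIFT. A standalone model of the packing, with illustrative values:

#include <stdio.h>
#include <stdint.h>

#define ITT_MASK   0x00000fffu		/* models ISCSI_ITT_MASK */
#define AGE_SHIFT  28			/* models ISCSI_AGE_SHIFT */
#define AGE_MASK   (0xfu << AGE_SHIFT)

static uint32_t model_build_itt(uint32_t itt, uint32_t age)
{
	return itt | (age << AGE_SHIFT);
}

int main(void)
{
	uint32_t tag = model_build_itt(0x2a, 3);

	printf("task index %u, age %u\n",
	       tag & ITT_MASK, (tag & AGE_MASK) >> AGE_SHIFT); /* 42, 3 */
	return 0;
}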
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 889f51fabab9..7b90b63fb5c7 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -70,8 +70,6 @@ enum {
70#define ISCSI_SUSPEND_BIT 1 70#define ISCSI_SUSPEND_BIT 1
71 71
72#define ISCSI_ITT_MASK (0xfff) 72#define ISCSI_ITT_MASK (0xfff)
73#define ISCSI_CID_SHIFT 12
74#define ISCSI_CID_MASK (0xffff << ISCSI_CID_SHIFT)
75#define ISCSI_AGE_SHIFT 28 73#define ISCSI_AGE_SHIFT 28
76#define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT) 74#define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
77 75
@@ -135,6 +133,14 @@ static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
135 return (void*)ctask->hdr + ctask->hdr_len; 133 return (void*)ctask->hdr + ctask->hdr_len;
136} 134}
137 135
136/* Connection's states */
137enum {
138 ISCSI_CONN_INITIAL_STAGE,
139 ISCSI_CONN_STARTED,
140 ISCSI_CONN_STOPPED,
141 ISCSI_CONN_CLEANUP_WAIT,
142};
143
138struct iscsi_conn { 144struct iscsi_conn {
139 struct iscsi_cls_conn *cls_conn; /* ptr to class connection */ 145 struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
140 void *dd_data; /* iscsi_transport data */ 146 void *dd_data; /* iscsi_transport data */
@@ -227,6 +233,17 @@ struct iscsi_pool {
227 int max; /* Max number of elements */ 233 int max; /* Max number of elements */
228}; 234};
229 235
236/* Session's states */
237enum {
238 ISCSI_STATE_FREE = 1,
239 ISCSI_STATE_LOGGED_IN,
240 ISCSI_STATE_FAILED,
241 ISCSI_STATE_TERMINATE,
242 ISCSI_STATE_IN_RECOVERY,
243 ISCSI_STATE_RECOVERY_FAILED,
244 ISCSI_STATE_LOGGING_OUT,
245};
246
230struct iscsi_session { 247struct iscsi_session {
231 /* 248 /*
232 * Syncs up the scsi eh thread with the iscsi eh thread when sending 249 * Syncs up the scsi eh thread with the iscsi eh thread when sending
@@ -325,6 +342,10 @@ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
325#define session_to_cls(_sess) \ 342#define session_to_cls(_sess) \
326 hostdata_session(_sess->host->hostdata) 343 hostdata_session(_sess->host->hostdata)
327 344
345#define iscsi_session_printk(prefix, _sess, fmt, a...) \
346 iscsi_cls_session_printk(prefix, \
347 (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
348
328/* 349/*
329 * connection management 350 * connection management
330 */ 351 */
@@ -339,6 +360,9 @@ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
339extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, 360extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
340 enum iscsi_param param, char *buf); 361 enum iscsi_param param, char *buf);
341 362
363#define iscsi_conn_printk(prefix, _c, fmt, a...) \
364 iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
365
342/* 366/*
343 * pdu and task processing 367 * pdu and task processing
344 */ 368 */
@@ -349,8 +373,6 @@ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
349 char *, uint32_t); 373 char *, uint32_t);
350extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *, 374extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
351 char *, int); 375 char *, int);
352extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
353 char *, int);
354extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *, 376extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
355 uint32_t *); 377 uint32_t *);
356extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask); 378extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
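The new printk wrappers let an LLD log against the iSCSI class objects without dereferencing the embedded struct device by hand. A one-line usage sketch wrapped in a hypothetical helper:

static void my_report_reject(struct iscsi_conn *conn, u32 itt)
{
	iscsi_conn_printk(KERN_ERR, conn, "itt 0x%x rejected\n", itt);
}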
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 82251575a9b4..1f74bcd603fe 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -235,6 +235,20 @@ static inline int scsi_status_is_good(int status)
235#define TYPE_RBC 0x0e 235#define TYPE_RBC 0x0e
236#define TYPE_NO_LUN 0x7f 236#define TYPE_NO_LUN 0x7f
237 237
238/* SCSI protocols; these are taken from SPC-3 section 7.5 */
239enum scsi_protocol {
240 SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
241 SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
242 SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
243 SCSI_PROTOCOL_SBP = 3, /* firewire */
244 SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
245 SCSI_PROTOCOL_ISCSI = 5,
246 SCSI_PROTOCOL_SAS = 6,
247 SCSI_PROTOCOL_ADT = 7, /* Media Changers */
248 SCSI_PROTOCOL_ATA = 8,
249 SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
250};
251
238/* Returns a human-readable name for the device */ 252/* Returns a human-readable name for the device */
239extern const char * scsi_device_type(unsigned type); 253extern const char * scsi_device_type(unsigned type);
240 254
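The enum above simply mirrors the SPC-3 protocol identifier field. A hypothetical helper, not part of the patch, showing how a transport class might turn it into a printable name:

static const char *my_scsi_protocol_name(enum scsi_protocol proto)
{
	switch (proto) {
	case SCSI_PROTOCOL_FCP:    return "fcp";
	case SCSI_PROTOCOL_SPI:    return "spi";
	case SCSI_PROTOCOL_SAS:    return "sas";
	case SCSI_PROTOCOL_ISCSI:  return "iscsi";
	case SCSI_PROTOCOL_ATA:    return "ata";
	case SCSI_PROTOCOL_UNSPEC: return "unspec";
	default:                   return "unknown";
	}
}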
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 5c58d594126a..d1299e999723 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -280,39 +280,45 @@ struct scsi_host_template {
280 * If the host wants to be called before the scan starts, but 280 * If the host wants to be called before the scan starts, but
281 * after the midlayer has set up ready for the scan, it can fill 281 * after the midlayer has set up ready for the scan, it can fill
282 * in this function. 282 * in this function.
283 *
284 * Status: OPTIONAL
283 */ 285 */
284 void (* scan_start)(struct Scsi_Host *); 286 void (* scan_start)(struct Scsi_Host *);
285 287
286 /* 288 /*
287 * fill in this function to allow the queue depth of this host 289 * Fill in this function to allow the queue depth of this host
288 * to be changeable (on a per device basis). returns either 290 * to be changeable (on a per device basis). Returns either
289 * the current queue depth setting (may be different from what 291 * the current queue depth setting (may be different from what
290 * was passed in) or an error. An error should only be 292 * was passed in) or an error. An error should only be
291 * returned if the requested depth is legal but the driver was 293 * returned if the requested depth is legal but the driver was
292 * unable to set it. If the requested depth is illegal, the 294 * unable to set it. If the requested depth is illegal, the
293 * driver should set and return the closest legal queue depth. 295 * driver should set and return the closest legal queue depth.
294 * 296 *
297 * Status: OPTIONAL
295 */ 298 */
296 int (* change_queue_depth)(struct scsi_device *, int); 299 int (* change_queue_depth)(struct scsi_device *, int);
297 300
298 /* 301 /*
299 * fill in this function to allow the changing of tag types 302 * Fill in this function to allow the changing of tag types
300 * (this also allows the enabling/disabling of tag command 303 * (this also allows the enabling/disabling of tag command
301 * queueing). An error should only be returned if something 304 * queueing). An error should only be returned if something
302 * went wrong in the driver while trying to set the tag type. 305 * went wrong in the driver while trying to set the tag type.
303 * If the driver doesn't support the requested tag type, then 306 * If the driver doesn't support the requested tag type, then
304 * it should set the closest type it does support without 307 * it should set the closest type it does support without
305 * returning an error. Returns the actual tag type set. 308 * returning an error. Returns the actual tag type set.
309 *
310 * Status: OPTIONAL
306 */ 311 */
307 int (* change_queue_type)(struct scsi_device *, int); 312 int (* change_queue_type)(struct scsi_device *, int);
308 313
309 /* 314 /*
310 * This function determines the bios parameters for a given 315 * This function determines the BIOS parameters for a given
311 * harddisk. These tend to be numbers that are made up by 316 * harddisk. These tend to be numbers that are made up by
312 * the host adapter. Parameters: 317 * the host adapter. Parameters:
313 * size, device, list (heads, sectors, cylinders) 318 * size, device, list (heads, sectors, cylinders)
314 * 319 *
315 * Status: OPTIONAL */ 320 * Status: OPTIONAL
321 */
316 int (* bios_param)(struct scsi_device *, struct block_device *, 322 int (* bios_param)(struct scsi_device *, struct block_device *,
317 sector_t, int []); 323 sector_t, int []);
318 324
@@ -351,7 +357,7 @@ struct scsi_host_template {
351 357
352 /* 358 /*
353 * This determines if we will use a non-interrupt driven 359 * This determines if we will use a non-interrupt driven
354 * or an interrupt driven scheme, It is set to the maximum number 360 * or an interrupt driven scheme. It is set to the maximum number
355 * of simultaneous commands a given host adapter will accept. 361 * of simultaneous commands a given host adapter will accept.
356 */ 362 */
357 int can_queue; 363 int can_queue;
@@ -372,12 +378,12 @@ struct scsi_host_template {
372 unsigned short sg_tablesize; 378 unsigned short sg_tablesize;
373 379
374 /* 380 /*
375 * If the host adapter has limitations beside segment count 381 * Set this if the host adapter has limitations beside segment count.
376 */ 382 */
377 unsigned short max_sectors; 383 unsigned short max_sectors;
378 384
379 /* 385 /*
380 * dma scatter gather segment boundary limit. a segment crossing this 386 * DMA scatter gather segment boundary limit. A segment crossing this
381 * boundary will be split in two. 387 * boundary will be split in two.
382 */ 388 */
383 unsigned long dma_boundary; 389 unsigned long dma_boundary;
@@ -386,7 +392,7 @@ struct scsi_host_template {
386 * This specifies "machine infinity" for host templates which don't 392 * This specifies "machine infinity" for host templates which don't
387 * limit the transfer size. Note this limit represents an absolute 393 * limit the transfer size. Note this limit represents an absolute
388 * maximum, and may be over the transfer limits allowed for 394 * maximum, and may be over the transfer limits allowed for
389 * individual devices (e.g. 256 for SCSI-1) 395 * individual devices (e.g. 256 for SCSI-1).
390 */ 396 */
391#define SCSI_DEFAULT_MAX_SECTORS 1024 397#define SCSI_DEFAULT_MAX_SECTORS 1024
392 398
@@ -413,12 +419,12 @@ struct scsi_host_template {
413 unsigned supported_mode:2; 419 unsigned supported_mode:2;
414 420
415 /* 421 /*
416 * true if this host adapter uses unchecked DMA onto an ISA bus. 422 * True if this host adapter uses unchecked DMA onto an ISA bus.
417 */ 423 */
418 unsigned unchecked_isa_dma:1; 424 unsigned unchecked_isa_dma:1;
419 425
420 /* 426 /*
421 * true if this host adapter can make good use of clustering. 427 * True if this host adapter can make good use of clustering.
422 * I originally thought that if the tablesize was large that it 428 * I originally thought that if the tablesize was large that it
423 * was a waste of CPU cycles to prepare a cluster list, but 429 * was a waste of CPU cycles to prepare a cluster list, but
424 * it works out that the Buslogic is faster if you use a smaller 430 * it works out that the Buslogic is faster if you use a smaller
@@ -428,7 +434,7 @@ struct scsi_host_template {
428 unsigned use_clustering:1; 434 unsigned use_clustering:1;
429 435
430 /* 436 /*
431 * True for emulated SCSI host adapters (e.g. ATAPI) 437 * True for emulated SCSI host adapters (e.g. ATAPI).
432 */ 438 */
433 unsigned emulated:1; 439 unsigned emulated:1;
434 440
@@ -438,12 +444,12 @@ struct scsi_host_template {
438 unsigned skip_settle_delay:1; 444 unsigned skip_settle_delay:1;
439 445
440 /* 446 /*
441 * ordered write support 447 * True if we are using ordered write support.
442 */ 448 */
443 unsigned ordered_tag:1; 449 unsigned ordered_tag:1;
444 450
445 /* 451 /*
446 * Countdown for host blocking with no commands outstanding 452 * Countdown for host blocking with no commands outstanding.
447 */ 453 */
448 unsigned int max_host_blocked; 454 unsigned int max_host_blocked;
449 455
@@ -522,8 +528,8 @@ struct Scsi_Host {
522 struct scsi_transport_template *transportt; 528 struct scsi_transport_template *transportt;
523 529
524 /* 530 /*
525 * area to keep a shared tag map (if needed, will be 531 * Area to keep a shared tag map (if needed, will be
526 * NULL if not) 532 * NULL if not).
527 */ 533 */
528 struct blk_queue_tag *bqt; 534 struct blk_queue_tag *bqt;
529 535
@@ -596,16 +602,16 @@ struct Scsi_Host {
596 /* 602 /*
597 * Host uses correct SCSI ordering not PC ordering. The bit is 603 * Host uses correct SCSI ordering not PC ordering. The bit is
598 * set for the minority of drivers whose authors actually read 604 * set for the minority of drivers whose authors actually read
599 * the spec ;) 605 * the spec ;).
600 */ 606 */
601 unsigned reverse_ordering:1; 607 unsigned reverse_ordering:1;
602 608
603 /* 609 /*
604 * ordered write support 610 * Ordered write support
605 */ 611 */
606 unsigned ordered_tag:1; 612 unsigned ordered_tag:1;
607 613
608 /* task mgmt function in progress */ 614 /* Task mgmt function in progress */
609 unsigned tmf_in_progress:1; 615 unsigned tmf_in_progress:1;
610 616
611 /* Asynchronous scan in progress */ 617 /* Asynchronous scan in progress */
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 404f11d331d6..dbc96ef4cc72 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -149,13 +149,6 @@ extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
149extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, 149extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
150 char *data, uint32_t data_size); 150 char *data, uint32_t data_size);
151 151
152
153/* Connection's states */
154#define ISCSI_CONN_INITIAL_STAGE 0
155#define ISCSI_CONN_STARTED 1
156#define ISCSI_CONN_STOPPED 2
157#define ISCSI_CONN_CLEANUP_WAIT 3
158
159struct iscsi_cls_conn { 152struct iscsi_cls_conn {
160 struct list_head conn_list; /* item in connlist */ 153 struct list_head conn_list; /* item in connlist */
161 void *dd_data; /* LLD private data */ 154 void *dd_data; /* LLD private data */
@@ -169,27 +162,31 @@ struct iscsi_cls_conn {
169#define iscsi_dev_to_conn(_dev) \ 162#define iscsi_dev_to_conn(_dev) \
170 container_of(_dev, struct iscsi_cls_conn, dev) 163 container_of(_dev, struct iscsi_cls_conn, dev)
171 164
172/* Session's states */ 165#define iscsi_conn_to_session(_conn) \
173#define ISCSI_STATE_FREE 1 166 iscsi_dev_to_session(_conn->dev.parent)
174#define ISCSI_STATE_LOGGED_IN 2 167
175#define ISCSI_STATE_FAILED 3 168/* iscsi class session state */
176#define ISCSI_STATE_TERMINATE 4 169enum {
177#define ISCSI_STATE_IN_RECOVERY 5 170 ISCSI_SESSION_LOGGED_IN,
178#define ISCSI_STATE_RECOVERY_FAILED 6 171 ISCSI_SESSION_FAILED,
179#define ISCSI_STATE_LOGGING_OUT 7 172 ISCSI_SESSION_FREE,
173};
180 174
181struct iscsi_cls_session { 175struct iscsi_cls_session {
182 struct list_head sess_list; /* item in session_list */ 176 struct list_head sess_list; /* item in session_list */
183 struct list_head host_list; 177 struct list_head host_list;
184 struct iscsi_transport *transport; 178 struct iscsi_transport *transport;
179 spinlock_t lock;
180 struct work_struct scan_work;
181 struct work_struct unbind_work;
185 182
186 /* recovery fields */ 183 /* recovery fields */
187 int recovery_tmo; 184 int recovery_tmo;
188 struct delayed_work recovery_work; 185 struct delayed_work recovery_work;
189 struct work_struct unbind_work;
190 186
191 int target_id; 187 int target_id;
192 188
189 int state;
193 int sid; /* session id */ 190 int sid; /* session id */
194 void *dd_data; /* LLD private data */ 191 void *dd_data; /* LLD private data */
195 struct device dev; /* sysfs transport/container device */ 192 struct device dev; /* sysfs transport/container device */
@@ -206,14 +203,22 @@ struct iscsi_cls_session {
206 203
207struct iscsi_host { 204struct iscsi_host {
208 struct list_head sessions; 205 struct list_head sessions;
206 atomic_t nr_scans;
209 struct mutex mutex; 207 struct mutex mutex;
210 struct workqueue_struct *unbind_workq; 208 struct workqueue_struct *scan_workq;
211 char unbind_workq_name[KOBJ_NAME_LEN]; 209 char scan_workq_name[KOBJ_NAME_LEN];
212}; 210};
213 211
214/* 212/*
215 * session and connection functions that can be used by HW iSCSI LLDs 213 * session and connection functions that can be used by HW iSCSI LLDs
216 */ 214 */
215#define iscsi_cls_session_printk(prefix, _cls_session, fmt, a...) \
216 dev_printk(prefix, &(_cls_session)->dev, fmt, ##a)
217
218#define iscsi_cls_conn_printk(prefix, _cls_conn, fmt, a...) \
219 dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a)
220
221extern int iscsi_session_chkready(struct iscsi_cls_session *session);
217extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost, 222extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
218 struct iscsi_transport *transport); 223 struct iscsi_transport *transport);
219extern int iscsi_add_session(struct iscsi_cls_session *session, 224extern int iscsi_add_session(struct iscsi_cls_session *session,
@@ -231,6 +236,6 @@ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
231extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); 236extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
232extern void iscsi_unblock_session(struct iscsi_cls_session *session); 237extern void iscsi_unblock_session(struct iscsi_cls_session *session);
233extern void iscsi_block_session(struct iscsi_cls_session *session); 238extern void iscsi_block_session(struct iscsi_cls_session *session);
234 239extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
235 240
236#endif 241#endif
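iscsi_scan_finished() is declared with exactly the signature scsi_host_template.scan_finished expects, so a software iSCSI LLD can plug into the midlayer's asynchronous scanning. A hedged sketch of that wiring; the template fields shown are real, everything named my_ is invented, and the values are placeholders:

static void my_scan_start(struct Scsi_Host *shost)
{
	/* kick off target discovery; the midlayer polls scan_finished below */
}

static struct scsi_host_template my_iscsi_sht = {
	.module		= THIS_MODULE,
	.name		= "my_iscsi",
	.scan_start	= my_scan_start,
	.scan_finished	= iscsi_scan_finished,
	.can_queue	= 128,
	.this_id	= -1,
};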