Diffstat (limited to 'drivers')
49 files changed, 2063 insertions, 356 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 402296670d3a..78d928f9d9f1 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
| @@ -291,7 +291,7 @@ config SX | |||
| 291 | 291 | ||
| 292 | config RIO | 292 | config RIO |
| 293 | tristate "Specialix RIO system support" | 293 | tristate "Specialix RIO system support" |
| 294 | depends on SERIAL_NONSTANDARD && !64BIT | 294 | depends on SERIAL_NONSTANDARD |
| 295 | help | 295 | help |
| 296 | This is a driver for the Specialix RIO, a smart serial card which | 296 | This is a driver for the Specialix RIO, a smart serial card which |
| 297 | drives an outboard box that can support up to 128 ports. Product | 297 | drives an outboard box that can support up to 128 ports. Product |
diff --git a/drivers/char/rio/host.h b/drivers/char/rio/host.h
index 3ec73d1a279a..179cdbea712b 100644
--- a/drivers/char/rio/host.h
+++ b/drivers/char/rio/host.h
| @@ -33,12 +33,6 @@ | |||
| 33 | #ifndef __rio_host_h__ | 33 | #ifndef __rio_host_h__ |
| 34 | #define __rio_host_h__ | 34 | #define __rio_host_h__ |
| 35 | 35 | ||
| 36 | #ifdef SCCS_LABELS | ||
| 37 | #ifndef lint | ||
| 38 | static char *_host_h_sccs_ = "@(#)host.h 1.2"; | ||
| 39 | #endif | ||
| 40 | #endif | ||
| 41 | |||
| 42 | /* | 36 | /* |
| 43 | ** the host structure - one per host card in the system. | 37 | ** the host structure - one per host card in the system. |
| 44 | */ | 38 | */ |
| @@ -77,9 +71,6 @@ struct Host { | |||
| 77 | #define RC_STARTUP 1 | 71 | #define RC_STARTUP 1 |
| 78 | #define RC_RUNNING 2 | 72 | #define RC_RUNNING 2 |
| 79 | #define RC_STUFFED 3 | 73 | #define RC_STUFFED 3 |
| 80 | #define RC_SOMETHING 4 | ||
| 81 | #define RC_SOMETHING_NEW 5 | ||
| 82 | #define RC_SOMETHING_ELSE 6 | ||
| 83 | #define RC_READY 7 | 74 | #define RC_READY 7 |
| 84 | #define RUN_STATE 7 | 75 | #define RUN_STATE 7 |
| 85 | /* | 76 | /* |
diff --git a/drivers/char/rio/rioboot.c b/drivers/char/rio/rioboot.c
index acda9326c2ef..290143addd34 100644
--- a/drivers/char/rio/rioboot.c
+++ b/drivers/char/rio/rioboot.c
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
| 35 | #include <linux/termios.h> | 35 | #include <linux/termios.h> |
| 36 | #include <linux/serial.h> | 36 | #include <linux/serial.h> |
| 37 | #include <linux/vmalloc.h> | ||
| 37 | #include <asm/semaphore.h> | 38 | #include <asm/semaphore.h> |
| 38 | #include <linux/generic_serial.h> | 39 | #include <linux/generic_serial.h> |
| 39 | #include <linux/errno.h> | 40 | #include <linux/errno.h> |
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c
index d31aba62bb7f..75b2557c37ec 100644
--- a/drivers/char/rio/rioctrl.c
+++ b/drivers/char/rio/rioctrl.c
| @@ -1394,14 +1394,17 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd) | |||
| 1394 | return RIO_FAIL; | 1394 | return RIO_FAIL; |
| 1395 | } | 1395 | } |
| 1396 | 1396 | ||
| 1397 | if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) { | 1397 | if ((PortP->InUse == (typeof(PortP->InUse))-1) || |
| 1398 | rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum); | 1398 | !(CmdBlkP = RIOGetCmdBlk())) { |
| 1399 | rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block " | ||
| 1400 | "for command %d on port %d\n", Cmd, PortP->PortNum); | ||
| 1399 | return RIO_FAIL; | 1401 | return RIO_FAIL; |
| 1400 | } | 1402 | } |
| 1401 | 1403 | ||
| 1402 | rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", CmdBlkP, PortP->InUse); | 1404 | rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", |
| 1405 | CmdBlkP, PortP->InUse); | ||
| 1403 | 1406 | ||
| 1404 | PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0]; | 1407 | PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0]; |
| 1405 | 1408 | ||
| 1406 | CmdBlkP->Packet.src_unit = 0; | 1409 | CmdBlkP->Packet.src_unit = 0; |
| 1407 | if (PortP->SecondBlock) | 1410 | if (PortP->SecondBlock) |
| @@ -1425,38 +1428,46 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd) | |||
| 1425 | 1428 | ||
| 1426 | switch (Cmd) { | 1429 | switch (Cmd) { |
| 1427 | case MEMDUMP: | 1430 | case MEMDUMP: |
| 1428 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p (addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); | 1431 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p " |
| 1432 | "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); | ||
| 1429 | PktCmdP->SubCommand = MEMDUMP; | 1433 | PktCmdP->SubCommand = MEMDUMP; |
| 1430 | PktCmdP->SubAddr = SubCmd.Addr; | 1434 | PktCmdP->SubAddr = SubCmd.Addr; |
| 1431 | break; | 1435 | break; |
| 1432 | case FCLOSE: | 1436 | case FCLOSE: |
| 1433 | rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", CmdBlkP); | 1437 | rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", |
| 1438 | CmdBlkP); | ||
| 1434 | break; | 1439 | break; |
| 1435 | case READ_REGISTER: | 1440 | case READ_REGISTER: |
| 1436 | rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk %p\n", (int) SubCmd.Addr, CmdBlkP); | 1441 | rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) " |
| 1442 | "command blk %p\n", (int) SubCmd.Addr, CmdBlkP); | ||
| 1437 | PktCmdP->SubCommand = READ_REGISTER; | 1443 | PktCmdP->SubCommand = READ_REGISTER; |
| 1438 | PktCmdP->SubAddr = SubCmd.Addr; | 1444 | PktCmdP->SubAddr = SubCmd.Addr; |
| 1439 | break; | 1445 | break; |
| 1440 | case RESUME: | 1446 | case RESUME: |
| 1441 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", CmdBlkP); | 1447 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", |
| 1448 | CmdBlkP); | ||
| 1442 | break; | 1449 | break; |
| 1443 | case RFLUSH: | 1450 | case RFLUSH: |
| 1444 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", CmdBlkP); | 1451 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", |
| 1452 | CmdBlkP); | ||
| 1445 | CmdBlkP->PostFuncP = RIORFlushEnable; | 1453 | CmdBlkP->PostFuncP = RIORFlushEnable; |
| 1446 | break; | 1454 | break; |
| 1447 | case SUSPEND: | 1455 | case SUSPEND: |
| 1448 | rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", CmdBlkP); | 1456 | rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", |
| 1457 | CmdBlkP); | ||
| 1449 | break; | 1458 | break; |
| 1450 | 1459 | ||
| 1451 | case MGET: | 1460 | case MGET: |
| 1452 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", CmdBlkP); | 1461 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", |
| 1462 | CmdBlkP); | ||
| 1453 | break; | 1463 | break; |
| 1454 | 1464 | ||
| 1455 | case MSET: | 1465 | case MSET: |
| 1456 | case MBIC: | 1466 | case MBIC: |
| 1457 | case MBIS: | 1467 | case MBIS: |
| 1458 | CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; | 1468 | CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; |
| 1459 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk %p\n", CmdBlkP); | 1469 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command " |
| 1470 | "blk %p\n", CmdBlkP); | ||
| 1460 | break; | 1471 | break; |
| 1461 | 1472 | ||
| 1462 | case WFLUSH: | 1473 | case WFLUSH: |
| @@ -1465,12 +1476,14 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd) | |||
| 1465 | ** allowed then we should not bother sending any more to the | 1476 | ** allowed then we should not bother sending any more to the |
| 1466 | ** RTA. | 1477 | ** RTA. |
| 1467 | */ | 1478 | */ |
| 1468 | if ((int) ((char) PortP->WflushFlag) == (int) -1) { | 1479 | if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) { |
| 1469 | rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!"); | 1480 | rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, " |
| 1481 | "WflushFlag about to wrap!"); | ||
| 1470 | RIOFreeCmdBlk(CmdBlkP); | 1482 | RIOFreeCmdBlk(CmdBlkP); |
| 1471 | return (RIO_FAIL); | 1483 | return (RIO_FAIL); |
| 1472 | } else { | 1484 | } else { |
| 1473 | rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk %p\n", CmdBlkP); | 1485 | rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command " |
| 1486 | "blk %p\n", CmdBlkP); | ||
| 1474 | CmdBlkP->PostFuncP = RIOWFlushMark; | 1487 | CmdBlkP->PostFuncP = RIOWFlushMark; |
| 1475 | } | 1488 | } |
| 1476 | break; | 1489 | break; |
diff --git a/drivers/char/rio/rioioctl.h b/drivers/char/rio/rioioctl.h
index 14b83fae75c8..e8af5b30519e 100644
--- a/drivers/char/rio/rioioctl.h
+++ b/drivers/char/rio/rioioctl.h
| @@ -33,10 +33,6 @@ | |||
| 33 | #ifndef __rioioctl_h__ | 33 | #ifndef __rioioctl_h__ |
| 34 | #define __rioioctl_h__ | 34 | #define __rioioctl_h__ |
| 35 | 35 | ||
| 36 | #ifdef SCCS_LABELS | ||
| 37 | static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2"; | ||
| 38 | #endif | ||
| 39 | |||
| 40 | /* | 36 | /* |
| 41 | ** RIO device driver - user ioctls and associated structures. | 37 | ** RIO device driver - user ioctls and associated structures. |
| 42 | */ | 38 | */ |
| @@ -44,55 +40,13 @@ static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2"; | |||
| 44 | struct portStats { | 40 | struct portStats { |
| 45 | int port; | 41 | int port; |
| 46 | int gather; | 42 | int gather; |
| 47 | ulong txchars; | 43 | unsigned long txchars; |
| 48 | ulong rxchars; | 44 | unsigned long rxchars; |
| 49 | ulong opens; | 45 | unsigned long opens; |
| 50 | ulong closes; | 46 | unsigned long closes; |
| 51 | ulong ioctls; | 47 | unsigned long ioctls; |
| 52 | }; | 48 | }; |
| 53 | 49 | ||
| 54 | |||
| 55 | #define rIOC ('r'<<8) | ||
| 56 | #define TCRIOSTATE (rIOC | 1) | ||
| 57 | #define TCRIOXPON (rIOC | 2) | ||
| 58 | #define TCRIOXPOFF (rIOC | 3) | ||
| 59 | #define TCRIOXPCPS (rIOC | 4) | ||
| 60 | #define TCRIOXPRINT (rIOC | 5) | ||
| 61 | #define TCRIOIXANYON (rIOC | 6) | ||
| 62 | #define TCRIOIXANYOFF (rIOC | 7) | ||
| 63 | #define TCRIOIXONON (rIOC | 8) | ||
| 64 | #define TCRIOIXONOFF (rIOC | 9) | ||
| 65 | #define TCRIOMBIS (rIOC | 10) | ||
| 66 | #define TCRIOMBIC (rIOC | 11) | ||
| 67 | #define TCRIOTRIAD (rIOC | 12) | ||
| 68 | #define TCRIOTSTATE (rIOC | 13) | ||
| 69 | |||
| 70 | /* | ||
| 71 | ** 15.10.1998 ARG - ESIL 0761 part fix | ||
| 72 | ** Add RIO ioctls for manipulating RTS and CTS flow control, (as LynxOS | ||
| 73 | ** appears to not support hardware flow control). | ||
| 74 | */ | ||
| 75 | #define TCRIOCTSFLOWEN (rIOC | 14) /* enable CTS flow control */ | ||
| 76 | #define TCRIOCTSFLOWDIS (rIOC | 15) /* disable CTS flow control */ | ||
| 77 | #define TCRIORTSFLOWEN (rIOC | 16) /* enable RTS flow control */ | ||
| 78 | #define TCRIORTSFLOWDIS (rIOC | 17) /* disable RTS flow control */ | ||
| 79 | |||
| 80 | /* | ||
| 81 | ** 09.12.1998 ARG - ESIL 0776 part fix | ||
| 82 | ** Definition for 'RIOC' also appears in daemon.h, so we'd better do a | ||
| 83 | ** #ifndef here first. | ||
| 84 | ** 'RIO_QUICK_CHECK' also #define'd here as this ioctl is now | ||
| 85 | ** allowed to be used by customers. | ||
| 86 | ** | ||
| 87 | ** 05.02.1999 ARG - | ||
| 88 | ** This is what I've decied to do with ioctls etc., which are intended to be | ||
| 89 | ** invoked from users applications : | ||
| 90 | ** Anything that needs to be defined here will be removed from daemon.h, that | ||
| 91 | ** way it won't end up having to be defined/maintained in two places. The only | ||
| 92 | ** consequence of this is that this file should now be #include'd by daemon.h | ||
| 93 | ** | ||
| 94 | ** 'stats' ioctls now #define'd here as they are to be used by customers. | ||
| 95 | */ | ||
| 96 | #define RIOC ('R'<<8)|('i'<<16)|('o'<<24) | 50 | #define RIOC ('R'<<8)|('i'<<16)|('o'<<24) |
| 97 | 51 | ||
| 98 | #define RIO_QUICK_CHECK (RIOC | 105) | 52 | #define RIO_QUICK_CHECK (RIOC | 105) |
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 1efde3b27619..fe00c7dfb649 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
| @@ -22,7 +22,7 @@ config TCG_TPM | |||
| 22 | 22 | ||
| 23 | config TCG_TIS | 23 | config TCG_TIS |
| 24 | tristate "TPM Interface Specification 1.2 Interface" | 24 | tristate "TPM Interface Specification 1.2 Interface" |
| 25 | depends on TCG_TPM | 25 | depends on TCG_TPM && PNPACPI |
| 26 | ---help--- | 26 | ---help--- |
| 27 | If you have a TPM security chip that is compliant with the | 27 | If you have a TPM security chip that is compliant with the |
| 28 | TCG TIS 1.2 TPM specification say Yes and it will be accessible | 28 | TCG TIS 1.2 TPM specification say Yes and it will be accessible |
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 54a4c804e25f..050ced247f68 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
| @@ -140,7 +140,7 @@ extern int tpm_pm_resume(struct device *); | |||
| 140 | extern struct dentry ** tpm_bios_log_setup(char *); | 140 | extern struct dentry ** tpm_bios_log_setup(char *); |
| 141 | extern void tpm_bios_log_teardown(struct dentry **); | 141 | extern void tpm_bios_log_teardown(struct dentry **); |
| 142 | #else | 142 | #else |
| 143 | static inline struct dentry* tpm_bios_log_setup(char *name) | 143 | static inline struct dentry ** tpm_bios_log_setup(char *name) |
| 144 | { | 144 | { |
| 145 | return NULL; | 145 | return NULL; |
| 146 | } | 146 | } |
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index b9cae9a238bb..f621168f38ae 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
| @@ -55,7 +55,7 @@ enum tis_int_flags { | |||
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | enum tis_defaults { | 57 | enum tis_defaults { |
| 58 | TIS_MEM_BASE = 0xFED4000, | 58 | TIS_MEM_BASE = 0xFED40000, |
| 59 | TIS_MEM_LEN = 0x5000, | 59 | TIS_MEM_LEN = 0x5000, |
| 60 | TIS_SHORT_TIMEOUT = 750, /* ms */ | 60 | TIS_SHORT_TIMEOUT = 750, /* ms */ |
| 61 | TIS_LONG_TIMEOUT = 2000, /* 2 sec */ | 61 | TIS_LONG_TIMEOUT = 2000, /* 2 sec */ |
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index a13395e2c372..fa2ba9ebe42a 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
| @@ -33,11 +33,6 @@ | |||
| 33 | * 82801E (C-ICH) : document number 273599-001, 273645-002, | 33 | * 82801E (C-ICH) : document number 273599-001, 273645-002, |
| 34 | * 82801EB (ICH5) : document number 252516-001, 252517-003, | 34 | * 82801EB (ICH5) : document number 252516-001, 252517-003, |
| 35 | * 82801ER (ICH5R) : document number 252516-001, 252517-003, | 35 | * 82801ER (ICH5R) : document number 252516-001, 252517-003, |
| 36 | * 82801FB (ICH6) : document number 301473-002, 301474-007, | ||
| 37 | * 82801FR (ICH6R) : document number 301473-002, 301474-007, | ||
| 38 | * 82801FBM (ICH6-M) : document number 301473-002, 301474-007, | ||
| 39 | * 82801FW (ICH6W) : document number 301473-001, 301474-007, | ||
| 40 | * 82801FRW (ICH6RW) : document number 301473-001, 301474-007 | ||
| 41 | * | 36 | * |
| 42 | * 20000710 Nils Faerber | 37 | * 20000710 Nils Faerber |
| 43 | * Initial Version 0.01 | 38 | * Initial Version 0.01 |
| @@ -66,6 +61,10 @@ | |||
| 66 | * 20050807 Wim Van Sebroeck <wim@iguana.be> | 61 | * 20050807 Wim Van Sebroeck <wim@iguana.be> |
| 67 | * 0.08 Make sure that the watchdog is only "armed" when started. | 62 | * 0.08 Make sure that the watchdog is only "armed" when started. |
| 68 | * (Kernel Bug 4251) | 63 | * (Kernel Bug 4251) |
| 64 | * 20060416 Wim Van Sebroeck <wim@iguana.be> | ||
| 65 | * 0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and | ||
| 66 | * ICH7 chipsets. (See Kernel Bug 6031 - other code will support these | ||
| 67 | * chipsets) | ||
| 69 | */ | 68 | */ |
| 70 | 69 | ||
| 71 | /* | 70 | /* |
| @@ -90,7 +89,7 @@ | |||
| 90 | #include "i8xx_tco.h" | 89 | #include "i8xx_tco.h" |
| 91 | 90 | ||
| 92 | /* Module and version information */ | 91 | /* Module and version information */ |
| 93 | #define TCO_VERSION "0.08" | 92 | #define TCO_VERSION "0.09" |
| 94 | #define TCO_MODULE_NAME "i8xx TCO timer" | 93 | #define TCO_MODULE_NAME "i8xx TCO timer" |
| 95 | #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION | 94 | #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION |
| 96 | #define PFX TCO_MODULE_NAME ": " | 95 | #define PFX TCO_MODULE_NAME ": " |
| @@ -391,11 +390,6 @@ static struct pci_device_id i8xx_tco_pci_tbl[] = { | |||
| 391 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, }, | 390 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, }, |
| 392 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, }, | 391 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, }, |
| 393 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, }, | 392 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, }, |
| 394 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, }, | ||
| 395 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, }, | ||
| 396 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, }, | ||
| 397 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, }, | ||
| 398 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, }, | ||
| 399 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, }, | 393 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, }, |
| 400 | { 0, }, /* End of list */ | 394 | { 0, }, /* End of list */ |
| 401 | }; | 395 | }; |
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 9dc54736e4eb..1ea04e9b2b0b 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
| @@ -423,6 +423,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev) | |||
| 423 | if (tmr_atboot && started == 0) { | 423 | if (tmr_atboot && started == 0) { |
| 424 | printk(KERN_INFO PFX "Starting Watchdog Timer\n"); | 424 | printk(KERN_INFO PFX "Starting Watchdog Timer\n"); |
| 425 | s3c2410wdt_start(); | 425 | s3c2410wdt_start(); |
| 426 | } else if (!tmr_atboot) { | ||
| 427 | /* if we're not enabling the watchdog, then ensure it is | ||
| 428 | * disabled if it has been left running from the bootloader | ||
| 429 | * or other source */ | ||
| 430 | |||
| 431 | s3c2410wdt_stop(); | ||
| 426 | } | 432 | } |
| 427 | 433 | ||
| 428 | return 0; | 434 | return 0; |
diff --git a/drivers/char/watchdog/sc1200wdt.c b/drivers/char/watchdog/sc1200wdt.c
index 515ce7572049..20b88f9b7be2 100644
--- a/drivers/char/watchdog/sc1200wdt.c
+++ b/drivers/char/watchdog/sc1200wdt.c
| @@ -377,7 +377,7 @@ static int __init sc1200wdt_init(void) | |||
| 377 | { | 377 | { |
| 378 | int ret; | 378 | int ret; |
| 379 | 379 | ||
| 380 | printk(banner); | 380 | printk("%s\n", banner); |
| 381 | 381 | ||
| 382 | spin_lock_init(&sc1200wdt_lock); | 382 | spin_lock_init(&sc1200wdt_lock); |
| 383 | sema_init(&open_sem, 1); | 383 | sema_init(&open_sem, 1); |
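The sc1200wdt.c hunk above passes the banner through a fixed "%s\n" format instead of using the banner itself as the printk format string, presumably so that any '%' in the string cannot be interpreted as a conversion specifier. A small userspace illustration of the difference (the banner text here is made up):

/* Hedged sketch, not kernel code: shows why a variable string should be
 * printed through a fixed format rather than used as the format itself. */
#include <stdio.h>

int main(void)
{
	const char *banner = "sc1200wdt: 100% made-up example banner";

	/* printf(banner);          unsafe: the '%' would start format parsing */
	printf("%s\n", banner);  /* safe: banner is plain data for a fixed format */
	return 0;
}
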
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 4961f1e764a7..602797a44208 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
| @@ -392,6 +392,7 @@ static struct pcmcia_device_id ide_ids[] = { | |||
| 392 | PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), | 392 | PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), |
| 393 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), | 393 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), |
| 394 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), | 394 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), |
| 395 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), | ||
| 395 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), | 396 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), |
| 396 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), | 397 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), |
| 397 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), | 398 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), |
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 19222878aae9..11f13778f139 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
| @@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci) | |||
| 553 | * register content. | 553 | * register content. |
| 554 | * To actually enable physical responses is the job of our interrupt | 554 | * To actually enable physical responses is the job of our interrupt |
| 555 | * handler which programs the physical request filter. */ | 555 | * handler which programs the physical request filter. */ |
| 556 | reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000); | 556 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000); |
| 557 | 557 | ||
| 558 | DBGMSG("physUpperBoundOffset=%08x", | 558 | DBGMSG("physUpperBoundOffset=%08x", |
| 559 | reg_read(ohci, OHCI1394_PhyUpperBound)); | 559 | reg_read(ohci, OHCI1394_PhyUpperBound)); |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f4206604db03..8a23fb54c693 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
| @@ -42,6 +42,7 @@ | |||
| 42 | #include <linux/kernel.h> | 42 | #include <linux/kernel.h> |
| 43 | #include <linux/list.h> | 43 | #include <linux/list.h> |
| 44 | #include <linux/string.h> | 44 | #include <linux/string.h> |
| 45 | #include <linux/stringify.h> | ||
| 45 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
| 46 | #include <linux/interrupt.h> | 47 | #include <linux/interrupt.h> |
| 47 | #include <linux/fs.h> | 48 | #include <linux/fs.h> |
| @@ -117,7 +118,8 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default | |||
| 117 | */ | 118 | */ |
| 118 | static int max_sectors = SBP2_MAX_SECTORS; | 119 | static int max_sectors = SBP2_MAX_SECTORS; |
| 119 | module_param(max_sectors, int, 0444); | 120 | module_param(max_sectors, int, 0444); |
| 120 | MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)"); | 121 | MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = " |
| 122 | __stringify(SBP2_MAX_SECTORS) ")"); | ||
| 121 | 123 | ||
| 122 | /* | 124 | /* |
| 123 | * Exclusive login to sbp2 device? In most cases, the sbp2 driver should | 125 | * Exclusive login to sbp2 device? In most cases, the sbp2 driver should |
| @@ -135,18 +137,45 @@ module_param(exclusive_login, int, 0644); | |||
| 135 | MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"); | 137 | MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"); |
| 136 | 138 | ||
| 137 | /* | 139 | /* |
| 138 | * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on | 140 | * If any of the following workarounds is required for your device to work, |
| 139 | * if your sbp2 device is not properly handling the SCSI inquiry command. | 141 | * please submit the kernel messages logged by sbp2 to the linux1394-devel |
| 140 | * This hack makes the inquiry look more like a typical MS Windows inquiry | 142 | * mailing list. |
| 141 | * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8. | ||
| 142 | * | 143 | * |
| 143 | * If force_inquiry_hack=1 is required for your device to work, | 144 | * - 128kB max transfer |
| 144 | * please submit the logged sbp2_firmware_revision value of this device to | 145 | * Limit transfer size. Necessary for some old bridges. |
| 145 | * the linux1394-devel mailing list. | 146 | * |
| 147 | * - 36 byte inquiry | ||
| 148 | * When scsi_mod probes the device, let the inquiry command look like that | ||
| 149 | * from MS Windows. | ||
| 150 | * | ||
| 151 | * - skip mode page 8 | ||
| 152 | * Suppress sending of mode_sense for mode page 8 if the device pretends to | ||
| 153 | * support the SCSI Primary Block commands instead of Reduced Block Commands. | ||
| 154 | * | ||
| 155 | * - fix capacity | ||
| 156 | * Tell sd_mod to correct the last sector number reported by read_capacity. | ||
| 157 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | ||
| 158 | * Don't use this with devices which don't have this bug. | ||
| 159 | * | ||
| 160 | * - override internal blacklist | ||
| 161 | * Instead of adding to the built-in blacklist, use only the workarounds | ||
| 162 | * specified in the module load parameter. | ||
| 163 | * Useful if a blacklist entry interfered with a non-broken device. | ||
| 146 | */ | 164 | */ |
| 165 | static int sbp2_default_workarounds; | ||
| 166 | module_param_named(workarounds, sbp2_default_workarounds, int, 0644); | ||
| 167 | MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | ||
| 168 | ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS) | ||
| 169 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | ||
| 170 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | ||
| 171 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | ||
| 172 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | ||
| 173 | ", or a combination)"); | ||
| 174 | |||
| 175 | /* legacy parameter */ | ||
| 147 | static int force_inquiry_hack; | 176 | static int force_inquiry_hack; |
| 148 | module_param(force_inquiry_hack, int, 0644); | 177 | module_param(force_inquiry_hack, int, 0644); |
| 149 | MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)"); | 178 | MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'"); |
| 150 | 179 | ||
| 151 | /* | 180 | /* |
| 152 | * Export information about protocols/devices supported by this driver. | 181 | * Export information about protocols/devices supported by this driver. |
| @@ -266,14 +295,55 @@ static struct hpsb_protocol_driver sbp2_driver = { | |||
| 266 | }; | 295 | }; |
| 267 | 296 | ||
| 268 | /* | 297 | /* |
| 269 | * List of device firmwares that require the inquiry hack. | 298 | * List of devices with known bugs. |
| 270 | * Yields a few false positives but did not break other devices so far. | 299 | * |
| 300 | * The firmware_revision field, masked with 0xffff00, is the best indicator | ||
| 301 | * for the type of bridge chip of a device. It yields a few false positives | ||
| 302 | * but this did not break correctly behaving devices so far. | ||
| 271 | */ | 303 | */ |
| 272 | static u32 sbp2_broken_inquiry_list[] = { | 304 | static const struct { |
| 273 | 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */ | 305 | u32 firmware_revision; |
| 274 | /* DViCO Momobay CX-1 */ | 306 | u32 model_id; |
| 275 | 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */ | 307 | unsigned workarounds; |
| 276 | /* QPS Fire DVDBurner */ | 308 | } sbp2_workarounds_table[] = { |
| 309 | /* TSB42AA9 */ { | ||
| 310 | .firmware_revision = 0x002800, | ||
| 311 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | ||
| 312 | SBP2_WORKAROUND_MODE_SENSE_8, | ||
| 313 | }, | ||
| 314 | /* Initio bridges, actually only needed for some older ones */ { | ||
| 315 | .firmware_revision = 0x000200, | ||
| 316 | .workarounds = SBP2_WORKAROUND_INQUIRY_36, | ||
| 317 | }, | ||
| 318 | /* Symbios bridge */ { | ||
| 319 | .firmware_revision = 0xa0b800, | ||
| 320 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
| 321 | }, | ||
| 322 | /* | ||
| 323 | * Note about the following Apple iPod blacklist entries: | ||
| 324 | * | ||
| 325 | * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our | ||
| 326 | * matching logic treats 0 as a wildcard, we cannot match this ID | ||
| 327 | * without rewriting the matching routine. Fortunately these iPods | ||
| 328 | * do not feature the read_capacity bug according to one report. | ||
| 329 | * Read_capacity behaviour as well as model_id could change due to | ||
| 330 | * Apple-supplied firmware updates though. | ||
| 331 | */ | ||
| 332 | /* iPod 4th generation */ { | ||
| 333 | .firmware_revision = 0x0a2700, | ||
| 334 | .model_id = 0x000021, | ||
| 335 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
| 336 | }, | ||
| 337 | /* iPod mini */ { | ||
| 338 | .firmware_revision = 0x0a2700, | ||
| 339 | .model_id = 0x000023, | ||
| 340 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
| 341 | }, | ||
| 342 | /* iPod Photo */ { | ||
| 343 | .firmware_revision = 0x0a2700, | ||
| 344 | .model_id = 0x00007e, | ||
| 345 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
| 346 | } | ||
| 277 | }; | 347 | }; |
| 278 | 348 | ||
| 279 | /************************************** | 349 | /************************************** |
| @@ -765,11 +835,16 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud | |||
| 765 | 835 | ||
| 766 | /* Register the status FIFO address range. We could use the same FIFO | 836 | /* Register the status FIFO address range. We could use the same FIFO |
| 767 | * for targets at different nodes. However we need different FIFOs per | 837 | * for targets at different nodes. However we need different FIFOs per |
| 768 | * target in order to support multi-unit devices. */ | 838 | * target in order to support multi-unit devices. |
| 839 | * The FIFO is located out of the local host controller's physical range | ||
| 840 | * but, if possible, within the posted write area. Status writes will | ||
| 841 | * then be performed as unified transactions. This slightly reduces | ||
| 842 | * bandwidth usage, and some Prolific based devices seem to require it. | ||
| 843 | */ | ||
| 769 | scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( | 844 | scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( |
| 770 | &sbp2_highlevel, ud->ne->host, &sbp2_ops, | 845 | &sbp2_highlevel, ud->ne->host, &sbp2_ops, |
| 771 | sizeof(struct sbp2_status_block), sizeof(quadlet_t), | 846 | sizeof(struct sbp2_status_block), sizeof(quadlet_t), |
| 772 | ~0ULL, ~0ULL); | 847 | 0x010000000000ULL, CSR1212_ALL_SPACE_END); |
| 773 | if (!scsi_id->status_fifo_addr) { | 848 | if (!scsi_id->status_fifo_addr) { |
| 774 | SBP2_ERR("failed to allocate status FIFO address range"); | 849 | SBP2_ERR("failed to allocate status FIFO address range"); |
| 775 | goto failed_alloc; | 850 | goto failed_alloc; |
| @@ -1450,7 +1525,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, | |||
| 1450 | struct csr1212_dentry *dentry; | 1525 | struct csr1212_dentry *dentry; |
| 1451 | u64 management_agent_addr; | 1526 | u64 management_agent_addr; |
| 1452 | u32 command_set_spec_id, command_set, unit_characteristics, | 1527 | u32 command_set_spec_id, command_set, unit_characteristics, |
| 1453 | firmware_revision, workarounds; | 1528 | firmware_revision; |
| 1529 | unsigned workarounds; | ||
| 1454 | int i; | 1530 | int i; |
| 1455 | 1531 | ||
| 1456 | SBP2_DEBUG_ENTER(); | 1532 | SBP2_DEBUG_ENTER(); |
| @@ -1506,12 +1582,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, | |||
| 1506 | case SBP2_FIRMWARE_REVISION_KEY: | 1582 | case SBP2_FIRMWARE_REVISION_KEY: |
| 1507 | /* Firmware revision */ | 1583 | /* Firmware revision */ |
| 1508 | firmware_revision = kv->value.immediate; | 1584 | firmware_revision = kv->value.immediate; |
| 1509 | if (force_inquiry_hack) | 1585 | SBP2_DEBUG("sbp2_firmware_revision = %x", |
| 1510 | SBP2_INFO("sbp2_firmware_revision = %x", | 1586 | (unsigned int)firmware_revision); |
| 1511 | (unsigned int)firmware_revision); | ||
| 1512 | else | ||
| 1513 | SBP2_DEBUG("sbp2_firmware_revision = %x", | ||
| 1514 | (unsigned int)firmware_revision); | ||
| 1515 | break; | 1587 | break; |
| 1516 | 1588 | ||
| 1517 | default: | 1589 | default: |
| @@ -1519,41 +1591,44 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, | |||
| 1519 | } | 1591 | } |
| 1520 | } | 1592 | } |
| 1521 | 1593 | ||
| 1522 | /* This is the start of our broken device checking. We try to hack | 1594 | workarounds = sbp2_default_workarounds; |
| 1523 | * around oddities and known defects. */ | 1595 | if (force_inquiry_hack) { |
| 1524 | workarounds = 0x0; | 1596 | SBP2_WARN("force_inquiry_hack is deprecated. " |
| 1597 | "Use parameter 'workarounds' instead."); | ||
| 1598 | workarounds |= SBP2_WORKAROUND_INQUIRY_36; | ||
| 1599 | } | ||
| 1525 | 1600 | ||
| 1526 | /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a | 1601 | if (!(workarounds & SBP2_WORKAROUND_OVERRIDE)) |
| 1527 | * bridge with 128KB max transfer size limitation. For sanity, we | 1602 | for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { |
| 1528 | * only voice this when the current max_sectors setting | 1603 | if (sbp2_workarounds_table[i].firmware_revision && |
| 1529 | * exceeds the 128k limit. By default, that is not the case. | 1604 | sbp2_workarounds_table[i].firmware_revision != |
| 1530 | * | 1605 | (firmware_revision & 0xffff00)) |
| 1531 | * It would be really nice if we could detect this before the scsi | 1606 | continue; |
| 1532 | * host gets initialized. That way we can down-force the | 1607 | if (sbp2_workarounds_table[i].model_id && |
| 1533 | * max_sectors to account for it. That is not currently | 1608 | sbp2_workarounds_table[i].model_id != ud->model_id) |
| 1534 | * possible. */ | 1609 | continue; |
| 1535 | if ((firmware_revision & 0xffff00) == | 1610 | workarounds |= sbp2_workarounds_table[i].workarounds; |
| 1536 | SBP2_128KB_BROKEN_FIRMWARE && | 1611 | break; |
| 1537 | (max_sectors * 512) > (128*1024)) { | ||
| 1538 | SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.", | ||
| 1539 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid)); | ||
| 1540 | SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!", | ||
| 1541 | max_sectors); | ||
| 1542 | workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER; | ||
| 1543 | } | ||
| 1544 | |||
| 1545 | /* Check for a blacklisted set of devices that require us to force | ||
| 1546 | * a 36 byte host inquiry. This can be overriden as a module param | ||
| 1547 | * (to force all hosts). */ | ||
| 1548 | for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) { | ||
| 1549 | if ((firmware_revision & 0xffff00) == | ||
| 1550 | sbp2_broken_inquiry_list[i]) { | ||
| 1551 | SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround", | ||
| 1552 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid)); | ||
| 1553 | workarounds |= SBP2_BREAKAGE_INQUIRY_HACK; | ||
| 1554 | break; /* No need to continue. */ | ||
| 1555 | } | 1612 | } |
| 1556 | } | 1613 | |
| 1614 | if (workarounds) | ||
| 1615 | SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x " | ||
| 1616 | "(firmware_revision 0x%06x, vendor_id 0x%06x," | ||
| 1617 | " model_id 0x%06x)", | ||
| 1618 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), | ||
| 1619 | workarounds, firmware_revision, | ||
| 1620 | ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, | ||
| 1621 | ud->model_id); | ||
| 1622 | |||
| 1623 | /* We would need one SCSI host template for each target to adjust | ||
| 1624 | * max_sectors on the fly, therefore warn only. */ | ||
| 1625 | if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && | ||
| 1626 | (max_sectors * 512) > (128 * 1024)) | ||
| 1627 | SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB " | ||
| 1628 | "max transfer size. WARNING: Current max_sectors " | ||
| 1629 | "setting is larger than 128KB (%d sectors)", | ||
| 1630 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), | ||
| 1631 | max_sectors); | ||
| 1557 | 1632 | ||
| 1558 | /* If this is a logical unit directory entry, process the parent | 1633 | /* If this is a logical unit directory entry, process the parent |
| 1559 | * to get the values. */ | 1634 | * to get the values. */ |
| @@ -2447,19 +2522,25 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) | |||
| 2447 | 2522 | ||
| 2448 | scsi_id->sdev = sdev; | 2523 | scsi_id->sdev = sdev; |
| 2449 | 2524 | ||
| 2450 | if (force_inquiry_hack || | 2525 | if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36) |
| 2451 | scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) { | ||
| 2452 | sdev->inquiry_len = 36; | 2526 | sdev->inquiry_len = 36; |
| 2453 | sdev->skip_ms_page_8 = 1; | ||
| 2454 | } | ||
| 2455 | return 0; | 2527 | return 0; |
| 2456 | } | 2528 | } |
| 2457 | 2529 | ||
| 2458 | static int sbp2scsi_slave_configure(struct scsi_device *sdev) | 2530 | static int sbp2scsi_slave_configure(struct scsi_device *sdev) |
| 2459 | { | 2531 | { |
| 2532 | struct scsi_id_instance_data *scsi_id = | ||
| 2533 | (struct scsi_id_instance_data *)sdev->host->hostdata[0]; | ||
| 2534 | |||
| 2460 | blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); | 2535 | blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); |
| 2461 | sdev->use_10_for_rw = 1; | 2536 | sdev->use_10_for_rw = 1; |
| 2462 | sdev->use_10_for_ms = 1; | 2537 | sdev->use_10_for_ms = 1; |
| 2538 | |||
| 2539 | if (sdev->type == TYPE_DISK && | ||
| 2540 | scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) | ||
| 2541 | sdev->skip_ms_page_8 = 1; | ||
| 2542 | if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) | ||
| 2543 | sdev->fix_capacity = 1; | ||
| 2463 | return 0; | 2544 | return 0; |
| 2464 | } | 2545 | } |
| 2465 | 2546 | ||
| @@ -2603,7 +2684,9 @@ static int sbp2_module_init(void) | |||
| 2603 | scsi_driver_template.cmd_per_lun = 1; | 2684 | scsi_driver_template.cmd_per_lun = 1; |
| 2604 | } | 2685 | } |
| 2605 | 2686 | ||
| 2606 | /* Set max sectors (module load option). Default is 255 sectors. */ | 2687 | if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && |
| 2688 | (max_sectors * 512) > (128 * 1024)) | ||
| 2689 | max_sectors = 128 * 1024 / 512; | ||
| 2607 | scsi_driver_template.max_sectors = max_sectors; | 2690 | scsi_driver_template.max_sectors = max_sectors; |
| 2608 | 2691 | ||
| 2609 | /* Register our high level driver with 1394 stack */ | 2692 | /* Register our high level driver with 1394 stack */ |
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index e2d357a9ea3a..f4ccc9d0fba4 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
| @@ -227,11 +227,6 @@ struct sbp2_status_block { | |||
| 227 | #define SBP2_SW_VERSION_ENTRY 0x00010483 | 227 | #define SBP2_SW_VERSION_ENTRY 0x00010483 |
| 228 | 228 | ||
| 229 | /* | 229 | /* |
| 230 | * Other misc defines | ||
| 231 | */ | ||
| 232 | #define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800 | ||
| 233 | |||
| 234 | /* | ||
| 235 | * SCSI specific stuff | 230 | * SCSI specific stuff |
| 236 | */ | 231 | */ |
| 237 | 232 | ||
| @@ -239,6 +234,13 @@ struct sbp2_status_block { | |||
| 239 | #define SBP2_MAX_SECTORS 255 /* Max sectors supported */ | 234 | #define SBP2_MAX_SECTORS 255 /* Max sectors supported */ |
| 240 | #define SBP2_MAX_CMDS 8 /* This should be safe */ | 235 | #define SBP2_MAX_CMDS 8 /* This should be safe */ |
| 241 | 236 | ||
| 237 | /* Flags for detected oddities and brokeness */ | ||
| 238 | #define SBP2_WORKAROUND_128K_MAX_TRANS 0x1 | ||
| 239 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | ||
| 240 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | ||
| 241 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | ||
| 242 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | ||
| 243 | |||
| 242 | /* This is the two dma types we use for cmd_dma below */ | 244 | /* This is the two dma types we use for cmd_dma below */ |
| 243 | enum cmd_dma_types { | 245 | enum cmd_dma_types { |
| 244 | CMD_DMA_NONE, | 246 | CMD_DMA_NONE, |
| @@ -268,10 +270,6 @@ struct sbp2_command_info { | |||
| 268 | 270 | ||
| 269 | }; | 271 | }; |
| 270 | 272 | ||
| 271 | /* A list of flags for detected oddities and brokeness. */ | ||
| 272 | #define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1 | ||
| 273 | #define SBP2_BREAKAGE_INQUIRY_HACK 0x2 | ||
| 274 | |||
| 275 | struct sbp2scsi_host_info; | 273 | struct sbp2scsi_host_info; |
| 276 | 274 | ||
| 277 | /* | 275 | /* |
| @@ -345,7 +343,7 @@ struct scsi_id_instance_data { | |||
| 345 | struct Scsi_Host *scsi_host; | 343 | struct Scsi_Host *scsi_host; |
| 346 | 344 | ||
| 347 | /* Device specific workarounds/brokeness */ | 345 | /* Device specific workarounds/brokeness */ |
| 348 | u32 workarounds; | 346 | unsigned workarounds; |
| 349 | }; | 347 | }; |
| 350 | 348 | ||
| 351 | /* Sbp2 host data structure (one per IEEE1394 host) */ | 349 | /* Sbp2 host data structure (one per IEEE1394 host) */ |
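The sbp2.c hunks above replace the old force_inquiry_hack and broken-inquiry blacklist with a per-bridge workarounds table and a 'workarounds' module parameter that takes a bitwise OR of the flag values added to sbp2.h in the hunk above. A minimal userspace sketch of composing such a mask (flag values copied from the hunk; the chosen combination is only an example and matches the TSB42AA9 table entry):

/* Hedged sketch: combine sbp2 workaround flags into one parameter value. */
#include <stdio.h>

#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
#define SBP2_WORKAROUND_OVERRIDE	0x100

int main(void)
{
	/* e.g. a bridge that needs the 36 byte inquiry and must not be
	 * sent mode page 8 */
	unsigned workarounds = SBP2_WORKAROUND_INQUIRY_36 |
			       SBP2_WORKAROUND_MODE_SENSE_8;

	printf("workarounds=0x%x\n", workarounds);	/* prints 0x6 */
	return 0;
}

The resulting number would then be handed to the driver as the module parameter described above, e.g. something like "modprobe sbp2 workarounds=6".
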
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index 36a32c315668..efe147dbeb42 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
| @@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) | |||
| 211 | */ | 211 | */ |
| 212 | 212 | ||
| 213 | work = kmalloc(sizeof *work, GFP_KERNEL); | 213 | work = kmalloc(sizeof *work, GFP_KERNEL); |
| 214 | if (!work) | 214 | if (!work) { |
| 215 | mmput(mm); | ||
| 215 | return; | 216 | return; |
| 217 | } | ||
| 216 | 218 | ||
| 217 | INIT_WORK(&work->work, ib_umem_account, work); | 219 | INIT_WORK(&work->work, ib_umem_account, work); |
| 218 | work->mm = mm; | 220 | work->mm = mm; |
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 1985b5dfa481..798e13e14faf 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
| @@ -182,7 +182,7 @@ struct mthca_cmd_context { | |||
| 182 | u8 status; | 182 | u8 status; |
| 183 | }; | 183 | }; |
| 184 | 184 | ||
| 185 | static int fw_cmd_doorbell = 1; | 185 | static int fw_cmd_doorbell = 0; |
| 186 | module_param(fw_cmd_doorbell, int, 0644); | 186 | module_param(fw_cmd_doorbell, int, 0644); |
| 187 | MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " | 187 | MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " |
| 188 | "(and supported by FW)"); | 188 | "(and supported by FW)"); |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 19765f6f8d58..07c13be07a4a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
| @@ -1727,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 1727 | 1727 | ||
| 1728 | ind = qp->rq.next_ind; | 1728 | ind = qp->rq.next_ind; |
| 1729 | 1729 | ||
| 1730 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 1730 | for (nreq = 0; wr; wr = wr->next) { |
| 1731 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
| 1732 | nreq = 0; | ||
| 1733 | |||
| 1734 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
| 1735 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
| 1736 | |||
| 1737 | wmb(); | ||
| 1738 | |||
| 1739 | mthca_write64(doorbell, | ||
| 1740 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
| 1741 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
| 1742 | |||
| 1743 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
| 1744 | size0 = 0; | ||
| 1745 | } | ||
| 1746 | |||
| 1747 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | 1731 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
| 1748 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | 1732 | mthca_err(dev, "RQ %06x full (%u head, %u tail," |
| 1749 | " %d max, %d nreq)\n", qp->qpn, | 1733 | " %d max, %d nreq)\n", qp->qpn, |
| @@ -1797,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 1797 | ++ind; | 1781 | ++ind; |
| 1798 | if (unlikely(ind >= qp->rq.max)) | 1782 | if (unlikely(ind >= qp->rq.max)) |
| 1799 | ind -= qp->rq.max; | 1783 | ind -= qp->rq.max; |
| 1784 | |||
| 1785 | ++nreq; | ||
| 1786 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
| 1787 | nreq = 0; | ||
| 1788 | |||
| 1789 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
| 1790 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
| 1791 | |||
| 1792 | wmb(); | ||
| 1793 | |||
| 1794 | mthca_write64(doorbell, | ||
| 1795 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
| 1796 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
| 1797 | |||
| 1798 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
| 1799 | size0 = 0; | ||
| 1800 | } | ||
| 1800 | } | 1801 | } |
| 1801 | 1802 | ||
| 1802 | out: | 1803 | out: |
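The mthca_qp.c hunk above moves the receive-doorbell batching so that nreq is incremented for the work request that was just posted before the batch-limit check, instead of the check running at the top of the loop. A rough, self-contained sketch of that post-then-flush pattern as I read the change (BATCH_LIMIT, post_one() and ring_doorbell() are placeholder names, not the mthca API):

/* Hedged sketch of batched posting: flush every BATCH_LIMIT requests,
 * counting the request that was just posted, plus a final flush. */
#include <stdio.h>

#define BATCH_LIMIT 256		/* placeholder batch size */

static void post_one(int i)      { (void)i; /* build one work request */ }
static void ring_doorbell(int n) { printf("doorbell for %d requests\n", n); }

int main(void)
{
	int nreq = 0;

	for (int i = 0; i < 1000; i++) {
		post_one(i);
		if (++nreq == BATCH_LIMIT) {	/* posted request is counted first */
			ring_doorbell(nreq);
			nreq = 0;
		}
	}
	if (nreq)				/* flush the final partial batch */
		ring_doorbell(nreq);
	return 0;
}
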
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c32ce4348e1b..9cbdffa08dc2 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
| @@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target) | |||
| 340 | /* XXX should send SRP_I_LOGOUT request */ | 340 | /* XXX should send SRP_I_LOGOUT request */ |
| 341 | 341 | ||
| 342 | init_completion(&target->done); | 342 | init_completion(&target->done); |
| 343 | ib_send_cm_dreq(target->cm_id, NULL, 0); | 343 | if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { |
| 344 | printk(KERN_DEBUG PFX "Sending CM DREQ failed\n"); | ||
| 345 | return; | ||
| 346 | } | ||
| 344 | wait_for_completion(&target->done); | 347 | wait_for_completion(&target->done); |
| 345 | } | 348 | } |
| 346 | 349 | ||
| @@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr) | |||
| 351 | spin_lock_irq(target->scsi_host->host_lock); | 354 | spin_lock_irq(target->scsi_host->host_lock); |
| 352 | if (target->state != SRP_TARGET_DEAD) { | 355 | if (target->state != SRP_TARGET_DEAD) { |
| 353 | spin_unlock_irq(target->scsi_host->host_lock); | 356 | spin_unlock_irq(target->scsi_host->host_lock); |
| 354 | scsi_host_put(target->scsi_host); | ||
| 355 | return; | 357 | return; |
| 356 | } | 358 | } |
| 357 | target->state = SRP_TARGET_REMOVED; | 359 | target->state = SRP_TARGET_REMOVED; |
| @@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr) | |||
| 365 | ib_destroy_cm_id(target->cm_id); | 367 | ib_destroy_cm_id(target->cm_id); |
| 366 | srp_free_target_ib(target); | 368 | srp_free_target_ib(target); |
| 367 | scsi_host_put(target->scsi_host); | 369 | scsi_host_put(target->scsi_host); |
| 368 | /* And another put to really free the target port... */ | ||
| 369 | scsi_host_put(target->scsi_host); | ||
| 370 | } | 370 | } |
| 371 | 371 | ||
| 372 | static int srp_connect_target(struct srp_target_port *target) | 372 | static int srp_connect_target(struct srp_target_port *target) |
| @@ -1241,7 +1241,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
| 1241 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) | 1241 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) |
| 1242 | if (req->scmnd->device == scmnd->device) { | 1242 | if (req->scmnd->device == scmnd->device) { |
| 1243 | req->scmnd->result = DID_RESET << 16; | 1243 | req->scmnd->result = DID_RESET << 16; |
| 1244 | scmnd->scsi_done(scmnd); | 1244 | req->scmnd->scsi_done(req->scmnd); |
| 1245 | srp_remove_req(target, req); | 1245 | srp_remove_req(target, req); |
| 1246 | } | 1246 | } |
| 1247 | 1247 | ||
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 9b493f0becc4..173c899a1fb4 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
| @@ -1499,7 +1499,6 @@ static int __init capi_init(void) | |||
| 1499 | printk(KERN_ERR "capi20: unable to get major %d\n", capi_major); | 1499 | printk(KERN_ERR "capi20: unable to get major %d\n", capi_major); |
| 1500 | return major_ret; | 1500 | return major_ret; |
| 1501 | } | 1501 | } |
| 1502 | capi_major = major_ret; | ||
| 1503 | capi_class = class_create(THIS_MODULE, "capi"); | 1502 | capi_class = class_create(THIS_MODULE, "capi"); |
| 1504 | if (IS_ERR(capi_class)) { | 1503 | if (IS_ERR(capi_class)) { |
| 1505 | unregister_chrdev(capi_major, "capi20"); | 1504 | unregister_chrdev(capi_major, "capi20"); |
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index bfb73fd5077e..d86ab68114b0 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
| @@ -710,8 +710,8 @@ static int gigaset_probe(struct usb_interface *interface, | |||
| 710 | retval = -ENODEV; //FIXME | 710 | retval = -ENODEV; //FIXME |
| 711 | 711 | ||
| 712 | /* See if the device offered us matches what we can accept */ | 712 | /* See if the device offered us matches what we can accept */ |
| 713 | if ((le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID)) || | 713 | if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) || |
| 714 | (le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID))) | 714 | (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) |
| 715 | return -ENODEV; | 715 | return -ENODEV; |
| 716 | 716 | ||
| 717 | /* this starts to become ascii art... */ | 717 | /* this starts to become ascii art... */ |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 3f5b64794542..626506234b76 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
| @@ -4,8 +4,11 @@ menu "LED devices" | |||
| 4 | config NEW_LEDS | 4 | config NEW_LEDS |
| 5 | bool "LED Support" | 5 | bool "LED Support" |
| 6 | help | 6 | help |
| 7 | Say Y to enable Linux LED support. This is not related to standard | 7 | Say Y to enable Linux LED support. This allows control of supported |
| 8 | keyboard LEDs which are controlled via the input system. | 8 | LEDs from both userspace and optionally, by kernel events (triggers). |
| 9 | |||
| 10 | This is not related to standard keyboard LEDs which are controlled | ||
| 11 | via the input system. | ||
| 9 | 12 | ||
| 10 | config LEDS_CLASS | 13 | config LEDS_CLASS |
| 11 | tristate "LED Class Support" | 14 | tristate "LED Class Support" |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index b0b5d05fadd6..c75d0ef1609c 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/sysdev.h> | 19 | #include <linux/sysdev.h> |
| 20 | #include <linux/timer.h> | 20 | #include <linux/timer.h> |
| 21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
| 22 | #include <linux/ctype.h> | ||
| 22 | #include <linux/leds.h> | 23 | #include <linux/leds.h> |
| 23 | #include "leds.h" | 24 | #include "leds.h" |
| 24 | 25 | ||
| @@ -43,9 +44,13 @@ static ssize_t led_brightness_store(struct class_device *dev, | |||
| 43 | ssize_t ret = -EINVAL; | 44 | ssize_t ret = -EINVAL; |
| 44 | char *after; | 45 | char *after; |
| 45 | unsigned long state = simple_strtoul(buf, &after, 10); | 46 | unsigned long state = simple_strtoul(buf, &after, 10); |
| 47 | size_t count = after - buf; | ||
| 46 | 48 | ||
| 47 | if (after - buf > 0) { | 49 | if (*after && isspace(*after)) |
| 48 | ret = after - buf; | 50 | count++; |
| 51 | |||
| 52 | if (count == size) { | ||
| 53 | ret = count; | ||
| 49 | led_set_brightness(led_cdev, state); | 54 | led_set_brightness(led_cdev, state); |
| 50 | } | 55 | } |
| 51 | 56 | ||
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index f484b5d6dbf8..fbf141ef46ec 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
| 21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
| 22 | #include <linux/timer.h> | 22 | #include <linux/timer.h> |
| 23 | #include <linux/ctype.h> | ||
| 23 | #include <linux/leds.h> | 24 | #include <linux/leds.h> |
| 24 | #include "leds.h" | 25 | #include "leds.h" |
| 25 | 26 | ||
| @@ -69,11 +70,15 @@ static ssize_t led_delay_on_store(struct class_device *dev, const char *buf, | |||
| 69 | int ret = -EINVAL; | 70 | int ret = -EINVAL; |
| 70 | char *after; | 71 | char *after; |
| 71 | unsigned long state = simple_strtoul(buf, &after, 10); | 72 | unsigned long state = simple_strtoul(buf, &after, 10); |
| 73 | size_t count = after - buf; | ||
| 72 | 74 | ||
| 73 | if (after - buf > 0) { | 75 | if (*after && isspace(*after)) |
| 76 | count++; | ||
| 77 | |||
| 78 | if (count == size) { | ||
| 74 | timer_data->delay_on = state; | 79 | timer_data->delay_on = state; |
| 75 | mod_timer(&timer_data->timer, jiffies + 1); | 80 | mod_timer(&timer_data->timer, jiffies + 1); |
| 76 | ret = after - buf; | 81 | ret = count; |
| 77 | } | 82 | } |
| 78 | 83 | ||
| 79 | return ret; | 84 | return ret; |
| @@ -97,11 +102,15 @@ static ssize_t led_delay_off_store(struct class_device *dev, const char *buf, | |||
| 97 | int ret = -EINVAL; | 102 | int ret = -EINVAL; |
| 98 | char *after; | 103 | char *after; |
| 99 | unsigned long state = simple_strtoul(buf, &after, 10); | 104 | unsigned long state = simple_strtoul(buf, &after, 10); |
| 105 | size_t count = after - buf; | ||
| 106 | |||
| 107 | if (*after && isspace(*after)) | ||
| 108 | count++; | ||
| 100 | 109 | ||
| 101 | if (after - buf > 0) { | 110 | if (count == size) { |
| 102 | timer_data->delay_off = state; | 111 | timer_data->delay_off = state; |
| 103 | mod_timer(&timer_data->timer, jiffies + 1); | 112 | mod_timer(&timer_data->timer, jiffies + 1); |
| 104 | ret = after - buf; | 113 | ret = count; |
| 105 | } | 114 | } |
| 106 | 115 | ||
| 107 | return ret; | 116 | return ret; |
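The led-class.c and ledtrig-timer.c hunks above tighten the sysfs store parsing: the characters consumed by simple_strtoul, plus at most one trailing whitespace character, must account for the whole write, otherwise -EINVAL is returned. A userspace sketch of the same check (strtoul stands in for simple_strtoul; the test strings are made up):

/* Hedged sketch of the strict number parsing used by the LED sysfs stores. */
#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_strict(const char *buf, size_t size, unsigned long *state)
{
	char *after;
	size_t count;

	*state = strtoul(buf, &after, 10);		/* kernel uses simple_strtoul */
	count = after - buf;

	if (*after && isspace((unsigned char)*after))	/* allow one trailing '\n' or ' ' */
		count++;

	return count == size ? (int)count : -EINVAL;
}

int main(void)
{
	const char *tests[] = { "100", "100 ", "100abc", "x7" };
	unsigned long v;

	for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		int ret = parse_strict(tests[i], strlen(tests[i]), &v);
		printf("%-8s -> ret=%d value=%lu\n", tests[i], ret, v);
	}
	return 0;
}
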
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 914d62b24064..5dc4bee7abeb 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
| @@ -310,7 +310,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) | |||
| 310 | } | 310 | } |
| 311 | else | 311 | else |
| 312 | data->bytes_xfered = | 312 | data->bytes_xfered = |
| 313 | (data->blocks * (1 << data->blksz_bits)) - | 313 | (data->blocks * data->blksz) - |
| 314 | host->pio.len; | 314 | host->pio.len; |
| 315 | } | 315 | } |
| 316 | 316 | ||
| @@ -575,7 +575,7 @@ static int | |||
| 575 | au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) | 575 | au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) |
| 576 | { | 576 | { |
| 577 | 577 | ||
| 578 | int datalen = data->blocks * (1 << data->blksz_bits); | 578 | int datalen = data->blocks * data->blksz; |
| 579 | 579 | ||
| 580 | if (dma != 0) | 580 | if (dma != 0) |
| 581 | host->flags |= HOST_F_DMA; | 581 | host->flags |= HOST_F_DMA; |
| @@ -596,7 +596,7 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) | |||
| 596 | if (host->dma.len == 0) | 596 | if (host->dma.len == 0) |
| 597 | return MMC_ERR_TIMEOUT; | 597 | return MMC_ERR_TIMEOUT; |
| 598 | 598 | ||
| 599 | au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host)); | 599 | au_writel(data->blksz - 1, HOST_BLKSIZE(host)); |
| 600 | 600 | ||
| 601 | if (host->flags & HOST_F_DMA) { | 601 | if (host->flags & HOST_F_DMA) { |
| 602 | int i; | 602 | int i; |
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 79358e223f57..a4eb1d0e7a71 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
| @@ -218,8 +218,10 @@ static int imxmci_busy_wait_for_status(struct imxmci_host *host, | |||
| 218 | if(!loops) | 218 | if(!loops) |
| 219 | return 0; | 219 | return 0; |
| 220 | 220 | ||
| 221 | dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", | 221 | /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */ |
| 222 | loops, where, *pstat, stat_mask); | 222 | if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000)) |
| 223 | dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", | ||
| 224 | loops, where, *pstat, stat_mask); | ||
| 223 | return loops; | 225 | return loops; |
| 224 | } | 226 | } |
| 225 | 227 | ||
| @@ -333,6 +335,9 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, | |||
| 333 | WARN_ON(host->cmd != NULL); | 335 | WARN_ON(host->cmd != NULL); |
| 334 | host->cmd = cmd; | 336 | host->cmd = cmd; |
| 335 | 337 | ||
| 338 | /* Ensure, that clock are stopped else command programming and start fails */ | ||
| 339 | imxmci_stop_clock(host); | ||
| 340 | |||
| 336 | if (cmd->flags & MMC_RSP_BUSY) | 341 | if (cmd->flags & MMC_RSP_BUSY) |
| 337 | cmdat |= CMD_DAT_CONT_BUSY; | 342 | cmdat |= CMD_DAT_CONT_BUSY; |
| 338 | 343 | ||
| @@ -553,7 +558,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) | |||
| 553 | int trans_done = 0; | 558 | int trans_done = 0; |
| 554 | unsigned int stat = *pstat; | 559 | unsigned int stat = *pstat; |
| 555 | 560 | ||
| 556 | if(host->actual_bus_width == MMC_BUS_WIDTH_4) | 561 | if(host->actual_bus_width != MMC_BUS_WIDTH_4) |
| 557 | burst_len = 16; | 562 | burst_len = 16; |
| 558 | else | 563 | else |
| 559 | burst_len = 64; | 564 | burst_len = 64; |
| @@ -591,8 +596,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) | |||
| 591 | stat = MMC_STATUS; | 596 | stat = MMC_STATUS; |
| 592 | 597 | ||
| 593 | /* Flush extra bytes from FIFO */ | 598 | /* Flush extra bytes from FIFO */ |
| 594 | while(flush_len >= 2){ | 599 | while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){ |
| 595 | flush_len -= 2; | ||
| 596 | i = MMC_BUFFER_ACCESS; | 600 | i = MMC_BUFFER_ACCESS; |
| 597 | stat = MMC_STATUS; | 601 | stat = MMC_STATUS; |
| 598 | stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */ | 602 | stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */ |
| @@ -746,10 +750,6 @@ static void imxmci_tasklet_fnc(unsigned long data) | |||
| 746 | data_dir_mask = STATUS_DATA_TRANS_DONE; | 750 | data_dir_mask = STATUS_DATA_TRANS_DONE; |
| 747 | } | 751 | } |
| 748 | 752 | ||
| 749 | imxmci_busy_wait_for_status(host, &stat, | ||
| 750 | data_dir_mask, | ||
| 751 | 50, "imxmci_tasklet_fnc data"); | ||
| 752 | |||
| 753 | if(stat & data_dir_mask) { | 753 | if(stat & data_dir_mask) { |
| 754 | clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); | 754 | clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); |
| 755 | imxmci_data_done(host, stat); | 755 | imxmci_data_done(host, stat); |
| @@ -865,7 +865,11 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 865 | 865 | ||
| 866 | imxmci_stop_clock(host); | 866 | imxmci_stop_clock(host); |
| 867 | MMC_CLK_RATE = (prescaler<<3) | clk; | 867 | MMC_CLK_RATE = (prescaler<<3) | clk; |
| 868 | imxmci_start_clock(host); | 868 | /* |
| 869 | * As far as I understand, the clock should not be started here, because it would | ||
| 870 | * initiate the SDHC sequencer and send the last or a random command to the card | ||
| 871 | */ | ||
| 872 | /*imxmci_start_clock(host);*/ | ||
| 869 | 873 | ||
| 870 | dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); | 874 | dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); |
| 871 | } else { | 875 | } else { |
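The imxmci changes above pin down a specific ordering around command setup: the SDHC clock is stopped before a command is programmed, and is started again only once the command registers are written (which is why the unconditional imxmci_start_clock() call in set_ios is now commented out). A minimal sketch of that ordering, assuming hypothetical register offsets and helper names rather than the driver's real interface:

/* Sketch only: SDHC_CMD, SDHC_ARG, SDHC_CMD_DAT_CONT and the helpers below
 * are assumed names, used to illustrate the stop -> program -> start order. */
static void sdhc_issue_cmd(struct sdhc_host *host, u32 cmd, u32 arg, u32 cmdat)
{
	sdhc_stop_clock(host);                      /* sequencer must be idle first */
	writel(cmd, host->base + SDHC_CMD);         /* program the command ... */
	writel(arg, host->base + SDHC_ARG);         /* ... and its argument */
	writel(cmdat, host->base + SDHC_CMD_DAT_CONT);
	sdhc_start_clock(host);                     /* only now kick the sequencer */
}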
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 1ca2c8b9c9b5..6201f3086a02 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c | |||
| @@ -951,6 +951,7 @@ static void mmc_read_scrs(struct mmc_host *host) | |||
| 951 | data.timeout_ns = card->csd.tacc_ns * 10; | 951 | data.timeout_ns = card->csd.tacc_ns * 10; |
| 952 | data.timeout_clks = card->csd.tacc_clks * 10; | 952 | data.timeout_clks = card->csd.tacc_clks * 10; |
| 953 | data.blksz_bits = 3; | 953 | data.blksz_bits = 3; |
| 954 | data.blksz = 1 << 3; | ||
| 954 | data.blocks = 1; | 955 | data.blocks = 1; |
| 955 | data.flags = MMC_DATA_READ; | 956 | data.flags = MMC_DATA_READ; |
| 956 | data.sg = &sg; | 957 | data.sg = &sg; |
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c index 06bd1f4cb9b1..e39cc05c64c2 100644 --- a/drivers/mmc/mmc_block.c +++ b/drivers/mmc/mmc_block.c | |||
| @@ -175,6 +175,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
| 175 | brq.data.timeout_ns = card->csd.tacc_ns * 10; | 175 | brq.data.timeout_ns = card->csd.tacc_ns * 10; |
| 176 | brq.data.timeout_clks = card->csd.tacc_clks * 10; | 176 | brq.data.timeout_clks = card->csd.tacc_clks * 10; |
| 177 | brq.data.blksz_bits = md->block_bits; | 177 | brq.data.blksz_bits = md->block_bits; |
| 178 | brq.data.blksz = 1 << md->block_bits; | ||
| 178 | brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); | 179 | brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); |
| 179 | brq.stop.opcode = MMC_STOP_TRANSMISSION; | 180 | brq.stop.opcode = MMC_STOP_TRANSMISSION; |
| 180 | brq.stop.arg = 0; | 181 | brq.stop.arg = 0; |
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c index f97b472085cb..b49368fd96b8 100644 --- a/drivers/mmc/pxamci.c +++ b/drivers/mmc/pxamci.c | |||
| @@ -119,7 +119,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) | |||
| 119 | nob = 0xffff; | 119 | nob = 0xffff; |
| 120 | 120 | ||
| 121 | writel(nob, host->base + MMC_NOB); | 121 | writel(nob, host->base + MMC_NOB); |
| 122 | writel(1 << data->blksz_bits, host->base + MMC_BLKLEN); | 122 | writel(data->blksz, host->base + MMC_BLKLEN); |
| 123 | 123 | ||
| 124 | clks = (unsigned long long)data->timeout_ns * CLOCKRATE; | 124 | clks = (unsigned long long)data->timeout_ns * CLOCKRATE; |
| 125 | do_div(clks, 1000000000UL); | 125 | do_div(clks, 1000000000UL); |
| @@ -283,7 +283,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) | |||
| 283 | * data blocks as being in error. | 283 | * data blocks as being in error. |
| 284 | */ | 284 | */ |
| 285 | if (data->error == MMC_ERR_NONE) | 285 | if (data->error == MMC_ERR_NONE) |
| 286 | data->bytes_xfered = data->blocks << data->blksz_bits; | 286 | data->bytes_xfered = data->blocks * data->blksz; |
| 287 | else | 287 | else |
| 288 | data->bytes_xfered = 0; | 288 | data->bytes_xfered = 0; |
| 289 | 289 | ||
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c index 39b3d97f891e..8167332d4013 100644 --- a/drivers/mmc/wbsd.c +++ b/drivers/mmc/wbsd.c | |||
| @@ -662,14 +662,14 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) | |||
| 662 | unsigned long dmaflags; | 662 | unsigned long dmaflags; |
| 663 | 663 | ||
| 664 | DBGF("blksz %04x blks %04x flags %08x\n", | 664 | DBGF("blksz %04x blks %04x flags %08x\n", |
| 665 | 1 << data->blksz_bits, data->blocks, data->flags); | 665 | data->blksz, data->blocks, data->flags); |
| 666 | DBGF("tsac %d ms nsac %d clk\n", | 666 | DBGF("tsac %d ms nsac %d clk\n", |
| 667 | data->timeout_ns / 1000000, data->timeout_clks); | 667 | data->timeout_ns / 1000000, data->timeout_clks); |
| 668 | 668 | ||
| 669 | /* | 669 | /* |
| 670 | * Calculate size. | 670 | * Calculate size. |
| 671 | */ | 671 | */ |
| 672 | host->size = data->blocks << data->blksz_bits; | 672 | host->size = data->blocks * data->blksz; |
| 673 | 673 | ||
| 674 | /* | 674 | /* |
| 675 | * Check timeout values for overflow. | 675 | * Check timeout values for overflow. |
| @@ -696,12 +696,12 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) | |||
| 696 | * Two bytes are needed for each data line. | 696 | * Two bytes are needed for each data line. |
| 697 | */ | 697 | */ |
| 698 | if (host->bus_width == MMC_BUS_WIDTH_1) { | 698 | if (host->bus_width == MMC_BUS_WIDTH_1) { |
| 699 | blksize = (1 << data->blksz_bits) + 2; | 699 | blksize = data->blksz + 2; |
| 700 | 700 | ||
| 701 | wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); | 701 | wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); |
| 702 | wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); | 702 | wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); |
| 703 | } else if (host->bus_width == MMC_BUS_WIDTH_4) { | 703 | } else if (host->bus_width == MMC_BUS_WIDTH_4) { |
| 704 | blksize = (1 << data->blksz_bits) + 2 * 4; | 704 | blksize = data->blksz + 2 * 4; |
| 705 | 705 | ||
| 706 | wbsd_write_index(host, WBSD_IDX_PBSMSB, | 706 | wbsd_write_index(host, WBSD_IDX_PBSMSB, |
| 707 | ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH); | 707 | ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH); |
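The mmc core changes in this series introduce a byte-count field (data->blksz) next to the existing blksz_bits shift, and the host drivers above switch from "blocks << blksz_bits" to "blocks * blksz". The two forms agree whenever blksz is filled in as 1 << blksz_bits, and the multiply also copes with block sizes that are not powers of two. A small stand-alone check of that equivalence (userspace, with illustrative values):

#include <assert.h>

int main(void)
{
	unsigned int blksz_bits = 9;             /* 512-byte blocks */
	unsigned int blksz = 1u << blksz_bits;   /* what the core now sets alongside blksz_bits */
	unsigned int blocks = 8;

	/* old shift form and new multiply form yield the same byte count */
	assert((blocks << blksz_bits) == blocks * blksz);
	return 0;
}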
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 3d306681919e..d8233e0b7899 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
| @@ -650,9 +650,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
| 650 | 650 | ||
| 651 | /* Hardware bug work-around, the chip is unable to do PCI DMA | 651 | /* Hardware bug work-around, the chip is unable to do PCI DMA |
| 652 | to/from anything above 1GB :-( */ | 652 | to/from anything above 1GB :-( */ |
| 653 | if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { | 653 | if (dma_mapping_error(mapping) || |
| 654 | mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { | ||
| 654 | /* Sigh... */ | 655 | /* Sigh... */ |
| 655 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | 656 | if (!dma_mapping_error(mapping)) |
| 657 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | ||
| 656 | dev_kfree_skb_any(skb); | 658 | dev_kfree_skb_any(skb); |
| 657 | skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA); | 659 | skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA); |
| 658 | if (skb == NULL) | 660 | if (skb == NULL) |
| @@ -660,8 +662,10 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
| 660 | mapping = pci_map_single(bp->pdev, skb->data, | 662 | mapping = pci_map_single(bp->pdev, skb->data, |
| 661 | RX_PKT_BUF_SZ, | 663 | RX_PKT_BUF_SZ, |
| 662 | PCI_DMA_FROMDEVICE); | 664 | PCI_DMA_FROMDEVICE); |
| 663 | if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { | 665 | if (dma_mapping_error(mapping) || |
| 664 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | 666 | mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { |
| 667 | if (!dma_mapping_error(mapping)) | ||
| 668 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | ||
| 665 | dev_kfree_skb_any(skb); | 669 | dev_kfree_skb_any(skb); |
| 666 | return -ENOMEM; | 670 | return -ENOMEM; |
| 667 | } | 671 | } |
| @@ -967,9 +971,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 967 | } | 971 | } |
| 968 | 972 | ||
| 969 | mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); | 973 | mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
| 970 | if (mapping + len > B44_DMA_MASK) { | 974 | if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) { |
| 971 | /* Chip can't handle DMA to/from >1GB, use bounce buffer */ | 975 | /* Chip can't handle DMA to/from >1GB, use bounce buffer */ |
| 972 | pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); | 976 | if (!dma_mapping_error(mapping)) |
| 977 | pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); | ||
| 973 | 978 | ||
| 974 | bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, | 979 | bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, |
| 975 | GFP_ATOMIC|GFP_DMA); | 980 | GFP_ATOMIC|GFP_DMA); |
| @@ -978,8 +983,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 978 | 983 | ||
| 979 | mapping = pci_map_single(bp->pdev, bounce_skb->data, | 984 | mapping = pci_map_single(bp->pdev, bounce_skb->data, |
| 980 | len, PCI_DMA_TODEVICE); | 985 | len, PCI_DMA_TODEVICE); |
| 981 | if (mapping + len > B44_DMA_MASK) { | 986 | if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) { |
| 982 | pci_unmap_single(bp->pdev, mapping, | 987 | if (!dma_mapping_error(mapping)) |
| 988 | pci_unmap_single(bp->pdev, mapping, | ||
| 983 | len, PCI_DMA_TODEVICE); | 989 | len, PCI_DMA_TODEVICE); |
| 984 | dev_kfree_skb_any(bounce_skb); | 990 | dev_kfree_skb_any(bounce_skb); |
| 985 | goto err_out; | 991 | goto err_out; |
| @@ -1203,7 +1209,8 @@ static int b44_alloc_consistent(struct b44 *bp) | |||
| 1203 | DMA_TABLE_BYTES, | 1209 | DMA_TABLE_BYTES, |
| 1204 | DMA_BIDIRECTIONAL); | 1210 | DMA_BIDIRECTIONAL); |
| 1205 | 1211 | ||
| 1206 | if (rx_ring_dma + size > B44_DMA_MASK) { | 1212 | if (dma_mapping_error(rx_ring_dma) || |
| 1213 | rx_ring_dma + size > B44_DMA_MASK) { | ||
| 1207 | kfree(rx_ring); | 1214 | kfree(rx_ring); |
| 1208 | goto out_err; | 1215 | goto out_err; |
| 1209 | } | 1216 | } |
| @@ -1229,7 +1236,8 @@ static int b44_alloc_consistent(struct b44 *bp) | |||
| 1229 | DMA_TABLE_BYTES, | 1236 | DMA_TABLE_BYTES, |
| 1230 | DMA_TO_DEVICE); | 1237 | DMA_TO_DEVICE); |
| 1231 | 1238 | ||
| 1232 | if (tx_ring_dma + size > B44_DMA_MASK) { | 1239 | if (dma_mapping_error(tx_ring_dma) || |
| 1240 | tx_ring_dma + size > B44_DMA_MASK) { | ||
| 1233 | kfree(tx_ring); | 1241 | kfree(tx_ring); |
| 1234 | goto out_err; | 1242 | goto out_err; |
| 1235 | } | 1243 | } |
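Each b44 call site above follows the same pattern: validate the mapping with dma_mapping_error() before applying the 1GB-boundary check, and only unmap when a mapping was actually established. Condensed, the shape of one call site is roughly as follows (the surrounding bounce-buffer fallback is elided):

mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
	/* never unmap an address that was not successfully mapped */
	if (!dma_mapping_error(mapping))
		pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
	/* ... fall back to a GFP_DMA bounce buffer and retry the mapping ... */
}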
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 1ddefd281213..038447fb5c5e 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
| @@ -53,6 +53,7 @@ | |||
| 53 | #define DRV_VERSION "v1.17b" | 53 | #define DRV_VERSION "v1.17b" |
| 54 | #define DRV_RELDATE "2006/03/10" | 54 | #define DRV_RELDATE "2006/03/10" |
| 55 | #include "dl2k.h" | 55 | #include "dl2k.h" |
| 56 | #include <linux/dma-mapping.h> | ||
| 56 | 57 | ||
| 57 | static char version[] __devinitdata = | 58 | static char version[] __devinitdata = |
| 58 | KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; | 59 | KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; |
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c index 6f7dce8eba51..b67f586d7392 100644 --- a/drivers/net/ixp2000/enp2611.c +++ b/drivers/net/ixp2000/enp2611.c | |||
| @@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy) | |||
| 149 | int status; | 149 | int status; |
| 150 | 150 | ||
| 151 | dev = nds[i]; | 151 | dev = nds[i]; |
| 152 | if (dev == NULL) | ||
| 153 | continue; | ||
| 152 | 154 | ||
| 153 | status = pm3386_is_link_up(i); | 155 | status = pm3386_is_link_up(i); |
| 154 | if (status && !netif_carrier_ok(dev)) { | 156 | if (status && !netif_carrier_ok(dev)) { |
| @@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up) | |||
| 191 | 193 | ||
| 192 | static int __init enp2611_init_module(void) | 194 | static int __init enp2611_init_module(void) |
| 193 | { | 195 | { |
| 196 | int ports; | ||
| 194 | int i; | 197 | int i; |
| 195 | 198 | ||
| 196 | if (!machine_is_enp2611()) | 199 | if (!machine_is_enp2611()) |
| @@ -199,7 +202,8 @@ static int __init enp2611_init_module(void) | |||
| 199 | caleb_reset(); | 202 | caleb_reset(); |
| 200 | pm3386_reset(); | 203 | pm3386_reset(); |
| 201 | 204 | ||
| 202 | for (i = 0; i < 3; i++) { | 205 | ports = pm3386_port_count(); |
| 206 | for (i = 0; i < ports; i++) { | ||
| 203 | nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); | 207 | nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); |
| 204 | if (nds[i] == NULL) { | 208 | if (nds[i] == NULL) { |
| 205 | while (--i >= 0) | 209 | while (--i >= 0) |
| @@ -215,9 +219,10 @@ static int __init enp2611_init_module(void) | |||
| 215 | 219 | ||
| 216 | ixp2400_msf_init(&enp2611_msf_parameters); | 220 | ixp2400_msf_init(&enp2611_msf_parameters); |
| 217 | 221 | ||
| 218 | if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) { | 222 | if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) { |
| 219 | for (i = 0; i < 3; i++) | 223 | for (i = 0; i < ports; i++) |
| 220 | free_netdev(nds[i]); | 224 | if (nds[i]) |
| 225 | free_netdev(nds[i]); | ||
| 221 | return -EINVAL; | 226 | return -EINVAL; |
| 222 | } | 227 | } |
| 223 | 228 | ||
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c index 5c7ab7564053..5224651c9aac 100644 --- a/drivers/net/ixp2000/pm3386.c +++ b/drivers/net/ixp2000/pm3386.c | |||
| @@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value) | |||
| 86 | pm3386_reg_write(port >> 1, reg, value); | 86 | pm3386_reg_write(port >> 1, reg, value); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | int pm3386_secondary_present(void) | ||
| 90 | { | ||
| 91 | return pm3386_reg_read(1, 0) == 0x3386; | ||
| 92 | } | ||
| 89 | 93 | ||
| 90 | void pm3386_reset(void) | 94 | void pm3386_reset(void) |
| 91 | { | 95 | { |
| 92 | u8 mac[3][6]; | 96 | u8 mac[3][6]; |
| 97 | int secondary; | ||
| 98 | |||
| 99 | secondary = pm3386_secondary_present(); | ||
| 93 | 100 | ||
| 94 | /* Save programmed MAC addresses. */ | 101 | /* Save programmed MAC addresses. */ |
| 95 | pm3386_get_mac(0, mac[0]); | 102 | pm3386_get_mac(0, mac[0]); |
| 96 | pm3386_get_mac(1, mac[1]); | 103 | pm3386_get_mac(1, mac[1]); |
| 97 | pm3386_get_mac(2, mac[2]); | 104 | if (secondary) |
| 105 | pm3386_get_mac(2, mac[2]); | ||
| 98 | 106 | ||
| 99 | /* Assert analog and digital reset. */ | 107 | /* Assert analog and digital reset. */ |
| 100 | pm3386_reg_write(0, 0x002, 0x0060); | 108 | pm3386_reg_write(0, 0x002, 0x0060); |
| 101 | pm3386_reg_write(1, 0x002, 0x0060); | 109 | if (secondary) |
| 110 | pm3386_reg_write(1, 0x002, 0x0060); | ||
| 102 | mdelay(1); | 111 | mdelay(1); |
| 103 | 112 | ||
| 104 | /* Deassert analog reset. */ | 113 | /* Deassert analog reset. */ |
| 105 | pm3386_reg_write(0, 0x002, 0x0062); | 114 | pm3386_reg_write(0, 0x002, 0x0062); |
| 106 | pm3386_reg_write(1, 0x002, 0x0062); | 115 | if (secondary) |
| 116 | pm3386_reg_write(1, 0x002, 0x0062); | ||
| 107 | mdelay(10); | 117 | mdelay(10); |
| 108 | 118 | ||
| 109 | /* Deassert digital reset. */ | 119 | /* Deassert digital reset. */ |
| 110 | pm3386_reg_write(0, 0x002, 0x0063); | 120 | pm3386_reg_write(0, 0x002, 0x0063); |
| 111 | pm3386_reg_write(1, 0x002, 0x0063); | 121 | if (secondary) |
| 122 | pm3386_reg_write(1, 0x002, 0x0063); | ||
| 112 | mdelay(10); | 123 | mdelay(10); |
| 113 | 124 | ||
| 114 | /* Restore programmed MAC addresses. */ | 125 | /* Restore programmed MAC addresses. */ |
| 115 | pm3386_set_mac(0, mac[0]); | 126 | pm3386_set_mac(0, mac[0]); |
| 116 | pm3386_set_mac(1, mac[1]); | 127 | pm3386_set_mac(1, mac[1]); |
| 117 | pm3386_set_mac(2, mac[2]); | 128 | if (secondary) |
| 129 | pm3386_set_mac(2, mac[2]); | ||
| 118 | 130 | ||
| 119 | /* Disable carrier on all ports. */ | 131 | /* Disable carrier on all ports. */ |
| 120 | pm3386_set_carrier(0, 0); | 132 | pm3386_set_carrier(0, 0); |
| 121 | pm3386_set_carrier(1, 0); | 133 | pm3386_set_carrier(1, 0); |
| 122 | pm3386_set_carrier(2, 0); | 134 | if (secondary) |
| 135 | pm3386_set_carrier(2, 0); | ||
| 123 | } | 136 | } |
| 124 | 137 | ||
| 125 | static u16 swaph(u16 x) | 138 | static u16 swaph(u16 x) |
| @@ -127,6 +140,11 @@ static u16 swaph(u16 x) | |||
| 127 | return ((x << 8) | (x >> 8)) & 0xffff; | 140 | return ((x << 8) | (x >> 8)) & 0xffff; |
| 128 | } | 141 | } |
| 129 | 142 | ||
| 143 | int pm3386_port_count(void) | ||
| 144 | { | ||
| 145 | return 2 + pm3386_secondary_present(); | ||
| 146 | } | ||
| 147 | |||
| 130 | void pm3386_init_port(int port) | 148 | void pm3386_init_port(int port) |
| 131 | { | 149 | { |
| 132 | int pm = port >> 1; | 150 | int pm = port >> 1; |
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h index fe92bb056ac4..cc4183dca911 100644 --- a/drivers/net/ixp2000/pm3386.h +++ b/drivers/net/ixp2000/pm3386.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #define __PM3386_H | 13 | #define __PM3386_H |
| 14 | 14 | ||
| 15 | void pm3386_reset(void); | 15 | void pm3386_reset(void); |
| 16 | int pm3386_port_count(void); | ||
| 16 | void pm3386_init_port(int port); | 17 | void pm3386_init_port(int port); |
| 17 | void pm3386_get_mac(int port, u8 *mac); | 18 | void pm3386_get_mac(int port, u8 *mac); |
| 18 | void pm3386_set_mac(int port, u8 *mac); | 19 | void pm3386_set_mac(int port, u8 *mac); |
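Taken together, the pm3386/enp2611 changes replace the hard-coded three-port assumption with a runtime probe: register 0 of the second PM3386 is read back and compared against the 0x3386 device ID, and the port count becomes two or three accordingly. In compact form, using only the functions introduced above:

int pm3386_secondary_present(void)
{
	/* a present second PM3386 answers with its device ID in register 0 */
	return pm3386_reg_read(1, 0) == 0x3386;
}

int pm3386_port_count(void)
{
	return 2 + pm3386_secondary_present();   /* 2 ports, or 3 with the second PHY */
}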
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index ffd267fab21d..62be6d99d05c 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
| @@ -1020,8 +1020,19 @@ static int sky2_up(struct net_device *dev) | |||
| 1020 | struct sky2_hw *hw = sky2->hw; | 1020 | struct sky2_hw *hw = sky2->hw; |
| 1021 | unsigned port = sky2->port; | 1021 | unsigned port = sky2->port; |
| 1022 | u32 ramsize, rxspace, imask; | 1022 | u32 ramsize, rxspace, imask; |
| 1023 | int err = -ENOMEM; | 1023 | int err; |
| 1024 | struct net_device *otherdev = hw->dev[sky2->port^1]; | ||
| 1024 | 1025 | ||
| 1026 | /* Block bringing up both ports at the same time on a dual port card. | ||
| 1027 | * There is an unfixed bug where the receiver gets confused and picks up | ||
| 1028 | * packets out of order. Until this is fixed, prevent data corruption. | ||
| 1029 | */ | ||
| 1030 | if (otherdev && netif_running(otherdev)) { | ||
| 1031 | printk(KERN_INFO PFX "dual port support is disabled.\n"); | ||
| 1032 | return -EBUSY; | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | err = -ENOMEM; | ||
| 1025 | if (netif_msg_ifup(sky2)) | 1036 | if (netif_msg_ifup(sky2)) |
| 1026 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); | 1037 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); |
| 1027 | 1038 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 19e2b174d33c..d378478612fb 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -634,6 +634,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vi | |||
| 634 | * non-x86 architectures (yes Via exists on PPC among other places), | 634 | * non-x86 architectures (yes Via exists on PPC among other places), |
| 635 | * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get | 635 | * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get |
| 636 | * interrupts delivered properly. | 636 | * interrupts delivered properly. |
| 637 | * | ||
| 638 | * Some of the on-chip devices are actually '586 devices' so they are | ||
| 639 | * listed here. | ||
| 637 | */ | 640 | */ |
| 638 | static void quirk_via_irq(struct pci_dev *dev) | 641 | static void quirk_via_irq(struct pci_dev *dev) |
| 639 | { | 642 | { |
| @@ -648,6 +651,10 @@ static void quirk_via_irq(struct pci_dev *dev) | |||
| 648 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); | 651 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); |
| 649 | } | 652 | } |
| 650 | } | 653 | } |
| 654 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_via_irq); | ||
| 655 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq); | ||
| 656 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq); | ||
| 657 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq); | ||
| 651 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq); | 658 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq); |
| 652 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq); | 659 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq); |
| 653 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq); | 660 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq); |
| @@ -895,6 +902,7 @@ static void __init k8t_sound_hostbridge(struct pci_dev *dev) | |||
| 895 | } | 902 | } |
| 896 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); | 903 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); |
| 897 | 904 | ||
| 905 | #ifndef CONFIG_ACPI_SLEEP | ||
| 898 | /* | 906 | /* |
| 899 | * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge | 907 | * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge |
| 900 | * is not activated. The myth is that Asus said that they do not want the | 908 | * is not activated. The myth is that Asus said that they do not want the |
| @@ -906,8 +914,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_ho | |||
| 906 | * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it | 914 | * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it |
| 907 | * becomes necessary to do this tweak in two steps -- I've chosen the Host | 915 | * becomes necessary to do this tweak in two steps -- I've chosen the Host |
| 908 | * bridge as trigger. | 916 | * bridge as trigger. |
| 917 | * | ||
| 918 | * Actually, leaving it unhidden and not redoing the quirk over suspend2ram | ||
| 919 | * will cause thermal management to break down, causing the machine to | ||
| 920 | * overheat. | ||
| 909 | */ | 921 | */ |
| 910 | static int __initdata asus_hides_smbus = 0; | 922 | static int __initdata asus_hides_smbus; |
| 911 | 923 | ||
| 912 | static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) | 924 | static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) |
| 913 | { | 925 | { |
| @@ -1050,6 +1062,8 @@ static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) | |||
| 1050 | } | 1062 | } |
| 1051 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 ); | 1063 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 ); |
| 1052 | 1064 | ||
| 1065 | #endif | ||
| 1066 | |||
| 1053 | /* | 1067 | /* |
| 1054 | * SiS 96x south bridge: BIOS typically hides SMBus device... | 1068 | * SiS 96x south bridge: BIOS typically hides SMBus device... |
| 1055 | */ | 1069 | */ |
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index c53db7ceda5e..738b1ef595a3 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c | |||
| @@ -426,7 +426,7 @@ static int ds_open(struct inode *inode, struct file *file) | |||
| 426 | 426 | ||
| 427 | if (!warning_printed) { | 427 | if (!warning_printed) { |
| 428 | printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl " | 428 | printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl " |
| 429 | "usage.\n"); | 429 | "usage from process: %s.\n", current->comm); |
| 430 | printk(KERN_INFO "pcmcia: This interface will soon be removed from " | 430 | printk(KERN_INFO "pcmcia: This interface will soon be removed from " |
| 431 | "the kernel; please expect breakage unless you upgrade " | 431 | "the kernel; please expect breakage unless you upgrade " |
| 432 | "to new tools.\n"); | 432 | "to new tools.\n"); |
| @@ -601,8 +601,12 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
| 601 | ret = CS_BAD_ARGS; | 601 | ret = CS_BAD_ARGS; |
| 602 | else { | 602 | else { |
| 603 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function); | 603 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function); |
| 604 | ret = pccard_get_configuration_info(s, p_dev, &buf->config); | 604 | if (p_dev == NULL) |
| 605 | pcmcia_put_dev(p_dev); | 605 | ret = CS_BAD_ARGS; |
| 606 | else { | ||
| 607 | ret = pccard_get_configuration_info(s, p_dev, &buf->config); | ||
| 608 | pcmcia_put_dev(p_dev); | ||
| 609 | } | ||
| 606 | } | 610 | } |
| 607 | break; | 611 | break; |
| 608 | case DS_GET_FIRST_TUPLE: | 612 | case DS_GET_FIRST_TUPLE: |
| @@ -632,8 +636,12 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
| 632 | ret = CS_BAD_ARGS; | 636 | ret = CS_BAD_ARGS; |
| 633 | else { | 637 | else { |
| 634 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function); | 638 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function); |
| 635 | ret = pccard_get_status(s, p_dev, &buf->status); | 639 | if (p_dev == NULL) |
| 636 | pcmcia_put_dev(p_dev); | 640 | ret = CS_BAD_ARGS; |
| 641 | else { | ||
| 642 | ret = pccard_get_status(s, p_dev, &buf->status); | ||
| 643 | pcmcia_put_dev(p_dev); | ||
| 644 | } | ||
| 637 | } | 645 | } |
| 638 | break; | 646 | break; |
| 639 | case DS_VALIDATE_CIS: | 647 | case DS_VALIDATE_CIS: |
| @@ -665,9 +673,10 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
| 665 | if (!(buf->conf_reg.Function && | 673 | if (!(buf->conf_reg.Function && |
| 666 | (buf->conf_reg.Function >= s->functions))) { | 674 | (buf->conf_reg.Function >= s->functions))) { |
| 667 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function); | 675 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function); |
| 668 | if (p_dev) | 676 | if (p_dev) { |
| 669 | ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg); | 677 | ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg); |
| 670 | pcmcia_put_dev(p_dev); | 678 | pcmcia_put_dev(p_dev); |
| 679 | } | ||
| 671 | } | 680 | } |
| 672 | break; | 681 | break; |
| 673 | case DS_GET_FIRST_REGION: | 682 | case DS_GET_FIRST_REGION: |
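The ioctl fixes above all enforce the same lookup discipline: get_pcmcia_device() may return NULL, so the result must be checked before use, and the reference dropped only when it was actually taken. A generic sketch of that discipline with placeholder names (not the pcmcia API):

struct obj *o = lookup_ref(key);        /* may return NULL */
if (o == NULL) {
	ret = -EINVAL;                  /* CS_BAD_ARGS in the code above */
} else {
	ret = use_object(o);
	put_ref(o);                     /* balance the reference taken by lookup_ref() */
}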
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 5d6b7a57b02f..e65da921a827 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
| @@ -1348,7 +1348,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
| 1348 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) | 1348 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) |
| 1349 | - channel->ccws; | 1349 | - channel->ccws; |
| 1350 | if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || | 1350 | if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || |
| 1351 | (irb->scsw.cstat | SCHN_STAT_PCI)) | 1351 | (irb->scsw.cstat & SCHN_STAT_PCI)) |
| 1352 | /* Bloody io subsystem tells us lies about cpa... */ | 1352 | /* Bloody io subsystem tells us lies about cpa... */ |
| 1353 | index = (index - 1) & (LCS_NUM_BUFFS - 1); | 1353 | index = (index - 1) & (LCS_NUM_BUFFS - 1); |
| 1354 | while (channel->io_idx != index) { | 1354 | while (channel->io_idx != index) { |
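The one-character lcs.c change above deserves spelling out: with a non-zero flag constant, "cstat | SCHN_STAT_PCI" is always non-zero, so the old condition fired unconditionally, while "cstat & SCHN_STAT_PCI" actually tests the bit. A tiny stand-alone demonstration with an illustrative flag value:

#include <assert.h>

#define FLAG 0x80u

int main(void)
{
	unsigned int status = 0;            /* flag not set */

	assert((status | FLAG) != 0);       /* old test: true even when the bit is clear */
	assert((status & FLAG) == 0);       /* new test: correctly false */
	return 0;
}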
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 9051b6821c1c..00881226f8dd 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
| @@ -875,6 +875,9 @@ static unsigned int ata_id_xfermask(const u16 *id) | |||
| 875 | /** | 875 | /** |
| 876 | * ata_port_queue_task - Queue port_task | 876 | * ata_port_queue_task - Queue port_task |
| 877 | * @ap: The ata_port to queue port_task for | 877 | * @ap: The ata_port to queue port_task for |
| 878 | * @fn: workqueue function to be scheduled | ||
| 879 | * @data: data value to pass to workqueue function | ||
| 880 | * @delay: delay time for workqueue function | ||
| 878 | * | 881 | * |
| 879 | * Schedule @fn(@data) for execution after @delay jiffies using | 882 | * Schedule @fn(@data) for execution after @delay jiffies using |
| 880 | * port_task. There is one port_task per port and it's the | 883 | * port_task. There is one port_task per port and it's the |
| @@ -3091,8 +3094,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) | |||
| 3091 | /** | 3094 | /** |
| 3092 | * ata_dev_init_params - Issue INIT DEV PARAMS command | 3095 | * ata_dev_init_params - Issue INIT DEV PARAMS command |
| 3093 | * @dev: Device to which command will be sent | 3096 | * @dev: Device to which command will be sent |
| 3094 | * @heads: Number of heads | 3097 | * @heads: Number of heads (taskfile parameter) |
| 3095 | * @sectors: Number of sectors | 3098 | * @sectors: Number of sectors (taskfile parameter) |
| 3096 | * | 3099 | * |
| 3097 | * LOCKING: | 3100 | * LOCKING: |
| 3098 | * Kernel thread context (may sleep) | 3101 | * Kernel thread context (may sleep) |
| @@ -5007,6 +5010,7 @@ int ata_device_resume(struct ata_device *dev) | |||
| 5007 | /** | 5010 | /** |
| 5008 | * ata_device_suspend - prepare a device for suspend | 5011 | * ata_device_suspend - prepare a device for suspend |
| 5009 | * @dev: the device to suspend | 5012 | * @dev: the device to suspend |
| 5013 | * @state: target power management state | ||
| 5010 | * | 5014 | * |
| 5011 | * Flush the cache on the drive, if appropriate, then issue a | 5015 | * Flush the cache on the drive, if appropriate, then issue a |
| 5012 | * standbynow command. | 5016 | * standbynow command. |
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index e6d141dd0385..bfe817fc7520 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | #include <asm/io.h> | 37 | #include <asm/io.h> |
| 38 | 38 | ||
| 39 | #define DRV_NAME "sata_mv" | 39 | #define DRV_NAME "sata_mv" |
| 40 | #define DRV_VERSION "0.6" | 40 | #define DRV_VERSION "0.7" |
| 41 | 41 | ||
| 42 | enum { | 42 | enum { |
| 43 | /* BAR's are enumerated in terms of pci_resource_start() terms */ | 43 | /* BAR's are enumerated in terms of pci_resource_start() terms */ |
| @@ -50,6 +50,12 @@ enum { | |||
| 50 | 50 | ||
| 51 | MV_PCI_REG_BASE = 0, | 51 | MV_PCI_REG_BASE = 0, |
| 52 | MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ | 52 | MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ |
| 53 | MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), | ||
| 54 | MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), | ||
| 55 | MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), | ||
| 56 | MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), | ||
| 57 | MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), | ||
| 58 | |||
| 53 | MV_SATAHC0_REG_BASE = 0x20000, | 59 | MV_SATAHC0_REG_BASE = 0x20000, |
| 54 | MV_FLASH_CTL = 0x1046c, | 60 | MV_FLASH_CTL = 0x1046c, |
| 55 | MV_GPIO_PORT_CTL = 0x104f0, | 61 | MV_GPIO_PORT_CTL = 0x104f0, |
| @@ -302,9 +308,6 @@ struct mv_port_priv { | |||
| 302 | dma_addr_t crpb_dma; | 308 | dma_addr_t crpb_dma; |
| 303 | struct mv_sg *sg_tbl; | 309 | struct mv_sg *sg_tbl; |
| 304 | dma_addr_t sg_tbl_dma; | 310 | dma_addr_t sg_tbl_dma; |
| 305 | |||
| 306 | unsigned req_producer; /* cp of req_in_ptr */ | ||
| 307 | unsigned rsp_consumer; /* cp of rsp_out_ptr */ | ||
| 308 | u32 pp_flags; | 311 | u32 pp_flags; |
| 309 | }; | 312 | }; |
| 310 | 313 | ||
| @@ -937,8 +940,6 @@ static int mv_port_start(struct ata_port *ap) | |||
| 937 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, | 940 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, |
| 938 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 941 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
| 939 | 942 | ||
| 940 | pp->req_producer = pp->rsp_consumer = 0; | ||
| 941 | |||
| 942 | /* Don't turn on EDMA here...do it before DMA commands only. Else | 943 | /* Don't turn on EDMA here...do it before DMA commands only. Else |
| 943 | * we'll be unable to send non-data, PIO, etc due to restricted access | 944 | * we'll be unable to send non-data, PIO, etc due to restricted access |
| 944 | * to shadow regs. | 945 | * to shadow regs. |
| @@ -1022,16 +1023,16 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) | |||
| 1022 | } | 1023 | } |
| 1023 | } | 1024 | } |
| 1024 | 1025 | ||
| 1025 | static inline unsigned mv_inc_q_index(unsigned *index) | 1026 | static inline unsigned mv_inc_q_index(unsigned index) |
| 1026 | { | 1027 | { |
| 1027 | *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; | 1028 | return (index + 1) & MV_MAX_Q_DEPTH_MASK; |
| 1028 | return *index; | ||
| 1029 | } | 1029 | } |
| 1030 | 1030 | ||
| 1031 | static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) | 1031 | static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) |
| 1032 | { | 1032 | { |
| 1033 | *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | | 1033 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | |
| 1034 | (last ? CRQB_CMD_LAST : 0); | 1034 | (last ? CRQB_CMD_LAST : 0); |
| 1035 | *cmdw = cpu_to_le16(tmp); | ||
| 1035 | } | 1036 | } |
| 1036 | 1037 | ||
| 1037 | /** | 1038 | /** |
| @@ -1053,15 +1054,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
| 1053 | u16 *cw; | 1054 | u16 *cw; |
| 1054 | struct ata_taskfile *tf; | 1055 | struct ata_taskfile *tf; |
| 1055 | u16 flags = 0; | 1056 | u16 flags = 0; |
| 1057 | unsigned in_index; | ||
| 1056 | 1058 | ||
| 1057 | if (ATA_PROT_DMA != qc->tf.protocol) | 1059 | if (ATA_PROT_DMA != qc->tf.protocol) |
| 1058 | return; | 1060 | return; |
| 1059 | 1061 | ||
| 1060 | /* the req producer index should be the same as we remember it */ | ||
| 1061 | WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> | ||
| 1062 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | ||
| 1063 | pp->req_producer); | ||
| 1064 | |||
| 1065 | /* Fill in command request block | 1062 | /* Fill in command request block |
| 1066 | */ | 1063 | */ |
| 1067 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | 1064 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
| @@ -1069,13 +1066,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
| 1069 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1066 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
| 1070 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1067 | flags |= qc->tag << CRQB_TAG_SHIFT; |
| 1071 | 1068 | ||
| 1072 | pp->crqb[pp->req_producer].sg_addr = | 1069 | /* get current queue index from hardware */ |
| 1070 | in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) | ||
| 1071 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
| 1072 | |||
| 1073 | pp->crqb[in_index].sg_addr = | ||
| 1073 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | 1074 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); |
| 1074 | pp->crqb[pp->req_producer].sg_addr_hi = | 1075 | pp->crqb[in_index].sg_addr_hi = |
| 1075 | cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); | 1076 | cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); |
| 1076 | pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); | 1077 | pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); |
| 1077 | 1078 | ||
| 1078 | cw = &pp->crqb[pp->req_producer].ata_cmd[0]; | 1079 | cw = &pp->crqb[in_index].ata_cmd[0]; |
| 1079 | tf = &qc->tf; | 1080 | tf = &qc->tf; |
| 1080 | 1081 | ||
| 1081 | /* Sadly, the CRQB cannot accommodate all registers--there are | 1082 | /* Sadly, the CRQB cannot accommodate all registers--there are |
| @@ -1144,16 +1145,12 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
| 1144 | struct mv_port_priv *pp = ap->private_data; | 1145 | struct mv_port_priv *pp = ap->private_data; |
| 1145 | struct mv_crqb_iie *crqb; | 1146 | struct mv_crqb_iie *crqb; |
| 1146 | struct ata_taskfile *tf; | 1147 | struct ata_taskfile *tf; |
| 1148 | unsigned in_index; | ||
| 1147 | u32 flags = 0; | 1149 | u32 flags = 0; |
| 1148 | 1150 | ||
| 1149 | if (ATA_PROT_DMA != qc->tf.protocol) | 1151 | if (ATA_PROT_DMA != qc->tf.protocol) |
| 1150 | return; | 1152 | return; |
| 1151 | 1153 | ||
| 1152 | /* the req producer index should be the same as we remember it */ | ||
| 1153 | WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> | ||
| 1154 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | ||
| 1155 | pp->req_producer); | ||
| 1156 | |||
| 1157 | /* Fill in Gen IIE command request block | 1154 | /* Fill in Gen IIE command request block |
| 1158 | */ | 1155 | */ |
| 1159 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | 1156 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
| @@ -1162,7 +1159,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
| 1162 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1159 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
| 1163 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1160 | flags |= qc->tag << CRQB_TAG_SHIFT; |
| 1164 | 1161 | ||
| 1165 | crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer]; | 1162 | /* get current queue index from hardware */ |
| 1163 | in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) | ||
| 1164 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
| 1165 | |||
| 1166 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; | ||
| 1166 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | 1167 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); |
| 1167 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); | 1168 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); |
| 1168 | crqb->flags = cpu_to_le32(flags); | 1169 | crqb->flags = cpu_to_le32(flags); |
| @@ -1210,6 +1211,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
| 1210 | { | 1211 | { |
| 1211 | void __iomem *port_mmio = mv_ap_base(qc->ap); | 1212 | void __iomem *port_mmio = mv_ap_base(qc->ap); |
| 1212 | struct mv_port_priv *pp = qc->ap->private_data; | 1213 | struct mv_port_priv *pp = qc->ap->private_data; |
| 1214 | unsigned in_index; | ||
| 1213 | u32 in_ptr; | 1215 | u32 in_ptr; |
| 1214 | 1216 | ||
| 1215 | if (ATA_PROT_DMA != qc->tf.protocol) { | 1217 | if (ATA_PROT_DMA != qc->tf.protocol) { |
| @@ -1221,23 +1223,20 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
| 1221 | return ata_qc_issue_prot(qc); | 1223 | return ata_qc_issue_prot(qc); |
| 1222 | } | 1224 | } |
| 1223 | 1225 | ||
| 1224 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 1226 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); |
| 1227 | in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
| 1225 | 1228 | ||
| 1226 | /* the req producer index should be the same as we remember it */ | ||
| 1227 | WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | ||
| 1228 | pp->req_producer); | ||
| 1229 | /* until we do queuing, the queue should be empty at this point */ | 1229 | /* until we do queuing, the queue should be empty at this point */ |
| 1230 | WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | 1230 | WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) |
| 1231 | ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> | 1231 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); |
| 1232 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | ||
| 1233 | 1232 | ||
| 1234 | mv_inc_q_index(&pp->req_producer); /* now incr producer index */ | 1233 | in_index = mv_inc_q_index(in_index); /* now incr producer index */ |
| 1235 | 1234 | ||
| 1236 | mv_start_dma(port_mmio, pp); | 1235 | mv_start_dma(port_mmio, pp); |
| 1237 | 1236 | ||
| 1238 | /* and write the request in pointer to kick the EDMA to life */ | 1237 | /* and write the request in pointer to kick the EDMA to life */ |
| 1239 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; | 1238 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; |
| 1240 | in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; | 1239 | in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; |
| 1241 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 1240 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); |
| 1242 | 1241 | ||
| 1243 | return 0; | 1242 | return 0; |
| @@ -1260,28 +1259,26 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
| 1260 | { | 1259 | { |
| 1261 | void __iomem *port_mmio = mv_ap_base(ap); | 1260 | void __iomem *port_mmio = mv_ap_base(ap); |
| 1262 | struct mv_port_priv *pp = ap->private_data; | 1261 | struct mv_port_priv *pp = ap->private_data; |
| 1262 | unsigned out_index; | ||
| 1263 | u32 out_ptr; | 1263 | u32 out_ptr; |
| 1264 | u8 ata_status; | 1264 | u8 ata_status; |
| 1265 | 1265 | ||
| 1266 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 1266 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
| 1267 | out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
| 1267 | 1268 | ||
| 1268 | /* the response consumer index should be the same as we remember it */ | 1269 | ata_status = le16_to_cpu(pp->crpb[out_index].flags) |
| 1269 | WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | 1270 | >> CRPB_FLAG_STATUS_SHIFT; |
| 1270 | pp->rsp_consumer); | ||
| 1271 | |||
| 1272 | ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT; | ||
| 1273 | 1271 | ||
| 1274 | /* increment our consumer index... */ | 1272 | /* increment our consumer index... */ |
| 1275 | pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); | 1273 | out_index = mv_inc_q_index(out_index); |
| 1276 | 1274 | ||
| 1277 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ | 1275 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ |
| 1278 | WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> | 1276 | WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) |
| 1279 | EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | 1277 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); |
| 1280 | pp->rsp_consumer); | ||
| 1281 | 1278 | ||
| 1282 | /* write out our inc'd consumer index so EDMA knows we're caught up */ | 1279 | /* write out our inc'd consumer index so EDMA knows we're caught up */ |
| 1283 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; | 1280 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; |
| 1284 | out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; | 1281 | out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; |
| 1285 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 1282 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
| 1286 | 1283 | ||
| 1287 | /* Return ATA status register for completed CRPB */ | 1284 | /* Return ATA status register for completed CRPB */ |
| @@ -1291,6 +1288,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
| 1291 | /** | 1288 | /** |
| 1292 | * mv_err_intr - Handle error interrupts on the port | 1289 | * mv_err_intr - Handle error interrupts on the port |
| 1293 | * @ap: ATA channel to manipulate | 1290 | * @ap: ATA channel to manipulate |
| 1291 | * @reset_allowed: bool: 0 == don't trigger from reset here | ||
| 1294 | * | 1292 | * |
| 1295 | * In most cases, just clear the interrupt and move on. However, | 1293 | * In most cases, just clear the interrupt and move on. However, |
| 1296 | * some cases require an eDMA reset, which is done right before | 1294 | * some cases require an eDMA reset, which is done right before |
| @@ -1301,7 +1299,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
| 1301 | * LOCKING: | 1299 | * LOCKING: |
| 1302 | * Inherited from caller. | 1300 | * Inherited from caller. |
| 1303 | */ | 1301 | */ |
| 1304 | static void mv_err_intr(struct ata_port *ap) | 1302 | static void mv_err_intr(struct ata_port *ap, int reset_allowed) |
| 1305 | { | 1303 | { |
| 1306 | void __iomem *port_mmio = mv_ap_base(ap); | 1304 | void __iomem *port_mmio = mv_ap_base(ap); |
| 1307 | u32 edma_err_cause, serr = 0; | 1305 | u32 edma_err_cause, serr = 0; |
| @@ -1323,9 +1321,8 @@ static void mv_err_intr(struct ata_port *ap) | |||
| 1323 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1321 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
| 1324 | 1322 | ||
| 1325 | /* check for fatal here and recover if needed */ | 1323 | /* check for fatal here and recover if needed */ |
| 1326 | if (EDMA_ERR_FATAL & edma_err_cause) { | 1324 | if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause)) |
| 1327 | mv_stop_and_reset(ap); | 1325 | mv_stop_and_reset(ap); |
| 1328 | } | ||
| 1329 | } | 1326 | } |
| 1330 | 1327 | ||
| 1331 | /** | 1328 | /** |
| @@ -1374,12 +1371,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
| 1374 | struct ata_port *ap = host_set->ports[port]; | 1371 | struct ata_port *ap = host_set->ports[port]; |
| 1375 | struct mv_port_priv *pp = ap->private_data; | 1372 | struct mv_port_priv *pp = ap->private_data; |
| 1376 | 1373 | ||
| 1377 | hard_port = port & MV_PORT_MASK; /* range 0-3 */ | 1374 | hard_port = mv_hardport_from_port(port); /* range 0..3 */ |
| 1378 | handled = 0; /* ensure ata_status is set if handled++ */ | 1375 | handled = 0; /* ensure ata_status is set if handled++ */ |
| 1379 | 1376 | ||
| 1380 | /* Note that DEV_IRQ might happen spuriously during EDMA, | 1377 | /* Note that DEV_IRQ might happen spuriously during EDMA, |
| 1381 | * and should be ignored in such cases. We could mask it, | 1378 | * and should be ignored in such cases. |
| 1382 | * but it's pretty rare and may not be worth the overhead. | 1379 | * The cause of this is still under investigation. |
| 1383 | */ | 1380 | */ |
| 1384 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 1381 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
| 1385 | /* EDMA: check for response queue interrupt */ | 1382 | /* EDMA: check for response queue interrupt */ |
| @@ -1393,6 +1390,11 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
| 1393 | ata_status = readb((void __iomem *) | 1390 | ata_status = readb((void __iomem *) |
| 1394 | ap->ioaddr.status_addr); | 1391 | ap->ioaddr.status_addr); |
| 1395 | handled = 1; | 1392 | handled = 1; |
| 1393 | /* ignore spurious intr if drive still BUSY */ | ||
| 1394 | if (ata_status & ATA_BUSY) { | ||
| 1395 | ata_status = 0; | ||
| 1396 | handled = 0; | ||
| 1397 | } | ||
| 1396 | } | 1398 | } |
| 1397 | } | 1399 | } |
| 1398 | 1400 | ||
| @@ -1406,7 +1408,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
| 1406 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | 1408 | shift++; /* skip bit 8 in the HC Main IRQ reg */ |
| 1407 | } | 1409 | } |
| 1408 | if ((PORT0_ERR << shift) & relevant) { | 1410 | if ((PORT0_ERR << shift) & relevant) { |
| 1409 | mv_err_intr(ap); | 1411 | mv_err_intr(ap, 1); |
| 1410 | err_mask |= AC_ERR_OTHER; | 1412 | err_mask |= AC_ERR_OTHER; |
| 1411 | handled = 1; | 1413 | handled = 1; |
| 1412 | } | 1414 | } |
| @@ -1448,6 +1450,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, | |||
| 1448 | struct ata_host_set *host_set = dev_instance; | 1450 | struct ata_host_set *host_set = dev_instance; |
| 1449 | unsigned int hc, handled = 0, n_hcs; | 1451 | unsigned int hc, handled = 0, n_hcs; |
| 1450 | void __iomem *mmio = host_set->mmio_base; | 1452 | void __iomem *mmio = host_set->mmio_base; |
| 1453 | struct mv_host_priv *hpriv; | ||
| 1451 | u32 irq_stat; | 1454 | u32 irq_stat; |
| 1452 | 1455 | ||
| 1453 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); | 1456 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); |
| @@ -1469,6 +1472,17 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, | |||
| 1469 | handled++; | 1472 | handled++; |
| 1470 | } | 1473 | } |
| 1471 | } | 1474 | } |
| 1475 | |||
| 1476 | hpriv = host_set->private_data; | ||
| 1477 | if (IS_60XX(hpriv)) { | ||
| 1478 | /* deal with the interrupt coalescing bits */ | ||
| 1479 | if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { | ||
| 1480 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO); | ||
| 1481 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI); | ||
| 1482 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE); | ||
| 1483 | } | ||
| 1484 | } | ||
| 1485 | |||
| 1472 | if (PCI_ERR & irq_stat) { | 1486 | if (PCI_ERR & irq_stat) { |
| 1473 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", | 1487 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", |
| 1474 | readl(mmio + PCI_IRQ_CAUSE_OFS)); | 1488 | readl(mmio + PCI_IRQ_CAUSE_OFS)); |
| @@ -1867,7 +1881,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
| 1867 | 1881 | ||
| 1868 | if (IS_60XX(hpriv)) { | 1882 | if (IS_60XX(hpriv)) { |
| 1869 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | 1883 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); |
| 1870 | ifctl |= (1 << 12) | (1 << 7); | 1884 | ifctl |= (1 << 7); /* enable gen2i speed */ |
| 1885 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | ||
| 1871 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); | 1886 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); |
| 1872 | } | 1887 | } |
| 1873 | 1888 | ||
| @@ -2033,11 +2048,14 @@ static void mv_eng_timeout(struct ata_port *ap) | |||
| 2033 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, | 2048 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, |
| 2034 | &qc->scsicmd->cmnd); | 2049 | &qc->scsicmd->cmnd); |
| 2035 | 2050 | ||
| 2036 | mv_err_intr(ap); | 2051 | mv_err_intr(ap, 0); |
| 2037 | mv_stop_and_reset(ap); | 2052 | mv_stop_and_reset(ap); |
| 2038 | 2053 | ||
| 2039 | qc->err_mask |= AC_ERR_TIMEOUT; | 2054 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); |
| 2040 | ata_eh_qc_complete(qc); | 2055 | if (qc->flags & ATA_QCFLAG_ACTIVE) { |
| 2056 | qc->err_mask |= AC_ERR_TIMEOUT; | ||
| 2057 | ata_eh_qc_complete(qc); | ||
| 2058 | } | ||
| 2041 | } | 2059 | } |
| 2042 | 2060 | ||
| 2043 | /** | 2061 | /** |
| @@ -2231,7 +2249,8 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent, | |||
| 2231 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2249 | void __iomem *port_mmio = mv_port_base(mmio, port); |
| 2232 | 2250 | ||
| 2233 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | 2251 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); |
| 2234 | ifctl |= (1 << 12); | 2252 | ifctl |= (1 << 7); /* enable gen2i speed */ |
| 2253 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | ||
| 2235 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); | 2254 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); |
| 2236 | } | 2255 | } |
| 2237 | 2256 | ||
| @@ -2332,6 +2351,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2332 | if (rc) { | 2351 | if (rc) { |
| 2333 | return rc; | 2352 | return rc; |
| 2334 | } | 2353 | } |
| 2354 | pci_set_master(pdev); | ||
| 2335 | 2355 | ||
| 2336 | rc = pci_request_regions(pdev, DRV_NAME); | 2356 | rc = pci_request_regions(pdev, DRV_NAME); |
| 2337 | if (rc) { | 2357 | if (rc) { |
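A central piece of the sata_mv rework above is dropping the cached req_producer/rsp_consumer copies: the index is re-read from the hardware pointer register each time and advanced with a power-of-two wrap mask. The wrap arithmetic is easy to check in isolation (the queue depth below is an assumed illustrative value):

#include <stdio.h>

#define Q_DEPTH      32u                /* must be a power of two */
#define Q_DEPTH_MASK (Q_DEPTH - 1)

static unsigned inc_q_index(unsigned index)
{
	return (index + 1) & Q_DEPTH_MASK;   /* 31 wraps back to 0 */
}

int main(void)
{
	printf("30 -> %u\n", inc_q_index(30));   /* prints 31 */
	printf("31 -> %u\n", inc_q_index(31));   /* prints 0  */
	return 0;
}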
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index aeb8153ccf24..17839e753e4c 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
| @@ -1907,9 +1907,12 @@ uart_set_options(struct uart_port *port, struct console *co, | |||
| 1907 | static void uart_change_pm(struct uart_state *state, int pm_state) | 1907 | static void uart_change_pm(struct uart_state *state, int pm_state) |
| 1908 | { | 1908 | { |
| 1909 | struct uart_port *port = state->port; | 1909 | struct uart_port *port = state->port; |
| 1910 | if (port->ops->pm) | 1910 | |
| 1911 | port->ops->pm(port, pm_state, state->pm_state); | 1911 | if (state->pm_state != pm_state) { |
| 1912 | state->pm_state = pm_state; | 1912 | if (port->ops->pm) |
| 1913 | port->ops->pm(port, pm_state, state->pm_state); | ||
| 1914 | state->pm_state = pm_state; | ||
| 1915 | } | ||
| 1913 | } | 1916 | } |
| 1914 | 1917 | ||
| 1915 | int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) | 1918 | int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) |
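The serial_core hunk turns uart_change_pm() into a no-op when the requested power state already matches the cached one, so the port->ops->pm callback runs only on real transitions. A hedged sketch of the same guard with placeholder types and names:

/* Placeholder types/names; mirrors the guard added to uart_change_pm(). */
void set_pm_state(struct pm_obj *st, int new_state)
{
	if (st->pm_state == new_state)
		return;                                    /* nothing to do */
	if (st->ops && st->ops->pm)
		st->ops->pm(st, new_state, st->pm_state);  /* old state passed as 3rd arg */
	st->pm_state = new_state;                          /* cache the new state */
}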
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 7a75faeb0526..9ce1d01469b1 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
| @@ -75,6 +75,14 @@ config SPI_BUTTERFLY | |||
| 75 | inexpensive battery powered microcontroller evaluation board. | 75 | inexpensive battery powered microcontroller evaluation board. |
| 76 | This same cable can be used to flash new firmware. | 76 | This same cable can be used to flash new firmware. |
| 77 | 77 | ||
| 78 | config SPI_PXA2XX | ||
| 79 | tristate "PXA2xx SSP SPI master" | ||
| 80 | depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL | ||
| 81 | help | ||
| 82 | This enables using a PXA2xx SSP port as a SPI master controller. | ||
| 83 | The driver can be configured to use any SSP port, and additional | ||
| 84 | documentation can be found at Documentation/spi/pxa2xx. | ||
| 85 | |||
| 78 | # | 86 | # |
| 79 | # Add new SPI master controllers in alphabetical order above this line | 87 | # Add new SPI master controllers in alphabetical order above this line |
| 80 | # | 88 | # |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index c2c87e845abf..1bca5f95de25 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
| @@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_MASTER) += spi.o | |||
| 13 | # SPI master controller drivers (bus) | 13 | # SPI master controller drivers (bus) |
| 14 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o | 14 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o |
| 15 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o | 15 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o |
| 16 | obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o | ||
| 16 | # ... add above this line ... | 17 | # ... add above this line ... |
| 17 | 18 | ||
| 18 | # SPI protocol drivers (device/link on bus) | 19 | # SPI protocol drivers (device/link on bus) |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c new file mode 100644 index 000000000000..596bf820b70c --- /dev/null +++ b/drivers/spi/pxa2xx_spi.c | |||
| @@ -0,0 +1,1467 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/module.h> | ||
| 21 | #include <linux/device.h> | ||
| 22 | #include <linux/ioport.h> | ||
| 23 | #include <linux/errno.h> | ||
| 24 | #include <linux/interrupt.h> | ||
| 25 | #include <linux/platform_device.h> | ||
| 26 | #include <linux/dma-mapping.h> | ||
| 27 | #include <linux/spi/spi.h> | ||
| 28 | #include <linux/workqueue.h> | ||
| 29 | #include <linux/errno.h> | ||
| 30 | #include <linux/delay.h> | ||
| 31 | |||
| 32 | #include <asm/io.h> | ||
| 33 | #include <asm/irq.h> | ||
| 34 | #include <asm/hardware.h> | ||
| 35 | #include <asm/delay.h> | ||
| 36 | #include <asm/dma.h> | ||
| 37 | |||
| 38 | #include <asm/arch/hardware.h> | ||
| 39 | #include <asm/arch/pxa-regs.h> | ||
| 40 | #include <asm/arch/pxa2xx_spi.h> | ||
| 41 | |||
| 42 | MODULE_AUTHOR("Stephen Street"); | ||
| 43 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); | ||
| 44 | MODULE_LICENSE("GPL"); | ||
| 45 | |||
| 46 | #define MAX_BUSES 3 | ||
| 47 | |||
| 48 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | ||
| 49 | #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) | ||
| 50 | #define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) | ||
| 51 | |||
| 52 | #define DEFINE_SSP_REG(reg, off) \ | ||
| 53 | static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \ | ||
| 54 | static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); } | ||
| 55 | |||
| 56 | DEFINE_SSP_REG(SSCR0, 0x00) | ||
| 57 | DEFINE_SSP_REG(SSCR1, 0x04) | ||
| 58 | DEFINE_SSP_REG(SSSR, 0x08) | ||
| 59 | DEFINE_SSP_REG(SSITR, 0x0c) | ||
| 60 | DEFINE_SSP_REG(SSDR, 0x10) | ||
| 61 | DEFINE_SSP_REG(SSTO, 0x28) | ||
| 62 | DEFINE_SSP_REG(SSPSP, 0x2c) | ||
| 63 | |||
| 64 | #define START_STATE ((void*)0) | ||
| 65 | #define RUNNING_STATE ((void*)1) | ||
| 66 | #define DONE_STATE ((void*)2) | ||
| 67 | #define ERROR_STATE ((void*)-1) | ||
| 68 | |||
| 69 | #define QUEUE_RUNNING 0 | ||
| 70 | #define QUEUE_STOPPED 1 | ||
| 71 | |||
| 72 | struct driver_data { | ||
| 73 | /* Driver model hookup */ | ||
| 74 | struct platform_device *pdev; | ||
| 75 | |||
| 76 | /* SPI framework hookup */ | ||
| 77 | enum pxa_ssp_type ssp_type; | ||
| 78 | struct spi_master *master; | ||
| 79 | |||
| 80 | /* PXA hookup */ | ||
| 81 | struct pxa2xx_spi_master *master_info; | ||
| 82 | |||
| 83 | /* DMA setup stuff */ | ||
| 84 | int rx_channel; | ||
| 85 | int tx_channel; | ||
| 86 | u32 *null_dma_buf; | ||
| 87 | |||
| 88 | /* SSP register addresses */ | ||
| 89 | void *ioaddr; | ||
| 90 | u32 ssdr_physical; | ||
| 91 | |||
| 92 | /* SSP masks*/ | ||
| 93 | u32 dma_cr1; | ||
| 94 | u32 int_cr1; | ||
| 95 | u32 clear_sr; | ||
| 96 | u32 mask_sr; | ||
| 97 | |||
| 98 | /* Driver message queue */ | ||
| 99 | struct workqueue_struct *workqueue; | ||
| 100 | struct work_struct pump_messages; | ||
| 101 | spinlock_t lock; | ||
| 102 | struct list_head queue; | ||
| 103 | int busy; | ||
| 104 | int run; | ||
| 105 | |||
| 106 | /* Message Transfer pump */ | ||
| 107 | struct tasklet_struct pump_transfers; | ||
| 108 | |||
| 109 | /* Current message transfer state info */ | ||
| 110 | struct spi_message* cur_msg; | ||
| 111 | struct spi_transfer* cur_transfer; | ||
| 112 | struct chip_data *cur_chip; | ||
| 113 | size_t len; | ||
| 114 | void *tx; | ||
| 115 | void *tx_end; | ||
| 116 | void *rx; | ||
| 117 | void *rx_end; | ||
| 118 | int dma_mapped; | ||
| 119 | dma_addr_t rx_dma; | ||
| 120 | dma_addr_t tx_dma; | ||
| 121 | size_t rx_map_len; | ||
| 122 | size_t tx_map_len; | ||
| 123 | u8 n_bytes; | ||
| 124 | u32 dma_width; | ||
| 125 | int cs_change; | ||
| 126 | void (*write)(struct driver_data *drv_data); | ||
| 127 | void (*read)(struct driver_data *drv_data); | ||
| 128 | irqreturn_t (*transfer_handler)(struct driver_data *drv_data); | ||
| 129 | void (*cs_control)(u32 command); | ||
| 130 | }; | ||
| 131 | |||
| 132 | struct chip_data { | ||
| 133 | u32 cr0; | ||
| 134 | u32 cr1; | ||
| 135 | u32 to; | ||
| 136 | u32 psp; | ||
| 137 | u32 timeout; | ||
| 138 | u8 n_bytes; | ||
| 139 | u32 dma_width; | ||
| 140 | u32 dma_burst_size; | ||
| 141 | u32 threshold; | ||
| 142 | u32 dma_threshold; | ||
| 143 | u8 enable_dma; | ||
| 144 | u8 bits_per_word; | ||
| 145 | u32 speed_hz; | ||
| 146 | void (*write)(struct driver_data *drv_data); | ||
| 147 | void (*read)(struct driver_data *drv_data); | ||
| 148 | void (*cs_control)(u32 command); | ||
| 149 | }; | ||
| 150 | |||
| 151 | static void pump_messages(void *data); | ||
| 152 | |||
| 153 | static int flush(struct driver_data *drv_data) | ||
| 154 | { | ||
| 155 | unsigned long limit = loops_per_jiffy << 1; | ||
| 156 | |||
| 157 | void *reg = drv_data->ioaddr; | ||
| 158 | |||
| 159 | do { | ||
| 160 | while (read_SSSR(reg) & SSSR_RNE) { | ||
| 161 | read_SSDR(reg); | ||
| 162 | } | ||
| 163 | } while ((read_SSSR(reg) & SSSR_BSY) && limit--); | ||
| 164 | write_SSSR(SSSR_ROR, reg); | ||
| 165 | |||
| 166 | return limit; | ||
| 167 | } | ||
| 168 | |||
| 169 | static void restore_state(struct driver_data *drv_data) | ||
| 170 | { | ||
| 171 | void *reg = drv_data->ioaddr; | ||
| 172 | |||
| 173 | /* Clear status and disable clock */ | ||
| 174 | write_SSSR(drv_data->clear_sr, reg); | ||
| 175 | write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg); | ||
| 176 | |||
| 177 | /* Load the registers */ | ||
| 178 | write_SSCR1(drv_data->cur_chip->cr1, reg); | ||
| 179 | write_SSCR0(drv_data->cur_chip->cr0, reg); | ||
| 180 | if (drv_data->ssp_type != PXA25x_SSP) { | ||
| 181 | write_SSTO(0, reg); | ||
| 182 | write_SSPSP(drv_data->cur_chip->psp, reg); | ||
| 183 | } | ||
| 184 | } | ||
| 185 | |||
| 186 | static void null_cs_control(u32 command) | ||
| 187 | { | ||
| 188 | } | ||
| 189 | |||
| 190 | static void null_writer(struct driver_data *drv_data) | ||
| 191 | { | ||
| 192 | void *reg = drv_data->ioaddr; | ||
| 193 | u8 n_bytes = drv_data->n_bytes; | ||
| 194 | |||
| 195 | while ((read_SSSR(reg) & SSSR_TNF) | ||
| 196 | && (drv_data->tx < drv_data->tx_end)) { | ||
| 197 | write_SSDR(0, reg); | ||
| 198 | drv_data->tx += n_bytes; | ||
| 199 | } | ||
| 200 | } | ||
| 201 | |||
| 202 | static void null_reader(struct driver_data *drv_data) | ||
| 203 | { | ||
| 204 | void *reg = drv_data->ioaddr; | ||
| 205 | u8 n_bytes = drv_data->n_bytes; | ||
| 206 | |||
| 207 | while ((read_SSSR(reg) & SSSR_RNE) | ||
| 208 | && (drv_data->rx < drv_data->rx_end)) { | ||
| 209 | read_SSDR(reg); | ||
| 210 | drv_data->rx += n_bytes; | ||
| 211 | } | ||
| 212 | } | ||
| 213 | |||
| 214 | static void u8_writer(struct driver_data *drv_data) | ||
| 215 | { | ||
| 216 | void *reg = drv_data->ioaddr; | ||
| 217 | |||
| 218 | while ((read_SSSR(reg) & SSSR_TNF) | ||
| 219 | && (drv_data->tx < drv_data->tx_end)) { | ||
| 220 | write_SSDR(*(u8 *)(drv_data->tx), reg); | ||
| 221 | ++drv_data->tx; | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | static void u8_reader(struct driver_data *drv_data) | ||
| 226 | { | ||
| 227 | void *reg = drv_data->ioaddr; | ||
| 228 | |||
| 229 | while ((read_SSSR(reg) & SSSR_RNE) | ||
| 230 | && (drv_data->rx < drv_data->rx_end)) { | ||
| 231 | *(u8 *)(drv_data->rx) = read_SSDR(reg); | ||
| 232 | ++drv_data->rx; | ||
| 233 | } | ||
| 234 | } | ||
| 235 | |||
| 236 | static void u16_writer(struct driver_data *drv_data) | ||
| 237 | { | ||
| 238 | void *reg = drv_data->ioaddr; | ||
| 239 | |||
| 240 | while ((read_SSSR(reg) & SSSR_TNF) | ||
| 241 | && (drv_data->tx < drv_data->tx_end)) { | ||
| 242 | write_SSDR(*(u16 *)(drv_data->tx), reg); | ||
| 243 | drv_data->tx += 2; | ||
| 244 | } | ||
| 245 | } | ||
| 246 | |||
| 247 | static void u16_reader(struct driver_data *drv_data) | ||
| 248 | { | ||
| 249 | void *reg = drv_data->ioaddr; | ||
| 250 | |||
| 251 | while ((read_SSSR(reg) & SSSR_RNE) | ||
| 252 | && (drv_data->rx < drv_data->rx_end)) { | ||
| 253 | *(u16 *)(drv_data->rx) = read_SSDR(reg); | ||
| 254 | drv_data->rx += 2; | ||
| 255 | } | ||
| 256 | } | ||
| 257 | static void u32_writer(struct driver_data *drv_data) | ||
| 258 | { | ||
| 259 | void *reg = drv_data->ioaddr; | ||
| 260 | |||
| 261 | while ((read_SSSR(reg) & SSSR_TNF) | ||
| 262 | && (drv_data->tx < drv_data->tx_end)) { | ||
| 263 | write_SSDR(*(u32 *)(drv_data->tx), reg); | ||
| 264 | drv_data->tx += 4; | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | static void u32_reader(struct driver_data *drv_data) | ||
| 269 | { | ||
| 270 | void *reg = drv_data->ioaddr; | ||
| 271 | |||
| 272 | while ((read_SSSR(reg) & SSSR_RNE) | ||
| 273 | && (drv_data->rx < drv_data->rx_end)) { | ||
| 274 | *(u32 *)(drv_data->rx) = read_SSDR(reg); | ||
| 275 | drv_data->rx += 4; | ||
| 276 | } | ||
| 277 | } | ||
| 278 | |||
| 279 | static void *next_transfer(struct driver_data *drv_data) | ||
| 280 | { | ||
| 281 | struct spi_message *msg = drv_data->cur_msg; | ||
| 282 | struct spi_transfer *trans = drv_data->cur_transfer; | ||
| 283 | |||
| 284 | /* Move to next transfer */ | ||
| 285 | if (trans->transfer_list.next != &msg->transfers) { | ||
| 286 | drv_data->cur_transfer = | ||
| 287 | list_entry(trans->transfer_list.next, | ||
| 288 | struct spi_transfer, | ||
| 289 | transfer_list); | ||
| 290 | return RUNNING_STATE; | ||
| 291 | } else | ||
| 292 | return DONE_STATE; | ||
| 293 | } | ||
| 294 | |||
| 295 | static int map_dma_buffers(struct driver_data *drv_data) | ||
| 296 | { | ||
| 297 | struct spi_message *msg = drv_data->cur_msg; | ||
| 298 | struct device *dev = &msg->spi->dev; | ||
| 299 | |||
| 300 | if (!drv_data->cur_chip->enable_dma) | ||
| 301 | return 0; | ||
| 302 | |||
| 303 | if (msg->is_dma_mapped) | ||
| 304 | return drv_data->rx_dma && drv_data->tx_dma; | ||
| 305 | |||
| 306 | if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) | ||
| 307 | return 0; | ||
| 308 | |||
| 309 | /* Modify setup if rx buffer is null */ | ||
| 310 | if (drv_data->rx == NULL) { | ||
| 311 | *drv_data->null_dma_buf = 0; | ||
| 312 | drv_data->rx = drv_data->null_dma_buf; | ||
| 313 | drv_data->rx_map_len = 4; | ||
| 314 | } else | ||
| 315 | drv_data->rx_map_len = drv_data->len; | ||
| 316 | |||
| 317 | |||
| 318 | /* Modify setup if tx buffer is null */ | ||
| 319 | if (drv_data->tx == NULL) { | ||
| 320 | *drv_data->null_dma_buf = 0; | ||
| 321 | drv_data->tx = drv_data->null_dma_buf; | ||
| 322 | drv_data->tx_map_len = 4; | ||
| 323 | } else | ||
| 324 | drv_data->tx_map_len = drv_data->len; | ||
| 325 | |||
| 326 | /* Stream map the rx buffer */ | ||
| 327 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, | ||
| 328 | drv_data->rx_map_len, | ||
| 329 | DMA_FROM_DEVICE); | ||
| 330 | if (dma_mapping_error(drv_data->rx_dma)) | ||
| 331 | return 0; | ||
| 332 | |||
| 333 | /* Stream map the tx buffer */ | ||
| 334 | drv_data->tx_dma = dma_map_single(dev, drv_data->tx, | ||
| 335 | drv_data->tx_map_len, | ||
| 336 | DMA_TO_DEVICE); | ||
| 337 | |||
| 338 | if (dma_mapping_error(drv_data->tx_dma)) { | ||
| 339 | dma_unmap_single(dev, drv_data->rx_dma, | ||
| 340 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
| 341 | return 0; | ||
| 342 | } | ||
| 343 | |||
| 344 | return 1; | ||
| 345 | } | ||
| 346 | |||
| 347 | static void unmap_dma_buffers(struct driver_data *drv_data) | ||
| 348 | { | ||
| 349 | struct device *dev; | ||
| 350 | |||
| 351 | if (!drv_data->dma_mapped) | ||
| 352 | return; | ||
| 353 | |||
| 354 | if (!drv_data->cur_msg->is_dma_mapped) { | ||
| 355 | dev = &drv_data->cur_msg->spi->dev; | ||
| 356 | dma_unmap_single(dev, drv_data->rx_dma, | ||
| 357 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
| 358 | dma_unmap_single(dev, drv_data->tx_dma, | ||
| 359 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
| 360 | } | ||
| 361 | |||
| 362 | drv_data->dma_mapped = 0; | ||
| 363 | } | ||
| 364 | |||
| 365 | /* caller already set message->status; dma and pio irqs are blocked */ | ||
| 366 | static void giveback(struct spi_message *message, struct driver_data *drv_data) | ||
| 367 | { | ||
| 368 | struct spi_transfer* last_transfer; | ||
| 369 | |||
| 370 | last_transfer = list_entry(message->transfers.prev, | ||
| 371 | struct spi_transfer, | ||
| 372 | transfer_list); | ||
| 373 | |||
| 374 | if (!last_transfer->cs_change) | ||
| 375 | drv_data->cs_control(PXA2XX_CS_DEASSERT); | ||
| 376 | |||
| 377 | message->state = NULL; | ||
| 378 | if (message->complete) | ||
| 379 | message->complete(message->context); | ||
| 380 | |||
| 381 | drv_data->cur_msg = NULL; | ||
| 382 | drv_data->cur_transfer = NULL; | ||
| 383 | drv_data->cur_chip = NULL; | ||
| 384 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
| 385 | } | ||
| 386 | |||
| 387 | static int wait_ssp_rx_stall(void *ioaddr) | ||
| 388 | { | ||
| 389 | unsigned long limit = loops_per_jiffy << 1; | ||
| 390 | |||
| 391 | while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--) | ||
| 392 | cpu_relax(); | ||
| 393 | |||
| 394 | return limit; | ||
| 395 | } | ||
| 396 | |||
| 397 | static int wait_dma_channel_stop(int channel) | ||
| 398 | { | ||
| 399 | unsigned long limit = loops_per_jiffy << 1; | ||
| 400 | |||
| 401 | while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--) | ||
| 402 | cpu_relax(); | ||
| 403 | |||
| 404 | return limit; | ||
| 405 | } | ||
| 406 | |||
| 407 | static void dma_handler(int channel, void *data, struct pt_regs *regs) | ||
| 408 | { | ||
| 409 | struct driver_data *drv_data = data; | ||
| 410 | struct spi_message *msg = drv_data->cur_msg; | ||
| 411 | void *reg = drv_data->ioaddr; | ||
| 412 | u32 irq_status = DCSR(channel) & DMA_INT_MASK; | ||
| 413 | u32 trailing_sssr = 0; | ||
| 414 | |||
| 415 | if (irq_status & DCSR_BUSERR) { | ||
| 416 | |||
| 417 | /* Disable interrupts, clear status and reset DMA */ | ||
| 418 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 419 | write_SSTO(0, reg); | ||
| 420 | write_SSSR(drv_data->clear_sr, reg); | ||
| 421 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
| 422 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
| 423 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
| 424 | |||
| 425 | if (flush(drv_data) == 0) | ||
| 426 | dev_err(&drv_data->pdev->dev, | ||
| 427 | "dma_handler: flush fail\n"); | ||
| 428 | |||
| 429 | unmap_dma_buffers(drv_data); | ||
| 430 | |||
| 431 | if (channel == drv_data->tx_channel) | ||
| 432 | dev_err(&drv_data->pdev->dev, | ||
| 433 | "dma_handler: bad bus address on " | ||
| 434 | "tx channel %d, source %x target = %x\n", | ||
| 435 | channel, DSADR(channel), DTADR(channel)); | ||
| 436 | else | ||
| 437 | dev_err(&drv_data->pdev->dev, | ||
| 438 | "dma_handler: bad bus address on " | ||
| 439 | "rx channel %d, source %x target = %x\n", | ||
| 440 | channel, DSADR(channel), DTADR(channel)); | ||
| 441 | |||
| 442 | msg->state = ERROR_STATE; | ||
| 443 | tasklet_schedule(&drv_data->pump_transfers); | ||
| 444 | } | ||
| 445 | |||
| 446 | /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */ | ||
| 447 | if ((drv_data->ssp_type == PXA25x_SSP) | ||
| 448 | && (channel == drv_data->tx_channel) | ||
| 449 | && (irq_status & DCSR_ENDINTR)) { | ||
| 450 | |||
| 451 | /* Wait for rx to stall */ | ||
| 452 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
| 453 | dev_err(&drv_data->pdev->dev, | ||
| 454 | "dma_handler: ssp rx stall failed\n"); | ||
| 455 | |||
| 456 | /* Clear and disable interrupts on SSP and DMA channels*/ | ||
| 457 | write_SSSR(drv_data->clear_sr, reg); | ||
| 458 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
| 459 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
| 460 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
| 461 | if (wait_dma_channel_stop(drv_data->rx_channel) == 0) | ||
| 462 | dev_err(&drv_data->pdev->dev, | ||
| 463 | "dma_handler: dma rx channel stop failed\n"); | ||
| 464 | |||
| 465 | unmap_dma_buffers(drv_data); | ||
| 466 | |||
| 467 | /* Calculate the number of trailing bytes left in | ||
| 468 | * the receive fifo, then read them out */ | ||
| 469 | trailing_sssr = read_SSSR(reg); | ||
| 470 | if ((trailing_sssr & 0xf008) != 0xf000) { | ||
| 471 | drv_data->rx = drv_data->rx_end - | ||
| 472 | (((trailing_sssr >> 12) & 0x0f) + 1); | ||
| 473 | drv_data->read(drv_data); | ||
| 474 | } | ||
| 475 | msg->actual_length += drv_data->len; | ||
| 476 | |||
| 477 | /* Release chip select if requested, transfer delays are | ||
| 478 | * handled in pump_transfers */ | ||
| 479 | if (drv_data->cs_change) | ||
| 480 | drv_data->cs_control(PXA2XX_CS_DEASSERT); | ||
| 481 | |||
| 482 | /* Move to next transfer */ | ||
| 483 | msg->state = next_transfer(drv_data); | ||
| 484 | |||
| 485 | /* Schedule transfer tasklet */ | ||
| 486 | tasklet_schedule(&drv_data->pump_transfers); | ||
| 487 | } | ||
| 488 | } | ||
| 489 | |||
| 490 | static irqreturn_t dma_transfer(struct driver_data *drv_data) | ||
| 491 | { | ||
| 492 | u32 irq_status; | ||
| 493 | u32 trailing_sssr = 0; | ||
| 494 | struct spi_message *msg = drv_data->cur_msg; | ||
| 495 | void *reg = drv_data->ioaddr; | ||
| 496 | |||
| 497 | irq_status = read_SSSR(reg) & drv_data->mask_sr; | ||
| 498 | if (irq_status & SSSR_ROR) { | ||
| 499 | /* Clear and disable interrupts on SSP and DMA channels*/ | ||
| 500 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 501 | write_SSTO(0, reg); | ||
| 502 | write_SSSR(drv_data->clear_sr, reg); | ||
| 503 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
| 504 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
| 505 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
| 506 | unmap_dma_buffers(drv_data); | ||
| 507 | |||
| 508 | if (flush(drv_data) == 0) | ||
| 509 | dev_err(&drv_data->pdev->dev, | ||
| 510 | "dma_transfer: flush fail\n"); | ||
| 511 | |||
| 512 | dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overrun\n"); | ||
| 513 | |||
| 514 | drv_data->cur_msg->state = ERROR_STATE; | ||
| 515 | tasklet_schedule(&drv_data->pump_transfers); | ||
| 516 | |||
| 517 | return IRQ_HANDLED; | ||
| 518 | } | ||
| 519 | |||
| 520 | /* Check for false positive timeout */ | ||
| 521 | if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) { | ||
| 522 | write_SSSR(SSSR_TINT, reg); | ||
| 523 | return IRQ_HANDLED; | ||
| 524 | } | ||
| 525 | |||
| 526 | if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { | ||
| 527 | |||
| 528 | /* Clear and disable interrupts on SSP and DMA channels*/ | ||
| 529 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 530 | write_SSTO(0, reg); | ||
| 531 | write_SSSR(drv_data->clear_sr, reg); | ||
| 532 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
| 533 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
| 534 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
| 535 | |||
| 536 | if (wait_dma_channel_stop(drv_data->rx_channel) == 0) | ||
| 537 | dev_err(&drv_data->pdev->dev, | ||
| 538 | "dma_transfer: dma rx channel stop failed\n"); | ||
| 539 | |||
| 540 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
| 541 | dev_err(&drv_data->pdev->dev, | ||
| 542 | "dma_transfer: ssp rx stall failed\n"); | ||
| 543 | |||
| 544 | unmap_dma_buffers(drv_data); | ||
| 545 | |||
| 546 | /* Calculate number of trailing bytes, read them */ | ||
| 547 | trailing_sssr = read_SSSR(reg); | ||
| 548 | if ((trailing_sssr & 0xf008) != 0xf000) { | ||
| 549 | drv_data->rx = drv_data->rx_end - | ||
| 550 | (((trailing_sssr >> 12) & 0x0f) + 1); | ||
| 551 | drv_data->read(drv_data); | ||
| 552 | } | ||
| 553 | msg->actual_length += drv_data->len; | ||
| 554 | |||
| 555 | /* Release chip select if requested, transfer delays are | ||
| 556 | * handled in pump_transfers */ | ||
| 557 | if (drv_data->cs_change) | ||
| 558 | drv_data->cs_control(PXA2XX_CS_DEASSERT); | ||
| 559 | |||
| 560 | /* Move to next transfer */ | ||
| 561 | msg->state = next_transfer(drv_data); | ||
| 562 | |||
| 563 | /* Schedule transfer tasklet */ | ||
| 564 | tasklet_schedule(&drv_data->pump_transfers); | ||
| 565 | |||
| 566 | return IRQ_HANDLED; | ||
| 567 | } | ||
| 568 | |||
| 569 | /* Oops, problem detected */ | ||
| 570 | return IRQ_NONE; | ||
| 571 | } | ||
| 572 | |||
| 573 | static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | ||
| 574 | { | ||
| 575 | u32 irq_status; | ||
| 576 | struct spi_message *msg = drv_data->cur_msg; | ||
| 577 | void *reg = drv_data->ioaddr; | ||
| 578 | irqreturn_t handled = IRQ_NONE; | ||
| 579 | unsigned long limit = loops_per_jiffy << 1; | ||
| 580 | |||
| 581 | while ((irq_status = (read_SSSR(reg) & drv_data->mask_sr))) { | ||
| 582 | |||
| 583 | if (irq_status & SSSR_ROR) { | ||
| 584 | |||
| 585 | /* Clear and disable interrupts */ | ||
| 586 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 587 | write_SSTO(0, reg); | ||
| 588 | write_SSSR(drv_data->clear_sr, reg); | ||
| 589 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | ||
| 590 | |||
| 591 | if (flush(drv_data) == 0) | ||
| 592 | dev_err(&drv_data->pdev->dev, | ||
| 593 | "interrupt_transfer: flush fail\n"); | ||
| 594 | |||
| 595 | dev_warn(&drv_data->pdev->dev, | ||
| 596 | "interrupt_transfer: fifo overrun\n"); | ||
| 597 | |||
| 598 | msg->state = ERROR_STATE; | ||
| 599 | tasklet_schedule(&drv_data->pump_transfers); | ||
| 600 | |||
| 601 | return IRQ_HANDLED; | ||
| 602 | } | ||
| 603 | |||
| 604 | /* Look for false positive timeout */ | ||
| 605 | if ((irq_status & SSSR_TINT) | ||
| 606 | && (drv_data->rx < drv_data->rx_end)) | ||
| 607 | write_SSSR(SSSR_TINT, reg); | ||
| 608 | |||
| 609 | /* Pump data */ | ||
| 610 | drv_data->read(drv_data); | ||
| 611 | drv_data->write(drv_data); | ||
| 612 | |||
| 613 | if (drv_data->tx == drv_data->tx_end) { | ||
| 614 | /* Disable tx interrupt */ | ||
| 615 | write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg); | ||
| 616 | |||
| 617 | /* PXA25x_SSP has no timeout, read trailing bytes */ | ||
| 618 | if (drv_data->ssp_type == PXA25x_SSP) { | ||
| 619 | while ((read_SSSR(reg) & SSSR_BSY) && limit--) | ||
| 620 | drv_data->read(drv_data); | ||
| 621 | |||
| 622 | if (limit == 0) | ||
| 623 | dev_err(&drv_data->pdev->dev, | ||
| 624 | "interrupt_transfer: " | ||
| 625 | "trailing byte read failed\n"); | ||
| 626 | } | ||
| 627 | } | ||
| 628 | |||
| 629 | if ((irq_status & SSSR_TINT) | ||
| 630 | || (drv_data->rx == drv_data->rx_end)) { | ||
| 631 | |||
| 632 | /* Clear timeout */ | ||
| 633 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 634 | write_SSTO(0, reg); | ||
| 635 | write_SSSR(drv_data->clear_sr, reg); | ||
| 636 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | ||
| 637 | |||
| 638 | /* Update total bytes transferred */ | ||
| 639 | msg->actual_length += drv_data->len; | ||
| 640 | |||
| 641 | /* Release chip select if requested, transfer delays are | ||
| 642 | * handled in pump_transfers */ | ||
| 643 | if (drv_data->cs_change) | ||
| 644 | drv_data->cs_control(PXA2XX_CS_DEASSERT); | ||
| 645 | |||
| 646 | /* Move to next transfer */ | ||
| 647 | msg->state = next_transfer(drv_data); | ||
| 648 | |||
| 649 | /* Schedule transfer tasklet */ | ||
| 650 | tasklet_schedule(&drv_data->pump_transfers); | ||
| 651 | |||
| 652 | return IRQ_HANDLED; | ||
| 653 | } | ||
| 654 | |||
| 655 | /* We did something */ | ||
| 656 | handled = IRQ_HANDLED; | ||
| 657 | } | ||
| 658 | |||
| 659 | return handled; | ||
| 660 | } | ||
| 661 | |||
| 662 | static irqreturn_t ssp_int(int irq, void *dev_id, struct pt_regs *regs) | ||
| 663 | { | ||
| 664 | struct driver_data *drv_data = (struct driver_data *)dev_id; | ||
| 665 | |||
| 666 | if (!drv_data->cur_msg) { | ||
| 667 | dev_err(&drv_data->pdev->dev, "bad message state " | ||
| 668 | "in interrupt handler\n"); | ||
| 669 | /* Never fail */ | ||
| 670 | return IRQ_HANDLED; | ||
| 671 | } | ||
| 672 | |||
| 673 | return drv_data->transfer_handler(drv_data); | ||
| 674 | } | ||
| 675 | |||
| 676 | static void pump_transfers(unsigned long data) | ||
| 677 | { | ||
| 678 | struct driver_data *drv_data = (struct driver_data *)data; | ||
| 679 | struct spi_message *message = NULL; | ||
| 680 | struct spi_transfer *transfer = NULL; | ||
| 681 | struct spi_transfer *previous = NULL; | ||
| 682 | struct chip_data *chip = NULL; | ||
| 683 | void *reg = drv_data->ioaddr; | ||
| 684 | u32 clk_div = 0; | ||
| 685 | u8 bits = 0; | ||
| 686 | u32 speed = 0; | ||
| 687 | u32 cr0; | ||
| 688 | |||
| 689 | /* Get current state information */ | ||
| 690 | message = drv_data->cur_msg; | ||
| 691 | transfer = drv_data->cur_transfer; | ||
| 692 | chip = drv_data->cur_chip; | ||
| 693 | |||
| 694 | /* Handle for abort */ | ||
| 695 | if (message->state == ERROR_STATE) { | ||
| 696 | message->status = -EIO; | ||
| 697 | giveback(message, drv_data); | ||
| 698 | return; | ||
| 699 | } | ||
| 700 | |||
| 701 | /* Handle end of message */ | ||
| 702 | if (message->state == DONE_STATE) { | ||
| 703 | message->status = 0; | ||
| 704 | giveback(message, drv_data); | ||
| 705 | return; | ||
| 706 | } | ||
| 707 | |||
| 708 | /* Delay if requested at end of transfer*/ | ||
| 709 | if (message->state == RUNNING_STATE) { | ||
| 710 | previous = list_entry(transfer->transfer_list.prev, | ||
| 711 | struct spi_transfer, | ||
| 712 | transfer_list); | ||
| 713 | if (previous->delay_usecs) | ||
| 714 | udelay(previous->delay_usecs); | ||
| 715 | } | ||
| 716 | |||
| 717 | /* Setup the transfer state based on the type of transfer */ | ||
| 718 | if (flush(drv_data) == 0) { | ||
| 719 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); | ||
| 720 | message->status = -EIO; | ||
| 721 | giveback(message, drv_data); | ||
| 722 | return; | ||
| 723 | } | ||
| 724 | drv_data->n_bytes = chip->n_bytes; | ||
| 725 | drv_data->dma_width = chip->dma_width; | ||
| 726 | drv_data->cs_control = chip->cs_control; | ||
| 727 | drv_data->tx = (void *)transfer->tx_buf; | ||
| 728 | drv_data->tx_end = drv_data->tx + transfer->len; | ||
| 729 | drv_data->rx = transfer->rx_buf; | ||
| 730 | drv_data->rx_end = drv_data->rx + transfer->len; | ||
| 731 | drv_data->rx_dma = transfer->rx_dma; | ||
| 732 | drv_data->tx_dma = transfer->tx_dma; | ||
| 733 | drv_data->len = transfer->len; | ||
| 734 | drv_data->write = drv_data->tx ? chip->write : null_writer; | ||
| 735 | drv_data->read = drv_data->rx ? chip->read : null_reader; | ||
| 736 | drv_data->cs_change = transfer->cs_change; | ||
| 737 | |||
| 738 | /* Change speed and bits per word on a per-transfer basis */ | ||
| 739 | if (transfer->speed_hz || transfer->bits_per_word) { | ||
| 740 | |||
| 741 | /* Disable clock */ | ||
| 742 | write_SSCR0(chip->cr0 & ~SSCR0_SSE, reg); | ||
| 743 | cr0 = chip->cr0; | ||
| 744 | bits = chip->bits_per_word; | ||
| 745 | speed = chip->speed_hz; | ||
| 746 | |||
| 747 | if (transfer->speed_hz) | ||
| 748 | speed = transfer->speed_hz; | ||
| 749 | |||
| 750 | if (transfer->bits_per_word) | ||
| 751 | bits = transfer->bits_per_word; | ||
| 752 | |||
| 753 | if (reg == SSP1_VIRT) | ||
| 754 | clk_div = SSP1_SerClkDiv(speed); | ||
| 755 | else if (reg == SSP2_VIRT) | ||
| 756 | clk_div = SSP2_SerClkDiv(speed); | ||
| 757 | else if (reg == SSP3_VIRT) | ||
| 758 | clk_div = SSP3_SerClkDiv(speed); | ||
| 759 | |||
| 760 | if (bits <= 8) { | ||
| 761 | drv_data->n_bytes = 1; | ||
| 762 | drv_data->dma_width = DCMD_WIDTH1; | ||
| 763 | drv_data->read = drv_data->read != null_reader ? | ||
| 764 | u8_reader : null_reader; | ||
| 765 | drv_data->write = drv_data->write != null_writer ? | ||
| 766 | u8_writer : null_writer; | ||
| 767 | } else if (bits <= 16) { | ||
| 768 | drv_data->n_bytes = 2; | ||
| 769 | drv_data->dma_width = DCMD_WIDTH2; | ||
| 770 | drv_data->read = drv_data->read != null_reader ? | ||
| 771 | u16_reader : null_reader; | ||
| 772 | drv_data->write = drv_data->write != null_writer ? | ||
| 773 | u16_writer : null_writer; | ||
| 774 | } else if (bits <= 32) { | ||
| 775 | drv_data->n_bytes = 4; | ||
| 776 | drv_data->dma_width = DCMD_WIDTH4; | ||
| 777 | drv_data->read = drv_data->read != null_reader ? | ||
| 778 | u32_reader : null_reader; | ||
| 779 | drv_data->write = drv_data->write != null_writer ? | ||
| 780 | u32_writer : null_writer; | ||
| 781 | } | ||
| 782 | |||
| 783 | cr0 = clk_div | ||
| 784 | | SSCR0_Motorola | ||
| 785 | | SSCR0_DataSize(bits & 0x0f) | ||
| 786 | | SSCR0_SSE | ||
| 787 | | (bits > 16 ? SSCR0_EDSS : 0); | ||
| 788 | |||
| 789 | /* Start it back up */ | ||
| 790 | write_SSCR0(cr0, reg); | ||
| 791 | } | ||
| 792 | |||
| 793 | message->state = RUNNING_STATE; | ||
| 794 | |||
| 795 | /* Try to map dma buffer and do a dma transfer if successful */ | ||
| 796 | if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) { | ||
| 797 | |||
| 798 | /* Ensure we have the correct interrupt handler */ | ||
| 799 | drv_data->transfer_handler = dma_transfer; | ||
| 800 | |||
| 801 | /* Setup rx DMA Channel */ | ||
| 802 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
| 803 | DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; | ||
| 804 | DTADR(drv_data->rx_channel) = drv_data->rx_dma; | ||
| 805 | if (drv_data->rx == drv_data->null_dma_buf) | ||
| 806 | /* No target address increment */ | ||
| 807 | DCMD(drv_data->rx_channel) = DCMD_FLOWSRC | ||
| 808 | | drv_data->dma_width | ||
| 809 | | chip->dma_burst_size | ||
| 810 | | drv_data->len; | ||
| 811 | else | ||
| 812 | DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR | ||
| 813 | | DCMD_FLOWSRC | ||
| 814 | | drv_data->dma_width | ||
| 815 | | chip->dma_burst_size | ||
| 816 | | drv_data->len; | ||
| 817 | |||
| 818 | /* Setup tx DMA Channel */ | ||
| 819 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
| 820 | DSADR(drv_data->tx_channel) = drv_data->tx_dma; | ||
| 821 | DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; | ||
| 822 | if (drv_data->tx == drv_data->null_dma_buf) | ||
| 823 | /* No source address increment */ | ||
| 824 | DCMD(drv_data->tx_channel) = DCMD_FLOWTRG | ||
| 825 | | drv_data->dma_width | ||
| 826 | | chip->dma_burst_size | ||
| 827 | | drv_data->len; | ||
| 828 | else | ||
| 829 | DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR | ||
| 830 | | DCMD_FLOWTRG | ||
| 831 | | drv_data->dma_width | ||
| 832 | | chip->dma_burst_size | ||
| 833 | | drv_data->len; | ||
| 834 | |||
| 835 | /* Enable dma end irqs on SSP to detect end of transfer */ | ||
| 836 | if (drv_data->ssp_type == PXA25x_SSP) | ||
| 837 | DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; | ||
| 838 | |||
| 839 | /* Fix me, need to handle cs polarity */ | ||
| 840 | drv_data->cs_control(PXA2XX_CS_ASSERT); | ||
| 841 | |||
| 842 | /* Go baby, go */ | ||
| 843 | write_SSSR(drv_data->clear_sr, reg); | ||
| 844 | DCSR(drv_data->rx_channel) |= DCSR_RUN; | ||
| 845 | DCSR(drv_data->tx_channel) |= DCSR_RUN; | ||
| 846 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 847 | write_SSTO(chip->timeout, reg); | ||
| 848 | write_SSCR1(chip->cr1 | ||
| 849 | | chip->dma_threshold | ||
| 850 | | drv_data->dma_cr1, | ||
| 851 | reg); | ||
| 852 | } else { | ||
| 853 | /* Ensure we have the correct interrupt handler */ | ||
| 854 | drv_data->transfer_handler = interrupt_transfer; | ||
| 855 | |||
| 856 | /* Fix me, need to handle cs polarity */ | ||
| 857 | drv_data->cs_control(PXA2XX_CS_ASSERT); | ||
| 858 | |||
| 859 | /* Go baby, go */ | ||
| 860 | write_SSSR(drv_data->clear_sr, reg); | ||
| 861 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 862 | write_SSTO(chip->timeout, reg); | ||
| 863 | write_SSCR1(chip->cr1 | ||
| 864 | | chip->threshold | ||
| 865 | | drv_data->int_cr1, | ||
| 866 | reg); | ||
| 867 | } | ||
| 868 | } | ||
| 869 | |||
| 870 | static void pump_messages(void *data) | ||
| 871 | { | ||
| 872 | struct driver_data *drv_data = data; | ||
| 873 | unsigned long flags; | ||
| 874 | |||
| 875 | /* Lock queue and check for queue work */ | ||
| 876 | spin_lock_irqsave(&drv_data->lock, flags); | ||
| 877 | if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { | ||
| 878 | drv_data->busy = 0; | ||
| 879 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 880 | return; | ||
| 881 | } | ||
| 882 | |||
| 883 | /* Make sure we are not already running a message */ | ||
| 884 | if (drv_data->cur_msg) { | ||
| 885 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 886 | return; | ||
| 887 | } | ||
| 888 | |||
| 889 | /* Extract head of queue */ | ||
| 890 | drv_data->cur_msg = list_entry(drv_data->queue.next, | ||
| 891 | struct spi_message, queue); | ||
| 892 | list_del_init(&drv_data->cur_msg->queue); | ||
| 893 | drv_data->busy = 1; | ||
| 894 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 895 | |||
| 896 | /* Initial message state */ | ||
| 897 | drv_data->cur_msg->state = START_STATE; | ||
| 898 | drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, | ||
| 899 | struct spi_transfer, | ||
| 900 | transfer_list); | ||
| 901 | |||
| 902 | /* Setup the SSP using the per chip configuration */ | ||
| 903 | drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); | ||
| 904 | restore_state(drv_data); | ||
| 905 | |||
| 906 | /* Mark as busy and launch transfers */ | ||
| 907 | tasklet_schedule(&drv_data->pump_transfers); | ||
| 908 | } | ||
| 909 | |||
| 910 | static int transfer(struct spi_device *spi, struct spi_message *msg) | ||
| 911 | { | ||
| 912 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | ||
| 913 | unsigned long flags; | ||
| 914 | |||
| 915 | spin_lock_irqsave(&drv_data->lock, flags); | ||
| 916 | |||
| 917 | if (drv_data->run == QUEUE_STOPPED) { | ||
| 918 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 919 | return -ESHUTDOWN; | ||
| 920 | } | ||
| 921 | |||
| 922 | msg->actual_length = 0; | ||
| 923 | msg->status = -EINPROGRESS; | ||
| 924 | msg->state = START_STATE; | ||
| 925 | |||
| 926 | list_add_tail(&msg->queue, &drv_data->queue); | ||
| 927 | |||
| 928 | if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) | ||
| 929 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
| 930 | |||
| 931 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 932 | |||
| 933 | return 0; | ||
| 934 | } | ||
| 935 | |||
| 936 | static int setup(struct spi_device *spi) | ||
| 937 | { | ||
| 938 | struct pxa2xx_spi_chip *chip_info = NULL; | ||
| 939 | struct chip_data *chip; | ||
| 940 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | ||
| 941 | unsigned int clk_div; | ||
| 942 | |||
| 943 | if (!spi->bits_per_word) | ||
| 944 | spi->bits_per_word = 8; | ||
| 945 | |||
| 946 | if (drv_data->ssp_type != PXA25x_SSP | ||
| 947 | && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) | ||
| 948 | return -EINVAL; | ||
| 949 | else if (drv_data->ssp_type == PXA25x_SSP | ||
| 950 | && (spi->bits_per_word < 4 || spi->bits_per_word > 16)) | ||
| 951 | return -EINVAL; | ||
| 952 | /* Only alloc (or use chip_info) on first setup */ | ||
| 953 | chip = spi_get_ctldata(spi); | ||
| 954 | if (chip == NULL) { | ||
| 955 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); | ||
| 956 | if (!chip) | ||
| 957 | return -ENOMEM; | ||
| 958 | |||
| 959 | chip->cs_control = null_cs_control; | ||
| 960 | chip->enable_dma = 0; | ||
| 961 | chip->timeout = 5; | ||
| 962 | chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1); | ||
| 963 | chip->dma_burst_size = drv_data->master_info->enable_dma ? | ||
| 964 | DCMD_BURST8 : 0; | ||
| 965 | |||
| 966 | chip_info = spi->controller_data; | ||
| 967 | } | ||
| 968 | |||
| 969 | /* chip_info isn't always needed */ | ||
| 970 | if (chip_info) { | ||
| 971 | if (chip_info->cs_control) | ||
| 972 | chip->cs_control = chip_info->cs_control; | ||
| 973 | |||
| 974 | chip->timeout = (chip_info->timeout_microsecs * 10000) / 2712; | ||
| 975 | |||
| 976 | chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold) | ||
| 977 | | SSCR1_TxTresh(chip_info->tx_threshold); | ||
| 978 | |||
| 979 | chip->enable_dma = chip_info->dma_burst_size != 0 | ||
| 980 | && drv_data->master_info->enable_dma; | ||
| 981 | chip->dma_threshold = 0; | ||
| 982 | |||
| 983 | if (chip->enable_dma) { | ||
| 984 | if (chip_info->dma_burst_size <= 8) { | ||
| 985 | chip->dma_threshold = SSCR1_RxTresh(8) | ||
| 986 | | SSCR1_TxTresh(8); | ||
| 987 | chip->dma_burst_size = DCMD_BURST8; | ||
| 988 | } else if (chip_info->dma_burst_size <= 16) { | ||
| 989 | chip->dma_threshold = SSCR1_RxTresh(16) | ||
| 990 | | SSCR1_TxTresh(16); | ||
| 991 | chip->dma_burst_size = DCMD_BURST16; | ||
| 992 | } else { | ||
| 993 | chip->dma_threshold = SSCR1_RxTresh(32) | ||
| 994 | | SSCR1_TxTresh(32); | ||
| 995 | chip->dma_burst_size = DCMD_BURST32; | ||
| 996 | } | ||
| 997 | } | ||
| 998 | |||
| 999 | |||
| 1000 | if (chip_info->enable_loopback) | ||
| 1001 | chip->cr1 = SSCR1_LBM; | ||
| 1002 | } | ||
| 1003 | |||
| 1004 | if (drv_data->ioaddr == SSP1_VIRT) | ||
| 1005 | clk_div = SSP1_SerClkDiv(spi->max_speed_hz); | ||
| 1006 | else if (drv_data->ioaddr == SSP2_VIRT) | ||
| 1007 | clk_div = SSP2_SerClkDiv(spi->max_speed_hz); | ||
| 1008 | else if (drv_data->ioaddr == SSP3_VIRT) | ||
| 1009 | clk_div = SSP3_SerClkDiv(spi->max_speed_hz); | ||
| 1010 | else | ||
| 1011 | return -ENODEV; | ||
| 1012 | chip->speed_hz = spi->max_speed_hz; | ||
| 1013 | |||
| 1014 | chip->cr0 = clk_div | ||
| 1015 | | SSCR0_Motorola | ||
| 1016 | | SSCR0_DataSize(spi->bits_per_word & 0x0f) | ||
| 1017 | | SSCR0_SSE | ||
| 1018 | | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0); | ||
| 1019 | chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) << 4) | ||
| 1020 | | (((spi->mode & SPI_CPOL) != 0) << 3); | ||
| 1021 | |||
| 1022 | /* NOTE: PXA25x_SSP _could_ use external clocking ... */ | ||
| 1023 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 1024 | dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n", | ||
| 1025 | spi->bits_per_word, | ||
| 1026 | (CLOCK_SPEED_HZ) | ||
| 1027 | / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), | ||
| 1028 | spi->mode & 0x3); | ||
| 1029 | else | ||
| 1030 | dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n", | ||
| 1031 | spi->bits_per_word, | ||
| 1032 | (CLOCK_SPEED_HZ/2) | ||
| 1033 | / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), | ||
| 1034 | spi->mode & 0x3); | ||
| 1035 | |||
| 1036 | if (spi->bits_per_word <= 8) { | ||
| 1037 | chip->n_bytes = 1; | ||
| 1038 | chip->dma_width = DCMD_WIDTH1; | ||
| 1039 | chip->read = u8_reader; | ||
| 1040 | chip->write = u8_writer; | ||
| 1041 | } else if (spi->bits_per_word <= 16) { | ||
| 1042 | chip->n_bytes = 2; | ||
| 1043 | chip->dma_width = DCMD_WIDTH2; | ||
| 1044 | chip->read = u16_reader; | ||
| 1045 | chip->write = u16_writer; | ||
| 1046 | } else if (spi->bits_per_word <= 32) { | ||
| 1047 | chip->cr0 |= SSCR0_EDSS; | ||
| 1048 | chip->n_bytes = 4; | ||
| 1049 | chip->dma_width = DCMD_WIDTH4; | ||
| 1050 | chip->read = u32_reader; | ||
| 1051 | chip->write = u32_writer; | ||
| 1052 | } else { | ||
| 1053 | dev_err(&spi->dev, "invalid wordsize\n"); | ||
| 1054 | kfree(chip); | ||
| 1055 | return -ENODEV; | ||
| 1056 | } | ||
| 1057 | chip->bits_per_word = spi->bits_per_word; | ||
| 1058 | |||
| 1059 | spi_set_ctldata(spi, chip); | ||
| 1060 | |||
| 1061 | return 0; | ||
| 1062 | } | ||
| 1063 | |||
| 1064 | static void cleanup(const struct spi_device *spi) | ||
| 1065 | { | ||
| 1066 | struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); | ||
| 1067 | |||
| 1068 | kfree(chip); | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | static int init_queue(struct driver_data *drv_data) | ||
| 1072 | { | ||
| 1073 | INIT_LIST_HEAD(&drv_data->queue); | ||
| 1074 | spin_lock_init(&drv_data->lock); | ||
| 1075 | |||
| 1076 | drv_data->run = QUEUE_STOPPED; | ||
| 1077 | drv_data->busy = 0; | ||
| 1078 | |||
| 1079 | tasklet_init(&drv_data->pump_transfers, | ||
| 1080 | pump_transfers, (unsigned long)drv_data); | ||
| 1081 | |||
| 1082 | INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data); | ||
| 1083 | drv_data->workqueue = create_singlethread_workqueue( | ||
| 1084 | drv_data->master->cdev.dev->bus_id); | ||
| 1085 | if (drv_data->workqueue == NULL) | ||
| 1086 | return -EBUSY; | ||
| 1087 | |||
| 1088 | return 0; | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | static int start_queue(struct driver_data *drv_data) | ||
| 1092 | { | ||
| 1093 | unsigned long flags; | ||
| 1094 | |||
| 1095 | spin_lock_irqsave(&drv_data->lock, flags); | ||
| 1096 | |||
| 1097 | if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { | ||
| 1098 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 1099 | return -EBUSY; | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | drv_data->run = QUEUE_RUNNING; | ||
| 1103 | drv_data->cur_msg = NULL; | ||
| 1104 | drv_data->cur_transfer = NULL; | ||
| 1105 | drv_data->cur_chip = NULL; | ||
| 1106 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 1107 | |||
| 1108 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
| 1109 | |||
| 1110 | return 0; | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | static int stop_queue(struct driver_data *drv_data) | ||
| 1114 | { | ||
| 1115 | unsigned long flags; | ||
| 1116 | unsigned limit = 500; | ||
| 1117 | int status = 0; | ||
| 1118 | |||
| 1119 | spin_lock_irqsave(&drv_data->lock, flags); | ||
| 1120 | |||
| 1121 | /* This is a bit lame, but is optimized for the common execution path. | ||
| 1122 | * A wait_queue on the drv_data->busy could be used, but then the common | ||
| 1123 | * execution path (pump_messages) would be required to call wake_up or | ||
| 1124 | * friends on every SPI message. Do this instead */ | ||
| 1125 | drv_data->run = QUEUE_STOPPED; | ||
| 1126 | while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { | ||
| 1127 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 1128 | msleep(10); | ||
| 1129 | spin_lock_irqsave(&drv_data->lock, flags); | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | if (!list_empty(&drv_data->queue) || drv_data->busy) | ||
| 1133 | status = -EBUSY; | ||
| 1134 | |||
| 1135 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
| 1136 | |||
| 1137 | return status; | ||
| 1138 | } | ||
| 1139 | |||
| 1140 | static int destroy_queue(struct driver_data *drv_data) | ||
| 1141 | { | ||
| 1142 | int status; | ||
| 1143 | |||
| 1144 | status = stop_queue(drv_data); | ||
| 1145 | if (status != 0) | ||
| 1146 | return status; | ||
| 1147 | |||
| 1148 | destroy_workqueue(drv_data->workqueue); | ||
| 1149 | |||
| 1150 | return 0; | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | static int pxa2xx_spi_probe(struct platform_device *pdev) | ||
| 1154 | { | ||
| 1155 | struct device *dev = &pdev->dev; | ||
| 1156 | struct pxa2xx_spi_master *platform_info; | ||
| 1157 | struct spi_master *master; | ||
| 1158 | struct driver_data *drv_data = 0; | ||
| 1159 | struct resource *memory_resource; | ||
| 1160 | int irq; | ||
| 1161 | int status = 0; | ||
| 1162 | |||
| 1163 | platform_info = dev->platform_data; | ||
| 1164 | |||
| 1165 | if (platform_info->ssp_type == SSP_UNDEFINED) { | ||
| 1166 | dev_err(&pdev->dev, "undefined SSP\n"); | ||
| 1167 | return -ENODEV; | ||
| 1168 | } | ||
| 1169 | |||
| 1170 | /* Allocate master with space for drv_data and null dma buffer */ | ||
| 1171 | master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); | ||
| 1172 | if (!master) { | ||
| 1173 | dev_err(&pdev->dev, "can not alloc spi_master\n"); | ||
| 1174 | return -ENOMEM; | ||
| 1175 | } | ||
| 1176 | drv_data = spi_master_get_devdata(master); | ||
| 1177 | drv_data->master = master; | ||
| 1178 | drv_data->master_info = platform_info; | ||
| 1179 | drv_data->pdev = pdev; | ||
| 1180 | |||
| 1181 | master->bus_num = pdev->id; | ||
| 1182 | master->num_chipselect = platform_info->num_chipselect; | ||
| 1183 | master->cleanup = cleanup; | ||
| 1184 | master->setup = setup; | ||
| 1185 | master->transfer = transfer; | ||
| 1186 | |||
| 1187 | drv_data->ssp_type = platform_info->ssp_type; | ||
| 1188 | drv_data->null_dma_buf = (u32 *)ALIGN((u32)drv_data + | ||
| 1189 | sizeof(struct driver_data), 8); | ||
| 1190 | |||
| 1191 | /* Setup register addresses */ | ||
| 1192 | memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1193 | if (!memory_resource) { | ||
| 1194 | dev_err(&pdev->dev, "memory resources not defined\n"); | ||
| 1195 | status = -ENODEV; | ||
| 1196 | goto out_error_master_alloc; | ||
| 1197 | } | ||
| 1198 | |||
| 1199 | drv_data->ioaddr = (void *)io_p2v(memory_resource->start); | ||
| 1200 | drv_data->ssdr_physical = memory_resource->start + 0x00000010; | ||
| 1201 | if (platform_info->ssp_type == PXA25x_SSP) { | ||
| 1202 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; | ||
| 1203 | drv_data->dma_cr1 = 0; | ||
| 1204 | drv_data->clear_sr = SSSR_ROR; | ||
| 1205 | drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; | ||
| 1206 | } else { | ||
| 1207 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; | ||
| 1208 | drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; | ||
| 1209 | drv_data->clear_sr = SSSR_ROR | SSSR_TINT; | ||
| 1210 | drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | /* Attach to IRQ */ | ||
| 1214 | irq = platform_get_irq(pdev, 0); | ||
| 1215 | if (irq < 0) { | ||
| 1216 | dev_err(&pdev->dev, "irq resource not defined\n"); | ||
| 1217 | status = -ENODEV; | ||
| 1218 | goto out_error_master_alloc; | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | status = request_irq(irq, ssp_int, SA_INTERRUPT, dev->bus_id, drv_data); | ||
| 1222 | if (status < 0) { | ||
| 1223 | dev_err(&pdev->dev, "can not get IRQ\n"); | ||
| 1224 | goto out_error_master_alloc; | ||
| 1225 | } | ||
| 1226 | |||
| 1227 | /* Setup DMA if requested */ | ||
| 1228 | drv_data->tx_channel = -1; | ||
| 1229 | drv_data->rx_channel = -1; | ||
| 1230 | if (platform_info->enable_dma) { | ||
| 1231 | |||
| 1232 | /* Get two DMA channels (rx and tx) */ | ||
| 1233 | drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", | ||
| 1234 | DMA_PRIO_HIGH, | ||
| 1235 | dma_handler, | ||
| 1236 | drv_data); | ||
| 1237 | if (drv_data->rx_channel < 0) { | ||
| 1238 | dev_err(dev, "problem (%d) requesting rx channel\n", | ||
| 1239 | drv_data->rx_channel); | ||
| 1240 | status = -ENODEV; | ||
| 1241 | goto out_error_irq_alloc; | ||
| 1242 | } | ||
| 1243 | drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", | ||
| 1244 | DMA_PRIO_MEDIUM, | ||
| 1245 | dma_handler, | ||
| 1246 | drv_data); | ||
| 1247 | if (drv_data->tx_channel < 0) { | ||
| 1248 | dev_err(dev, "problem (%d) requesting tx channel\n", | ||
| 1249 | drv_data->tx_channel); | ||
| 1250 | status = -ENODEV; | ||
| 1251 | goto out_error_dma_alloc; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | if (drv_data->ioaddr == SSP1_VIRT) { | ||
| 1255 | DRCMRRXSSDR = DRCMR_MAPVLD | ||
| 1256 | | drv_data->rx_channel; | ||
| 1257 | DRCMRTXSSDR = DRCMR_MAPVLD | ||
| 1258 | | drv_data->tx_channel; | ||
| 1259 | } else if (drv_data->ioaddr == SSP2_VIRT) { | ||
| 1260 | DRCMRRXSS2DR = DRCMR_MAPVLD | ||
| 1261 | | drv_data->rx_channel; | ||
| 1262 | DRCMRTXSS2DR = DRCMR_MAPVLD | ||
| 1263 | | drv_data->tx_channel; | ||
| 1264 | } else if (drv_data->ioaddr == SSP3_VIRT) { | ||
| 1265 | DRCMRRXSS3DR = DRCMR_MAPVLD | ||
| 1266 | | drv_data->rx_channel; | ||
| 1267 | DRCMRTXSS3DR = DRCMR_MAPVLD | ||
| 1268 | | drv_data->tx_channel; | ||
| 1269 | } else { | ||
| 1270 | dev_err(dev, "bad SSP type\n"); | ||
| 1271 | goto out_error_dma_alloc; | ||
| 1272 | } | ||
| 1273 | } | ||
| 1274 | |||
| 1275 | /* Enable SOC clock */ | ||
| 1276 | pxa_set_cken(platform_info->clock_enable, 1); | ||
| 1277 | |||
| 1278 | /* Load default SSP configuration */ | ||
| 1279 | write_SSCR0(0, drv_data->ioaddr); | ||
| 1280 | write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr); | ||
| 1281 | write_SSCR0(SSCR0_SerClkDiv(2) | ||
| 1282 | | SSCR0_Motorola | ||
| 1283 | | SSCR0_DataSize(8), | ||
| 1284 | drv_data->ioaddr); | ||
| 1285 | if (drv_data->ssp_type != PXA25x_SSP) | ||
| 1286 | write_SSTO(0, drv_data->ioaddr); | ||
| 1287 | write_SSPSP(0, drv_data->ioaddr); | ||
| 1288 | |||
| 1289 | /* Initialize and start the queue */ | ||
| 1290 | status = init_queue(drv_data); | ||
| 1291 | if (status != 0) { | ||
| 1292 | dev_err(&pdev->dev, "problem initializing queue\n"); | ||
| 1293 | goto out_error_clock_enabled; | ||
| 1294 | } | ||
| 1295 | status = start_queue(drv_data); | ||
| 1296 | if (status != 0) { | ||
| 1297 | dev_err(&pdev->dev, "problem starting queue\n"); | ||
| 1298 | goto out_error_clock_enabled; | ||
| 1299 | } | ||
| 1300 | |||
| 1301 | /* Register with the SPI framework */ | ||
| 1302 | platform_set_drvdata(pdev, drv_data); | ||
| 1303 | status = spi_register_master(master); | ||
| 1304 | if (status != 0) { | ||
| 1305 | dev_err(&pdev->dev, "problem registering spi master\n"); | ||
| 1306 | goto out_error_queue_alloc; | ||
| 1307 | } | ||
| 1308 | |||
| 1309 | return status; | ||
| 1310 | |||
| 1311 | out_error_queue_alloc: | ||
| 1312 | destroy_queue(drv_data); | ||
| 1313 | |||
| 1314 | out_error_clock_enabled: | ||
| 1315 | pxa_set_cken(platform_info->clock_enable, 0); | ||
| 1316 | |||
| 1317 | out_error_dma_alloc: | ||
| 1318 | if (drv_data->tx_channel != -1) | ||
| 1319 | pxa_free_dma(drv_data->tx_channel); | ||
| 1320 | if (drv_data->rx_channel != -1) | ||
| 1321 | pxa_free_dma(drv_data->rx_channel); | ||
| 1322 | |||
| 1323 | out_error_irq_alloc: | ||
| 1324 | free_irq(irq, drv_data); | ||
| 1325 | |||
| 1326 | out_error_master_alloc: | ||
| 1327 | spi_master_put(master); | ||
| 1328 | return status; | ||
| 1329 | } | ||
| 1330 | |||
| 1331 | static int pxa2xx_spi_remove(struct platform_device *pdev) | ||
| 1332 | { | ||
| 1333 | struct driver_data *drv_data = platform_get_drvdata(pdev); | ||
| 1334 | int irq; | ||
| 1335 | int status = 0; | ||
| 1336 | |||
| 1337 | if (!drv_data) | ||
| 1338 | return 0; | ||
| 1339 | |||
| 1340 | /* Remove the queue */ | ||
| 1341 | status = destroy_queue(drv_data); | ||
| 1342 | if (status != 0) | ||
| 1343 | return status; | ||
| 1344 | |||
| 1345 | /* Disable the SSP at the peripheral and SOC level */ | ||
| 1346 | write_SSCR0(0, drv_data->ioaddr); | ||
| 1347 | pxa_set_cken(drv_data->master_info->clock_enable, 0); | ||
| 1348 | |||
| 1349 | /* Release DMA */ | ||
| 1350 | if (drv_data->master_info->enable_dma) { | ||
| 1351 | if (drv_data->ioaddr == SSP1_VIRT) { | ||
| 1352 | DRCMRRXSSDR = 0; | ||
| 1353 | DRCMRTXSSDR = 0; | ||
| 1354 | } else if (drv_data->ioaddr == SSP2_VIRT) { | ||
| 1355 | DRCMRRXSS2DR = 0; | ||
| 1356 | DRCMRTXSS2DR = 0; | ||
| 1357 | } else if (drv_data->ioaddr == SSP3_VIRT) { | ||
| 1358 | DRCMRRXSS3DR = 0; | ||
| 1359 | DRCMRTXSS3DR = 0; | ||
| 1360 | } | ||
| 1361 | pxa_free_dma(drv_data->tx_channel); | ||
| 1362 | pxa_free_dma(drv_data->rx_channel); | ||
| 1363 | } | ||
| 1364 | |||
| 1365 | /* Release IRQ */ | ||
| 1366 | irq = platform_get_irq(pdev, 0); | ||
| 1367 | if (irq >= 0) | ||
| 1368 | free_irq(irq, drv_data); | ||
| 1369 | |||
| 1370 | /* Disconnect from the SPI framework */ | ||
| 1371 | spi_unregister_master(drv_data->master); | ||
| 1372 | |||
| 1373 | /* Prevent double remove */ | ||
| 1374 | platform_set_drvdata(pdev, NULL); | ||
| 1375 | |||
| 1376 | return 0; | ||
| 1377 | } | ||
| 1378 | |||
| 1379 | static void pxa2xx_spi_shutdown(struct platform_device *pdev) | ||
| 1380 | { | ||
| 1381 | int status = 0; | ||
| 1382 | |||
| 1383 | if ((status = pxa2xx_spi_remove(pdev)) != 0) | ||
| 1384 | dev_err(&pdev->dev, "shutdown failed with %d\n", status); | ||
| 1385 | } | ||
| 1386 | |||
| 1387 | #ifdef CONFIG_PM | ||
| 1388 | static int suspend_devices(struct device *dev, void *pm_message) | ||
| 1389 | { | ||
| 1390 | pm_message_t *state = pm_message; | ||
| 1391 | |||
| 1392 | if (dev->power.power_state.event != state->event) { | ||
| 1393 | dev_warn(dev, "pm state does not match request\n"); | ||
| 1394 | return -1; | ||
| 1395 | } | ||
| 1396 | |||
| 1397 | return 0; | ||
| 1398 | } | ||
| 1399 | |||
| 1400 | static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | ||
| 1401 | { | ||
| 1402 | struct driver_data *drv_data = platform_get_drvdata(pdev); | ||
| 1403 | int status = 0; | ||
| 1404 | |||
| 1405 | /* Check all children for their current power state */ | ||
| 1406 | if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) { | ||
| 1407 | dev_warn(&pdev->dev, "suspend aborted\n"); | ||
| 1408 | return -1; | ||
| 1409 | } | ||
| 1410 | |||
| 1411 | status = stop_queue(drv_data); | ||
| 1412 | if (status != 0) | ||
| 1413 | return status; | ||
| 1414 | write_SSCR0(0, drv_data->ioaddr); | ||
| 1415 | pxa_set_cken(drv_data->master_info->clock_enable, 0); | ||
| 1416 | |||
| 1417 | return 0; | ||
| 1418 | } | ||
| 1419 | |||
| 1420 | static int pxa2xx_spi_resume(struct platform_device *pdev) | ||
| 1421 | { | ||
| 1422 | struct driver_data *drv_data = platform_get_drvdata(pdev); | ||
| 1423 | int status = 0; | ||
| 1424 | |||
| 1425 | /* Enable the SSP clock */ | ||
| 1426 | pxa_set_cken(drv_data->master_info->clock_enable, 1); | ||
| 1427 | |||
| 1428 | /* Start the queue running */ | ||
| 1429 | status = start_queue(drv_data); | ||
| 1430 | if (status != 0) { | ||
| 1431 | dev_err(&pdev->dev, "problem starting queue (%d)\n", status); | ||
| 1432 | return status; | ||
| 1433 | } | ||
| 1434 | |||
| 1435 | return 0; | ||
| 1436 | } | ||
| 1437 | #else | ||
| 1438 | #define pxa2xx_spi_suspend NULL | ||
| 1439 | #define pxa2xx_spi_resume NULL | ||
| 1440 | #endif /* CONFIG_PM */ | ||
| 1441 | |||
| 1442 | static struct platform_driver driver = { | ||
| 1443 | .driver = { | ||
| 1444 | .name = "pxa2xx-spi", | ||
| 1445 | .bus = &platform_bus_type, | ||
| 1446 | .owner = THIS_MODULE, | ||
| 1447 | }, | ||
| 1448 | .probe = pxa2xx_spi_probe, | ||
| 1449 | .remove = __devexit_p(pxa2xx_spi_remove), | ||
| 1450 | .shutdown = pxa2xx_spi_shutdown, | ||
| 1451 | .suspend = pxa2xx_spi_suspend, | ||
| 1452 | .resume = pxa2xx_spi_resume, | ||
| 1453 | }; | ||
| 1454 | |||
| 1455 | static int __init pxa2xx_spi_init(void) | ||
| 1456 | { | ||
| 1457 | platform_driver_register(&driver); | ||
| 1458 | |||
| 1459 | return 0; | ||
| 1460 | } | ||
| 1461 | module_init(pxa2xx_spi_init); | ||
| 1462 | |||
| 1463 | static void __exit pxa2xx_spi_exit(void) | ||
| 1464 | { | ||
| 1465 | platform_driver_unregister(&driver); | ||
| 1466 | } | ||
| 1467 | module_exit(pxa2xx_spi_exit); | ||
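Complementing the platform data, each SPI slave can pass per-chip parameters into the setup() routine above through spi->controller_data. The following is a hedged sketch using only the struct pxa2xx_spi_chip fields that setup() actually reads (tx_threshold, rx_threshold, dma_burst_size, timeout_microsecs, enable_loopback, cs_control); the GPIO chip select and the protocol driver name are illustrative assumptions.

#include <linux/spi/spi.h>
#include <asm/arch/pxa-regs.h>
#include <asm/arch/pxa2xx_spi.h>

/* Sketch only: assumed active-low chip select on GPIO 23. The driver
 * above invokes this with PXA2XX_CS_ASSERT / PXA2XX_CS_DEASSERT. */
static void board_cs_control(u32 command)
{
	if (command == PXA2XX_CS_ASSERT)
		GPCR(23) = GPIO_bit(23);	/* drive low: selected */
	else if (command == PXA2XX_CS_DEASSERT)
		GPSR(23) = GPIO_bit(23);	/* drive high: deselected */
}

static struct pxa2xx_spi_chip board_chip_info = {
	.tx_threshold		= 8,
	.rx_threshold		= 8,
	.dma_burst_size		= 8,	/* non-zero requests DMA when the master enables it */
	.timeout_microsecs	= 64,
	.enable_loopback	= 0,
	.cs_control		= board_cs_control,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	 = "example-spi-chip",	/* hypothetical protocol driver */
		.max_speed_hz	 = 1000000,
		.bus_num	 = 1,			/* matches the master's platform_device id */
		.chip_select	 = 0,
		.controller_data = &board_chip_info,
	},
};

A board would hand board_spi_devices to spi_register_board_info() during init; the chip data then reaches setup() the first time the SPI core configures the device.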
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 94f5e8ed83a7..7a3f733051e9 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
| @@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master); | |||
| 395 | int __init_or_module | 395 | int __init_or_module |
| 396 | spi_register_master(struct spi_master *master) | 396 | spi_register_master(struct spi_master *master) |
| 397 | { | 397 | { |
| 398 | static atomic_t dyn_bus_id = ATOMIC_INIT(0); | 398 | static atomic_t dyn_bus_id = ATOMIC_INIT((1<<16) - 1); |
| 399 | struct device *dev = master->cdev.dev; | 399 | struct device *dev = master->cdev.dev; |
| 400 | int status = -ENODEV; | 400 | int status = -ENODEV; |
| 401 | int dynamic = 0; | 401 | int dynamic = 0; |
| @@ -404,7 +404,7 @@ spi_register_master(struct spi_master *master) | |||
| 404 | return -ENODEV; | 404 | return -ENODEV; |
| 405 | 405 | ||
| 406 | /* convention: dynamically assigned bus IDs count down from the max */ | 406 | /* convention: dynamically assigned bus IDs count down from the max */ |
| 407 | if (master->bus_num == 0) { | 407 | if (master->bus_num < 0) { |
| 408 | master->bus_num = atomic_dec_return(&dyn_bus_id); | 408 | master->bus_num = atomic_dec_return(&dyn_bus_id); |
| 409 | dynamic = 1; | 409 | dynamic = 1; |
| 410 | } | 410 | } |
| @@ -522,7 +522,8 @@ int spi_sync(struct spi_device *spi, struct spi_message *message) | |||
| 522 | } | 522 | } |
| 523 | EXPORT_SYMBOL_GPL(spi_sync); | 523 | EXPORT_SYMBOL_GPL(spi_sync); |
| 524 | 524 | ||
| 525 | #define SPI_BUFSIZ (SMP_CACHE_BYTES) | 525 | /* portable code must never pass more than 32 bytes */ |
| 526 | #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) | ||
| 526 | 527 | ||
| 527 | static u8 *buf; | 528 | static u8 *buf; |
| 528 | 529 | ||
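The spi.c hunks above make bus 0 a legal static bus number and move dynamically assigned IDs to count down from 65535, requested by passing a negative bus_num. A minimal sketch of how a controller probe would ask for a dynamic ID under the new convention (driver and device names are hypothetical):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Sketch only: pdev->id >= 0 keeps a fixed bus number, anything else
 * lets spi_register_master() pick one from the dynamic range. */
static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int status;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	master->bus_num = (pdev->id >= 0) ? pdev->id : -1;
	master->num_chipselect = 1;

	status = spi_register_master(master);
	if (status < 0)
		spi_master_put(master);
	return status;
}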
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index f037e5593269..dd2f950b21a7 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c | |||
| @@ -138,6 +138,45 @@ static unsigned bitbang_txrx_32( | |||
| 138 | return t->len - count; | 138 | return t->len - count; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | ||
| 142 | { | ||
| 143 | struct spi_bitbang_cs *cs = spi->controller_state; | ||
| 144 | u8 bits_per_word; | ||
| 145 | u32 hz; | ||
| 146 | |||
| 147 | if (t) { | ||
| 148 | bits_per_word = t->bits_per_word; | ||
| 149 | hz = t->speed_hz; | ||
| 150 | } else { | ||
| 151 | bits_per_word = 0; | ||
| 152 | hz = 0; | ||
| 153 | } | ||
| 154 | |||
| 155 | /* spi_transfer level calls that work per-word */ | ||
| 156 | if (!bits_per_word) | ||
| 157 | bits_per_word = spi->bits_per_word; | ||
| 158 | if (bits_per_word <= 8) | ||
| 159 | cs->txrx_bufs = bitbang_txrx_8; | ||
| 160 | else if (bits_per_word <= 16) | ||
| 161 | cs->txrx_bufs = bitbang_txrx_16; | ||
| 162 | else if (bits_per_word <= 32) | ||
| 163 | cs->txrx_bufs = bitbang_txrx_32; | ||
| 164 | else | ||
| 165 | return -EINVAL; | ||
| 166 | |||
| 167 | /* nsecs = (clock period)/2 */ | ||
| 168 | if (!hz) | ||
| 169 | hz = spi->max_speed_hz; | ||
| 170 | if (hz) { | ||
| 171 | cs->nsecs = (1000000000/2) / hz; | ||
| 172 | if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000)) | ||
| 173 | return -EINVAL; | ||
| 174 | } | ||
| 175 | |||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer); | ||
| 179 | |||
| 141 | /** | 180 | /** |
| 142 | * spi_bitbang_setup - default setup for per-word I/O loops | 181 | * spi_bitbang_setup - default setup for per-word I/O loops |
| 143 | */ | 182 | */ |
| @@ -145,8 +184,16 @@ int spi_bitbang_setup(struct spi_device *spi) | |||
| 145 | { | 184 | { |
| 146 | struct spi_bitbang_cs *cs = spi->controller_state; | 185 | struct spi_bitbang_cs *cs = spi->controller_state; |
| 147 | struct spi_bitbang *bitbang; | 186 | struct spi_bitbang *bitbang; |
| 187 | int retval; | ||
| 148 | 188 | ||
| 149 | if (!spi->max_speed_hz) | 189 | bitbang = spi_master_get_devdata(spi->master); |
| 190 | |||
| 191 | /* REVISIT: some systems will want to support devices using lsb-first | ||
| 192 | * bit encodings on the wire. In pure software that would be trivial, | ||
| 193 | * just bitbang_txrx_le_cphaX() routines shifting the other way, and | ||
| 194 | * some hardware controllers also have this support. | ||
| 195 | */ | ||
| 196 | if ((spi->mode & SPI_LSB_FIRST) != 0) | ||
| 150 | return -EINVAL; | 197 | return -EINVAL; |
| 151 | 198 | ||
| 152 | if (!cs) { | 199 | if (!cs) { |
| @@ -155,32 +202,20 @@ int spi_bitbang_setup(struct spi_device *spi) | |||
| 155 | return -ENOMEM; | 202 | return -ENOMEM; |
| 156 | spi->controller_state = cs; | 203 | spi->controller_state = cs; |
| 157 | } | 204 | } |
| 158 | bitbang = spi_master_get_devdata(spi->master); | ||
| 159 | 205 | ||
| 160 | if (!spi->bits_per_word) | 206 | if (!spi->bits_per_word) |
| 161 | spi->bits_per_word = 8; | 207 | spi->bits_per_word = 8; |
| 162 | 208 | ||
| 163 | /* spi_transfer level calls that work per-word */ | ||
| 164 | if (spi->bits_per_word <= 8) | ||
| 165 | cs->txrx_bufs = bitbang_txrx_8; | ||
| 166 | else if (spi->bits_per_word <= 16) | ||
| 167 | cs->txrx_bufs = bitbang_txrx_16; | ||
| 168 | else if (spi->bits_per_word <= 32) | ||
| 169 | cs->txrx_bufs = bitbang_txrx_32; | ||
| 170 | else | ||
| 171 | return -EINVAL; | ||
| 172 | |||
| 173 | /* per-word shift register access, in hardware or bitbanging */ | 209 | /* per-word shift register access, in hardware or bitbanging */ |
| 174 | cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; | 210 | cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; |
| 175 | if (!cs->txrx_word) | 211 | if (!cs->txrx_word) |
| 176 | return -EINVAL; | 212 | return -EINVAL; |
| 177 | 213 | ||
| 178 | /* nsecs = (clock period)/2 */ | 214 | retval = spi_bitbang_setup_transfer(spi, NULL); |
| 179 | cs->nsecs = (1000000000/2) / (spi->max_speed_hz); | 215 | if (retval < 0) |
| 180 | if (cs->nsecs > MAX_UDELAY_MS * 1000) | 216 | return retval; |
| 181 | return -EINVAL; | ||
| 182 | 217 | ||
| 183 | dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n", | 218 | dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", |
| 184 | __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA), | 219 | __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA), |
| 185 | spi->bits_per_word, 2 * cs->nsecs); | 220 | spi->bits_per_word, 2 * cs->nsecs); |
| 186 | 221 | ||
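After this refactor, the per-word loop selection and the clock-period math live in spi_bitbang_setup_transfer(), and spi_bitbang_setup() additionally rejects SPI_LSB_FIRST devices. A small illustrative protocol-driver fragment (names invented) showing how a device gets reconfigured and re-validated through the master's setup() hook:

#include <linux/spi/spi.h>

static int example_configure(struct spi_device *spi)
{
        spi->mode = SPI_MODE_3;         /* CPOL=1, CPHA=1; LSB-first would fail */
        spi->bits_per_word = 16;        /* anything up to 32 is accepted */

        return spi_setup(spi);          /* forwards to master->setup() */
}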
| @@ -246,6 +281,8 @@ static void bitbang_work(void *_bitbang) | |||
| 246 | unsigned tmp; | 281 | unsigned tmp; |
| 247 | unsigned cs_change; | 282 | unsigned cs_change; |
| 248 | int status; | 283 | int status; |
| 284 | int (*setup_transfer)(struct spi_device *, | ||
| 285 | struct spi_transfer *); | ||
| 249 | 286 | ||
| 250 | m = container_of(bitbang->queue.next, struct spi_message, | 287 | m = container_of(bitbang->queue.next, struct spi_message, |
| 251 | queue); | 288 | queue); |
| @@ -262,6 +299,7 @@ static void bitbang_work(void *_bitbang) | |||
| 262 | tmp = 0; | 299 | tmp = 0; |
| 263 | cs_change = 1; | 300 | cs_change = 1; |
| 264 | status = 0; | 301 | status = 0; |
| 302 | setup_transfer = NULL; | ||
| 265 | 303 | ||
| 266 | list_for_each_entry (t, &m->transfers, transfer_list) { | 304 | list_for_each_entry (t, &m->transfers, transfer_list) { |
| 267 | if (bitbang->shutdown) { | 305 | if (bitbang->shutdown) { |
| @@ -269,6 +307,20 @@ static void bitbang_work(void *_bitbang) | |||
| 269 | break; | 307 | break; |
| 270 | } | 308 | } |
| 271 | 309 | ||
| 310 | /* override or restore speed and wordsize */ | ||
| 311 | if (t->speed_hz || t->bits_per_word) { | ||
| 312 | setup_transfer = bitbang->setup_transfer; | ||
| 313 | if (!setup_transfer) { | ||
| 314 | status = -ENOPROTOOPT; | ||
| 315 | break; | ||
| 316 | } | ||
| 317 | } | ||
| 318 | if (setup_transfer) { | ||
| 319 | status = setup_transfer(spi, t); | ||
| 320 | if (status < 0) | ||
| 321 | break; | ||
| 322 | } | ||
| 323 | |||
| 272 | /* set up default clock polarity, and activate chip; | 324 | /* set up default clock polarity, and activate chip; |
| 273 | * this implicitly updates clock and spi modes as | 325 | * this implicitly updates clock and spi modes as |
| 274 | * previously recorded for this device via setup(). | 326 | * previously recorded for this device via setup(). |
| @@ -325,6 +377,10 @@ static void bitbang_work(void *_bitbang) | |||
| 325 | m->status = status; | 377 | m->status = status; |
| 326 | m->complete(m->context); | 378 | m->complete(m->context); |
| 327 | 379 | ||
| 380 | /* restore speed and wordsize */ | ||
| 381 | if (setup_transfer) | ||
| 382 | setup_transfer(spi, NULL); | ||
| 383 | |||
| 328 | /* normally deactivate chipselect ... unless no error and | 384 | /* normally deactivate chipselect ... unless no error and |
| 329 | * cs_change has hinted that the next message will probably | 385 | * cs_change has hinted that the next message will probably |
| 330 | * be for this chip too. | 386 | * be for this chip too. |
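The bitbang_work() hunks above are what make per-transfer overrides work: whenever a transfer carries a non-zero speed_hz or bits_per_word, the master's setup_transfer() hook is invoked for it, and it is called once more with a NULL transfer at the end of the message to restore the device defaults. A sketch of a message using the new fields, with invented buffers, rates and helper name; spi_sync() fails with -ENOPROTOOPT if the master has no setup_transfer() hook:

#include <linux/spi/spi.h>

static int example_read(struct spi_device *spi, u8 cmd, u8 *rxbuf, unsigned len)
{
        struct spi_transfer t_cmd = {
                .tx_buf = &cmd,
                .len    = 1,
                /* speed_hz/bits_per_word left 0: keep the device defaults */
        };
        struct spi_transfer t_data = {
                .rx_buf         = rxbuf,
                .len            = len,
                .speed_hz       = 1000000,      /* clock just this transfer at 1 MHz */
                .bits_per_word  = 16,           /* and shift it as 16-bit words */
        };
        struct spi_message m;

        spi_message_init(&m);
        spi_message_add_tail(&t_cmd, &m);
        spi_message_add_tail(&t_data, &m);

        return spi_sync(spi, &m);
}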
| @@ -348,6 +404,7 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m) | |||
| 348 | { | 404 | { |
| 349 | struct spi_bitbang *bitbang; | 405 | struct spi_bitbang *bitbang; |
| 350 | unsigned long flags; | 406 | unsigned long flags; |
| 407 | int status = 0; | ||
| 351 | 408 | ||
| 352 | m->actual_length = 0; | 409 | m->actual_length = 0; |
| 353 | m->status = -EINPROGRESS; | 410 | m->status = -EINPROGRESS; |
| @@ -357,11 +414,15 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m) | |||
| 357 | return -ESHUTDOWN; | 414 | return -ESHUTDOWN; |
| 358 | 415 | ||
| 359 | spin_lock_irqsave(&bitbang->lock, flags); | 416 | spin_lock_irqsave(&bitbang->lock, flags); |
| 360 | list_add_tail(&m->queue, &bitbang->queue); | 417 | if (!spi->max_speed_hz) |
| 361 | queue_work(bitbang->workqueue, &bitbang->work); | 418 | status = -ENETDOWN; |
| 419 | else { | ||
| 420 | list_add_tail(&m->queue, &bitbang->queue); | ||
| 421 | queue_work(bitbang->workqueue, &bitbang->work); | ||
| 422 | } | ||
| 362 | spin_unlock_irqrestore(&bitbang->lock, flags); | 423 | spin_unlock_irqrestore(&bitbang->lock, flags); |
| 363 | 424 | ||
| 364 | return 0; | 425 | return status; |
| 365 | } | 426 | } |
| 366 | EXPORT_SYMBOL_GPL(spi_bitbang_transfer); | 427 | EXPORT_SYMBOL_GPL(spi_bitbang_transfer); |
| 367 | 428 | ||
| @@ -406,6 +467,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) | |||
| 406 | bitbang->use_dma = 0; | 467 | bitbang->use_dma = 0; |
| 407 | bitbang->txrx_bufs = spi_bitbang_bufs; | 468 | bitbang->txrx_bufs = spi_bitbang_bufs; |
| 408 | if (!bitbang->master->setup) { | 469 | if (!bitbang->master->setup) { |
| 470 | if (!bitbang->setup_transfer) | ||
| 471 | bitbang->setup_transfer = | ||
| 472 | spi_bitbang_setup_transfer; | ||
| 409 | bitbang->master->setup = spi_bitbang_setup; | 473 | bitbang->master->setup = spi_bitbang_setup; |
| 410 | bitbang->master->cleanup = spi_bitbang_cleanup; | 474 | bitbang->master->cleanup = spi_bitbang_cleanup; |
| 411 | } | 475 | } |
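With the spi_bitbang_start() hunk above, a bitbang controller that leaves master->setup and bitbang.setup_transfer unset inherits spi_bitbang_setup(), spi_bitbang_cleanup() and the new spi_bitbang_setup_transfer() defaults. A minimal sketch of such a controller; GPIO handling, error unwinding and all names are placeholders, not a real driver:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

struct example_bb {
        struct spi_bitbang      bitbang;        /* must be first: it is the devdata */
};

static u32 example_txrx_mode0(struct spi_device *spi, unsigned nsecs,
                              u32 word, u8 bits)
{
        /* clock 'bits' bits of 'word' out and in on GPIOs; stub only */
        return 0;
}

static void example_chipselect(struct spi_device *spi, int is_on)
{
        /* drive the chip-select GPIO for spi->chip_select */
}

static int example_bb_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct example_bb *bb;

        master = spi_alloc_master(&pdev->dev, sizeof *bb);
        if (!master)
                return -ENOMEM;
        bb = spi_master_get_devdata(master);

        master->bus_num = 2;
        master->num_chipselect = 1;

        bb->bitbang.master = master;
        bb->bitbang.chipselect = example_chipselect;
        /* one per-word routine per CPOL/CPHA combination; only mode 0 here */
        bb->bitbang.txrx_word[SPI_MODE_0] = example_txrx_mode0;

        /* master->setup and bitbang.setup_transfer stay NULL, so
         * spi_bitbang_start() installs the default hooks shown above */
        return spi_bitbang_start(&bb->bitbang);
}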
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 334b1db1bd7c..27597c576eff 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
| @@ -29,12 +29,15 @@ static ssize_t backlight_show_power(struct class_device *cdev, char *buf) | |||
| 29 | 29 | ||
| 30 | static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count) | 30 | static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count) |
| 31 | { | 31 | { |
| 32 | int rc = -ENXIO, power; | 32 | int rc = -ENXIO; |
| 33 | char *endp; | 33 | char *endp; |
| 34 | struct backlight_device *bd = to_backlight_device(cdev); | 34 | struct backlight_device *bd = to_backlight_device(cdev); |
| 35 | int power = simple_strtoul(buf, &endp, 0); | ||
| 36 | size_t size = endp - buf; | ||
| 35 | 37 | ||
| 36 | power = simple_strtoul(buf, &endp, 0); | 38 | if (*endp && isspace(*endp)) |
| 37 | if (*endp && !isspace(*endp)) | 39 | size++; |
| 40 | if (size != count) | ||
| 38 | return -EINVAL; | 41 | return -EINVAL; |
| 39 | 42 | ||
| 40 | down(&bd->sem); | 43 | down(&bd->sem); |
| @@ -65,12 +68,15 @@ static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf) | |||
| 65 | 68 | ||
| 66 | static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count) | 69 | static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count) |
| 67 | { | 70 | { |
| 68 | int rc = -ENXIO, brightness; | 71 | int rc = -ENXIO; |
| 69 | char *endp; | 72 | char *endp; |
| 70 | struct backlight_device *bd = to_backlight_device(cdev); | 73 | struct backlight_device *bd = to_backlight_device(cdev); |
| 74 | int brightness = simple_strtoul(buf, &endp, 0); | ||
| 75 | size_t size = endp - buf; | ||
| 71 | 76 | ||
| 72 | brightness = simple_strtoul(buf, &endp, 0); | 77 | if (*endp && isspace(*endp)) |
| 73 | if (*endp && !isspace(*endp)) | 78 | size++; |
| 79 | if (size != count) | ||
| 74 | return -EINVAL; | 80 | return -EINVAL; |
| 75 | 81 | ||
| 76 | down(&bd->sem); | 82 | down(&bd->sem); |
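The sysfs parsing change above requires the entire written buffer, minus at most one trailing whitespace character (the newline echo appends), to be consumed by simple_strtoul(), so input like "12junk" is now rejected instead of silently accepted. A generic sketch of the idiom outside any particular driver; example_set_level() and the attribute wiring are hypothetical:

#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/kernel.h>

static void example_set_level(int level)
{
        /* apply the new value to the hardware */
}

static ssize_t example_store_level(struct class_device *cdev,
                                   const char *buf, size_t count)
{
        char *endp;
        int level = simple_strtoul(buf, &endp, 0);
        size_t size = endp - buf;

        /* allow exactly one trailing whitespace character */
        if (*endp && isspace(*endp))
                size++;
        if (size != count)
                return -EINVAL;         /* trailing garbage such as "12junk" */

        example_set_level(level);
        return count;
}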
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 86908a60c630..bc8ab005a3fb 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c | |||
| @@ -31,12 +31,15 @@ static ssize_t lcd_show_power(struct class_device *cdev, char *buf) | |||
| 31 | 31 | ||
| 32 | static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count) | 32 | static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count) |
| 33 | { | 33 | { |
| 34 | int rc, power; | 34 | int rc = -ENXIO; |
| 35 | char *endp; | 35 | char *endp; |
| 36 | struct lcd_device *ld = to_lcd_device(cdev); | 36 | struct lcd_device *ld = to_lcd_device(cdev); |
| 37 | int power = simple_strtoul(buf, &endp, 0); | ||
| 38 | size_t size = endp - buf; | ||
| 37 | 39 | ||
| 38 | power = simple_strtoul(buf, &endp, 0); | 40 | if (*endp && isspace(*endp)) |
| 39 | if (*endp && !isspace(*endp)) | 41 | size++; |
| 42 | if (size != count) | ||
| 40 | return -EINVAL; | 43 | return -EINVAL; |
| 41 | 44 | ||
| 42 | down(&ld->sem); | 45 | down(&ld->sem); |
| @@ -44,8 +47,7 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_ | |||
| 44 | pr_debug("lcd: set power to %d\n", power); | 47 | pr_debug("lcd: set power to %d\n", power); |
| 45 | ld->props->set_power(ld, power); | 48 | ld->props->set_power(ld, power); |
| 46 | rc = count; | 49 | rc = count; |
| 47 | } else | 50 | } |
| 48 | rc = -ENXIO; | ||
| 49 | up(&ld->sem); | 51 | up(&ld->sem); |
| 50 | 52 | ||
| 51 | return rc; | 53 | return rc; |
| @@ -53,14 +55,12 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_ | |||
| 53 | 55 | ||
| 54 | static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf) | 56 | static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf) |
| 55 | { | 57 | { |
| 56 | int rc; | 58 | int rc = -ENXIO; |
| 57 | struct lcd_device *ld = to_lcd_device(cdev); | 59 | struct lcd_device *ld = to_lcd_device(cdev); |
| 58 | 60 | ||
| 59 | down(&ld->sem); | 61 | down(&ld->sem); |
| 60 | if (likely(ld->props && ld->props->get_contrast)) | 62 | if (likely(ld->props && ld->props->get_contrast)) |
| 61 | rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld)); | 63 | rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld)); |
| 62 | else | ||
| 63 | rc = -ENXIO; | ||
| 64 | up(&ld->sem); | 64 | up(&ld->sem); |
| 65 | 65 | ||
| 66 | return rc; | 66 | return rc; |
| @@ -68,12 +68,15 @@ static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf) | |||
| 68 | 68 | ||
| 69 | static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count) | 69 | static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count) |
| 70 | { | 70 | { |
| 71 | int rc, contrast; | 71 | int rc = -ENXIO; |
| 72 | char *endp; | 72 | char *endp; |
| 73 | struct lcd_device *ld = to_lcd_device(cdev); | 73 | struct lcd_device *ld = to_lcd_device(cdev); |
| 74 | int contrast = simple_strtoul(buf, &endp, 0); | ||
| 75 | size_t size = endp - buf; | ||
| 74 | 76 | ||
| 75 | contrast = simple_strtoul(buf, &endp, 0); | 77 | if (*endp && isspace(*endp)) |
| 76 | if (*endp && !isspace(*endp)) | 78 | size++; |
| 79 | if (size != count) | ||
| 77 | return -EINVAL; | 80 | return -EINVAL; |
| 78 | 81 | ||
| 79 | down(&ld->sem); | 82 | down(&ld->sem); |
| @@ -81,8 +84,7 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si | |||
| 81 | pr_debug("lcd: set contrast to %d\n", contrast); | 84 | pr_debug("lcd: set contrast to %d\n", contrast); |
| 82 | ld->props->set_contrast(ld, contrast); | 85 | ld->props->set_contrast(ld, contrast); |
| 83 | rc = count; | 86 | rc = count; |
| 84 | } else | 87 | } |
| 85 | rc = -ENXIO; | ||
| 86 | up(&ld->sem); | 88 | up(&ld->sem); |
| 87 | 89 | ||
| 88 | return rc; | 90 | return rc; |
| @@ -90,14 +92,12 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si | |||
| 90 | 92 | ||
| 91 | static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf) | 93 | static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf) |
| 92 | { | 94 | { |
| 93 | int rc; | 95 | int rc = -ENXIO; |
| 94 | struct lcd_device *ld = to_lcd_device(cdev); | 96 | struct lcd_device *ld = to_lcd_device(cdev); |
| 95 | 97 | ||
| 96 | down(&ld->sem); | 98 | down(&ld->sem); |
| 97 | if (likely(ld->props)) | 99 | if (likely(ld->props)) |
| 98 | rc = sprintf(buf, "%d\n", ld->props->max_contrast); | 100 | rc = sprintf(buf, "%d\n", ld->props->max_contrast); |
| 99 | else | ||
| 100 | rc = -ENXIO; | ||
| 101 | up(&ld->sem); | 101 | up(&ld->sem); |
| 102 | 102 | ||
| 103 | return rc; | 103 | return rc; |
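The lcd.c hunks apply the same parsing rule and also simplify the show handlers: rc starts out as -ENXIO and is only overwritten when the operation is actually available, removing the dangling else branches. The shape of that pattern, with invented types standing in for the lcd specifics:

#include <linux/device.h>
#include <linux/kernel.h>
#include <asm/semaphore.h>

struct example_ops {
        int (*get_value)(void *priv);
};

struct example_device {
        struct semaphore        sem;
        struct example_ops      *ops;
        void                    *priv;
        struct class_device     class_dev;
};
#define to_example_device(d) container_of(d, struct example_device, class_dev)

static ssize_t example_show_value(struct class_device *cdev, char *buf)
{
        int rc = -ENXIO;
        struct example_device *ed = to_example_device(cdev);

        down(&ed->sem);
        if (likely(ed->ops && ed->ops->get_value))
                rc = sprintf(buf, "%d\n", ed->ops->get_value(ed->priv));
        up(&ed->sem);

        return rc;
}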
