 MAINTAINERS                       |    6
 drivers/block/DAC960.c            |    1
 drivers/block/Kconfig             |   23
 drivers/block/Makefile            |    3
 drivers/block/mtip32xx/Kconfig    |    2
 drivers/block/mtip32xx/mtip32xx.c |  431
 drivers/block/mtip32xx/mtip32xx.h |   48
 drivers/block/rsxx/Makefile       |    2
 drivers/block/rsxx/config.c       |  213
 drivers/block/rsxx/core.c         |  649
 drivers/block/rsxx/cregs.c        |  758
 drivers/block/rsxx/dev.c          |  367
 drivers/block/rsxx/dma.c          |  998
 drivers/block/rsxx/rsxx.h         |   45
 drivers/block/rsxx/rsxx_cfg.h     |   72
 drivers/block/rsxx/rsxx_priv.h    |  399
 drivers/block/xd.c                | 1123
 drivers/block/xd.h                |  134
 18 files changed, 3919 insertions(+), 1355 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 35a56bcd5e75..34774f43a652 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6312,6 +6312,12 @@ S:	Maintained
 F:	Documentation/blockdev/ramdisk.txt
 F:	drivers/block/brd.c
 
+RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
+M:	Joshua Morris <josh.h.morris@us.ibm.com>
+M:	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+S:	Maintained
+F:	drivers/block/rsxx/
+
 RANDOM NUMBER DRIVER
 M:	Theodore Ts'o" <tytso@mit.edu>
 S:	Maintained
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 9a13e889837e..0d3ffc51a7bd 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7054,6 +7054,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
 	    else
 	      ErrorCode = 0;
 	  }
+	  break;
       default:
 	ErrorCode = -ENOTTY;
       }
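
The DAC960 hunk above adds the missing `break`, so the successful ioctl case no longer falls through into `default:` and has its result clobbered with -ENOTTY. A reduced sketch of the fall-through shape, using a hypothetical request value:

	long err = 0;
	switch (request) {
	case 0x1234:		/* stands in for the DAC960 GAM request */
		err = 0;	/* success path... */
		break;		/* ...without this, control falls into default */
	default:
		err = -ENOTTY;	/* and success was reported as an error */
	}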
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 824e09c4d0d7..5dc0daed8fac 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -63,19 +63,6 @@ config AMIGA_Z2RAM
 	  To compile this driver as a module, choose M here: the
 	  module will be called z2ram.
 
-config BLK_DEV_XD
-	tristate "XT hard disk support"
-	depends on ISA && ISA_DMA_API
-	select CHECK_SIGNATURE
-	help
-	  Very old 8 bit hard disk controllers used in the IBM XT computer
-	  will be supported if you say Y here.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called xd.
-
-	  It's pretty unlikely that you have one of these: say N.
-
 config GDROM
 	tristate "SEGA Dreamcast GD-ROM drive"
 	depends on SH_DREAMCAST
@@ -544,4 +531,14 @@ config BLK_DEV_RBD
 
 	  If unsure, say N.
 
+config BLK_DEV_RSXX
+	tristate "RamSam PCIe Flash SSD Device Driver"
+	depends on PCI
+	help
+	  Device driver for IBM's high speed PCIe SSD
+	  storage devices: RamSan-70 and RamSan-80.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rsxx.
+
 endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 17e82df3df74..a3b40232c6ab 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY)	+= ataflop.o
 obj-$(CONFIG_AMIGA_Z2RAM)	+= z2ram.o
 obj-$(CONFIG_BLK_DEV_RAM)	+= brd.o
 obj-$(CONFIG_BLK_DEV_LOOP)	+= loop.o
-obj-$(CONFIG_BLK_DEV_XD)	+= xd.o
 obj-$(CONFIG_BLK_CPQ_DA)	+= cpqarray.o
 obj-$(CONFIG_BLK_CPQ_CISS_DA)	+= cciss.o
 obj-$(CONFIG_BLK_DEV_DAC960)	+= DAC960.o
@@ -41,4 +40,6 @@ obj-$(CONFIG_BLK_DEV_DRBD)	+= drbd/
 obj-$(CONFIG_BLK_DEV_RBD)	+= rbd.o
 obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX)	+= mtip32xx/
 
+obj-$(CONFIG_BLK_DEV_RSXX)	+= rsxx/
+
 swim_mod-y	:= swim.o swim_asm.o
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index 0ba837fc62a8..1fca1f996b45 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -4,6 +4,6 @@
 
 config BLK_DEV_PCIESSD_MTIP32XX
 	tristate "Block Device Driver for Micron PCIe SSDs"
-	depends on PCI
+	depends on PCI && GENERIC_HARDIRQS
 	help
 	  This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3fd100990453..11cc9522cdd4 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -88,6 +88,8 @@ static int instance
 static int mtip_major;
 static struct dentry *dfs_parent;
 
+static u32 cpu_use[NR_CPUS];
+
 static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
 
@@ -296,16 +298,17 @@ static int hba_reset_nosleep(struct driver_data *dd)
  */
 static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
 {
-	atomic_set(&port->commands[tag].active, 1);
+	int group = tag >> 5;
 
-	spin_lock(&port->cmd_issue_lock);
+	atomic_set(&port->commands[tag].active, 1);
 
+	/* guard SACT and CI registers */
+	spin_lock(&port->cmd_issue_lock[group]);
 	writel((1 << MTIP_TAG_BIT(tag)),
 		port->s_active[MTIP_TAG_INDEX(tag)]);
 	writel((1 << MTIP_TAG_BIT(tag)),
 		port->cmd_issue[MTIP_TAG_INDEX(tag)]);
-
-	spin_unlock(&port->cmd_issue_lock);
+	spin_unlock(&port->cmd_issue_lock[group]);
 
 	/* Set the command's timeout value.*/
 	port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
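
The per-group locking above leans on the NCQ tag layout: each 32-bit SACT/CI register covers one slot group of 32 tags, so `tag >> 5` selects the register and the low five bits select the bit within it. A standalone sketch of that math (MTIP_TAG_INDEX()/MTIP_TAG_BIT() are assumed to reduce to the same thing):

	static inline int tag_group(int tag)	/* which 32-bit register */
	{
		return tag >> 5;
	}

	static inline int tag_bit(int tag)	/* bit within that register */
	{
		return tag & 0x1f;
	}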
@@ -964,56 +967,56 @@ handle_tfe_exit:
 /*
  * Handle a set device bits interrupt
  */
-static inline void mtip_process_sdbf(struct driver_data *dd)
+static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
+					u32 completed)
 {
-	struct mtip_port *port = dd->port;
-	int group, tag, bit;
-	u32 completed;
+	struct driver_data *dd = port->dd;
+	int tag, bit;
 	struct mtip_cmd *command;
 
-	/* walk all bits in all slot groups */
-	for (group = 0; group < dd->slot_groups; group++) {
-		completed = readl(port->completed[group]);
-		if (!completed)
-			continue;
+	if (!completed) {
+		WARN_ON_ONCE(!completed);
+		return;
+	}
+	/* clear completed status register in the hardware.*/
+	writel(completed, port->completed[group]);
 
-		/* clear completed status register in the hardware.*/
-		writel(completed, port->completed[group]);
+	/* Process completed commands. */
+	for (bit = 0; (bit < 32) && completed; bit++) {
+		if (completed & 0x01) {
+			tag = (group << 5) | bit;
 
-		/* Process completed commands. */
-		for (bit = 0;
-		    (bit < 32) && completed;
-		    bit++, completed >>= 1) {
-			if (completed & 0x01) {
-				tag = (group << 5) | bit;
+			/* skip internal command slot. */
+			if (unlikely(tag == MTIP_TAG_INTERNAL))
+				continue;
 
-				/* skip internal command slot. */
-				if (unlikely(tag == MTIP_TAG_INTERNAL))
-					continue;
+			command = &port->commands[tag];
+			/* make internal callback */
+			if (likely(command->comp_func)) {
+				command->comp_func(
+					port,
+					tag,
+					command->comp_data,
+					0);
+			} else {
+				dev_warn(&dd->pdev->dev,
+					"Null completion "
+					"for tag %d",
+					tag);
 
-				command = &port->commands[tag];
-				/* make internal callback */
-				if (likely(command->comp_func)) {
-					command->comp_func(
-						port,
-						tag,
-						command->comp_data,
-						0);
-				} else {
-					dev_warn(&dd->pdev->dev,
-						"Null completion "
-						"for tag %d",
-						tag);
-
-					if (mtip_check_surprise_removal(
-						dd->pdev)) {
-						mtip_command_cleanup(dd);
-						return;
-					}
-				}
+				if (mtip_check_surprise_removal(
+					dd->pdev)) {
+					mtip_command_cleanup(dd);
+					return;
 				}
 			}
 		}
+		completed >>= 1;
 	}
+
+	/* If last, re-enable interrupts */
+	if (atomic_dec_return(&dd->irq_workers_active) == 0)
+		writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
 }
 
 /*
@@ -1072,6 +1075,8 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
 	struct mtip_port *port = dd->port;
 	u32 hba_stat, port_stat;
 	int rv = IRQ_NONE;
+	int do_irq_enable = 1, i, workers;
+	struct mtip_work *twork;
 
 	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
 	if (hba_stat) {
@@ -1082,8 +1087,42 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
 		writel(port_stat, port->mmio + PORT_IRQ_STAT);
 
 		/* Demux port status */
-		if (likely(port_stat & PORT_IRQ_SDB_FIS))
-			mtip_process_sdbf(dd);
+		if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
+			do_irq_enable = 0;
+			WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
+
+			/* Start at 1: group zero is always local? */
+			for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
+									i++) {
+				twork = &dd->work[i];
+				twork->completed = readl(port->completed[i]);
+				if (twork->completed)
+					workers++;
+			}
+
+			atomic_set(&dd->irq_workers_active, workers);
+			if (workers) {
+				for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
+					twork = &dd->work[i];
+					if (twork->completed)
+						queue_work_on(
+							twork->cpu_binding,
+							dd->isr_workq,
+							&twork->work);
+				}
+
+				if (likely(dd->work[0].completed))
+					mtip_workq_sdbfx(port, 0,
+							dd->work[0].completed);
+
+			} else {
+				/*
+				 * Chip quirk: SDB interrupt but nothing
+				 * to complete
+				 */
+				do_irq_enable = 1;
+			}
+		}
 
 		if (unlikely(port_stat & PORT_IRQ_ERR)) {
 			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
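
The interrupt path above snapshots each slot group's completed register, counts how many groups have work, fans groups 1..7 out to CPU-bound work items, and services group 0 inline in the ISR; the last worker to finish re-arms the interrupt. The accounting pattern in isolation (a sketch, not driver code):

	atomic_set(&active, nr_groups_with_work);
	/* ...each worker processes its group, and the last one out
	 * re-enables interrupts, mirroring mtip_workq_sdbfx(): */
	if (atomic_dec_return(&active) == 0)
		writel(0xffffffff, mmio + HOST_IRQ_STAT);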
@@ -1103,21 +1142,13 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
 	}
 
 	/* acknowledge interrupt */
-	writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
+	if (unlikely(do_irq_enable))
+		writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
 
 	return rv;
 }
 
 /*
- * Wrapper for mtip_handle_irq
- * (ignores return code)
- */
-static void mtip_tasklet(unsigned long data)
-{
-	mtip_handle_irq((struct driver_data *) data);
-}
-
-/*
  * HBA interrupt subroutine.
  *
  * @irq IRQ number.
@@ -1130,8 +1161,8 @@ static void mtip_tasklet(unsigned long data)
 static irqreturn_t mtip_irq_handler(int irq, void *instance)
 {
 	struct driver_data *dd = instance;
-	tasklet_schedule(&dd->tasklet);
-	return IRQ_HANDLED;
+
+	return mtip_handle_irq(dd);
 }
 
 static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
@@ -1489,6 +1520,12 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
 	}
 #endif
 
+	/* Demux ID.DRAT & ID.RZAT to determine trim support */
+	if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
+		port->dd->trim_supp = true;
+	else
+		port->dd->trim_supp = false;
+
 	/* Set the identify buffer as valid. */
 	port->identify_valid = 1;
 
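
The identify check above reads ATA IDENTIFY word 69: bit 14 is DRAT (Deterministic Read After Trim) and bit 5 is RZAT (Read Zeroes After Trim); the driver advertises trim only when both are set. Spelled out with illustrative macro names:

	#define ID_WORD69_DRAT	(1 << 14)	/* Deterministic Read After Trim */
	#define ID_WORD69_RZAT	(1 << 5)	/* Read Zeroes After Trim */

	static bool id_supports_trim(const u16 *id)
	{
		return (id[69] & ID_WORD69_DRAT) && (id[69] & ID_WORD69_RZAT);
	}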
@@ -1676,6 +1713,81 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
 }
 
 /*
+ * Trim unused sectors
+ *
+ * @dd		pointer to driver_data structure
+ * @lba		starting lba
+ * @len		# of 512b sectors to trim
+ *
+ * return value
+ *	-ENOMEM		Out of dma memory
+ *	-EINVAL		Invalid parameters passed in, trim not supported
+ *	-EIO		Error submitting trim request to hw
+ */
+static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len)
+{
+	int i, rv = 0;
+	u64 tlba, tlen, sect_left;
+	struct mtip_trim_entry *buf;
+	dma_addr_t dma_addr;
+	struct host_to_dev_fis fis;
+
+	if (!len || dd->trim_supp == false)
+		return -EINVAL;
+
+	/* Trim request too big */
+	WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
+
+	/* Trim request not aligned on 4k boundary */
+	WARN_ON(len % 8 != 0);
+
+	/* Warn if vu_trim structure is too big */
+	WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
+
+	/* Allocate a DMA buffer for the trim structure */
+	buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
+								GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	memset(buf, 0, ATA_SECT_SIZE);
+
+	for (i = 0, sect_left = len, tlba = lba;
+			i < MTIP_MAX_TRIM_ENTRIES && sect_left;
+			i++) {
+		tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
+					MTIP_MAX_TRIM_ENTRY_LEN :
+					sect_left);
+		buf[i].lba = __force_bit2int cpu_to_le32(tlba);
+		buf[i].range = __force_bit2int cpu_to_le16(tlen);
+		tlba += tlen;
+		sect_left -= tlen;
+	}
+	WARN_ON(sect_left != 0);
+
+	/* Build the fis */
+	memset(&fis, 0, sizeof(struct host_to_dev_fis));
+	fis.type       = 0x27;
+	fis.opts       = 1 << 7;
+	fis.command    = 0xfb;
+	fis.features   = 0x60;
+	fis.sect_count = 1;
+	fis.device     = ATA_DEVICE_OBS;
+
+	if (mtip_exec_internal_command(dd->port,
+					&fis,
+					5,
+					dma_addr,
+					ATA_SECT_SIZE,
+					0,
+					GFP_KERNEL,
+					MTIP_TRIM_TIMEOUT_MS) < 0)
+		rv = -EIO;
+
+	dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
+	return rv;
+}
+
+/*
  * Get the drive capacity.
  *
  * @dd Pointer to the device data structure.
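
mtip_send_trim() packs a single DMA sector with up to MTIP_MAX_TRIM_ENTRIES ranges of at most MTIP_MAX_TRIM_ENTRY_LEN (0xfff8) 512-byte sectors each, so one command covers at most 8 * 0xfff8 = 524224 sectors (roughly 256 MiB). A worked, user-space model of the splitting loop with a hypothetical request:

	#include <stdio.h>

	#define MAX_ENTRIES	8
	#define MAX_ENTRY_LEN	0xfff8u		/* sectors per trim entry */

	int main(void)
	{
		unsigned long long lba = 2048, left = 100000; /* hypothetical */
		int i;

		for (i = 0; i < MAX_ENTRIES && left; i++) {
			unsigned long long n =
				left > MAX_ENTRY_LEN ? MAX_ENTRY_LEN : left;
			printf("entry %d: lba=%llu range=%llu\n", i, lba, n);
			lba += n;
			left -= n;
		}
		return 0;	/* prints ranges 65528 then 34472 here */
	}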
@@ -3005,20 +3117,24 @@ static int mtip_hw_init(struct driver_data *dd)
 
 	hba_setup(dd);
 
-	tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
-
-	dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
+	dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
+				dd->numa_node);
 	if (!dd->port) {
 		dev_err(&dd->pdev->dev,
 			"Memory allocation: port structure\n");
 		return -ENOMEM;
 	}
 
+	/* Continue workqueue setup */
+	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
+		dd->work[i].port = dd->port;
+
 	/* Counting semaphore to track command slot usage */
 	sema_init(&dd->port->cmd_slot, num_command_slots - 1);
 
 	/* Spinlock to prevent concurrent issue */
-	spin_lock_init(&dd->port->cmd_issue_lock);
+	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
+		spin_lock_init(&dd->port->cmd_issue_lock[i]);
 
 	/* Set the port mmio base address. */
 	dd->port->mmio = dd->mmio + PORT_OFFSET;
@@ -3165,6 +3281,7 @@ static int mtip_hw_init(struct driver_data *dd)
 			"Unable to allocate IRQ %d\n", dd->pdev->irq);
 		goto out2;
 	}
+	irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
 
 	/* Enable interrupts on the HBA. */
 	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
@@ -3241,7 +3358,8 @@ out3:
 	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
 			dd->mmio + HOST_CTL);
 
-	/*Release the IRQ. */
+	/* Release the IRQ. */
+	irq_set_affinity_hint(dd->pdev->irq, NULL);
 	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
 
 out2:
@@ -3291,11 +3409,9 @@ static int mtip_hw_exit(struct driver_data *dd)
 	del_timer_sync(&dd->port->cmd_timer);
 
 	/* Release the IRQ. */
+	irq_set_affinity_hint(dd->pdev->irq, NULL);
 	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
 
-	/* Stop the bottom half tasklet. */
-	tasklet_kill(&dd->tasklet);
-
 	/* Free the command/command header memory. */
 	dmam_free_coherent(&dd->pdev->dev,
 			HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
@@ -3641,6 +3757,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 		}
 	}
 
+	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+		bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+						bio_sectors(bio)));
+		return;
+	}
+
 	if (unlikely(!bio_has_data(bio))) {
 		blk_queue_flush(queue, 0);
 		bio_endio(bio, 0);
@@ -3711,7 +3833,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 		goto protocol_init_error;
 	}
 
-	dd->disk = alloc_disk(MTIP_MAX_MINORS);
+	dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
 	if (dd->disk == NULL) {
 		dev_err(&dd->pdev->dev,
 			"Unable to allocate gendisk structure\n");
@@ -3755,7 +3877,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 
 skip_create_disk:
 	/* Allocate the request queue. */
-	dd->queue = blk_alloc_queue(GFP_KERNEL);
+	dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node);
 	if (dd->queue == NULL) {
 		dev_err(&dd->pdev->dev,
 			"Unable to allocate request queue\n");
@@ -3783,6 +3905,15 @@ skip_create_disk:
 	 */
 	blk_queue_flush(dd->queue, 0);
 
+	/* Signal trim support */
+	if (dd->trim_supp == true) {
+		set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
+		dd->queue->limits.discard_granularity = 4096;
+		blk_queue_max_discard_sectors(dd->queue,
+			MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
+		dd->queue->limits.discard_zeroes_data = 0;
+	}
+
 	/* Set the capacity of the device in 512 byte sectors. */
 	if (!(mtip_hw_get_capacity(dd, &capacity))) {
 		dev_warn(&dd->pdev->dev,
@@ -3813,9 +3944,8 @@ skip_create_disk:
 
 start_service_thread:
 	sprintf(thd_name, "mtip_svc_thd_%02d", index);
-
-	dd->mtip_svc_handler = kthread_run(mtip_service_thread,
-						dd, thd_name);
+	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
+						dd, dd->numa_node, thd_name);
 
 	if (IS_ERR(dd->mtip_svc_handler)) {
 		dev_err(&dd->pdev->dev, "service thread failed to start\n");
@@ -3823,7 +3953,7 @@ start_service_thread:
 		rv = -EFAULT;
 		goto kthread_run_error;
 	}
-
+	wake_up_process(dd->mtip_svc_handler);
 	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
 		rv = wait_for_rebuild;
 
@@ -3963,6 +4093,56 @@ static int mtip_block_resume(struct driver_data *dd)
 	return 0;
 }
 
+static void drop_cpu(int cpu)
+{
+	cpu_use[cpu]--;
+}
+
+static int get_least_used_cpu_on_node(int node)
+{
+	int cpu, least_used_cpu, least_cnt;
+	const struct cpumask *node_mask;
+
+	node_mask = cpumask_of_node(node);
+	least_used_cpu = cpumask_first(node_mask);
+	least_cnt = cpu_use[least_used_cpu];
+	cpu = least_used_cpu;
+
+	for_each_cpu(cpu, node_mask) {
+		if (cpu_use[cpu] < least_cnt) {
+			least_used_cpu = cpu;
+			least_cnt = cpu_use[cpu];
+		}
+	}
+	cpu_use[least_used_cpu]++;
+	return least_used_cpu;
+}
+
+/* Helper for selecting a node in round robin mode */
+static inline int mtip_get_next_rr_node(void)
+{
+	static int next_node = -1;
+
+	if (next_node == -1) {
+		next_node = first_online_node;
+		return next_node;
+	}
+
+	next_node = next_online_node(next_node);
+	if (next_node == MAX_NUMNODES)
+		next_node = first_online_node;
+	return next_node;
+}
+
+static DEFINE_HANDLER(0);
+static DEFINE_HANDLER(1);
+static DEFINE_HANDLER(2);
+static DEFINE_HANDLER(3);
+static DEFINE_HANDLER(4);
+static DEFINE_HANDLER(5);
+static DEFINE_HANDLER(6);
+static DEFINE_HANDLER(7);
+
 /*
  * Called for each supported PCI device detected.
  *
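
get_least_used_cpu_on_node() above keeps a per-CPU reference count (cpu_use[]) and hands back the least-loaded CPU in a node, while mtip_get_next_rr_node() rotates across online nodes for cards whose bus reports no proximity. A usage sketch under those assumptions, mirroring the probe path below:

	/* Pick a home node, then spread this card's workers across it. */
	int node = pcibus_to_node(pdev->bus);
	if (node == NUMA_NO_NODE || !node_online(node))
		node = mtip_get_next_rr_node();

	int isr_cpu  = get_least_used_cpu_on_node(node); /* groups 0,3 + IRQ */
	int wq_cpu_a = get_least_used_cpu_on_node(node); /* groups 1,4,7 */
	int wq_cpu_b = get_least_used_cpu_on_node(node); /* groups 2,5,6 */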
@@ -3977,9 +4157,25 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 {
 	int rv = 0;
 	struct driver_data *dd = NULL;
+	char cpu_list[256];
+	const struct cpumask *node_mask;
+	int cpu, i = 0, j = 0;
+	int my_node = NUMA_NO_NODE;
 
 	/* Allocate memory for this devices private data. */
-	dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
+	my_node = pcibus_to_node(pdev->bus);
+	if (my_node != NUMA_NO_NODE) {
+		if (!node_online(my_node))
+			my_node = mtip_get_next_rr_node();
+	} else {
+		dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
+		my_node = mtip_get_next_rr_node();
+	}
+	dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
+		my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
+		cpu_to_node(smp_processor_id()), smp_processor_id());
+
+	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
 	if (dd == NULL) {
 		dev_err(&pdev->dev,
 			"Unable to allocate memory for driver data\n");
@@ -4016,19 +4212,82 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 		}
 	}
 
-	pci_set_master(pdev);
+	/* Copy the info we may need later into the private data structure. */
+	dd->major	= mtip_major;
+	dd->instance	= instance;
+	dd->pdev	= pdev;
+	dd->numa_node	= my_node;
 
+	memset(dd->workq_name, 0, 32);
+	snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
+
+	dd->isr_workq = create_workqueue(dd->workq_name);
+	if (!dd->isr_workq) {
+		dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
+		goto block_initialize_err;
+	}
+
+	memset(cpu_list, 0, sizeof(cpu_list));
+
+	node_mask = cpumask_of_node(dd->numa_node);
+	if (!cpumask_empty(node_mask)) {
+		for_each_cpu(cpu, node_mask)
+		{
+			snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
+			j = strlen(cpu_list);
+		}
+
+		dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
+			dd->numa_node,
+			topology_physical_package_id(cpumask_first(node_mask)),
+			nr_cpus_node(dd->numa_node),
+			cpu_list);
+	} else
+		dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");
+
+	dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
+	dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
+		cpu_to_node(dd->isr_binding), dd->isr_binding);
+
+	/* first worker context always runs in ISR */
+	dd->work[0].cpu_binding = dd->isr_binding;
+	dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
+	dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
+	dd->work[3].cpu_binding = dd->work[0].cpu_binding;
+	dd->work[4].cpu_binding = dd->work[1].cpu_binding;
+	dd->work[5].cpu_binding = dd->work[2].cpu_binding;
+	dd->work[6].cpu_binding = dd->work[2].cpu_binding;
+	dd->work[7].cpu_binding = dd->work[1].cpu_binding;
+
+	/* Log the bindings */
+	for_each_present_cpu(cpu) {
+		memset(cpu_list, 0, sizeof(cpu_list));
+		for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
+			if (dd->work[i].cpu_binding == cpu) {
+				snprintf(&cpu_list[j], 256 - j, "%d ", i);
+				j = strlen(cpu_list);
+			}
+		}
+		if (j)
+			dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
+	}
+
+	INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
+	INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
+	INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
+	INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
+	INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
+	INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
+	INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
+	INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
+
+	pci_set_master(pdev);
 	if (pci_enable_msi(pdev)) {
 		dev_warn(&pdev->dev,
 			"Unable to enable MSI interrupt.\n");
 		goto block_initialize_err;
 	}
 
-	/* Copy the info we may need later into the private data structure. */
-	dd->major	= mtip_major;
-	dd->instance	= instance;
-	dd->pdev	= pdev;
-
 	/* Initialize the block layer. */
 	rv = mtip_block_initialize(dd);
 	if (rv < 0) {
@@ -4048,7 +4307,13 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 
 block_initialize_err:
 	pci_disable_msi(pdev);
-
+	if (dd->isr_workq) {
+		flush_workqueue(dd->isr_workq);
+		destroy_workqueue(dd->isr_workq);
+		drop_cpu(dd->work[0].cpu_binding);
+		drop_cpu(dd->work[1].cpu_binding);
+		drop_cpu(dd->work[2].cpu_binding);
+	}
 setmask_err:
 	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
 
@@ -4089,6 +4354,14 @@ static void mtip_pci_remove(struct pci_dev *pdev)
 	/* Clean up the block layer. */
 	mtip_block_remove(dd);
 
+	if (dd->isr_workq) {
+		flush_workqueue(dd->isr_workq);
+		destroy_workqueue(dd->isr_workq);
+		drop_cpu(dd->work[0].cpu_binding);
+		drop_cpu(dd->work[1].cpu_binding);
+		drop_cpu(dd->work[2].cpu_binding);
+	}
+
 	pci_disable_msi(pdev);
 
 	kfree(dd);
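
Both the probe error path and mtip_pci_remove() above perform the same workqueue teardown and release the three CPUs reserved through get_least_used_cpu_on_node(). Factored out as a sketch (the patch itself keeps the two copies inline):

	static void mtip_workq_teardown(struct driver_data *dd)
	{
		if (dd->isr_workq) {
			flush_workqueue(dd->isr_workq);
			destroy_workqueue(dd->isr_workq);
			drop_cpu(dd->work[0].cpu_binding);
			drop_cpu(dd->work[1].cpu_binding);
			drop_cpu(dd->work[2].cpu_binding);
		}
	}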
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b1742640556a..3bffff5f670c 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -164,6 +164,35 @@ struct smart_attr {
 	u8	res[3];
 } __packed;
 
+struct mtip_work {
+	struct work_struct work;
+	void *port;
+	int cpu_binding;
+	u32 completed;
+} ____cacheline_aligned_in_smp;
+
+#define DEFINE_HANDLER(group)						\
+	void mtip_workq_sdbf##group(struct work_struct *work)		\
+	{								\
+		struct mtip_work *w = (struct mtip_work *) work;	\
+		mtip_workq_sdbfx(w->port, group, w->completed);		\
+	}
+
+#define MTIP_TRIM_TIMEOUT_MS		240000
+#define MTIP_MAX_TRIM_ENTRIES		8
+#define MTIP_MAX_TRIM_ENTRY_LEN		0xfff8
+
+struct mtip_trim_entry {
+	u32 lba;   /* starting lba of region */
+	u16 rsvd;  /* unused */
+	u16 range; /* # of 512b blocks to trim */
+} __packed;
+
+struct mtip_trim {
+	/* Array of regions to trim */
+	struct mtip_trim_entry entry[MTIP_MAX_TRIM_ENTRIES];
+} __packed;
+
 /* Register Frame Information Structure (FIS), host to device. */
 struct host_to_dev_fis {
 	/*
@@ -424,7 +453,7 @@ struct mtip_port {
 	 */
 	struct semaphore cmd_slot;
 	/* Spinlock for working around command-issue bug. */
-	spinlock_t cmd_issue_lock;
+	spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
 };
 
 /*
@@ -447,9 +476,6 @@ struct driver_data {
 
 	struct mtip_port *port; /* Pointer to the port data structure. */
 
-	/* Tasklet used to process the bottom half of the ISR. */
-	struct tasklet_struct tasklet;
-
 	unsigned product_type; /* magic value declaring the product type */
 
 	unsigned slot_groups; /* number of slot groups the product supports */
@@ -461,6 +487,20 @@ struct driver_data {
 	struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
 
 	struct dentry *dfs_node;
+
+	bool trim_supp; /* flag indicating trim support */
+
+	int numa_node; /* NUMA support */
+
+	char workq_name[32];
+
+	struct workqueue_struct *isr_workq;
+
+	struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
+
+	atomic_t irq_workers_active;
+
+	int isr_binding;
 };
 
 #endif
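
DEFINE_HANDLER(group) stamps out one work handler per slot group; the cast from work_struct to mtip_work is valid because `work` is the first member of struct mtip_work. What DEFINE_HANDLER(0) expands to, approximately:

	void mtip_workq_sdbf0(struct work_struct *work)
	{
		struct mtip_work *w = (struct mtip_work *) work;

		mtip_workq_sdbfx(w->port, 0, w->completed);
	}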
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
new file mode 100644
index 000000000000..f35cd0b71f7b
--- /dev/null
+++ b/drivers/block/rsxx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
+rsxx-y := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
new file mode 100644
index 000000000000..a295e7e9ee41
--- /dev/null
+++ b/drivers/block/rsxx/config.c
@@ -0,0 +1,213 @@
+/*
+* Filename: config.c
+*
+*
+* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
+*	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+*
+* (C) Copyright 2013 IBM Corporation
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License as
+* published by the Free Software Foundation; either version 2 of the
+* License, or (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software Foundation,
+* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include <linux/swab.h>
+
+#include "rsxx_priv.h"
+#include "rsxx_cfg.h"
+
+static void initialize_config(void *config)
+{
+	struct rsxx_card_cfg *cfg = config;
+
+	cfg->hdr.version = RSXX_CFG_VERSION;
+
+	cfg->data.block_size        = RSXX_HW_BLK_SIZE;
+	cfg->data.stripe_size       = RSXX_HW_BLK_SIZE;
+	cfg->data.vendor_id         = RSXX_VENDOR_ID_TMS_IBM;
+	cfg->data.cache_order       = (-1);
+	cfg->data.intr_coal.mode    = RSXX_INTR_COAL_DISABLED;
+	cfg->data.intr_coal.count   = 0;
+	cfg->data.intr_coal.latency = 0;
+}
+
+static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
+{
+	/*
+	 * Return the compliment of the CRC to ensure compatibility
+	 * (i.e. this is how early rsxx drivers did it.)
+	 */
+
+	return ~crc32(~0, &cfg->data, sizeof(cfg->data));
+}
+
+
+/*----------------- Config Byte Swap Functions -------------------*/
+static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
+{
+	hdr->version = be32_to_cpu((__force __be32) hdr->version);
+	hdr->crc     = be32_to_cpu((__force __be32) hdr->crc);
+}
+
+static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
+{
+	hdr->version = (__force u32) cpu_to_be32(hdr->version);
+	hdr->crc     = (__force u32) cpu_to_be32(hdr->crc);
+}
+
+static void config_data_swab(struct rsxx_card_cfg *cfg)
+{
+	u32 *data = (u32 *) &cfg->data;
+	int i;
+
+	for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+		data[i] = swab32(data[i]);
+}
+
+static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
+{
+	u32 *data = (u32 *) &cfg->data;
+	int i;
+
+	for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+		data[i] = le32_to_cpu((__force __le32) data[i]);
+}
+
+static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
+{
+	u32 *data = (u32 *) &cfg->data;
+	int i;
+
+	for (i = 0; i < (sizeof(cfg->data) / 4); i++)
+		data[i] = (__force u32) cpu_to_le32(data[i]);
+}
+
+
+/*----------------- Config Operations ------------------*/
+static int rsxx_save_config(struct rsxx_cardinfo *card)
+{
+	struct rsxx_card_cfg cfg;
+	int st;
+
+	memcpy(&cfg, &card->config, sizeof(cfg));
+
+	if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
+		dev_err(CARD_TO_DEV(card),
+			"Cannot save config with invalid version %d\n",
+			cfg.hdr.version);
+		return -EINVAL;
+	}
+
+	/* Convert data to little endian for the CRC calculation. */
+	config_data_cpu_to_le(&cfg);
+
+	cfg.hdr.crc = config_data_crc32(&cfg);
+
+	/*
+	 * Swap the data from little endian to big endian so it can be
+	 * stored.
+	 */
+	config_data_swab(&cfg);
+	config_hdr_cpu_to_be(&cfg.hdr);
+
+	st = rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
+	if (st)
+		return st;
+
+	return 0;
+}
+
+int rsxx_load_config(struct rsxx_cardinfo *card)
+{
+	int st;
+	u32 crc;
+
+	st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
+				&card->config, 1);
+	if (st) {
+		dev_err(CARD_TO_DEV(card),
+			"Failed reading card config.\n");
+		return st;
+	}
+
+	config_hdr_be_to_cpu(&card->config.hdr);
+
+	if (card->config.hdr.version == RSXX_CFG_VERSION) {
+		/*
+		 * We calculate the CRC with the data in little endian, because
+		 * early drivers did not take big endian CPUs into account.
+		 * The data is always stored in big endian, so we need to byte
+		 * swap it before calculating the CRC.
+		 */
+
+		config_data_swab(&card->config);
+
+		/* Check the CRC */
+		crc = config_data_crc32(&card->config);
+		if (crc != card->config.hdr.crc) {
+			dev_err(CARD_TO_DEV(card),
+				"Config corruption detected!\n");
+			dev_info(CARD_TO_DEV(card),
+				"CRC (sb x%08x is x%08x)\n",
+				card->config.hdr.crc, crc);
+			return -EIO;
+		}
+
+		/* Convert the data to CPU byteorder */
+		config_data_le_to_cpu(&card->config);
+
+	} else if (card->config.hdr.version != 0) {
+		dev_err(CARD_TO_DEV(card),
+			"Invalid config version %d.\n",
+			card->config.hdr.version);
+		/*
+		 * Config version changes require special handling from the
+		 * user
+		 */
+		return -EINVAL;
+	} else {
+		dev_info(CARD_TO_DEV(card),
+			"Initializing card configuration.\n");
+		initialize_config(card);
+		st = rsxx_save_config(card);
+		if (st)
+			return st;
+	}
+
+	card->config_valid = 1;
+
+	dev_dbg(CARD_TO_DEV(card), "version:     x%08x\n",
+		card->config.hdr.version);
+	dev_dbg(CARD_TO_DEV(card), "crc:         x%08x\n",
+		card->config.hdr.crc);
+	dev_dbg(CARD_TO_DEV(card), "block_size:  x%08x\n",
+		card->config.data.block_size);
+	dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
+		card->config.data.stripe_size);
+	dev_dbg(CARD_TO_DEV(card), "vendor_id:   x%08x\n",
+		card->config.data.vendor_id);
+	dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
+		card->config.data.cache_order);
+	dev_dbg(CARD_TO_DEV(card), "mode:        x%08x\n",
+		card->config.data.intr_coal.mode);
+	dev_dbg(CARD_TO_DEV(card), "count:       x%08x\n",
+		card->config.data.intr_coal.count);
+	dev_dbg(CARD_TO_DEV(card), "latency:     x%08x\n",
+		card->config.data.intr_coal.latency);
+
+	return 0;
+}
+
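
config_data_crc32() above keeps the complement of a kernel crc32() seeded with all ones, and the CRC is always computed over the little-endian image of the data (the save path converts cpu to le, CRCs, then byte-swaps to big endian for storage). The checksum in isolation, as a sketch:

	#include <linux/crc32.h>

	static u32 cfg_crc(const struct rsxx_card_cfg *cfg)
	{
		/* complement kept for compatibility with early rsxx drivers */
		return ~crc32(~0, &cfg->data, sizeof(cfg->data));
	}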
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c new file mode 100644 index 000000000000..e5162487686a --- /dev/null +++ b/drivers/block/rsxx/core.c | |||
| @@ -0,0 +1,649 @@ | |||
| 1 | /* | ||
| 2 | * Filename: core.c | ||
| 3 | * | ||
| 4 | * | ||
| 5 | * Authors: Joshua Morris <josh.h.morris@us.ibm.com> | ||
| 6 | * Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
| 7 | * | ||
| 8 | * (C) Copyright 2013 IBM Corporation | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation; either version 2 of the | ||
| 13 | * License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but | ||
| 16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 18 | * General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software Foundation, | ||
| 22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/kernel.h> | ||
| 26 | #include <linux/init.h> | ||
| 27 | #include <linux/interrupt.h> | ||
| 28 | #include <linux/module.h> | ||
| 29 | #include <linux/pci.h> | ||
| 30 | #include <linux/reboot.h> | ||
| 31 | #include <linux/slab.h> | ||
| 32 | #include <linux/bitops.h> | ||
| 33 | |||
| 34 | #include <linux/genhd.h> | ||
| 35 | #include <linux/idr.h> | ||
| 36 | |||
| 37 | #include "rsxx_priv.h" | ||
| 38 | #include "rsxx_cfg.h" | ||
| 39 | |||
| 40 | #define NO_LEGACY 0 | ||
| 41 | |||
| 42 | MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver"); | ||
| 43 | MODULE_AUTHOR("IBM <support@ramsan.com>"); | ||
| 44 | MODULE_LICENSE("GPL"); | ||
| 45 | MODULE_VERSION(DRIVER_VERSION); | ||
| 46 | |||
| 47 | static unsigned int force_legacy = NO_LEGACY; | ||
| 48 | module_param(force_legacy, uint, 0444); | ||
| 49 | MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts"); | ||
| 50 | |||
| 51 | static DEFINE_IDA(rsxx_disk_ida); | ||
| 52 | static DEFINE_SPINLOCK(rsxx_ida_lock); | ||
| 53 | |||
| 54 | /*----------------- Interrupt Control & Handling -------------------*/ | ||
| 55 | static void __enable_intr(unsigned int *mask, unsigned int intr) | ||
| 56 | { | ||
| 57 | *mask |= intr; | ||
| 58 | } | ||
| 59 | |||
| 60 | static void __disable_intr(unsigned int *mask, unsigned int intr) | ||
| 61 | { | ||
| 62 | *mask &= ~intr; | ||
| 63 | } | ||
| 64 | |||
| 65 | /* | ||
| 66 | * NOTE: Disabling the IER will disable the hardware interrupt. | ||
| 67 | * Disabling the ISR will disable the software handling of the ISR bit. | ||
| 68 | * | ||
| 69 | * Enable/Disable interrupt functions assume the card->irq_lock | ||
| 70 | * is held by the caller. | ||
| 71 | */ | ||
| 72 | void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) | ||
| 73 | { | ||
| 74 | if (unlikely(card->halt)) | ||
| 75 | return; | ||
| 76 | |||
| 77 | __enable_intr(&card->ier_mask, intr); | ||
| 78 | iowrite32(card->ier_mask, card->regmap + IER); | ||
| 79 | } | ||
| 80 | |||
| 81 | void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) | ||
| 82 | { | ||
| 83 | __disable_intr(&card->ier_mask, intr); | ||
| 84 | iowrite32(card->ier_mask, card->regmap + IER); | ||
| 85 | } | ||
| 86 | |||
| 87 | void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, | ||
| 88 | unsigned int intr) | ||
| 89 | { | ||
| 90 | if (unlikely(card->halt)) | ||
| 91 | return; | ||
| 92 | |||
| 93 | __enable_intr(&card->isr_mask, intr); | ||
| 94 | __enable_intr(&card->ier_mask, intr); | ||
| 95 | iowrite32(card->ier_mask, card->regmap + IER); | ||
| 96 | } | ||
| 97 | void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card, | ||
| 98 | unsigned int intr) | ||
| 99 | { | ||
| 100 | __disable_intr(&card->isr_mask, intr); | ||
| 101 | __disable_intr(&card->ier_mask, intr); | ||
| 102 | iowrite32(card->ier_mask, card->regmap + IER); | ||
| 103 | } | ||
| 104 | |||
| 105 | static irqreturn_t rsxx_isr(int irq, void *pdata) | ||
| 106 | { | ||
| 107 | struct rsxx_cardinfo *card = pdata; | ||
| 108 | unsigned int isr; | ||
| 109 | int handled = 0; | ||
| 110 | int reread_isr; | ||
| 111 | int i; | ||
| 112 | |||
| 113 | spin_lock(&card->irq_lock); | ||
| 114 | |||
| 115 | do { | ||
| 116 | reread_isr = 0; | ||
| 117 | |||
| 118 | isr = ioread32(card->regmap + ISR); | ||
| 119 | if (isr == 0xffffffff) { | ||
| 120 | /* | ||
| 121 | * A few systems seem to have an intermittent issue | ||
| 122 | * where PCI reads return all Fs, but retrying the read | ||
| 123 | * a little later will return as expected. | ||
| 124 | */ | ||
| 125 | dev_info(CARD_TO_DEV(card), | ||
| 126 | "ISR = 0xFFFFFFFF, retrying later\n"); | ||
| 127 | break; | ||
| 128 | } | ||
| 129 | |||
| 130 | isr &= card->isr_mask; | ||
| 131 | if (!isr) | ||
| 132 | break; | ||
| 133 | |||
| 134 | for (i = 0; i < card->n_targets; i++) { | ||
| 135 | if (isr & CR_INTR_DMA(i)) { | ||
| 136 | if (card->ier_mask & CR_INTR_DMA(i)) { | ||
| 137 | rsxx_disable_ier(card, CR_INTR_DMA(i)); | ||
| 138 | reread_isr = 1; | ||
| 139 | } | ||
| 140 | queue_work(card->ctrl[i].done_wq, | ||
| 141 | &card->ctrl[i].dma_done_work); | ||
| 142 | handled++; | ||
| 143 | } | ||
| 144 | } | ||
| 145 | |||
| 146 | if (isr & CR_INTR_CREG) { | ||
| 147 | schedule_work(&card->creg_ctrl.done_work); | ||
| 148 | handled++; | ||
| 149 | } | ||
| 150 | |||
| 151 | if (isr & CR_INTR_EVENT) { | ||
| 152 | schedule_work(&card->event_work); | ||
| 153 | rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); | ||
| 154 | handled++; | ||
| 155 | } | ||
| 156 | } while (reread_isr); | ||
| 157 | |||
| 158 | spin_unlock(&card->irq_lock); | ||
| 159 | |||
| 160 | return handled ? IRQ_HANDLED : IRQ_NONE; | ||
| 161 | } | ||
| 162 | |||
| 163 | /*----------------- Card Event Handler -------------------*/ | ||
| 164 | static char *rsxx_card_state_to_str(unsigned int state) | ||
| 165 | { | ||
| 166 | static char *state_strings[] = { | ||
| 167 | "Unknown", "Shutdown", "Starting", "Formatting", | ||
| 168 | "Uninitialized", "Good", "Shutting Down", | ||
| 169 | "Fault", "Read Only Fault", "dStroying" | ||
| 170 | }; | ||
| 171 | |||
| 172 | return state_strings[ffs(state)]; | ||
| 173 | } | ||
| 174 | |||
| 175 | static void card_state_change(struct rsxx_cardinfo *card, | ||
| 176 | unsigned int new_state) | ||
| 177 | { | ||
| 178 | int st; | ||
| 179 | |||
| 180 | dev_info(CARD_TO_DEV(card), | ||
| 181 | "card state change detected.(%s -> %s)\n", | ||
| 182 | rsxx_card_state_to_str(card->state), | ||
| 183 | rsxx_card_state_to_str(new_state)); | ||
| 184 | |||
| 185 | card->state = new_state; | ||
| 186 | |||
| 187 | /* Don't attach DMA interfaces if the card has an invalid config */ | ||
| 188 | if (!card->config_valid) | ||
| 189 | return; | ||
| 190 | |||
| 191 | switch (new_state) { | ||
| 192 | case CARD_STATE_RD_ONLY_FAULT: | ||
| 193 | dev_crit(CARD_TO_DEV(card), | ||
| 194 | "Hardware has entered read-only mode!\n"); | ||
| 195 | /* | ||
| 196 | * Fall through so the DMA devices can be attached and | ||
| 197 | * the user can attempt to pull off their data. | ||
| 198 | */ | ||
| 199 | case CARD_STATE_GOOD: | ||
| 200 | st = rsxx_get_card_size8(card, &card->size8); | ||
| 201 | if (st) | ||
| 202 | dev_err(CARD_TO_DEV(card), | ||
| 203 | "Failed attaching DMA devices\n"); | ||
| 204 | |||
| 205 | if (card->config_valid) | ||
| 206 | set_capacity(card->gendisk, card->size8 >> 9); | ||
| 207 | break; | ||
| 208 | |||
| 209 | case CARD_STATE_FAULT: | ||
| 210 | dev_crit(CARD_TO_DEV(card), | ||
| 211 | "Hardware Fault reported!\n"); | ||
| 212 | /* Fall through. */ | ||
| 213 | |||
| 214 | /* Everything else, detach DMA interface if it's attached. */ | ||
| 215 | case CARD_STATE_SHUTDOWN: | ||
| 216 | case CARD_STATE_STARTING: | ||
| 217 | case CARD_STATE_FORMATTING: | ||
| 218 | case CARD_STATE_UNINITIALIZED: | ||
| 219 | case CARD_STATE_SHUTTING_DOWN: | ||
| 220 | /* | ||
| 221 | * dStroy is a term coined by marketing to represent the low level | ||
| 222 | * secure erase. | ||
| 223 | */ | ||
| 224 | case CARD_STATE_DSTROYING: | ||
| 225 | set_capacity(card->gendisk, 0); | ||
| 226 | break; | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | static void card_event_handler(struct work_struct *work) | ||
| 231 | { | ||
| 232 | struct rsxx_cardinfo *card; | ||
| 233 | unsigned int state; | ||
| 234 | unsigned long flags; | ||
| 235 | int st; | ||
| 236 | |||
| 237 | card = container_of(work, struct rsxx_cardinfo, event_work); | ||
| 238 | |||
| 239 | if (unlikely(card->halt)) | ||
| 240 | return; | ||
| 241 | |||
| 242 | /* | ||
| 243 | * Enable the interrupt now to avoid any weird race conditions where a | ||
| 244 | * state change might occur while rsxx_get_card_state() is | ||
| 245 | * processing a returned creg cmd. | ||
| 246 | */ | ||
| 247 | spin_lock_irqsave(&card->irq_lock, flags); | ||
| 248 | rsxx_enable_ier_and_isr(card, CR_INTR_EVENT); | ||
| 249 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
| 250 | |||
| 251 | st = rsxx_get_card_state(card, &state); | ||
| 252 | if (st) { | ||
| 253 | dev_info(CARD_TO_DEV(card), | ||
| 254 | "Failed reading state after event.\n"); | ||
| 255 | return; | ||
| 256 | } | ||
| 257 | |||
| 258 | if (card->state != state) | ||
| 259 | card_state_change(card, state); | ||
| 260 | |||
| 261 | if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING) | ||
| 262 | rsxx_read_hw_log(card); | ||
| 263 | } | ||
| 264 | |||
| 265 | /*----------------- Card Operations -------------------*/ | ||
| 266 | static int card_shutdown(struct rsxx_cardinfo *card) | ||
| 267 | { | ||
| 268 | unsigned int state; | ||
| 269 | signed long start; | ||
| 270 | const int timeout = msecs_to_jiffies(120000); | ||
| 271 | int st; | ||
| 272 | |||
| 273 | /* We can't issue a shutdown if the card is in a transition state */ | ||
| 274 | start = jiffies; | ||
| 275 | do { | ||
| 276 | st = rsxx_get_card_state(card, &state); | ||
| 277 | if (st) | ||
| 278 | return st; | ||
| 279 | } while (state == CARD_STATE_STARTING && | ||
| 280 | (jiffies - start < timeout)); | ||
| 281 | |||
| 282 | if (state == CARD_STATE_STARTING) | ||
| 283 | return -ETIMEDOUT; | ||
| 284 | |||
| 285 | /* Only issue a shutdown if we need to */ | ||
| 286 | if ((state != CARD_STATE_SHUTTING_DOWN) && | ||
| 287 | (state != CARD_STATE_SHUTDOWN)) { | ||
| 288 | st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN); | ||
| 289 | if (st) | ||
| 290 | return st; | ||
| 291 | } | ||
| 292 | |||
| 293 | start = jiffies; | ||
| 294 | do { | ||
| 295 | st = rsxx_get_card_state(card, &state); | ||
| 296 | if (st) | ||
| 297 | return st; | ||
| 298 | } while (state != CARD_STATE_SHUTDOWN && | ||
| 299 | (jiffies - start < timeout)); | ||
| 300 | |||
| 301 | if (state != CARD_STATE_SHUTDOWN) | ||
| 302 | return -ETIMEDOUT; | ||
| 303 | |||
| 304 | return 0; | ||
| 305 | } | ||
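The two polling loops above share one pattern: poll, bail out on read errors, stop on the wanted state or a deadline, then re-check the state after the loop to distinguish success from timeout. A standalone userspace sketch of that pattern (editorial, not part of the patch); fake_read_state() is a hypothetical stand-in for rsxx_get_card_state():

#include <stdio.h>
#include <time.h>

enum { STATE_SHUTTING_DOWN = 1, STATE_SHUTDOWN = 2 };

/* Hypothetical stand-in: reports SHUTTING_DOWN twice, then SHUTDOWN. */
static int fake_read_state(unsigned int *state)
{
        static int polls;

        *state = (++polls < 3) ? STATE_SHUTTING_DOWN : STATE_SHUTDOWN;
        return 0;
}

int main(void)
{
        const time_t timeout_sec = 5;
        time_t start = time(NULL);
        unsigned int state;
        int st;

        do {
                st = fake_read_state(&state);
                if (st)
                        return 1;       /* propagate read errors, as the driver does */
        } while (state != STATE_SHUTDOWN &&
                 (time(NULL) - start < timeout_sec));

        /* Re-check after the loop: timeout and success both exit it. */
        if (state != STATE_SHUTDOWN) {
                printf("timed out\n");
                return 1;
        }
        printf("shut down\n");
        return 0;
}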
| 306 | |||
| 307 | /*----------------- Driver Initialization & Setup -------------------*/ | ||
| 308 | /* Returns: 0 if the driver is compatible with the device | ||
| 309 | -1 if the driver is NOT compatible with the device */ | ||
| 310 | static int rsxx_compatibility_check(struct rsxx_cardinfo *card) | ||
| 311 | { | ||
| 312 | unsigned char pci_rev; | ||
| 313 | |||
| 314 | pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev); | ||
| 315 | |||
| 316 | if (pci_rev > RS70_PCI_REV_SUPPORTED) | ||
| 317 | return -1; | ||
| 318 | return 0; | ||
| 319 | } | ||
| 320 | |||
| 321 | static int rsxx_pci_probe(struct pci_dev *dev, | ||
| 322 | const struct pci_device_id *id) | ||
| 323 | { | ||
| 324 | struct rsxx_cardinfo *card; | ||
| 325 | int st; | ||
| 326 | |||
| 327 | dev_info(&dev->dev, "PCI-Flash SSD discovered\n"); | ||
| 328 | |||
| 329 | card = kzalloc(sizeof(*card), GFP_KERNEL); | ||
| 330 | if (!card) | ||
| 331 | return -ENOMEM; | ||
| 332 | |||
| 333 | card->dev = dev; | ||
| 334 | pci_set_drvdata(dev, card); | ||
| 335 | |||
| 336 | do { | ||
| 337 | if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) { | ||
| 338 | st = -ENOMEM; | ||
| 339 | goto failed_ida_get; | ||
| 340 | } | ||
| 341 | |||
| 342 | spin_lock(&rsxx_ida_lock); | ||
| 343 | st = ida_get_new(&rsxx_disk_ida, &card->disk_id); | ||
| 344 | spin_unlock(&rsxx_ida_lock); | ||
| 345 | } while (st == -EAGAIN); | ||
| 346 | |||
| 347 | if (st) | ||
| 348 | goto failed_ida_get; | ||
| 349 | |||
| 350 | st = pci_enable_device(dev); | ||
| 351 | if (st) | ||
| 352 | goto failed_enable; | ||
| 353 | |||
| 354 | pci_set_master(dev); | ||
| 355 | pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE); | ||
| 356 | |||
| 357 | st = pci_set_dma_mask(dev, DMA_BIT_MASK(64)); | ||
| 358 | if (st) { | ||
| 359 | dev_err(CARD_TO_DEV(card), | ||
| 360 | "No usable DMA configuration,aborting\n"); | ||
| 361 | goto failed_dma_mask; | ||
| 362 | } | ||
| 363 | |||
| 364 | st = pci_request_regions(dev, DRIVER_NAME); | ||
| 365 | if (st) { | ||
| 366 | dev_err(CARD_TO_DEV(card), | ||
| 367 | "Failed to request memory region\n"); | ||
| 368 | goto failed_request_regions; | ||
| 369 | } | ||
| 370 | |||
| 371 | if (pci_resource_len(dev, 0) == 0) { | ||
| 372 | dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n"); | ||
| 373 | st = -ENOMEM; | ||
| 374 | goto failed_iomap; | ||
| 375 | } | ||
| 376 | |||
| 377 | card->regmap = pci_iomap(dev, 0, 0); | ||
| 378 | if (!card->regmap) { | ||
| 379 | dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n"); | ||
| 380 | st = -ENOMEM; | ||
| 381 | goto failed_iomap; | ||
| 382 | } | ||
| 383 | |||
| 384 | spin_lock_init(&card->irq_lock); | ||
| 385 | card->halt = 0; | ||
| 386 | |||
| 387 | spin_lock_irq(&card->irq_lock); | ||
| 388 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); | ||
| 389 | spin_unlock_irq(&card->irq_lock); | ||
| 390 | |||
| 391 | if (!force_legacy) { | ||
| 392 | st = pci_enable_msi(dev); | ||
| 393 | if (st) | ||
| 394 | dev_warn(CARD_TO_DEV(card), | ||
| 395 | "Failed to enable MSI\n"); | ||
| 396 | } | ||
| 397 | |||
| 398 | st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED, | ||
| 399 | DRIVER_NAME, card); | ||
| 400 | if (st) { | ||
| 401 | dev_err(CARD_TO_DEV(card), | ||
| 402 | "Failed requesting IRQ%d\n", dev->irq); | ||
| 403 | goto failed_irq; | ||
| 404 | } | ||
| 405 | |||
| 406 | /************* Setup Processor Command Interface *************/ | ||
| 407 | rsxx_creg_setup(card); | ||
| 408 | |||
| 409 | spin_lock_irq(&card->irq_lock); | ||
| 410 | rsxx_enable_ier_and_isr(card, CR_INTR_CREG); | ||
| 411 | spin_unlock_irq(&card->irq_lock); | ||
| 412 | |||
| 413 | st = rsxx_compatibility_check(card); | ||
| 414 | if (st) { | ||
| 415 | dev_warn(CARD_TO_DEV(card), | ||
| 416 | "Incompatible driver detected. Please update the driver.\n"); | ||
| 417 | st = -EINVAL; | ||
| 418 | goto failed_compatibility_check; | ||
| 419 | } | ||
| 420 | |||
| 421 | /************* Load Card Config *************/ | ||
| 422 | st = rsxx_load_config(card); | ||
| 423 | if (st) | ||
| 424 | dev_err(CARD_TO_DEV(card), | ||
| 425 | "Failed loading card config\n"); | ||
| 426 | |||
| 427 | /************* Setup DMA Engine *************/ | ||
| 428 | st = rsxx_get_num_targets(card, &card->n_targets); | ||
| 429 | if (st) | ||
| 430 | dev_info(CARD_TO_DEV(card), | ||
| 431 | "Failed reading the number of DMA targets\n"); | ||
| 432 | |||
| 433 | card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL); | ||
| 434 | if (!card->ctrl) { | ||
| 435 | st = -ENOMEM; | ||
| 436 | goto failed_dma_setup; | ||
| 437 | } | ||
| 438 | |||
| 439 | st = rsxx_dma_setup(card); | ||
| 440 | if (st) { | ||
| 441 | dev_info(CARD_TO_DEV(card), | ||
| 442 | "Failed to setup DMA engine\n"); | ||
| 443 | goto failed_dma_setup; | ||
| 444 | } | ||
| 445 | |||
| 446 | /************* Setup Card Event Handler *************/ | ||
| 447 | INIT_WORK(&card->event_work, card_event_handler); | ||
| 448 | |||
| 449 | st = rsxx_setup_dev(card); | ||
| 450 | if (st) | ||
| 451 | goto failed_create_dev; | ||
| 452 | |||
| 453 | rsxx_get_card_state(card, &card->state); | ||
| 454 | |||
| 455 | dev_info(CARD_TO_DEV(card), | ||
| 456 | "card state: %s\n", | ||
| 457 | rsxx_card_state_to_str(card->state)); | ||
| 458 | |||
| 459 | /* | ||
| 460 | * Now that the DMA engine and devices have been set up, we can | ||
| 461 | * enable the event interrupt (it kicks off actions in those | ||
| 462 | * layers, so we couldn't enable it right away). | ||
| 463 | */ | ||
| 464 | spin_lock_irq(&card->irq_lock); | ||
| 465 | rsxx_enable_ier_and_isr(card, CR_INTR_EVENT); | ||
| 466 | spin_unlock_irq(&card->irq_lock); | ||
| 467 | |||
| 468 | if (card->state == CARD_STATE_SHUTDOWN) { | ||
| 469 | st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP); | ||
| 470 | if (st) | ||
| 471 | dev_crit(CARD_TO_DEV(card), | ||
| 472 | "Failed issuing card startup\n"); | ||
| 473 | } else if (card->state == CARD_STATE_GOOD || | ||
| 474 | card->state == CARD_STATE_RD_ONLY_FAULT) { | ||
| 475 | st = rsxx_get_card_size8(card, &card->size8); | ||
| 476 | if (st) | ||
| 477 | card->size8 = 0; | ||
| 478 | } | ||
| 479 | |||
| 480 | rsxx_attach_dev(card); | ||
| 481 | |||
| 482 | return 0; | ||
| 483 | |||
| 484 | failed_create_dev: | ||
| 485 | rsxx_dma_destroy(card); | ||
| 486 | failed_dma_setup: | ||
| 487 | failed_compatibility_check: | ||
| 488 | spin_lock_irq(&card->irq_lock); | ||
| 489 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); | ||
| 490 | spin_unlock_irq(&card->irq_lock); | ||
| 491 | free_irq(dev->irq, card); | ||
| 492 | if (!force_legacy) | ||
| 493 | pci_disable_msi(dev); | ||
| 494 | failed_irq: | ||
| 495 | pci_iounmap(dev, card->regmap); | ||
| 496 | failed_iomap: | ||
| 497 | pci_release_regions(dev); | ||
| 498 | failed_request_regions: | ||
| 499 | failed_dma_mask: | ||
| 500 | pci_disable_device(dev); | ||
| 501 | failed_enable: | ||
| 502 | spin_lock(&rsxx_ida_lock); | ||
| 503 | ida_remove(&rsxx_disk_ida, card->disk_id); | ||
| 504 | spin_unlock(&rsxx_ida_lock); | ||
| 505 | failed_ida_get: | ||
| 506 | kfree(card); | ||
| 507 | |||
| 508 | return st; | ||
| 509 | } | ||
| 510 | |||
| 511 | static void rsxx_pci_remove(struct pci_dev *dev) | ||
| 512 | { | ||
| 513 | struct rsxx_cardinfo *card = pci_get_drvdata(dev); | ||
| 514 | unsigned long flags; | ||
| 515 | int st; | ||
| 516 | int i; | ||
| 517 | |||
| 518 | if (!card) | ||
| 519 | return; | ||
| 520 | |||
| 521 | dev_info(CARD_TO_DEV(card), | ||
| 522 | "Removing PCI-Flash SSD.\n"); | ||
| 523 | |||
| 524 | rsxx_detach_dev(card); | ||
| 525 | |||
| 526 | for (i = 0; i < card->n_targets; i++) { | ||
| 527 | spin_lock_irqsave(&card->irq_lock, flags); | ||
| 528 | rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i)); | ||
| 529 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
| 530 | } | ||
| 531 | |||
| 532 | st = card_shutdown(card); | ||
| 533 | if (st) | ||
| 534 | dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n"); | ||
| 535 | |||
| 536 | /* Sync outstanding event handlers. */ | ||
| 537 | spin_lock_irqsave(&card->irq_lock, flags); | ||
| 538 | rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); | ||
| 539 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
| 540 | |||
| 541 | /* Prevent work_structs from re-queuing themselves. */ | ||
| 542 | card->halt = 1; | ||
| 543 | |||
| 544 | cancel_work_sync(&card->event_work); | ||
| 545 | |||
| 546 | rsxx_destroy_dev(card); | ||
| 547 | rsxx_dma_destroy(card); | ||
| 548 | |||
| 549 | spin_lock_irqsave(&card->irq_lock, flags); | ||
| 550 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); | ||
| 551 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
| 552 | free_irq(dev->irq, card); | ||
| 553 | |||
| 554 | if (!force_legacy) | ||
| 555 | pci_disable_msi(dev); | ||
| 556 | |||
| 557 | rsxx_creg_destroy(card); | ||
| 558 | |||
| 559 | pci_iounmap(dev, card->regmap); | ||
| 560 | |||
| 561 | pci_disable_device(dev); | ||
| 562 | pci_release_regions(dev); | ||
| 563 | |||
| 564 | kfree(card); | ||
| 565 | } | ||
| 566 | |||
| 567 | static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state) | ||
| 568 | { | ||
| 569 | /* We don't support suspend at this time. */ | ||
| 570 | return -ENOSYS; | ||
| 571 | } | ||
| 572 | |||
| 573 | static void rsxx_pci_shutdown(struct pci_dev *dev) | ||
| 574 | { | ||
| 575 | struct rsxx_cardinfo *card = pci_get_drvdata(dev); | ||
| 576 | unsigned long flags; | ||
| 577 | int i; | ||
| 578 | |||
| 579 | if (!card) | ||
| 580 | return; | ||
| 581 | |||
| 582 | dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n"); | ||
| 583 | |||
| 584 | rsxx_detach_dev(card); | ||
| 585 | |||
| 586 | for (i = 0; i < card->n_targets; i++) { | ||
| 587 | spin_lock_irqsave(&card->irq_lock, flags); | ||
| 588 | rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i)); | ||
| 589 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
| 590 | } | ||
| 591 | |||
| 592 | card_shutdown(card); | ||
| 593 | } | ||
| 594 | |||
| 595 | static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = { | ||
| 596 | {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)}, | ||
| 597 | {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)}, | ||
| 598 | {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)}, | ||
| 599 | {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)}, | ||
| 600 | {0,}, | ||
| 601 | }; | ||
| 602 | |||
| 603 | MODULE_DEVICE_TABLE(pci, rsxx_pci_ids); | ||
| 604 | |||
| 605 | static struct pci_driver rsxx_pci_driver = { | ||
| 606 | .name = DRIVER_NAME, | ||
| 607 | .id_table = rsxx_pci_ids, | ||
| 608 | .probe = rsxx_pci_probe, | ||
| 609 | .remove = rsxx_pci_remove, | ||
| 610 | .suspend = rsxx_pci_suspend, | ||
| 611 | .shutdown = rsxx_pci_shutdown, | ||
| 612 | }; | ||
| 613 | |||
| 614 | static int __init rsxx_core_init(void) | ||
| 615 | { | ||
| 616 | int st; | ||
| 617 | |||
| 618 | st = rsxx_dev_init(); | ||
| 619 | if (st) | ||
| 620 | return st; | ||
| 621 | |||
| 622 | st = rsxx_dma_init(); | ||
| 623 | if (st) | ||
| 624 | goto dma_init_failed; | ||
| 625 | |||
| 626 | st = rsxx_creg_init(); | ||
| 627 | if (st) | ||
| 628 | goto creg_init_failed; | ||
| 629 | |||
| 630 | return pci_register_driver(&rsxx_pci_driver); | ||
| 631 | |||
| 632 | creg_init_failed: | ||
| 633 | rsxx_dma_cleanup(); | ||
| 634 | dma_init_failed: | ||
| 635 | rsxx_dev_cleanup(); | ||
| 636 | |||
| 637 | return st; | ||
| 638 | } | ||
| 639 | |||
| 640 | static void __exit rsxx_core_cleanup(void) | ||
| 641 | { | ||
| 642 | pci_unregister_driver(&rsxx_pci_driver); | ||
| 643 | rsxx_creg_cleanup(); | ||
| 644 | rsxx_dma_cleanup(); | ||
| 645 | rsxx_dev_cleanup(); | ||
| 646 | } | ||
| 647 | |||
| 648 | module_init(rsxx_core_init); | ||
| 649 | module_exit(rsxx_core_cleanup); | ||
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c new file mode 100644 index 000000000000..80bbe639fccd --- /dev/null +++ b/drivers/block/rsxx/cregs.c | |||
| @@ -0,0 +1,758 @@ | |||
| 1 | /* | ||
| 2 | * Filename: cregs.c | ||
| 3 | * | ||
| 4 | * | ||
| 5 | * Authors: Joshua Morris <josh.h.morris@us.ibm.com> | ||
| 6 | * Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
| 7 | * | ||
| 8 | * (C) Copyright 2013 IBM Corporation | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation; either version 2 of the | ||
| 13 | * License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but | ||
| 16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 18 | * General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software Foundation, | ||
| 22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/completion.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | |||
| 28 | #include "rsxx_priv.h" | ||
| 29 | |||
| 30 | #define CREG_TIMEOUT_MSEC 10000 | ||
| 31 | |||
| 32 | typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card, | ||
| 33 | struct creg_cmd *cmd, | ||
| 34 | int st); | ||
| 35 | |||
| 36 | struct creg_cmd { | ||
| 37 | struct list_head list; | ||
| 38 | creg_cmd_cb cb; | ||
| 39 | void *cb_private; | ||
| 40 | unsigned int op; | ||
| 41 | unsigned int addr; | ||
| 42 | int cnt8; | ||
| 43 | void *buf; | ||
| 44 | unsigned int stream; | ||
| 45 | unsigned int status; | ||
| 46 | }; | ||
| 47 | |||
| 48 | static struct kmem_cache *creg_cmd_pool; | ||
| 49 | |||
| 50 | |||
| 51 | /*------------ Private Functions --------------*/ | ||
| 52 | |||
| 53 | #if defined(__LITTLE_ENDIAN) | ||
| 54 | #define LITTLE_ENDIAN 1 | ||
| 55 | #elif defined(__BIG_ENDIAN) | ||
| 56 | #define LITTLE_ENDIAN 0 | ||
| 57 | #else | ||
| 58 | #error Unknown endianness! Aborting... | ||
| 59 | #endif | ||
| 60 | |||
| 61 | static void copy_to_creg_data(struct rsxx_cardinfo *card, | ||
| 62 | int cnt8, | ||
| 63 | void *buf, | ||
| 64 | unsigned int stream) | ||
| 65 | { | ||
| 66 | int i = 0; | ||
| 67 | u32 *data = buf; | ||
| 68 | |||
| 69 | for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { | ||
| 70 | /* | ||
| 71 | * Firmware implementation makes it necessary to byte swap on | ||
| 72 | * little endian processors. | ||
| 73 | */ | ||
| 74 | if (LITTLE_ENDIAN && stream) | ||
| 75 | iowrite32be(data[i], card->regmap + CREG_DATA(i)); | ||
| 76 | else | ||
| 77 | iowrite32(data[i], card->regmap + CREG_DATA(i)); | ||
| 78 | } | ||
| 79 | } | ||
| 80 | |||
| 81 | |||
| 82 | static void copy_from_creg_data(struct rsxx_cardinfo *card, | ||
| 83 | int cnt8, | ||
| 84 | void *buf, | ||
| 85 | unsigned int stream) | ||
| 86 | { | ||
| 87 | int i = 0; | ||
| 88 | u32 *data = buf; | ||
| 89 | |||
| 90 | for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { | ||
| 91 | /* | ||
| 92 | * Firmware implementation makes it necessary to byte swap on | ||
| 93 | * little endian processors. | ||
| 94 | */ | ||
| 95 | if (LITTLE_ENDIAN && stream) | ||
| 96 | data[i] = ioread32be(card->regmap + CREG_DATA(i)); | ||
| 97 | else | ||
| 98 | data[i] = ioread32(card->regmap + CREG_DATA(i)); | ||
| 99 | } | ||
| 100 | } | ||
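A userspace illustration (editorial, not part of the patch) of the swap that iowrite32be()/ioread32be() apply relative to their native-endian counterparts. On a little-endian host the first byte of a stream lands in the word's least significant byte; the comment above says the firmware requires the swap, so this only models the swap itself, not the firmware's exact interpretation. The output shown assumes a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint32_t v;

        /* On a little-endian host, 'A' lands in the least significant
         * byte of the word. */
        memcpy(&v, "ABCD", 4);
        printf("native : 0x%08x (MSB = '%c')\n", v, (char)(v >> 24));

        /* __builtin_bswap32() models the swap iowrite32be() applies
         * relative to iowrite32(). */
        v = __builtin_bswap32(v);
        printf("swapped: 0x%08x (MSB = '%c')\n", v, (char)(v >> 24));
        return 0;
}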
| 101 | |||
| 102 | static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card) | ||
| 103 | { | ||
| 104 | struct creg_cmd *cmd; | ||
| 105 | |||
| 106 | /* | ||
| 107 | * Spin lock is needed because this can be called in atomic/interrupt | ||
| 108 | * context. | ||
| 109 | */ | ||
| 110 | spin_lock_bh(&card->creg_ctrl.lock); | ||
| 111 | cmd = card->creg_ctrl.active_cmd; | ||
| 112 | card->creg_ctrl.active_cmd = NULL; | ||
| 113 | spin_unlock_bh(&card->creg_ctrl.lock); | ||
| 114 | |||
| 115 | return cmd; | ||
| 116 | } | ||
| 117 | |||
| 118 | static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd) | ||
| 119 | { | ||
| 120 | iowrite32(cmd->addr, card->regmap + CREG_ADD); | ||
| 121 | iowrite32(cmd->cnt8, card->regmap + CREG_CNT); | ||
| 122 | |||
| 123 | if (cmd->op == CREG_OP_WRITE) { | ||
| 124 | if (cmd->buf) | ||
| 125 | copy_to_creg_data(card, cmd->cnt8, | ||
| 126 | cmd->buf, cmd->stream); | ||
| 127 | } | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Data copy must complete before initiating the command. This is | ||
| 131 | * needed for weakly ordered processors (i.e. PowerPC), so that all | ||
| 132 | * neccessary registers are written before we kick the hardware. | ||
| 133 | */ | ||
| 134 | wmb(); | ||
| 135 | |||
| 136 | /* Setting the valid bit will kick off the command. */ | ||
| 137 | iowrite32(cmd->op, card->regmap + CREG_CMD); | ||
| 138 | } | ||
| 139 | |||
| 140 | static void creg_kick_queue(struct rsxx_cardinfo *card) | ||
| 141 | { | ||
| 142 | if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue)) | ||
| 143 | return; | ||
| 144 | |||
| 145 | card->creg_ctrl.active = 1; | ||
| 146 | card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue, | ||
| 147 | struct creg_cmd, list); | ||
| 148 | list_del(&card->creg_ctrl.active_cmd->list); | ||
| 149 | card->creg_ctrl.q_depth--; | ||
| 150 | |||
| 151 | /* | ||
| 152 | * We have to set the timer before we push the new command. Otherwise | ||
| 153 | * we could create a race: if the timer was not canceled, it could | ||
| 154 | * expire after the new command was pushed but before that command | ||
| 155 | * was issued to hardware. | ||
| 156 | */ | ||
| 157 | mod_timer(&card->creg_ctrl.cmd_timer, | ||
| 158 | jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC)); | ||
| 159 | |||
| 160 | creg_issue_cmd(card, card->creg_ctrl.active_cmd); | ||
| 161 | } | ||
| 162 | |||
| 163 | static int creg_queue_cmd(struct rsxx_cardinfo *card, | ||
| 164 | unsigned int op, | ||
| 165 | unsigned int addr, | ||
| 166 | unsigned int cnt8, | ||
| 167 | void *buf, | ||
| 168 | int stream, | ||
| 169 | creg_cmd_cb callback, | ||
| 170 | void *cb_private) | ||
| 171 | { | ||
| 172 | struct creg_cmd *cmd; | ||
| 173 | |||
| 174 | /* Don't queue stuff up if we're halted. */ | ||
| 175 | if (unlikely(card->halt)) | ||
| 176 | return -EINVAL; | ||
| 177 | |||
| 178 | if (card->creg_ctrl.reset) | ||
| 179 | return -EAGAIN; | ||
| 180 | |||
| 181 | if (cnt8 > MAX_CREG_DATA8) | ||
| 182 | return -EINVAL; | ||
| 183 | |||
| 184 | cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL); | ||
| 185 | if (!cmd) | ||
| 186 | return -ENOMEM; | ||
| 187 | |||
| 188 | INIT_LIST_HEAD(&cmd->list); | ||
| 189 | |||
| 190 | cmd->op = op; | ||
| 191 | cmd->addr = addr; | ||
| 192 | cmd->cnt8 = cnt8; | ||
| 193 | cmd->buf = buf; | ||
| 194 | cmd->stream = stream; | ||
| 195 | cmd->cb = callback; | ||
| 196 | cmd->cb_private = cb_private; | ||
| 197 | cmd->status = 0; | ||
| 198 | |||
| 199 | spin_lock(&card->creg_ctrl.lock); | ||
| 200 | list_add_tail(&cmd->list, &card->creg_ctrl.queue); | ||
| 201 | card->creg_ctrl.q_depth++; | ||
| 202 | creg_kick_queue(card); | ||
| 203 | spin_unlock(&card->creg_ctrl.lock); | ||
| 204 | |||
| 205 | return 0; | ||
| 206 | } | ||
| 207 | |||
| 208 | static void creg_cmd_timed_out(unsigned long data) | ||
| 209 | { | ||
| 210 | struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data; | ||
| 211 | struct creg_cmd *cmd; | ||
| 212 | |||
| 213 | cmd = pop_active_cmd(card); | ||
| 214 | if (cmd == NULL) { | ||
| 215 | card->creg_ctrl.creg_stats.creg_timeout++; | ||
| 216 | dev_warn(CARD_TO_DEV(card), | ||
| 217 | "No active command associated with timeout!\n"); | ||
| 218 | return; | ||
| 219 | } | ||
| 220 | |||
| 221 | if (cmd->cb) | ||
| 222 | cmd->cb(card, cmd, -ETIMEDOUT); | ||
| 223 | |||
| 224 | kmem_cache_free(creg_cmd_pool, cmd); | ||
| 225 | |||
| 226 | |||
| 227 | spin_lock(&card->creg_ctrl.lock); | ||
| 228 | card->creg_ctrl.active = 0; | ||
| 229 | creg_kick_queue(card); | ||
| 230 | spin_unlock(&card->creg_ctrl.lock); | ||
| 231 | } | ||
| 232 | |||
| 233 | |||
| 234 | static void creg_cmd_done(struct work_struct *work) | ||
| 235 | { | ||
| 236 | struct rsxx_cardinfo *card; | ||
| 237 | struct creg_cmd *cmd; | ||
| 238 | int st = 0; | ||
| 239 | |||
| 240 | card = container_of(work, struct rsxx_cardinfo, | ||
| 241 | creg_ctrl.done_work); | ||
| 242 | |||
| 243 | /* | ||
| 244 | * If the timer fired before we could cancel it, we have to race | ||
| 245 | * the timeout handler to pop the active command. | ||
| 246 | */ | ||
| 247 | if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0) | ||
| 248 | card->creg_ctrl.creg_stats.failed_cancel_timer++; | ||
| 249 | |||
| 250 | cmd = pop_active_cmd(card); | ||
| 251 | if (cmd == NULL) { | ||
| 252 | dev_err(CARD_TO_DEV(card), | ||
| 253 | "Spurious creg interrupt!\n"); | ||
| 254 | return; | ||
| 255 | } | ||
| 256 | |||
| 257 | card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT); | ||
| 258 | cmd->status = card->creg_ctrl.creg_stats.stat; | ||
| 259 | if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) { | ||
| 260 | dev_err(CARD_TO_DEV(card), | ||
| 261 | "Invalid status on creg command\n"); | ||
| 262 | /* | ||
| 263 | * At this point we're probably reading garbage from HW. Don't | ||
| 264 | * do anything else that could mess up the system and let | ||
| 265 | * the sync function return an error. | ||
| 266 | */ | ||
| 267 | st = -EIO; | ||
| 268 | goto creg_done; | ||
| 269 | } else if (cmd->status & CREG_STAT_ERROR) { | ||
| 270 | st = -EIO; | ||
| 271 | } | ||
| 272 | |||
| 273 | if (cmd->op == CREG_OP_READ) { | ||
| 274 | unsigned int cnt8 = ioread32(card->regmap + CREG_CNT); | ||
| 275 | |||
| 276 | /* Paranoid Sanity Checks */ | ||
| 277 | if (!cmd->buf) { | ||
| 278 | dev_err(CARD_TO_DEV(card), | ||
| 279 | "Buffer not given for read.\n"); | ||
| 280 | st = -EIO; | ||
| 281 | goto creg_done; | ||
| 282 | } | ||
| 283 | if (cnt8 != cmd->cnt8) { | ||
| 284 | dev_err(CARD_TO_DEV(card), | ||
| 285 | "count mismatch\n"); | ||
| 286 | st = -EIO; | ||
| 287 | goto creg_done; | ||
| 288 | } | ||
| 289 | |||
| 290 | copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); | ||
| 291 | } | ||
| 292 | |||
| 293 | creg_done: | ||
| 294 | if (cmd->cb) | ||
| 295 | cmd->cb(card, cmd, st); | ||
| 296 | |||
| 297 | kmem_cache_free(creg_cmd_pool, cmd); | ||
| 298 | |||
| 299 | spin_lock(&card->creg_ctrl.lock); | ||
| 300 | card->creg_ctrl.active = 0; | ||
| 301 | creg_kick_queue(card); | ||
| 302 | spin_unlock(&card->creg_ctrl.lock); | ||
| 303 | } | ||
| 304 | |||
| 305 | static void creg_reset(struct rsxx_cardinfo *card) | ||
| 306 | { | ||
| 307 | struct creg_cmd *cmd = NULL; | ||
| 308 | struct creg_cmd *tmp; | ||
| 309 | unsigned long flags; | ||
| 310 | |||
| 311 | /* | ||
| 312 | * mutex_trylock is used here because if reset_lock is taken then a | ||
| 313 | * reset is already happening. So, we can just go ahead and return. | ||
| 314 | */ | ||
| 315 | if (!mutex_trylock(&card->creg_ctrl.reset_lock)) | ||
| 316 | return; | ||
| 317 | |||
| 318 | card->creg_ctrl.reset = 1; | ||
| 319 | spin_lock_irqsave(&card->irq_lock, flags); | ||
| 320 | rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT); | ||
| 321 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
| 322 | |||
| 323 | dev_warn(CARD_TO_DEV(card), | ||
| 324 | "Resetting creg interface for recovery\n"); | ||
| 325 | |||
| 326 | /* Cancel outstanding commands */ | ||
| 327 | spin_lock(&card->creg_ctrl.lock); | ||
| 328 | list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { | ||
| 329 | list_del(&cmd->list); | ||
| 330 | card->creg_ctrl.q_depth--; | ||
| 331 | if (cmd->cb) | ||
| 332 | cmd->cb(card, cmd, -ECANCELED); | ||
| 333 | kmem_cache_free(creg_cmd_pool, cmd); | ||
| 334 | } | ||
| 335 | |||
| 336 | cmd = card->creg_ctrl.active_cmd; | ||
| 337 | card->creg_ctrl.active_cmd = NULL; | ||
| 338 | if (cmd) { | ||
| 339 | if (timer_pending(&card->creg_ctrl.cmd_timer)) | ||
| 340 | del_timer_sync(&card->creg_ctrl.cmd_timer); | ||
| 341 | |||
| 342 | if (cmd->cb) | ||
| 343 | cmd->cb(card, cmd, -ECANCELED); | ||
| 344 | kmem_cache_free(creg_cmd_pool, cmd); | ||
| 345 | |||
| 346 | card->creg_ctrl.active = 0; | ||
| 347 | } | ||
| 348 | spin_unlock(&card->creg_ctrl.lock); | ||
| 349 | |||
| 350 | card->creg_ctrl.reset = 0; | ||
| 351 | spin_lock_irqsave(&card->irq_lock, flags); | ||
| 352 | rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT); | ||
| 353 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
| 354 | |||
| 355 | mutex_unlock(&card->creg_ctrl.reset_lock); | ||
| 356 | } | ||
| 357 | |||
| 358 | /* Used for synchronous accesses */ | ||
| 359 | struct creg_completion { | ||
| 360 | struct completion *cmd_done; | ||
| 361 | int st; | ||
| 362 | u32 creg_status; | ||
| 363 | }; | ||
| 364 | |||
| 365 | static void creg_cmd_done_cb(struct rsxx_cardinfo *card, | ||
| 366 | struct creg_cmd *cmd, | ||
| 367 | int st) | ||
| 368 | { | ||
| 369 | struct creg_completion *cmd_completion; | ||
| 370 | |||
| 371 | cmd_completion = cmd->cb_private; | ||
| 372 | BUG_ON(!cmd_completion); | ||
| 373 | |||
| 374 | cmd_completion->st = st; | ||
| 375 | cmd_completion->creg_status = cmd->status; | ||
| 376 | complete(cmd_completion->cmd_done); | ||
| 377 | } | ||
| 378 | |||
| 379 | static int __issue_creg_rw(struct rsxx_cardinfo *card, | ||
| 380 | unsigned int op, | ||
| 381 | unsigned int addr, | ||
| 382 | unsigned int cnt8, | ||
| 383 | void *buf, | ||
| 384 | int stream, | ||
| 385 | unsigned int *hw_stat) | ||
| 386 | { | ||
| 387 | DECLARE_COMPLETION_ONSTACK(cmd_done); | ||
| 388 | struct creg_completion completion; | ||
| 389 | unsigned long timeout; | ||
| 390 | int st; | ||
| 391 | |||
| 392 | completion.cmd_done = &cmd_done; | ||
| 393 | completion.st = 0; | ||
| 394 | completion.creg_status = 0; | ||
| 395 | |||
| 396 | st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb, | ||
| 397 | &completion); | ||
| 398 | if (st) | ||
| 399 | return st; | ||
| 400 | |||
| 401 | /* | ||
| 402 | * This timeout is necessary for unresponsive hardware. The additional | ||
| 403 | * 20 seconds is used to guarantee that each creg request has time to | ||
| 404 | * complete. | ||
| 405 | */ | ||
| 406 | timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC * | ||
| 407 | card->creg_ctrl.q_depth) + 20000); | ||
| 408 | |||
| 409 | /* | ||
| 410 | * The creg interface is guaranteed to complete. It has a timeout | ||
| 411 | * mechanism that will kick in if hardware does not respond. | ||
| 412 | */ | ||
| 413 | st = wait_for_completion_timeout(completion.cmd_done, timeout); | ||
| 414 | if (st == 0) { | ||
| 415 | /* | ||
| 416 | * This is really bad, because the kernel timer did not | ||
| 417 | * expire and notify us of a timeout! | ||
| 418 | */ | ||
| 419 | dev_crit(CARD_TO_DEV(card), | ||
| 420 | "cregs timer failed\n"); | ||
| 421 | creg_reset(card); | ||
| 422 | return -EIO; | ||
| 423 | } | ||
| 424 | |||
| 425 | *hw_stat = completion.creg_status; | ||
| 426 | |||
| 427 | if (completion.st) { | ||
| 428 | dev_warn(CARD_TO_DEV(card), | ||
| 429 | "creg command failed(%d x%08x)\n", | ||
| 430 | completion.st, addr); | ||
| 431 | return completion.st; | ||
| 432 | } | ||
| 433 | |||
| 434 | return 0; | ||
| 435 | } | ||
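A worked example of the wait budget computed above, using CREG_TIMEOUT_MSEC (10000, as defined at the top of this file): every command ahead of ours in the queue gets its own hardware timeout, plus 20 seconds of slack. An editorial sketch, not part of the patch:

#include <stdio.h>

#define CREG_TIMEOUT_MSEC 10000

int main(void)
{
        unsigned int q_depth;

        for (q_depth = 0; q_depth <= 4; q_depth += 2)
                printf("q_depth=%u -> wait %u ms\n", q_depth,
                       CREG_TIMEOUT_MSEC * q_depth + 20000);
        return 0;       /* 20000, 40000, 60000 ms */
}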
| 436 | |||
| 437 | static int issue_creg_rw(struct rsxx_cardinfo *card, | ||
| 438 | u32 addr, | ||
| 439 | unsigned int size8, | ||
| 440 | void *data, | ||
| 441 | int stream, | ||
| 442 | int read) | ||
| 443 | { | ||
| 444 | unsigned int hw_stat; | ||
| 445 | unsigned int xfer; | ||
| 446 | unsigned int op; | ||
| 447 | int st; | ||
| 448 | |||
| 449 | op = read ? CREG_OP_READ : CREG_OP_WRITE; | ||
| 450 | |||
| 451 | do { | ||
| 452 | xfer = min_t(unsigned int, size8, MAX_CREG_DATA8); | ||
| 453 | |||
| 454 | st = __issue_creg_rw(card, op, addr, xfer, | ||
| 455 | data, stream, &hw_stat); | ||
| 456 | if (st) | ||
| 457 | return st; | ||
| 458 | |||
| 459 | data = (char *)data + xfer; | ||
| 460 | addr += xfer; | ||
| 461 | size8 -= xfer; | ||
| 462 | } while (size8); | ||
| 463 | |||
| 464 | return 0; | ||
| 465 | } | ||
| 466 | |||
| 467 | /* ---------------------------- Public API ---------------------------------- */ | ||
| 468 | int rsxx_creg_write(struct rsxx_cardinfo *card, | ||
| 469 | u32 addr, | ||
| 470 | unsigned int size8, | ||
| 471 | void *data, | ||
| 472 | int byte_stream) | ||
| 473 | { | ||
| 474 | return issue_creg_rw(card, addr, size8, data, byte_stream, 0); | ||
| 475 | } | ||
| 476 | |||
| 477 | int rsxx_creg_read(struct rsxx_cardinfo *card, | ||
| 478 | u32 addr, | ||
| 479 | unsigned int size8, | ||
| 480 | void *data, | ||
| 481 | int byte_stream) | ||
| 482 | { | ||
| 483 | return issue_creg_rw(card, addr, size8, data, byte_stream, 1); | ||
| 484 | } | ||
| 485 | |||
| 486 | int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state) | ||
| 487 | { | ||
| 488 | return rsxx_creg_read(card, CREG_ADD_CARD_STATE, | ||
| 489 | sizeof(*state), state, 0); | ||
| 490 | } | ||
| 491 | |||
| 492 | int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8) | ||
| 493 | { | ||
| 494 | unsigned int size; | ||
| 495 | int st; | ||
| 496 | |||
| 497 | st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE, | ||
| 498 | sizeof(size), &size, 0); | ||
| 499 | if (st) | ||
| 500 | return st; | ||
| 501 | |||
| 502 | *size8 = (u64)size * RSXX_HW_BLK_SIZE; | ||
| 503 | return 0; | ||
| 504 | } | ||
| 505 | |||
| 506 | int rsxx_get_num_targets(struct rsxx_cardinfo *card, | ||
| 507 | unsigned int *n_targets) | ||
| 508 | { | ||
| 509 | return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS, | ||
| 510 | sizeof(*n_targets), n_targets, 0); | ||
| 511 | } | ||
| 512 | |||
| 513 | int rsxx_get_card_capabilities(struct rsxx_cardinfo *card, | ||
| 514 | u32 *capabilities) | ||
| 515 | { | ||
| 516 | return rsxx_creg_read(card, CREG_ADD_CAPABILITIES, | ||
| 517 | sizeof(*capabilities), capabilities, 0); | ||
| 518 | } | ||
| 519 | |||
| 520 | int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd) | ||
| 521 | { | ||
| 522 | return rsxx_creg_write(card, CREG_ADD_CARD_CMD, | ||
| 523 | sizeof(cmd), &cmd, 0); | ||
| 524 | } | ||
| 525 | |||
| 526 | |||
| 527 | /*----------------- HW Log Functions -------------------*/ | ||
| 528 | static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len) | ||
| 529 | { | ||
| 530 | static char level; | ||
| 531 | |||
| 532 | /* | ||
| 533 | * New messages start with "<#>", where # is the log level. Messages | ||
| 534 | * that extend past the log buffer will use the previous level. | ||
| 535 | */ | ||
| 536 | if ((len > 3) && (str[0] == '<') && (str[2] == '>')) { | ||
| 537 | level = str[1]; | ||
| 538 | str += 3; /* Skip past the log level. */ | ||
| 539 | len -= 3; | ||
| 540 | } | ||
| 541 | |||
| 542 | switch (level) { | ||
| 543 | case '0': | ||
| 544 | dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 545 | break; | ||
| 546 | case '1': | ||
| 547 | dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 548 | break; | ||
| 549 | case '2': | ||
| 550 | dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 551 | break; | ||
| 552 | case '3': | ||
| 553 | dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 554 | break; | ||
| 555 | case '4': | ||
| 556 | dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 557 | break; | ||
| 558 | case '5': | ||
| 559 | dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 560 | break; | ||
| 561 | case '6': | ||
| 562 | dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 563 | break; | ||
| 564 | case '7': | ||
| 565 | dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 566 | break; | ||
| 567 | default: | ||
| 568 | dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str); | ||
| 569 | break; | ||
| 570 | } | ||
| 571 | } | ||
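The "<#>" prefix mirrors kernel printk levels ('0' emergency through '7' debug). A standalone userspace sketch of the prefix handling (editorial, not part of the patch); the starting level here is chosen arbitrarily, whereas the driver relies on the static's zero initialization and the default case:

#include <stdio.h>

static void log_chunk(const char *str, int len)
{
        static char level = '6';        /* arbitrary starting level */

        if (len > 3 && str[0] == '<' && str[2] == '>') {
                level = str[1];
                str += 3;               /* skip past the log level */
                len -= 3;
        }
        printf("level %c: %.*s\n", level, len, str);
}

int main(void)
{
        log_chunk("<3>flash error", 14);        /* sets level '3' (err) */
        log_chunk(" ...continued", 13);         /* reuses level '3' */
        return 0;
}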
| 572 | |||
| 573 | /* | ||
| 574 | * The substrncpy() function copies from src (including the | ||
| 575 | * terminating '\0' character, if reached) into dest, copying at | ||
| 576 | * most count bytes. It returns the number of bytes copied to dest. | ||
| 577 | */ | ||
| 578 | static int substrncpy(char *dest, const char *src, int count) | ||
| 579 | { | ||
| 580 | int max_cnt = count; | ||
| 581 | |||
| 582 | while (count) { | ||
| 583 | count--; | ||
| 584 | *dest = *src; | ||
| 585 | if (*dest == '\0') | ||
| 586 | break; | ||
| 587 | src++; | ||
| 588 | dest++; | ||
| 589 | } | ||
| 590 | return max_cnt - count; | ||
| 591 | } | ||
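A quick standalone check of that contract (editorial, not part of the patch): the returned count includes the '\0' when one is copied, and the copy stops at count bytes otherwise, leaving dest unterminated:

#include <stdio.h>

static int substrncpy(char *dest, const char *src, int count)
{
        int max_cnt = count;

        while (count) {
                count--;
                *dest = *src;
                if (*dest == '\0')
                        break;
                src++;
                dest++;
        }
        return max_cnt - count;
}

int main(void)
{
        char buf[8];

        printf("%d\n", substrncpy(buf, "hi", 8));          /* 3: 'h','i','\0' */
        printf("%d\n", substrncpy(buf, "truncated!", 8));  /* 8, no '\0' */
        return 0;
}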
| 592 | |||
| 593 | |||
| 594 | static void read_hw_log_done(struct rsxx_cardinfo *card, | ||
| 595 | struct creg_cmd *cmd, | ||
| 596 | int st) | ||
| 597 | { | ||
| 598 | char *buf; | ||
| 599 | char *log_str; | ||
| 600 | int cnt; | ||
| 601 | int len; | ||
| 602 | int off; | ||
| 603 | |||
| 604 | buf = cmd->buf; | ||
| 605 | off = 0; | ||
| 606 | |||
| 607 | /* Failed getting the log message */ | ||
| 608 | if (st) | ||
| 609 | return; | ||
| 610 | |||
| 611 | while (off < cmd->cnt8) { | ||
| 612 | log_str = &card->log.buf[card->log.buf_len]; | ||
| 613 | cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len); | ||
| 614 | len = substrncpy(log_str, &buf[off], cnt); | ||
| 615 | |||
| 616 | off += len; | ||
| 617 | card->log.buf_len += len; | ||
| 618 | |||
| 619 | /* | ||
| 620 | * Flush the log if we've hit the end of a message or if we've | ||
| 621 | * run out of buffer space. | ||
| 622 | */ | ||
| 623 | if ((log_str[len - 1] == '\0') || | ||
| 624 | (card->log.buf_len == LOG_BUF_SIZE8)) { | ||
| 625 | if (card->log.buf_len != 1) /* Don't log blank lines. */ | ||
| 626 | hw_log_msg(card, card->log.buf, | ||
| 627 | card->log.buf_len); | ||
| 628 | card->log.buf_len = 0; | ||
| 629 | } | ||
| 630 | |||
| 631 | } | ||
| 632 | |||
| 633 | if (cmd->status & CREG_STAT_LOG_PENDING) | ||
| 634 | rsxx_read_hw_log(card); | ||
| 635 | } | ||
| 636 | |||
| 637 | int rsxx_read_hw_log(struct rsxx_cardinfo *card) | ||
| 638 | { | ||
| 639 | int st; | ||
| 640 | |||
| 641 | st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG, | ||
| 642 | sizeof(card->log.tmp), card->log.tmp, | ||
| 643 | 1, read_hw_log_done, NULL); | ||
| 644 | if (st) | ||
| 645 | dev_err(CARD_TO_DEV(card), | ||
| 646 | "Failed getting log text\n"); | ||
| 647 | |||
| 648 | return st; | ||
| 649 | } | ||
| 650 | |||
| 651 | /*-------------- IOCTL REG Access ------------------*/ | ||
| 652 | static int issue_reg_cmd(struct rsxx_cardinfo *card, | ||
| 653 | struct rsxx_reg_access *cmd, | ||
| 654 | int read) | ||
| 655 | { | ||
| 656 | unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE; | ||
| 657 | |||
| 658 | return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data, | ||
| 659 | cmd->stream, &cmd->stat); | ||
| 660 | } | ||
| 661 | |||
| 662 | int rsxx_reg_access(struct rsxx_cardinfo *card, | ||
| 663 | struct rsxx_reg_access __user *ucmd, | ||
| 664 | int read) | ||
| 665 | { | ||
| 666 | struct rsxx_reg_access cmd; | ||
| 667 | int st; | ||
| 668 | |||
| 669 | st = copy_from_user(&cmd, ucmd, sizeof(cmd)); | ||
| 670 | if (st) | ||
| 671 | return -EFAULT; | ||
| 672 | |||
| 673 | if (cmd.cnt > RSXX_MAX_REG_CNT) | ||
| 674 | return -EFAULT; | ||
| 675 | |||
| 676 | st = issue_reg_cmd(card, &cmd, read); | ||
| 677 | if (st) | ||
| 678 | return st; | ||
| 679 | |||
| 680 | st = put_user(cmd.stat, &ucmd->stat); | ||
| 681 | if (st) | ||
| 682 | return -EFAULT; | ||
| 683 | |||
| 684 | if (read) { | ||
| 685 | st = copy_to_user(ucmd->data, cmd.data, cmd.cnt); | ||
| 686 | if (st) | ||
| 687 | return -EFAULT; | ||
| 688 | } | ||
| 689 | |||
| 690 | return 0; | ||
| 691 | } | ||
| 692 | |||
| 693 | /*------------ Initialization & Setup --------------*/ | ||
| 694 | int rsxx_creg_setup(struct rsxx_cardinfo *card) | ||
| 695 | { | ||
| 696 | card->creg_ctrl.active_cmd = NULL; | ||
| 697 | |||
| 698 | INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done); | ||
| 699 | mutex_init(&card->creg_ctrl.reset_lock); | ||
| 700 | INIT_LIST_HEAD(&card->creg_ctrl.queue); | ||
| 701 | spin_lock_init(&card->creg_ctrl.lock); | ||
| 702 | setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, | ||
| 703 | (unsigned long) card); | ||
| 704 | |||
| 705 | return 0; | ||
| 706 | } | ||
| 707 | |||
| 708 | void rsxx_creg_destroy(struct rsxx_cardinfo *card) | ||
| 709 | { | ||
| 710 | struct creg_cmd *cmd; | ||
| 711 | struct creg_cmd *tmp; | ||
| 712 | int cnt = 0; | ||
| 713 | |||
| 714 | /* Cancel outstanding commands */ | ||
| 715 | spin_lock(&card->creg_ctrl.lock); | ||
| 716 | list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { | ||
| 717 | list_del(&cmd->list); | ||
| 718 | if (cmd->cb) | ||
| 719 | cmd->cb(card, cmd, -ECANCELED); | ||
| 720 | kmem_cache_free(creg_cmd_pool, cmd); | ||
| 721 | cnt++; | ||
| 722 | } | ||
| 723 | |||
| 724 | if (cnt) | ||
| 725 | dev_info(CARD_TO_DEV(card), | ||
| 726 | "Canceled %d queue creg commands\n", cnt); | ||
| 727 | |||
| 728 | cmd = card->creg_ctrl.active_cmd; | ||
| 729 | card->creg_ctrl.active_cmd = NULL; | ||
| 730 | if (cmd) { | ||
| 731 | if (timer_pending(&card->creg_ctrl.cmd_timer)) | ||
| 732 | del_timer_sync(&card->creg_ctrl.cmd_timer); | ||
| 733 | |||
| 734 | if (cmd->cb) | ||
| 735 | cmd->cb(card, cmd, -ECANCELED); | ||
| 736 | dev_info(CARD_TO_DEV(card), | ||
| 737 | "Canceled active creg command\n"); | ||
| 738 | kmem_cache_free(creg_cmd_pool, cmd); | ||
| 739 | } | ||
| 740 | spin_unlock(&card->creg_ctrl.lock); | ||
| 741 | |||
| 742 | cancel_work_sync(&card->creg_ctrl.done_work); | ||
| 743 | } | ||
| 744 | |||
| 745 | |||
| 746 | int rsxx_creg_init(void) | ||
| 747 | { | ||
| 748 | creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN); | ||
| 749 | if (!creg_cmd_pool) | ||
| 750 | return -ENOMEM; | ||
| 751 | |||
| 752 | return 0; | ||
| 753 | } | ||
| 754 | |||
| 755 | void rsxx_creg_cleanup(void) | ||
| 756 | { | ||
| 757 | kmem_cache_destroy(creg_cmd_pool); | ||
| 758 | } | ||
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c new file mode 100644 index 000000000000..4346d17d2949 --- /dev/null +++ b/drivers/block/rsxx/dev.c | |||
| @@ -0,0 +1,367 @@ | |||
| 1 | /* | ||
| 2 | * Filename: dev.c | ||
| 3 | * | ||
| 4 | * | ||
| 5 | * Authors: Joshua Morris <josh.h.morris@us.ibm.com> | ||
| 6 | * Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
| 7 | * | ||
| 8 | * (C) Copyright 2013 IBM Corporation | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation; either version 2 of the | ||
| 13 | * License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but | ||
| 16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 18 | * General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software Foundation, | ||
| 22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/kernel.h> | ||
| 26 | #include <linux/interrupt.h> | ||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/pci.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | |||
| 31 | #include <linux/hdreg.h> | ||
| 32 | #include <linux/genhd.h> | ||
| 33 | #include <linux/blkdev.h> | ||
| 34 | #include <linux/bio.h> | ||
| 35 | |||
| 36 | #include <linux/fs.h> | ||
| 37 | |||
| 38 | #include "rsxx_priv.h" | ||
| 39 | |||
| 40 | static unsigned int blkdev_minors = 64; | ||
| 41 | module_param(blkdev_minors, uint, 0444); | ||
| 42 | MODULE_PARM_DESC(blkdev_minors, "Number of minors (partitions)"); | ||
| 43 | |||
| 44 | /* | ||
| 45 | * For now I'm making this tweakable in case any applications hit this limit. | ||
| 46 | * If you see a "bio too big" error in the log you will need to raise this | ||
| 47 | * value. | ||
| 48 | */ | ||
| 49 | static unsigned int blkdev_max_hw_sectors = 1024; | ||
| 50 | module_param(blkdev_max_hw_sectors, uint, 0444); | ||
| 51 | MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO"); | ||
| 52 | |||
| 53 | static unsigned int enable_blkdev = 1; | ||
| 54 | module_param(enable_blkdev, uint, 0444); | ||
| 55 | MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces"); | ||
| 56 | |||
| 57 | |||
| 58 | struct rsxx_bio_meta { | ||
| 59 | struct bio *bio; | ||
| 60 | atomic_t pending_dmas; | ||
| 61 | atomic_t error; | ||
| 62 | unsigned long start_time; | ||
| 63 | }; | ||
| 64 | |||
| 65 | static struct kmem_cache *bio_meta_pool; | ||
| 66 | |||
| 67 | /*----------------- Block Device Operations -----------------*/ | ||
| 68 | static int rsxx_blkdev_ioctl(struct block_device *bdev, | ||
| 69 | fmode_t mode, | ||
| 70 | unsigned int cmd, | ||
| 71 | unsigned long arg) | ||
| 72 | { | ||
| 73 | struct rsxx_cardinfo *card = bdev->bd_disk->private_data; | ||
| 74 | |||
| 75 | switch (cmd) { | ||
| 76 | case RSXX_GETREG: | ||
| 77 | return rsxx_reg_access(card, (void __user *)arg, 1); | ||
| 78 | case RSXX_SETREG: | ||
| 79 | return rsxx_reg_access(card, (void __user *)arg, 0); | ||
| 80 | } | ||
| 81 | |||
| 82 | return -ENOTTY; | ||
| 83 | } | ||
| 84 | |||
| 85 | static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo) | ||
| 86 | { | ||
| 87 | struct rsxx_cardinfo *card = bdev->bd_disk->private_data; | ||
| 88 | u64 blocks = card->size8 >> 9; | ||
| 89 | |||
| 90 | /* | ||
| 91 | * get geometry: Fake it. I haven't found any drivers that set | ||
| 92 | * geo->start, so we won't either. | ||
| 93 | */ | ||
| 94 | if (card->size8) { | ||
| 95 | geo->heads = 64; | ||
| 96 | geo->sectors = 16; | ||
| 97 | do_div(blocks, (geo->heads * geo->sectors)); | ||
| 98 | geo->cylinders = blocks; | ||
| 99 | } else { | ||
| 100 | geo->heads = 0; | ||
| 101 | geo->sectors = 0; | ||
| 102 | geo->cylinders = 0; | ||
| 103 | } | ||
| 104 | return 0; | ||
| 105 | } | ||
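A worked example of the fake geometry (editorial, not part of the patch): with 64 heads and 16 sectors per track, a hypothetical 512 GiB card exposes 1,073,741,824 512-byte sectors, giving 1,048,576 cylinders:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t size8 = 512ULL << 30;  /* hypothetical card size in bytes */
        uint64_t blocks = size8 >> 9;   /* 512-byte sectors */
        unsigned int heads = 64, sectors = 16;

        printf("sectors = %llu, cylinders = %llu\n",
               (unsigned long long)blocks,
               (unsigned long long)(blocks / (heads * sectors)));
        return 0;
}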
| 106 | |||
| 107 | static const struct block_device_operations rsxx_fops = { | ||
| 108 | .owner = THIS_MODULE, | ||
| 109 | .getgeo = rsxx_getgeo, | ||
| 110 | .ioctl = rsxx_blkdev_ioctl, | ||
| 111 | }; | ||
| 112 | |||
| 113 | static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio) | ||
| 114 | { | ||
| 115 | struct hd_struct *part0 = &card->gendisk->part0; | ||
| 116 | int rw = bio_data_dir(bio); | ||
| 117 | int cpu; | ||
| 118 | |||
| 119 | cpu = part_stat_lock(); | ||
| 120 | |||
| 121 | part_round_stats(cpu, part0); | ||
| 122 | part_inc_in_flight(part0, rw); | ||
| 123 | |||
| 124 | part_stat_unlock(); | ||
| 125 | } | ||
| 126 | |||
| 127 | static void disk_stats_complete(struct rsxx_cardinfo *card, | ||
| 128 | struct bio *bio, | ||
| 129 | unsigned long start_time) | ||
| 130 | { | ||
| 131 | struct hd_struct *part0 = &card->gendisk->part0; | ||
| 132 | unsigned long duration = jiffies - start_time; | ||
| 133 | int rw = bio_data_dir(bio); | ||
| 134 | int cpu; | ||
| 135 | |||
| 136 | cpu = part_stat_lock(); | ||
| 137 | |||
| 138 | part_stat_add(cpu, part0, sectors[rw], bio_sectors(bio)); | ||
| 139 | part_stat_inc(cpu, part0, ios[rw]); | ||
| 140 | part_stat_add(cpu, part0, ticks[rw], duration); | ||
| 141 | |||
| 142 | part_round_stats(cpu, part0); | ||
| 143 | part_dec_in_flight(part0, rw); | ||
| 144 | |||
| 145 | part_stat_unlock(); | ||
| 146 | } | ||
| 147 | |||
| 148 | static void bio_dma_done_cb(struct rsxx_cardinfo *card, | ||
| 149 | void *cb_data, | ||
| 150 | unsigned int error) | ||
| 151 | { | ||
| 152 | struct rsxx_bio_meta *meta = cb_data; | ||
| 153 | |||
| 154 | if (error) | ||
| 155 | atomic_set(&meta->error, 1); | ||
| 156 | |||
| 157 | if (atomic_dec_and_test(&meta->pending_dmas)) { | ||
| 158 | disk_stats_complete(card, meta->bio, meta->start_time); | ||
| 159 | |||
| 160 | bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0); | ||
| 161 | kmem_cache_free(bio_meta_pool, meta); | ||
| 162 | } | ||
| 163 | } | ||
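A userspace model of this completion counting using C11 atomics (editorial sketch, not part of the patch): the bio ends only when the final DMA drops pending_dmas to zero, and any single chunk error fails the whole bio:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending = 3;  /* DMAs queued for one bio */
static atomic_int error;

static void dma_done(int err)
{
        if (err)
                atomic_store(&error, 1);
        /* atomic_fetch_sub() returns the old value; 1 means we were
         * the last outstanding DMA. */
        if (atomic_fetch_sub(&pending, 1) == 1)
                printf("bio complete, status %s\n",
                       atomic_load(&error) ? "-EIO" : "0");
}

int main(void)
{
        dma_done(0);
        dma_done(1);    /* one chunk fails */
        dma_done(0);    /* prints: bio complete, status -EIO */
        return 0;
}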
| 164 | |||
| 165 | static void rsxx_make_request(struct request_queue *q, struct bio *bio) | ||
| 166 | { | ||
| 167 | struct rsxx_cardinfo *card = q->queuedata; | ||
| 168 | struct rsxx_bio_meta *bio_meta; | ||
| 169 | int st = -EINVAL; | ||
| 170 | |||
| 171 | might_sleep(); | ||
| 172 | |||
| 173 | if (unlikely(card->halt)) { | ||
| 174 | st = -EFAULT; | ||
| 175 | goto req_err; | ||
| 176 | } | ||
| 177 | |||
| 178 | if (unlikely(card->dma_fault)) { | ||
| 179 | st = -EFAULT; | ||
| 180 | goto req_err; | ||
| 181 | } | ||
| 182 | |||
| 183 | if (bio->bi_size == 0) { | ||
| 184 | dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); | ||
| 185 | goto req_err; | ||
| 186 | } | ||
| 187 | |||
| 188 | bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL); | ||
| 189 | if (!bio_meta) { | ||
| 190 | st = -ENOMEM; | ||
| 191 | goto req_err; | ||
| 192 | } | ||
| 193 | |||
| 194 | bio_meta->bio = bio; | ||
| 195 | atomic_set(&bio_meta->error, 0); | ||
| 196 | atomic_set(&bio_meta->pending_dmas, 0); | ||
| 197 | bio_meta->start_time = jiffies; | ||
| 198 | |||
| 199 | disk_stats_start(card, bio); | ||
| 200 | |||
| 201 | dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", | ||
| 202 | bio_data_dir(bio) ? 'W' : 'R', bio_meta, | ||
| 203 | (u64)bio->bi_sector << 9, bio->bi_size); | ||
| 204 | |||
| 205 | st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, | ||
| 206 | bio_dma_done_cb, bio_meta); | ||
| 207 | if (st) | ||
| 208 | goto queue_err; | ||
| 209 | |||
| 210 | return; | ||
| 211 | |||
| 212 | queue_err: | ||
| 213 | kmem_cache_free(bio_meta_pool, bio_meta); | ||
| 214 | req_err: | ||
| 215 | bio_endio(bio, st); | ||
| 216 | } | ||
| 217 | |||
| 218 | /*----------------- Device Setup -------------------*/ | ||
| 219 | static bool rsxx_discard_supported(struct rsxx_cardinfo *card) | ||
| 220 | { | ||
| 221 | unsigned char pci_rev; | ||
| 222 | |||
| 223 | pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev); | ||
| 224 | |||
| 225 | return (pci_rev >= RSXX_DISCARD_SUPPORT); | ||
| 226 | } | ||
| 227 | |||
| 228 | static unsigned short rsxx_get_logical_block_size( | ||
| 229 | struct rsxx_cardinfo *card) | ||
| 230 | { | ||
| 231 | u32 capabilities = 0; | ||
| 232 | int st; | ||
| 233 | |||
| 234 | st = rsxx_get_card_capabilities(card, &capabilities); | ||
| 235 | if (st) | ||
| 236 | dev_warn(CARD_TO_DEV(card), | ||
| 237 | "Failed reading card capabilities register\n"); | ||
| 238 | |||
| 239 | /* Earlier firmware did not support 512-byte accesses */ | ||
| 240 | if (capabilities & CARD_CAP_SUBPAGE_WRITES) | ||
| 241 | return 512; | ||
| 242 | else | ||
| 243 | return RSXX_HW_BLK_SIZE; | ||
| 244 | } | ||
| 245 | |||
| 246 | int rsxx_attach_dev(struct rsxx_cardinfo *card) | ||
| 247 | { | ||
| 248 | mutex_lock(&card->dev_lock); | ||
| 249 | |||
| 250 | /* The block device requires the stripe size from the config. */ | ||
| 251 | if (enable_blkdev) { | ||
| 252 | if (card->config_valid) | ||
| 253 | set_capacity(card->gendisk, card->size8 >> 9); | ||
| 254 | else | ||
| 255 | set_capacity(card->gendisk, 0); | ||
| 256 | add_disk(card->gendisk); | ||
| 257 | |||
| 258 | card->bdev_attached = 1; | ||
| 259 | } | ||
| 260 | |||
| 261 | mutex_unlock(&card->dev_lock); | ||
| 262 | |||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | |||
| 266 | void rsxx_detach_dev(struct rsxx_cardinfo *card) | ||
| 267 | { | ||
| 268 | mutex_lock(&card->dev_lock); | ||
| 269 | |||
| 270 | if (card->bdev_attached) { | ||
| 271 | del_gendisk(card->gendisk); | ||
| 272 | card->bdev_attached = 0; | ||
| 273 | } | ||
| 274 | |||
| 275 | mutex_unlock(&card->dev_lock); | ||
| 276 | } | ||
| 277 | |||
| 278 | int rsxx_setup_dev(struct rsxx_cardinfo *card) | ||
| 279 | { | ||
| 280 | unsigned short blk_size; | ||
| 281 | |||
| 282 | mutex_init(&card->dev_lock); | ||
| 283 | |||
| 284 | if (!enable_blkdev) | ||
| 285 | return 0; | ||
| 286 | |||
| 287 | card->major = register_blkdev(0, DRIVER_NAME); | ||
| 288 | if (card->major < 0) { | ||
| 289 | dev_err(CARD_TO_DEV(card), "Failed to get major number\n"); | ||
| 290 | return -ENOMEM; | ||
| 291 | } | ||
| 292 | |||
| 293 | card->queue = blk_alloc_queue(GFP_KERNEL); | ||
| 294 | if (!card->queue) { | ||
| 295 | dev_err(CARD_TO_DEV(card), "Failed queue alloc\n"); | ||
| 296 | unregister_blkdev(card->major, DRIVER_NAME); | ||
| 297 | return -ENOMEM; | ||
| 298 | } | ||
| 299 | |||
| 300 | card->gendisk = alloc_disk(blkdev_minors); | ||
| 301 | if (!card->gendisk) { | ||
| 302 | dev_err(CARD_TO_DEV(card), "Failed disk alloc\n"); | ||
| 303 | blk_cleanup_queue(card->queue); | ||
| 304 | unregister_blkdev(card->major, DRIVER_NAME); | ||
| 305 | return -ENOMEM; | ||
| 306 | } | ||
| 307 | |||
| 308 | blk_size = rsxx_get_logical_block_size(card); | ||
| 309 | |||
| 310 | blk_queue_make_request(card->queue, rsxx_make_request); | ||
| 311 | blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); | ||
| 312 | blk_queue_dma_alignment(card->queue, blk_size - 1); | ||
| 313 | blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); | ||
| 314 | blk_queue_logical_block_size(card->queue, blk_size); | ||
| 315 | blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); | ||
| 316 | |||
| 317 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); | ||
| 318 | if (rsxx_discard_supported(card)) { | ||
| 319 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue); | ||
| 320 | blk_queue_max_discard_sectors(card->queue, | ||
| 321 | RSXX_HW_BLK_SIZE >> 9); | ||
| 322 | card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE; | ||
| 323 | card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE; | ||
| 324 | card->queue->limits.discard_zeroes_data = 1; | ||
| 325 | } | ||
| 326 | |||
| 327 | card->queue->queuedata = card; | ||
| 328 | |||
| 329 | snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name), | ||
| 330 | "rsxx%d", card->disk_id); | ||
| 331 | card->gendisk->driverfs_dev = &card->dev->dev; | ||
| 332 | card->gendisk->major = card->major; | ||
| 333 | card->gendisk->first_minor = 0; | ||
| 334 | card->gendisk->fops = &rsxx_fops; | ||
| 335 | card->gendisk->private_data = card; | ||
| 336 | card->gendisk->queue = card->queue; | ||
| 337 | |||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 341 | void rsxx_destroy_dev(struct rsxx_cardinfo *card) | ||
| 342 | { | ||
| 343 | if (!enable_blkdev) | ||
| 344 | return; | ||
| 345 | |||
| 346 | put_disk(card->gendisk); | ||
| 347 | card->gendisk = NULL; | ||
| 348 | |||
| 349 | blk_cleanup_queue(card->queue); | ||
| 350 | unregister_blkdev(card->major, DRIVER_NAME); | ||
| 351 | } | ||
| 352 | |||
| 353 | int rsxx_dev_init(void) | ||
| 354 | { | ||
| 355 | bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN); | ||
| 356 | if (!bio_meta_pool) | ||
| 357 | return -ENOMEM; | ||
| 358 | |||
| 359 | return 0; | ||
| 360 | } | ||
| 361 | |||
| 362 | void rsxx_dev_cleanup(void) | ||
| 363 | { | ||
| 364 | kmem_cache_destroy(bio_meta_pool); | ||
| 365 | } | ||
| 366 | |||
| 367 | |||
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c new file mode 100644 index 000000000000..63176e67662f --- /dev/null +++ b/drivers/block/rsxx/dma.c | |||
| @@ -0,0 +1,998 @@ | |||
| 1 | /* | ||
| 2 | * Filename: dma.c | ||
| 3 | * | ||
| 4 | * | ||
| 5 | * Authors: Joshua Morris <josh.h.morris@us.ibm.com> | ||
| 6 | * Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
| 7 | * | ||
| 8 | * (C) Copyright 2013 IBM Corporation | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation; either version 2 of the | ||
| 13 | * License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but | ||
| 16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 18 | * General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software Foundation, | ||
| 22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/slab.h> | ||
| 26 | #include "rsxx_priv.h" | ||
| 27 | |||
| 28 | struct rsxx_dma { | ||
| 29 | struct list_head list; | ||
| 30 | u8 cmd; | ||
| 31 | unsigned int laddr; /* Logical address on the RamSan */ | ||
| 32 | struct { | ||
| 33 | u32 off; | ||
| 34 | u32 cnt; | ||
| 35 | } sub_page; | ||
| 36 | dma_addr_t dma_addr; | ||
| 37 | struct page *page; | ||
| 38 | unsigned int pg_off; /* Page Offset */ | ||
| 39 | rsxx_dma_cb cb; | ||
| 40 | void *cb_data; | ||
| 41 | }; | ||
| 42 | |||
| 43 | /* This timeout is used to detect a stalled DMA channel */ | ||
| 44 | #define DMA_ACTIVITY_TIMEOUT msecs_to_jiffies(10000) | ||
| 45 | |||
| 46 | struct hw_status { | ||
| 47 | u8 status; | ||
| 48 | u8 tag; | ||
| 49 | __le16 count; | ||
| 50 | __le32 _rsvd2; | ||
| 51 | __le64 _rsvd3; | ||
| 52 | } __packed; | ||
| 53 | |||
| 54 | enum rsxx_dma_status { | ||
| 55 | DMA_SW_ERR = 0x1, | ||
| 56 | DMA_HW_FAULT = 0x2, | ||
| 57 | DMA_CANCELLED = 0x4, | ||
| 58 | }; | ||
| 59 | |||
| 60 | struct hw_cmd { | ||
| 61 | u8 command; | ||
| 62 | u8 tag; | ||
| 63 | u8 _rsvd; | ||
| 64 | u8 sub_page; /* Bit[0:2]: 512-byte offset */ | ||
| 65 | /* Bit[4:6]: 512-byte count */ | ||
| 66 | __le32 device_addr; | ||
| 67 | __le64 host_addr; | ||
| 68 | } __packed; | ||
| 69 | |||
| 70 | enum rsxx_hw_cmd { | ||
| 71 | HW_CMD_BLK_DISCARD = 0x70, | ||
| 72 | HW_CMD_BLK_WRITE = 0x80, | ||
| 73 | HW_CMD_BLK_READ = 0xC0, | ||
| 74 | HW_CMD_BLK_RECON_READ = 0xE0, | ||
| 75 | }; | ||
| 76 | |||
| 77 | enum rsxx_hw_status { | ||
| 78 | HW_STATUS_CRC = 0x01, | ||
| 79 | HW_STATUS_HARD_ERR = 0x02, | ||
| 80 | HW_STATUS_SOFT_ERR = 0x04, | ||
| 81 | HW_STATUS_FAULT = 0x08, | ||
| 82 | }; | ||
| 83 | |||
| 84 | #define STATUS_BUFFER_SIZE8 4096 | ||
| 85 | #define COMMAND_BUFFER_SIZE8 4096 | ||
| 86 | |||
| 87 | static struct kmem_cache *rsxx_dma_pool; | ||
| 88 | |||
| 89 | struct dma_tracker { | ||
| 90 | int next_tag; | ||
| 91 | struct rsxx_dma *dma; | ||
| 92 | }; | ||
| 93 | |||
| 94 | #define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \ | ||
| 95 | (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS)) | ||
| 96 | |||
| 97 | struct dma_tracker_list { | ||
| 98 | spinlock_t lock; | ||
| 99 | int head; | ||
| 100 | struct dma_tracker list[0]; | ||
| 101 | }; | ||
| 102 | |||
| 103 | |||
| 104 | /*----------------- Misc Utility Functions -------------------*/ | ||
| 105 | static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card) | ||
| 106 | { | ||
| 107 | unsigned long long tgt_addr8; | ||
| 108 | |||
| 109 | tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) & | ||
| 110 | card->_stripe.upper_mask) | | ||
| 111 | ((addr8) & card->_stripe.lower_mask); | ||
| 112 | do_div(tgt_addr8, RSXX_HW_BLK_SIZE); | ||
| 113 | return tgt_addr8; | ||
| 114 | } | ||
| 115 | |||
| 116 | static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8) | ||
| 117 | { | ||
| 118 | unsigned int tgt; | ||
| 119 | |||
| 120 | tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask; | ||
| 121 | |||
| 122 | return tgt; | ||
| 123 | } | ||
| 124 | |||
| 125 | static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) | ||
| 126 | { | ||
| 127 | /* Reset all DMA Command/Status Queues */ | ||
| 128 | iowrite32(DMA_QUEUE_RESET, card->regmap + RESET); | ||
| 129 | } | ||
| 130 | |||
| 131 | static unsigned int get_dma_size(struct rsxx_dma *dma) | ||
| 132 | { | ||
| 133 | if (dma->sub_page.cnt) | ||
| 134 | return dma->sub_page.cnt << 9; | ||
| 135 | else | ||
| 136 | return RSXX_HW_BLK_SIZE; | ||
| 137 | } | ||
| 138 | |||
| 139 | |||
| 140 | /*----------------- DMA Tracker -------------------*/ | ||
| 141 | static void set_tracker_dma(struct dma_tracker_list *trackers, | ||
| 142 | int tag, | ||
| 143 | struct rsxx_dma *dma) | ||
| 144 | { | ||
| 145 | trackers->list[tag].dma = dma; | ||
| 146 | } | ||
| 147 | |||
| 148 | static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers, | ||
| 149 | int tag) | ||
| 150 | { | ||
| 151 | return trackers->list[tag].dma; | ||
| 152 | } | ||
| 153 | |||
| 154 | static int pop_tracker(struct dma_tracker_list *trackers) | ||
| 155 | { | ||
| 156 | int tag; | ||
| 157 | |||
| 158 | spin_lock(&trackers->lock); | ||
| 159 | tag = trackers->head; | ||
| 160 | if (tag != -1) { | ||
| 161 | trackers->head = trackers->list[tag].next_tag; | ||
| 162 | trackers->list[tag].next_tag = -1; | ||
| 163 | } | ||
| 164 | spin_unlock(&trackers->lock); | ||
| 165 | |||
| 166 | return tag; | ||
| 167 | } | ||
| 168 | |||
| 169 | static void push_tracker(struct dma_tracker_list *trackers, int tag) | ||
| 170 | { | ||
| 171 | spin_lock(&trackers->lock); | ||
| 172 | trackers->list[tag].next_tag = trackers->head; | ||
| 173 | trackers->head = tag; | ||
| 174 | trackers->list[tag].dma = NULL; | ||
| 175 | spin_unlock(&trackers->lock); | ||
| 176 | } | ||
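Taken together, pop_tracker()/push_tracker() implement a LIFO free list of hardware tags: 'head' is the next free tag and -1 marks exhaustion, while push_tracker() also clears the tag's DMA binding. A minimal usage sketch of the intended pairing (error handling elided):

    int tag;
    struct rsxx_dma *done;

    tag = pop_tracker(ctrl->trackers);          /* -1 if every tag is in flight */
    if (tag != -1) {
            set_tracker_dma(ctrl->trackers, tag, dma);
            /* ... tag is handed to hardware; on completion for this tag: */
            done = get_tracker_dma(ctrl->trackers, tag);
            push_tracker(ctrl->trackers, tag);  /* clears ->dma, returns tag to list */
    }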
| 177 | |||
| 178 | |||
| 179 | /*----------------- Interrupt Coalescing -------------*/ | ||
| 180 | /* | ||
| 181 | * Interrupt Coalescing Register Format: | ||
| 182 | * Interrupt Timer (64ns units) [15:0] | ||
| 183 | * Interrupt Count [24:16] | ||
| 184 | * Reserved [31:25] | ||
| 185 | */ | ||
| 186 | #define INTR_COAL_LATENCY_MASK (0x0000ffff) | ||
| 187 | |||
| 188 | #define INTR_COAL_COUNT_SHIFT 16 | ||
| 189 | #define INTR_COAL_COUNT_BITS 9 | ||
| 190 | #define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \ | ||
| 191 | INTR_COAL_COUNT_SHIFT) | ||
| 192 | #define INTR_COAL_LATENCY_UNITS_NS 64 | ||
| 193 | |||
| 194 | |||
| 195 | static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency) | ||
| 196 | { | ||
| 197 | u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS; | ||
| 198 | |||
| 199 | if (mode == RSXX_INTR_COAL_DISABLED) | ||
| 200 | return 0; | ||
| 201 | |||
| 202 | return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) | | ||
| 203 | (latency_units & INTR_COAL_LATENCY_MASK); | ||
| 204 | |||
| 205 | } | ||
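As the helper above shows, the explicit coalescing mode packs the 9-bit completion count into bits [24:16] and the latency timer, in 64 ns units, into bits [15:0]. A worked sketch with illustrative values (32 completions, 8192 ns):

    u32 count = 32;
    u32 latency_units = 8192 / INTR_COAL_LATENCY_UNITS_NS;        /* 128 */
    u32 val = ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
              (latency_units & INTR_COAL_LATENCY_MASK);
    /* val == 0x00200080: count 32 in [24:16], 128 * 64 ns = 8192 ns in [15:0] */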
| 206 | |||
| 207 | static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card) | ||
| 208 | { | ||
| 209 | int i; | ||
| 210 | u32 q_depth = 0; | ||
| 211 | u32 intr_coal; | ||
| 212 | |||
| 213 | if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE) | ||
| 214 | return; | ||
| 215 | |||
| 216 | for (i = 0; i < card->n_targets; i++) | ||
| 217 | q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth); | ||
| 218 | |||
| 219 | intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode, | ||
| 220 | q_depth / 2, | ||
| 221 | card->config.data.intr_coal.latency); | ||
| 222 | iowrite32(intr_coal, card->regmap + INTR_COAL); | ||
| 223 | } | ||
| 224 | |||
| 225 | /*----------------- RSXX DMA Handling -------------------*/ | ||
| 226 | static void rsxx_complete_dma(struct rsxx_cardinfo *card, | ||
| 227 | struct rsxx_dma *dma, | ||
| 228 | unsigned int status) | ||
| 229 | { | ||
| 230 | if (status & DMA_SW_ERR) | ||
| 231 | printk_ratelimited(KERN_ERR | ||
| 232 | "SW Error in DMA(cmd x%02x, laddr x%08x)\n", | ||
| 233 | dma->cmd, dma->laddr); | ||
| 234 | if (status & DMA_HW_FAULT) | ||
| 235 | printk_ratelimited(KERN_ERR | ||
| 236 | "HW Fault in DMA(cmd x%02x, laddr x%08x)\n", | ||
| 237 | dma->cmd, dma->laddr); | ||
| 238 | if (status & DMA_CANCELLED) | ||
| 239 | printk_ratelimited(KERN_ERR | ||
| 240 | "DMA Cancelled(cmd x%02x, laddr x%08x)\n", | ||
| 241 | dma->cmd, dma->laddr); | ||
| 242 | |||
| 243 | if (dma->dma_addr) | ||
| 244 | pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma), | ||
| 245 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
| 246 | PCI_DMA_TODEVICE : | ||
| 247 | PCI_DMA_FROMDEVICE); | ||
| 248 | |||
| 249 | if (dma->cb) | ||
| 250 | dma->cb(card, dma->cb_data, status ? 1 : 0); | ||
| 251 | |||
| 252 | kmem_cache_free(rsxx_dma_pool, dma); | ||
| 253 | } | ||
| 254 | |||
| 255 | static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl, | ||
| 256 | struct rsxx_dma *dma) | ||
| 257 | { | ||
| 258 | /* | ||
| 259 | * Requeued DMAs go to the front of the queue so they are issued | ||
| 260 | * first. | ||
| 261 | */ | ||
| 262 | spin_lock(&ctrl->queue_lock); | ||
| 263 | list_add(&dma->list, &ctrl->queue); | ||
| 264 | spin_unlock(&ctrl->queue_lock); | ||
| 265 | } | ||
| 266 | |||
| 267 | static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl, | ||
| 268 | struct rsxx_dma *dma, | ||
| 269 | u8 hw_st) | ||
| 270 | { | ||
| 271 | unsigned int status = 0; | ||
| 272 | int requeue_cmd = 0; | ||
| 273 | |||
| 274 | dev_dbg(CARD_TO_DEV(ctrl->card), | ||
| 275 | "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n", | ||
| 276 | dma->cmd, dma->laddr, hw_st); | ||
| 277 | |||
| 278 | if (hw_st & HW_STATUS_CRC) | ||
| 279 | ctrl->stats.crc_errors++; | ||
| 280 | if (hw_st & HW_STATUS_HARD_ERR) | ||
| 281 | ctrl->stats.hard_errors++; | ||
| 282 | if (hw_st & HW_STATUS_SOFT_ERR) | ||
| 283 | ctrl->stats.soft_errors++; | ||
| 284 | |||
| 285 | switch (dma->cmd) { | ||
| 286 | case HW_CMD_BLK_READ: | ||
| 287 | if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) { | ||
| 288 | if (ctrl->card->scrub_hard) { | ||
| 289 | dma->cmd = HW_CMD_BLK_RECON_READ; | ||
| 290 | requeue_cmd = 1; | ||
| 291 | ctrl->stats.reads_retried++; | ||
| 292 | } else { | ||
| 293 | status |= DMA_HW_FAULT; | ||
| 294 | ctrl->stats.reads_failed++; | ||
| 295 | } | ||
| 296 | } else if (hw_st & HW_STATUS_FAULT) { | ||
| 297 | status |= DMA_HW_FAULT; | ||
| 298 | ctrl->stats.reads_failed++; | ||
| 299 | } | ||
| 300 | |||
| 301 | break; | ||
| 302 | case HW_CMD_BLK_RECON_READ: | ||
| 303 | if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) { | ||
| 304 | /* Data could not be reconstructed. */ | ||
| 305 | status |= DMA_HW_FAULT; | ||
| 306 | ctrl->stats.reads_failed++; | ||
| 307 | } | ||
| 308 | |||
| 309 | break; | ||
| 310 | case HW_CMD_BLK_WRITE: | ||
| 311 | status |= DMA_HW_FAULT; | ||
| 312 | ctrl->stats.writes_failed++; | ||
| 313 | |||
| 314 | break; | ||
| 315 | case HW_CMD_BLK_DISCARD: | ||
| 316 | status |= DMA_HW_FAULT; | ||
| 317 | ctrl->stats.discards_failed++; | ||
| 318 | |||
| 319 | break; | ||
| 320 | default: | ||
| 321 | dev_err(CARD_TO_DEV(ctrl->card), | ||
| 322 | "Unknown command in DMA! (cmd: x%02x " | ||
| 323 | "laddr x%08x st: x%02x)\n", | ||
| 324 | dma->cmd, dma->laddr, hw_st); | ||
| 325 | status |= DMA_SW_ERR; | ||
| 326 | |||
| 327 | break; | ||
| 328 | } | ||
| 329 | |||
| 330 | if (requeue_cmd) | ||
| 331 | rsxx_requeue_dma(ctrl, dma); | ||
| 332 | else | ||
| 333 | rsxx_complete_dma(ctrl->card, dma, status); | ||
| 334 | } | ||
| 335 | |||
| 336 | static void dma_engine_stalled(unsigned long data) | ||
| 337 | { | ||
| 338 | struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; | ||
| 339 | |||
| 340 | if (atomic_read(&ctrl->stats.hw_q_depth) == 0) | ||
| 341 | return; | ||
| 342 | |||
| 343 | if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { | ||
| 344 | /* | ||
| 345 | * The DMA engine was stalled because the SW_CMD_IDX write | ||
| 346 | * was lost. Issue it again to recover. | ||
| 347 | */ | ||
| 348 | dev_warn(CARD_TO_DEV(ctrl->card), | ||
| 349 | "SW_CMD_IDX write was lost, re-writing...\n"); | ||
| 350 | iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); | ||
| 351 | mod_timer(&ctrl->activity_timer, | ||
| 352 | jiffies + DMA_ACTIVITY_TIMEOUT); | ||
| 353 | } else { | ||
| 354 | dev_warn(CARD_TO_DEV(ctrl->card), | ||
| 355 | "DMA channel %d has stalled, faulting interface.\n", | ||
| 356 | ctrl->id); | ||
| 357 | ctrl->card->dma_fault = 1; | ||
| 358 | } | ||
| 359 | } | ||
| 360 | |||
| 361 | static void rsxx_issue_dmas(struct work_struct *work) | ||
| 362 | { | ||
| 363 | struct rsxx_dma_ctrl *ctrl; | ||
| 364 | struct rsxx_dma *dma; | ||
| 365 | int tag; | ||
| 366 | int cmds_pending = 0; | ||
| 367 | struct hw_cmd *hw_cmd_buf; | ||
| 368 | |||
| 369 | ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); | ||
| 370 | hw_cmd_buf = ctrl->cmd.buf; | ||
| 371 | |||
| 372 | if (unlikely(ctrl->card->halt)) | ||
| 373 | return; | ||
| 374 | |||
| 375 | while (1) { | ||
| 376 | spin_lock(&ctrl->queue_lock); | ||
| 377 | if (list_empty(&ctrl->queue)) { | ||
| 378 | spin_unlock(&ctrl->queue_lock); | ||
| 379 | break; | ||
| 380 | } | ||
| 381 | spin_unlock(&ctrl->queue_lock); | ||
| 382 | |||
| 383 | tag = pop_tracker(ctrl->trackers); | ||
| 384 | if (tag == -1) | ||
| 385 | break; | ||
| 386 | |||
| 387 | spin_lock(&ctrl->queue_lock); | ||
| 388 | dma = list_entry(ctrl->queue.next, struct rsxx_dma, list); | ||
| 389 | list_del(&dma->list); | ||
| 390 | ctrl->stats.sw_q_depth--; | ||
| 391 | spin_unlock(&ctrl->queue_lock); | ||
| 392 | |||
| 393 | /* | ||
| 394 | * This will catch any DMAs that slipped in right before the | ||
| 395 | * fault, but were queued after all the other DMAs were | ||
| 396 | * cancelled. | ||
| 397 | */ | ||
| 398 | if (unlikely(ctrl->card->dma_fault)) { | ||
| 399 | push_tracker(ctrl->trackers, tag); | ||
| 400 | rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED); | ||
| 401 | continue; | ||
| 402 | } | ||
| 403 | |||
| 404 | set_tracker_dma(ctrl->trackers, tag, dma); | ||
| 405 | hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd; | ||
| 406 | hw_cmd_buf[ctrl->cmd.idx].tag = tag; | ||
| 407 | hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0; | ||
| 408 | hw_cmd_buf[ctrl->cmd.idx].sub_page = | ||
| 409 | ((dma->sub_page.cnt & 0x7) << 4) | | ||
| 410 | (dma->sub_page.off & 0x7); | ||
| 411 | |||
| 412 | hw_cmd_buf[ctrl->cmd.idx].device_addr = | ||
| 413 | cpu_to_le32(dma->laddr); | ||
| 414 | |||
| 415 | hw_cmd_buf[ctrl->cmd.idx].host_addr = | ||
| 416 | cpu_to_le64(dma->dma_addr); | ||
| 417 | |||
| 418 | dev_dbg(CARD_TO_DEV(ctrl->card), | ||
| 419 | "Issue DMA%d(laddr %d tag %d) to idx %d\n", | ||
| 420 | ctrl->id, dma->laddr, tag, ctrl->cmd.idx); | ||
| 421 | |||
| 422 | ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK; | ||
| 423 | cmds_pending++; | ||
| 424 | |||
| 425 | if (dma->cmd == HW_CMD_BLK_WRITE) | ||
| 426 | ctrl->stats.writes_issued++; | ||
| 427 | else if (dma->cmd == HW_CMD_BLK_DISCARD) | ||
| 428 | ctrl->stats.discards_issued++; | ||
| 429 | else | ||
| 430 | ctrl->stats.reads_issued++; | ||
| 431 | } | ||
| 432 | |||
| 433 | /* Let HW know we've queued commands. */ | ||
| 434 | if (cmds_pending) { | ||
| 435 | /* | ||
| 436 | * We must guarantee that the CPU writes to 'ctrl->cmd.buf' | ||
| 437 | * (which is in PCI-consistent system memory) from the loop | ||
| 438 | * above make it into the coherency domain before the | ||
| 439 | * following PIO "trigger" updating the cmd.idx. A WMB is | ||
| 440 | * sufficient. We need not explicitly CPU cache-flush since | ||
| 441 | * the memory is a PCI-consistent (i.e., coherent) mapping. | ||
| 442 | */ | ||
| 443 | wmb(); | ||
| 444 | |||
| 445 | atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); | ||
| 446 | mod_timer(&ctrl->activity_timer, | ||
| 447 | jiffies + DMA_ACTIVITY_TIMEOUT); | ||
| 448 | iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); | ||
| 449 | } | ||
| 450 | } | ||
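The sub_page byte built in the issue loop packs the 512-byte offset into bits [2:0] and the 512-byte count into bits [6:4]. A sketch of the encoding and its inverse (hypothetical helper names, not part of the driver):

    static inline u8 rsxx_sub_page_encode(u32 off, u32 cnt)
    {
            return ((cnt & 0x7) << 4) | (off & 0x7);
    }

    static inline void rsxx_sub_page_decode(u8 sp, u32 *off, u32 *cnt)
    {
            *off = sp & 0x7;          /* bits [2:0] */
            *cnt = (sp >> 4) & 0x7;   /* bits [6:4] */
    }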
| 451 | |||
| 452 | static void rsxx_dma_done(struct work_struct *work) | ||
| 453 | { | ||
| 454 | struct rsxx_dma_ctrl *ctrl; | ||
| 455 | struct rsxx_dma *dma; | ||
| 456 | unsigned long flags; | ||
| 457 | u16 count; | ||
| 458 | u8 status; | ||
| 459 | u8 tag; | ||
| 460 | struct hw_status *hw_st_buf; | ||
| 461 | |||
| 462 | ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work); | ||
| 463 | hw_st_buf = ctrl->status.buf; | ||
| 464 | |||
| 465 | if (unlikely(ctrl->card->halt) || | ||
| 466 | unlikely(ctrl->card->dma_fault)) | ||
| 467 | return; | ||
| 468 | |||
| 469 | count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); | ||
| 470 | |||
| 471 | while (count == ctrl->e_cnt) { | ||
| 472 | /* | ||
| 473 | * The read memory-barrier is necessary to keep aggressive | ||
| 474 | * processors/optimizers (such as the PPC Apple G5) from | ||
| 475 | * reordering the following status-buffer tag & status read | ||
| 476 | * *before* the count read on subsequent iterations of the | ||
| 477 | * loop! | ||
| 478 | */ | ||
| 479 | rmb(); | ||
| 480 | |||
| 481 | status = hw_st_buf[ctrl->status.idx].status; | ||
| 482 | tag = hw_st_buf[ctrl->status.idx].tag; | ||
| 483 | |||
| 484 | dma = get_tracker_dma(ctrl->trackers, tag); | ||
| 485 | if (dma == NULL) { | ||
| 486 | spin_lock_irqsave(&ctrl->card->irq_lock, flags); | ||
| 487 | rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL); | ||
| 488 | spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); | ||
| 489 | |||
| 490 | dev_err(CARD_TO_DEV(ctrl->card), | ||
| 491 | "No tracker for tag %d " | ||
| 492 | "(idx %d id %d)\n", | ||
| 493 | tag, ctrl->status.idx, ctrl->id); | ||
| 494 | return; | ||
| 495 | } | ||
| 496 | |||
| 497 | dev_dbg(CARD_TO_DEV(ctrl->card), | ||
| 498 | "Completing DMA%d" | ||
| 499 | "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n", | ||
| 500 | ctrl->id, dma->laddr, tag, status, count, | ||
| 501 | ctrl->status.idx); | ||
| 502 | |||
| 503 | atomic_dec(&ctrl->stats.hw_q_depth); | ||
| 504 | |||
| 505 | mod_timer(&ctrl->activity_timer, | ||
| 506 | jiffies + DMA_ACTIVITY_TIMEOUT); | ||
| 507 | |||
| 508 | if (status) | ||
| 509 | rsxx_handle_dma_error(ctrl, dma, status); | ||
| 510 | else | ||
| 511 | rsxx_complete_dma(ctrl->card, dma, 0); | ||
| 512 | |||
| 513 | push_tracker(ctrl->trackers, tag); | ||
| 514 | |||
| 515 | ctrl->status.idx = (ctrl->status.idx + 1) & | ||
| 516 | RSXX_CS_IDX_MASK; | ||
| 517 | ctrl->e_cnt++; | ||
| 518 | |||
| 519 | count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); | ||
| 520 | } | ||
| 521 | |||
| 522 | dma_intr_coal_auto_tune(ctrl->card); | ||
| 523 | |||
| 524 | if (atomic_read(&ctrl->stats.hw_q_depth) == 0) | ||
| 525 | del_timer_sync(&ctrl->activity_timer); | ||
| 526 | |||
| 527 | spin_lock_irqsave(&ctrl->card->irq_lock, flags); | ||
| 528 | rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id)); | ||
| 529 | spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); | ||
| 530 | |||
| 531 | spin_lock(&ctrl->queue_lock); | ||
| 532 | if (ctrl->stats.sw_q_depth) | ||
| 533 | queue_work(ctrl->issue_wq, &ctrl->issue_dma_work); | ||
| 534 | spin_unlock(&ctrl->queue_lock); | ||
| 535 | } | ||
| 536 | |||
| 537 | static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card, | ||
| 538 | struct list_head *q) | ||
| 539 | { | ||
| 540 | struct rsxx_dma *dma; | ||
| 541 | struct rsxx_dma *tmp; | ||
| 542 | int cnt = 0; | ||
| 543 | |||
| 544 | list_for_each_entry_safe(dma, tmp, q, list) { | ||
| 545 | list_del(&dma->list); | ||
| 546 | |||
| 547 | if (dma->dma_addr) | ||
| 548 | pci_unmap_page(card->dev, dma->dma_addr, | ||
| 549 | get_dma_size(dma), | ||
| 550 | (dma->cmd == HW_CMD_BLK_WRITE) ? | ||
| 551 | PCI_DMA_TODEVICE : | ||
| 552 | PCI_DMA_FROMDEVICE); | ||
| 553 | kmem_cache_free(rsxx_dma_pool, dma); | ||
| 554 | cnt++; | ||
| 555 | } | ||
| 556 | |||
| 557 | return cnt; | ||
| 558 | } | ||
| 559 | |||
| 560 | static int rsxx_queue_discard(struct rsxx_cardinfo *card, | ||
| 561 | struct list_head *q, | ||
| 562 | unsigned int laddr, | ||
| 563 | rsxx_dma_cb cb, | ||
| 564 | void *cb_data) | ||
| 565 | { | ||
| 566 | struct rsxx_dma *dma; | ||
| 567 | |||
| 568 | dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL); | ||
| 569 | if (!dma) | ||
| 570 | return -ENOMEM; | ||
| 571 | |||
| 572 | dma->cmd = HW_CMD_BLK_DISCARD; | ||
| 573 | dma->laddr = laddr; | ||
| 574 | dma->dma_addr = 0; | ||
| 575 | dma->sub_page.off = 0; | ||
| 576 | dma->sub_page.cnt = 0; | ||
| 577 | dma->page = NULL; | ||
| 578 | dma->pg_off = 0; | ||
| 579 | dma->cb = cb; | ||
| 580 | dma->cb_data = cb_data; | ||
| 581 | |||
| 582 | dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr); | ||
| 583 | |||
| 584 | list_add_tail(&dma->list, q); | ||
| 585 | |||
| 586 | return 0; | ||
| 587 | } | ||
| 588 | |||
| 589 | static int rsxx_queue_dma(struct rsxx_cardinfo *card, | ||
| 590 | struct list_head *q, | ||
| 591 | int dir, | ||
| 592 | unsigned int dma_off, | ||
| 593 | unsigned int dma_len, | ||
| 594 | unsigned int laddr, | ||
| 595 | struct page *page, | ||
| 596 | unsigned int pg_off, | ||
| 597 | rsxx_dma_cb cb, | ||
| 598 | void *cb_data) | ||
| 599 | { | ||
| 600 | struct rsxx_dma *dma; | ||
| 601 | |||
| 602 | dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL); | ||
| 603 | if (!dma) | ||
| 604 | return -ENOMEM; | ||
| 605 | |||
| 606 | dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len, | ||
| 607 | dir ? PCI_DMA_TODEVICE : | ||
| 608 | PCI_DMA_FROMDEVICE); | ||
| 609 | if (!dma->dma_addr) { | ||
| 610 | kmem_cache_free(rsxx_dma_pool, dma); | ||
| 611 | return -ENOMEM; | ||
| 612 | } | ||
| 613 | |||
| 614 | dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ; | ||
| 615 | dma->laddr = laddr; | ||
| 616 | dma->sub_page.off = (dma_off >> 9); | ||
| 617 | dma->sub_page.cnt = (dma_len >> 9); | ||
| 618 | dma->page = page; | ||
| 619 | dma->pg_off = pg_off; | ||
| 620 | dma->cb = cb; | ||
| 621 | dma->cb_data = cb_data; | ||
| 622 | |||
| 623 | dev_dbg(CARD_TO_DEV(card), | ||
| 624 | "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n", | ||
| 625 | dir ? 'W' : 'R', dma->laddr, dma->sub_page.off, | ||
| 626 | dma->sub_page.cnt, dma->page, dma->pg_off); | ||
| 627 | |||
| 628 | /* Queue the DMA */ | ||
| 629 | list_add_tail(&dma->list, q); | ||
| 630 | |||
| 631 | return 0; | ||
| 632 | } | ||
| 633 | |||
| 634 | int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | ||
| 635 | struct bio *bio, | ||
| 636 | atomic_t *n_dmas, | ||
| 637 | rsxx_dma_cb cb, | ||
| 638 | void *cb_data) | ||
| 639 | { | ||
| 640 | struct list_head dma_list[RSXX_MAX_TARGETS]; | ||
| 641 | struct bio_vec *bvec; | ||
| 642 | unsigned long long addr8; | ||
| 643 | unsigned int laddr; | ||
| 644 | unsigned int bv_len; | ||
| 645 | unsigned int bv_off; | ||
| 646 | unsigned int dma_off; | ||
| 647 | unsigned int dma_len; | ||
| 648 | int dma_cnt[RSXX_MAX_TARGETS]; | ||
| 649 | int tgt; | ||
| 650 | int st; | ||
| 651 | int i; | ||
| 652 | |||
| 653 | addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */ | ||
| 654 | atomic_set(n_dmas, 0); | ||
| 655 | |||
| 656 | for (i = 0; i < card->n_targets; i++) { | ||
| 657 | INIT_LIST_HEAD(&dma_list[i]); | ||
| 658 | dma_cnt[i] = 0; | ||
| 659 | } | ||
| 660 | |||
| 661 | if (bio->bi_rw & REQ_DISCARD) { | ||
| 662 | bv_len = bio->bi_size; | ||
| 663 | |||
| 664 | while (bv_len > 0) { | ||
| 665 | tgt = rsxx_get_dma_tgt(card, addr8); | ||
| 666 | laddr = rsxx_addr8_to_laddr(addr8, card); | ||
| 667 | |||
| 668 | st = rsxx_queue_discard(card, &dma_list[tgt], laddr, | ||
| 669 | cb, cb_data); | ||
| 670 | if (st) | ||
| 671 | goto bvec_err; | ||
| 672 | |||
| 673 | dma_cnt[tgt]++; | ||
| 674 | atomic_inc(n_dmas); | ||
| 675 | addr8 += RSXX_HW_BLK_SIZE; | ||
| 676 | bv_len -= RSXX_HW_BLK_SIZE; | ||
| 677 | } | ||
| 678 | } else { | ||
| 679 | bio_for_each_segment(bvec, bio, i) { | ||
| 680 | bv_len = bvec->bv_len; | ||
| 681 | bv_off = bvec->bv_offset; | ||
| 682 | |||
| 683 | while (bv_len > 0) { | ||
| 684 | tgt = rsxx_get_dma_tgt(card, addr8); | ||
| 685 | laddr = rsxx_addr8_to_laddr(addr8, card); | ||
| 686 | dma_off = addr8 & RSXX_HW_BLK_MASK; | ||
| 687 | dma_len = min(bv_len, | ||
| 688 | RSXX_HW_BLK_SIZE - dma_off); | ||
| 689 | |||
| 690 | st = rsxx_queue_dma(card, &dma_list[tgt], | ||
| 691 | bio_data_dir(bio), | ||
| 692 | dma_off, dma_len, | ||
| 693 | laddr, bvec->bv_page, | ||
| 694 | bv_off, cb, cb_data); | ||
| 695 | if (st) | ||
| 696 | goto bvec_err; | ||
| 697 | |||
| 698 | dma_cnt[tgt]++; | ||
| 699 | atomic_inc(n_dmas); | ||
| 700 | addr8 += dma_len; | ||
| 701 | bv_off += dma_len; | ||
| 702 | bv_len -= dma_len; | ||
| 703 | } | ||
| 704 | } | ||
| 705 | } | ||
| 706 | |||
| 707 | for (i = 0; i < card->n_targets; i++) { | ||
| 708 | if (!list_empty(&dma_list[i])) { | ||
| 709 | spin_lock(&card->ctrl[i].queue_lock); | ||
| 710 | card->ctrl[i].stats.sw_q_depth += dma_cnt[i]; | ||
| 711 | list_splice_tail(&dma_list[i], &card->ctrl[i].queue); | ||
| 712 | spin_unlock(&card->ctrl[i].queue_lock); | ||
| 713 | |||
| 714 | queue_work(card->ctrl[i].issue_wq, | ||
| 715 | &card->ctrl[i].issue_dma_work); | ||
| 716 | } | ||
| 717 | } | ||
| 718 | |||
| 719 | return 0; | ||
| 720 | |||
| 721 | bvec_err: | ||
| 722 | for (i = 0; i < card->n_targets; i++) | ||
| 723 | rsxx_cleanup_dma_queue(card, &dma_list[i]); | ||
| 724 | |||
| 725 | return st; | ||
| 726 | } | ||
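Inside the loop above, each bio segment is carved into DMAs that never cross a 4 KB hardware block: the chunk length is the smaller of the bytes left in the current block and the bytes left in the segment. A standalone sketch of that arithmetic with illustrative values:

    /* Chunk a 10 KB segment starting at byte address 0x1E00 (illustrative). */
    u64 addr8 = 0x1E00;
    u32 len = 10 * 1024;

    while (len > 0) {
            u32 dma_off = addr8 & RSXX_HW_BLK_MASK;             /* offset in 4 KB block */
            u32 dma_len = min(len, RSXX_HW_BLK_SIZE - dma_off); /* stop at block edge */
            /* First pass: dma_off = 0xE00, dma_len = 0x200; then whole 4 KB blocks. */
            addr8 += dma_len;
            len -= dma_len;
    }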
| 727 | |||
| 728 | |||
| 729 | /*----------------- DMA Engine Initialization & Setup -------------------*/ | ||
| 730 | static int rsxx_dma_ctrl_init(struct pci_dev *dev, | ||
| 731 | struct rsxx_dma_ctrl *ctrl) | ||
| 732 | { | ||
| 733 | int i; | ||
| 734 | |||
| 735 | memset(&ctrl->stats, 0, sizeof(ctrl->stats)); | ||
| 736 | |||
| 737 | ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8, | ||
| 738 | &ctrl->status.dma_addr); | ||
| 739 | ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8, | ||
| 740 | &ctrl->cmd.dma_addr); | ||
| 741 | if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) | ||
| 742 | return -ENOMEM; | ||
| 743 | |||
| 744 | ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8); | ||
| 745 | if (!ctrl->trackers) | ||
| 746 | return -ENOMEM; | ||
| 747 | |||
| 748 | ctrl->trackers->head = 0; | ||
| 749 | for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) { | ||
| 750 | ctrl->trackers->list[i].next_tag = i + 1; | ||
| 751 | ctrl->trackers->list[i].dma = NULL; | ||
| 752 | } | ||
| 753 | ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1; | ||
| 754 | spin_lock_init(&ctrl->trackers->lock); | ||
| 755 | |||
| 756 | spin_lock_init(&ctrl->queue_lock); | ||
| 757 | INIT_LIST_HEAD(&ctrl->queue); | ||
| 758 | |||
| 759 | setup_timer(&ctrl->activity_timer, dma_engine_stalled, | ||
| 760 | (unsigned long)ctrl); | ||
| 761 | |||
| 762 | ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0); | ||
| 763 | if (!ctrl->issue_wq) | ||
| 764 | return -ENOMEM; | ||
| 765 | |||
| 766 | ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0); | ||
| 767 | if (!ctrl->done_wq) | ||
| 768 | return -ENOMEM; | ||
| 769 | |||
| 770 | INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); | ||
| 771 | INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); | ||
| 772 | |||
| 773 | memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); | ||
| 774 | iowrite32(lower_32_bits(ctrl->status.dma_addr), | ||
| 775 | ctrl->regmap + SB_ADD_LO); | ||
| 776 | iowrite32(upper_32_bits(ctrl->status.dma_addr), | ||
| 777 | ctrl->regmap + SB_ADD_HI); | ||
| 778 | |||
| 779 | memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); | ||
| 780 | iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); | ||
| 781 | iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); | ||
| 782 | |||
| 783 | ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); | ||
| 784 | if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) { | ||
| 785 | dev_crit(&dev->dev, "Failed reading status cnt x%x\n", | ||
| 786 | ctrl->status.idx); | ||
| 787 | return -EINVAL; | ||
| 788 | } | ||
| 789 | iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); | ||
| 790 | iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); | ||
| 791 | |||
| 792 | ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); | ||
| 793 | if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { | ||
| 794 | dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n", | ||
| 795 | ctrl->cmd.idx); | ||
| 796 | return -EINVAL; | ||
| 797 | } | ||
| 798 | iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); | ||
| 799 | iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); | ||
| 800 | |||
| 801 | wmb(); | ||
| 802 | |||
| 803 | return 0; | ||
| 804 | } | ||
| 805 | |||
| 806 | static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card, | ||
| 807 | unsigned int stripe_size8) | ||
| 808 | { | ||
| 809 | if (!is_power_of_2(stripe_size8)) { | ||
| 810 | dev_err(CARD_TO_DEV(card), | ||
| 811 | "stripe_size is NOT a power of 2!\n"); | ||
| 812 | return -EINVAL; | ||
| 813 | } | ||
| 814 | |||
| 815 | card->_stripe.lower_mask = stripe_size8 - 1; | ||
| 816 | |||
| 817 | card->_stripe.upper_mask = ~(card->_stripe.lower_mask); | ||
| 818 | card->_stripe.upper_shift = ffs(card->n_targets) - 1; | ||
| 819 | |||
| 820 | card->_stripe.target_mask = card->n_targets - 1; | ||
| 821 | card->_stripe.target_shift = ffs(stripe_size8) - 1; | ||
| 822 | |||
| 823 | dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n", | ||
| 824 | card->_stripe.lower_mask); | ||
| 825 | dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n", | ||
| 826 | card->_stripe.upper_shift); | ||
| 827 | dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n", | ||
| 828 | card->_stripe.upper_mask); | ||
| 829 | dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n", | ||
| 830 | card->_stripe.target_mask); | ||
| 831 | dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n", | ||
| 832 | card->_stripe.target_shift); | ||
| 833 | |||
| 834 | return 0; | ||
| 835 | } | ||
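With a power-of-two stripe size, both the target and the logical address fall out of the shifts and masks set up above. A worked sketch, assuming stripe_size8 = 4096 and n_targets = 4 (illustrative values; the real ones come from the card config):

    /* Setup (mirrors rsxx_dma_stripe_setup() for the assumed values): */
    u64 lower_mask = 4096 - 1;         /* 0xFFF */
    u64 upper_mask = ~lower_mask;
    u64 upper_shift = ffs(4) - 1;      /* 2   */
    u64 target_mask = 4 - 1;           /* 0x3 */
    u64 target_shift = ffs(4096) - 1;  /* 12  */

    /* Translation (mirrors rsxx_get_dma_tgt()/rsxx_addr8_to_laddr()): */
    u64 addr8 = 0x5000;                                 /* byte address */
    int tgt = (addr8 >> target_shift) & target_mask;    /* 5 & 3 = 1    */
    u64 taddr = ((addr8 >> upper_shift) & upper_mask) | /* 0x1000       */
                (addr8 & lower_mask);                   /* | 0x000      */
    /* laddr = taddr / RSXX_HW_BLK_SIZE = 1 */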
| 836 | |||
| 837 | static int rsxx_dma_configure(struct rsxx_cardinfo *card) | ||
| 838 | { | ||
| 839 | u32 intr_coal; | ||
| 840 | |||
| 841 | intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode, | ||
| 842 | card->config.data.intr_coal.count, | ||
| 843 | card->config.data.intr_coal.latency); | ||
| 844 | iowrite32(intr_coal, card->regmap + INTR_COAL); | ||
| 845 | |||
| 846 | return rsxx_dma_stripe_setup(card, card->config.data.stripe_size); | ||
| 847 | } | ||
| 848 | |||
| 849 | int rsxx_dma_setup(struct rsxx_cardinfo *card) | ||
| 850 | { | ||
| 851 | unsigned long flags; | ||
| 852 | int st; | ||
| 853 | int i; | ||
| 854 | |||
| 855 | dev_info(CARD_TO_DEV(card), | ||
| 856 | "Initializing %d DMA targets\n", | ||
| 857 | card->n_targets); | ||
| 858 | |||
| 859 | /* The regmap is divided into 4K chunks, one per DMA channel. */ | ||
| 860 | for (i = 0; i < card->n_targets; i++) | ||
| 861 | card->ctrl[i].regmap = card->regmap + (i * 4096); | ||
| 862 | |||
| 863 | card->dma_fault = 0; | ||
| 864 | |||
| 865 | /* Reset the DMA queues */ | ||
| 866 | rsxx_dma_queue_reset(card); | ||
| 867 | |||
| 868 | /************* Setup DMA Control *************/ | ||
| 869 | for (i = 0; i < card->n_targets; i++) { | ||
| 870 | st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]); | ||
| 871 | if (st) | ||
| 872 | goto failed_dma_setup; | ||
| 873 | |||
| 874 | card->ctrl[i].card = card; | ||
| 875 | card->ctrl[i].id = i; | ||
| 876 | } | ||
| 877 | |||
| 878 | card->scrub_hard = 1; | ||
| 879 | |||
| 880 | if (card->config_valid) | ||
| 881 | rsxx_dma_configure(card); | ||
| 882 | |||
| 883 | /* Enable the interrupts after all setup has completed. */ | ||
| 884 | for (i = 0; i < card->n_targets; i++) { | ||
| 885 | spin_lock_irqsave(&card->irq_lock, flags); | ||
| 886 | rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i)); | ||
| 887 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
| 888 | } | ||
| 889 | |||
| 890 | return 0; | ||
| 891 | |||
| 892 | failed_dma_setup: | ||
| 893 | for (i = 0; i < card->n_targets; i++) { | ||
| 894 | struct rsxx_dma_ctrl *ctrl = &card->ctrl[i]; | ||
| 895 | |||
| 896 | if (ctrl->issue_wq) { | ||
| 897 | destroy_workqueue(ctrl->issue_wq); | ||
| 898 | ctrl->issue_wq = NULL; | ||
| 899 | } | ||
| 900 | |||
| 901 | if (ctrl->done_wq) { | ||
| 902 | destroy_workqueue(ctrl->done_wq); | ||
| 903 | ctrl->done_wq = NULL; | ||
| 904 | } | ||
| 905 | |||
| 906 | if (ctrl->trackers) | ||
| 907 | vfree(ctrl->trackers); | ||
| 908 | |||
| 909 | if (ctrl->status.buf) | ||
| 910 | pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8, | ||
| 911 | ctrl->status.buf, | ||
| 912 | ctrl->status.dma_addr); | ||
| 913 | if (ctrl->cmd.buf) | ||
| 914 | pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8, | ||
| 915 | ctrl->cmd.buf, ctrl->cmd.dma_addr); | ||
| 916 | } | ||
| 917 | |||
| 918 | return st; | ||
| 919 | } | ||
| 920 | |||
| 921 | |||
| 922 | void rsxx_dma_destroy(struct rsxx_cardinfo *card) | ||
| 923 | { | ||
| 924 | struct rsxx_dma_ctrl *ctrl; | ||
| 925 | struct rsxx_dma *dma; | ||
| 926 | int i, j; | ||
| 927 | int cnt = 0; | ||
| 928 | |||
| 929 | for (i = 0; i < card->n_targets; i++) { | ||
| 930 | ctrl = &card->ctrl[i]; | ||
| 931 | |||
| 932 | if (ctrl->issue_wq) { | ||
| 933 | destroy_workqueue(ctrl->issue_wq); | ||
| 934 | ctrl->issue_wq = NULL; | ||
| 935 | } | ||
| 936 | |||
| 937 | if (ctrl->done_wq) { | ||
| 938 | destroy_workqueue(ctrl->done_wq); | ||
| 939 | ctrl->done_wq = NULL; | ||
| 940 | } | ||
| 941 | |||
| 942 | if (timer_pending(&ctrl->activity_timer)) | ||
| 943 | del_timer_sync(&ctrl->activity_timer); | ||
| 944 | |||
| 945 | /* Clean up the DMA queue */ | ||
| 946 | spin_lock(&ctrl->queue_lock); | ||
| 947 | cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue); | ||
| 948 | spin_unlock(&ctrl->queue_lock); | ||
| 949 | |||
| 950 | if (cnt) | ||
| 951 | dev_info(CARD_TO_DEV(card), | ||
| 952 | "Freed %d queued DMAs on channel %d\n", | ||
| 953 | cnt, i); | ||
| 954 | |||
| 955 | /* Clean up issued DMAs */ | ||
| 956 | for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) { | ||
| 957 | dma = get_tracker_dma(ctrl->trackers, j); | ||
| 958 | if (dma) { | ||
| 959 | pci_unmap_page(card->dev, dma->dma_addr, | ||
| 960 | get_dma_size(dma), | ||
| 961 | (dma->cmd == HW_CMD_BLK_WRITE) ? | ||
| 962 | PCI_DMA_TODEVICE : | ||
| 963 | PCI_DMA_FROMDEVICE); | ||
| 964 | kmem_cache_free(rsxx_dma_pool, dma); | ||
| 965 | cnt++; | ||
| 966 | } | ||
| 967 | } | ||
| 968 | |||
| 969 | if (cnt) | ||
| 970 | dev_info(CARD_TO_DEV(card), | ||
| 971 | "Freed %d pending DMAs on channel %d\n", | ||
| 972 | cnt, i); | ||
| 973 | |||
| 974 | vfree(ctrl->trackers); | ||
| 975 | |||
| 976 | pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8, | ||
| 977 | ctrl->status.buf, ctrl->status.dma_addr); | ||
| 978 | pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8, | ||
| 979 | ctrl->cmd.buf, ctrl->cmd.dma_addr); | ||
| 980 | } | ||
| 981 | } | ||
| 982 | |||
| 983 | |||
| 984 | int rsxx_dma_init(void) | ||
| 985 | { | ||
| 986 | rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN); | ||
| 987 | if (!rsxx_dma_pool) | ||
| 988 | return -ENOMEM; | ||
| 989 | |||
| 990 | return 0; | ||
| 991 | } | ||
| 992 | |||
| 993 | |||
| 994 | void rsxx_dma_cleanup(void) | ||
| 995 | { | ||
| 996 | kmem_cache_destroy(rsxx_dma_pool); | ||
| 997 | } | ||
| 998 | |||
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h new file mode 100644 index 000000000000..2e50b65902b7 --- /dev/null +++ b/drivers/block/rsxx/rsxx.h | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | /* | ||
| 2 | * Filename: rsxx.h | ||
| 3 | * | ||
| 4 | * | ||
| 5 | * Authors: Joshua Morris <josh.h.morris@us.ibm.com> | ||
| 6 | * Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
| 7 | * | ||
| 8 | * (C) Copyright 2013 IBM Corporation | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation; either version 2 of the | ||
| 13 | * License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but | ||
| 16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 18 | * General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software Foundation, | ||
| 22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef __RSXX_H__ | ||
| 26 | #define __RSXX_H__ | ||
| 27 | |||
| 28 | /*----------------- IOCTL Definitions -------------------*/ | ||
| 29 | |||
| 30 | struct rsxx_reg_access { | ||
| 31 | __u32 addr; | ||
| 32 | __u32 cnt; | ||
| 33 | __u32 stat; | ||
| 34 | __u32 stream; | ||
| 35 | __u32 data[8]; | ||
| 36 | }; | ||
| 37 | |||
| 38 | #define RSXX_MAX_REG_CNT (8 * (sizeof(__u32))) | ||
| 39 | |||
| 40 | #define RSXX_IOC_MAGIC 'r' | ||
| 41 | |||
| 42 | #define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access) | ||
| 43 | #define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access) | ||
| 44 | |||
| 45 | #endif /* __RSXX_H__ */ | ||
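RSXX_GETREG/RSXX_SETREG pass a struct rsxx_reg_access in both directions: addr, cnt, and stream go in; stat and data come back. A hedged user-space sketch (the /dev/rsxx0 node name is an assumption, not something this header defines):

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include "rsxx.h"

    int main(void)
    {
            struct rsxx_reg_access reg = { .addr = 0x0, .cnt = 4, .stream = 0 };
            int fd = open("/dev/rsxx0", O_RDWR);   /* device node name assumed */

            if (fd < 0 || ioctl(fd, RSXX_GETREG, &reg) < 0)
                    return 1;
            printf("stat=0x%x data[0]=0x%x\n", reg.stat, reg.data[0]);
            return 0;
    }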
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h new file mode 100644 index 000000000000..c025fe5fdb70 --- /dev/null +++ b/drivers/block/rsxx/rsxx_cfg.h | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | /* | ||
| 2 | * Filename: rsxx_cfg.h | ||
| 3 | * | ||
| 4 | * | ||
| 5 | * Authors: Joshua Morris <josh.h.morris@us.ibm.com> | ||
| 6 | * Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
| 7 | * | ||
| 8 | * (C) Copyright 2013 IBM Corporation | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation; either version 2 of the | ||
| 13 | * License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but | ||
| 16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 18 | * General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software Foundation, | ||
| 22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef __RSXX_CFG_H__ | ||
| 26 | #define __RSXX_CFG_H__ | ||
| 27 | |||
| 28 | /* NOTE: Config values will be saved in network byte order (i.e. Big endian) */ | ||
| 29 | #include <linux/types.h> | ||
| 30 | |||
| 31 | /* | ||
| 32 | * The card config version must match the driver's expected version. If it does | ||
| 33 | * not, the DMA interfaces will not be attached and the user will need to | ||
| 34 | * initialize/upgrade the card configuration using the card config utility. | ||
| 35 | */ | ||
| 36 | #define RSXX_CFG_VERSION 4 | ||
| 37 | |||
| 38 | struct card_cfg_hdr { | ||
| 39 | __u32 version; | ||
| 40 | __u32 crc; | ||
| 41 | }; | ||
| 42 | |||
| 43 | struct card_cfg_data { | ||
| 44 | __u32 block_size; | ||
| 45 | __u32 stripe_size; | ||
| 46 | __u32 vendor_id; | ||
| 47 | __u32 cache_order; | ||
| 48 | struct { | ||
| 49 | __u32 mode; /* Disabled, manual, auto-tune... */ | ||
| 50 | __u32 count; /* Number of intr to coalesce */ | ||
| 51 | __u32 latency; /* Max wait time (in ns) */ | ||
| 52 | } intr_coal; | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct rsxx_card_cfg { | ||
| 56 | struct card_cfg_hdr hdr; | ||
| 57 | struct card_cfg_data data; | ||
| 58 | }; | ||
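Since the note above says config values are stored big-endian on the card, every field has to be byte-swapped after a read (and before a write). A minimal sketch of the read-side conversion (hypothetical helper name; only the first data fields shown):

    static void rsxx_cfg_to_cpu(struct rsxx_card_cfg *cfg)
    {
            cfg->hdr.version = be32_to_cpu(cfg->hdr.version);
            cfg->hdr.crc = be32_to_cpu(cfg->hdr.crc);
            cfg->data.block_size = be32_to_cpu(cfg->data.block_size);
            cfg->data.stripe_size = be32_to_cpu(cfg->data.stripe_size);
            /* ...and likewise for the remaining __u32 fields. */
    }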
| 59 | |||
| 60 | /* Vendor ID Values */ | ||
| 61 | #define RSXX_VENDOR_ID_TMS_IBM 0 | ||
| 62 | #define RSXX_VENDOR_ID_DSI 1 | ||
| 63 | #define RSXX_VENDOR_COUNT 2 | ||
| 64 | |||
| 65 | /* Interrupt Coalescing Values */ | ||
| 66 | #define RSXX_INTR_COAL_DISABLED 0 | ||
| 67 | #define RSXX_INTR_COAL_EXPLICIT 1 | ||
| 68 | #define RSXX_INTR_COAL_AUTO_TUNE 2 | ||
| 69 | |||
| 70 | |||
| 71 | #endif /* __RSXX_CFG_H__ */ | ||
| 72 | |||
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h new file mode 100644 index 000000000000..a1ac907d8f4c --- /dev/null +++ b/drivers/block/rsxx/rsxx_priv.h | |||
| @@ -0,0 +1,399 @@ | |||
| 1 | /* | ||
| 2 | * Filename: rsxx_priv.h | ||
| 3 | * | ||
| 4 | * | ||
| 5 | * Authors: Joshua Morris <josh.h.morris@us.ibm.com> | ||
| 6 | * Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
| 7 | * | ||
| 8 | * (C) Copyright 2013 IBM Corporation | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or | ||
| 11 | * modify it under the terms of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation; either version 2 of the | ||
| 13 | * License, or (at your option) any later version. | ||
| 14 | * | ||
| 15 | * This program is distributed in the hope that it will be useful, but | ||
| 16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 18 | * General Public License for more details. | ||
| 19 | * | ||
| 20 | * You should have received a copy of the GNU General Public License | ||
| 21 | * along with this program; if not, write to the Free Software Foundation, | ||
| 22 | * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 23 | */ | ||
| 24 | |||
| 25 | #ifndef __RSXX_PRIV_H__ | ||
| 26 | #define __RSXX_PRIV_H__ | ||
| 27 | |||
| 28 | #include <linux/version.h> | ||
| 29 | #include <linux/semaphore.h> | ||
| 30 | |||
| 31 | #include <linux/fs.h> | ||
| 32 | #include <linux/interrupt.h> | ||
| 33 | #include <linux/mutex.h> | ||
| 34 | #include <linux/pci.h> | ||
| 35 | #include <linux/spinlock.h> | ||
| 36 | #include <linux/sysfs.h> | ||
| 37 | #include <linux/workqueue.h> | ||
| 38 | #include <linux/bio.h> | ||
| 39 | #include <linux/vmalloc.h> | ||
| 40 | #include <linux/timer.h> | ||
| 41 | #include <linux/ioctl.h> | ||
| 42 | |||
| 43 | #include "rsxx.h" | ||
| 44 | #include "rsxx_cfg.h" | ||
| 45 | |||
| 46 | struct proc_cmd; | ||
| 47 | |||
| 48 | #define PCI_VENDOR_ID_TMS_IBM 0x15B6 | ||
| 49 | #define PCI_DEVICE_ID_RS70_FLASH 0x0019 | ||
| 50 | #define PCI_DEVICE_ID_RS70D_FLASH 0x001A | ||
| 51 | #define PCI_DEVICE_ID_RS80_FLASH 0x001C | ||
| 52 | #define PCI_DEVICE_ID_RS81_FLASH 0x001E | ||
| 53 | |||
| 54 | #define RS70_PCI_REV_SUPPORTED 4 | ||
| 55 | |||
| 56 | #define DRIVER_NAME "rsxx" | ||
| 57 | #define DRIVER_VERSION "3.7" | ||
| 58 | |||
| 59 | /* Block size is 4096 */ | ||
| 60 | #define RSXX_HW_BLK_SHIFT 12 | ||
| 61 | #define RSXX_HW_BLK_SIZE (1 << RSXX_HW_BLK_SHIFT) | ||
| 62 | #define RSXX_HW_BLK_MASK (RSXX_HW_BLK_SIZE - 1) | ||
| 63 | |||
| 64 | #define MAX_CREG_DATA8 32 | ||
| 65 | #define LOG_BUF_SIZE8 128 | ||
| 66 | |||
| 67 | #define RSXX_MAX_OUTSTANDING_CMDS 255 | ||
| 68 | #define RSXX_CS_IDX_MASK 0xff | ||
| 69 | |||
| 70 | #define RSXX_MAX_TARGETS 8 | ||
| 71 | |||
| 72 | struct dma_tracker_list; | ||
| 73 | |||
| 74 | /* DMA Command/Status Buffer structure */ | ||
| 75 | struct rsxx_cs_buffer { | ||
| 76 | dma_addr_t dma_addr; | ||
| 77 | void *buf; | ||
| 78 | u32 idx; | ||
| 79 | }; | ||
| 80 | |||
| 81 | struct rsxx_dma_stats { | ||
| 82 | u32 crc_errors; | ||
| 83 | u32 hard_errors; | ||
| 84 | u32 soft_errors; | ||
| 85 | u32 writes_issued; | ||
| 86 | u32 writes_failed; | ||
| 87 | u32 reads_issued; | ||
| 88 | u32 reads_failed; | ||
| 89 | u32 reads_retried; | ||
| 90 | u32 discards_issued; | ||
| 91 | u32 discards_failed; | ||
| 92 | u32 done_rescheduled; | ||
| 93 | u32 issue_rescheduled; | ||
| 94 | u32 sw_q_depth; /* Number of DMAs on the SW queue. */ | ||
| 95 | atomic_t hw_q_depth; /* Number of DMAs queued to HW. */ | ||
| 96 | }; | ||
| 97 | |||
| 98 | struct rsxx_dma_ctrl { | ||
| 99 | struct rsxx_cardinfo *card; | ||
| 100 | int id; | ||
| 101 | void __iomem *regmap; | ||
| 102 | struct rsxx_cs_buffer status; | ||
| 103 | struct rsxx_cs_buffer cmd; | ||
| 104 | u16 e_cnt; | ||
| 105 | spinlock_t queue_lock; | ||
| 106 | struct list_head queue; | ||
| 107 | struct workqueue_struct *issue_wq; | ||
| 108 | struct work_struct issue_dma_work; | ||
| 109 | struct workqueue_struct *done_wq; | ||
| 110 | struct work_struct dma_done_work; | ||
| 111 | struct timer_list activity_timer; | ||
| 112 | struct dma_tracker_list *trackers; | ||
| 113 | struct rsxx_dma_stats stats; | ||
| 114 | }; | ||
| 115 | |||
| 116 | struct rsxx_cardinfo { | ||
| 117 | struct pci_dev *dev; | ||
| 118 | unsigned int halt; | ||
| 119 | |||
| 120 | void __iomem *regmap; | ||
| 121 | spinlock_t irq_lock; | ||
| 122 | unsigned int isr_mask; | ||
| 123 | unsigned int ier_mask; | ||
| 124 | |||
| 125 | struct rsxx_card_cfg config; | ||
| 126 | int config_valid; | ||
| 127 | |||
| 128 | /* Embedded CPU Communication */ | ||
| 129 | struct { | ||
| 130 | spinlock_t lock; | ||
| 131 | bool active; | ||
| 132 | struct creg_cmd *active_cmd; | ||
| 133 | struct work_struct done_work; | ||
| 134 | struct list_head queue; | ||
| 135 | unsigned int q_depth; | ||
| 136 | /* Cache the creg status to prevent ioreads */ | ||
| 137 | struct { | ||
| 138 | u32 stat; | ||
| 139 | u32 failed_cancel_timer; | ||
| 140 | u32 creg_timeout; | ||
| 141 | } creg_stats; | ||
| 142 | struct timer_list cmd_timer; | ||
| 143 | struct mutex reset_lock; | ||
| 144 | int reset; | ||
| 145 | } creg_ctrl; | ||
| 146 | |||
| 147 | struct { | ||
| 148 | char tmp[MAX_CREG_DATA8]; | ||
| 149 | char buf[LOG_BUF_SIZE8]; /* NUL-terminated */ | ||
| 150 | int buf_len; | ||
| 151 | } log; | ||
| 152 | |||
| 153 | struct work_struct event_work; | ||
| 154 | unsigned int state; | ||
| 155 | u64 size8; | ||
| 156 | |||
| 157 | /* Lock the device attach/detach function */ | ||
| 158 | struct mutex dev_lock; | ||
| 159 | |||
| 160 | /* Block Device Variables */ | ||
| 161 | bool bdev_attached; | ||
| 162 | int disk_id; | ||
| 163 | int major; | ||
| 164 | struct request_queue *queue; | ||
| 165 | struct gendisk *gendisk; | ||
| 166 | struct { | ||
| 167 | /* Used to convert a byte address to a device address. */ | ||
| 168 | u64 lower_mask; | ||
| 169 | u64 upper_shift; | ||
| 170 | u64 upper_mask; | ||
| 171 | u64 target_mask; | ||
| 172 | u64 target_shift; | ||
| 173 | } _stripe; | ||
| 174 | unsigned int dma_fault; | ||
| 175 | |||
| 176 | int scrub_hard; | ||
| 177 | |||
| 178 | int n_targets; | ||
| 179 | struct rsxx_dma_ctrl *ctrl; | ||
| 180 | }; | ||
| 181 | |||
| 182 | enum rsxx_pci_regmap { | ||
| 183 | HWID = 0x00, /* Hardware Identification Register */ | ||
| 184 | SCRATCH = 0x04, /* Scratch/Debug Register */ | ||
| 185 | RESET = 0x08, /* Reset Register */ | ||
| 186 | ISR = 0x10, /* Interrupt Status Register */ | ||
| 187 | IER = 0x14, /* Interrupt Enable Register */ | ||
| 188 | IPR = 0x18, /* Interrupt Poll Register */ | ||
| 189 | CB_ADD_LO = 0x20, /* Command Host Buffer Address [31:0] */ | ||
| 190 | CB_ADD_HI = 0x24, /* Command Host Buffer Address [63:32]*/ | ||
| 191 | HW_CMD_IDX = 0x28, /* Hardware Processed Command Index */ | ||
| 192 | SW_CMD_IDX = 0x2C, /* Software Processed Command Index */ | ||
| 193 | SB_ADD_LO = 0x30, /* Status Host Buffer Address [31:0] */ | ||
| 194 | SB_ADD_HI = 0x34, /* Status Host Buffer Address [63:32] */ | ||
| 195 | HW_STATUS_CNT = 0x38, /* Hardware Status Counter */ | ||
| 196 | SW_STATUS_CNT = 0x3C, /* Deprecated */ | ||
| 197 | CREG_CMD = 0x40, /* CPU Command Register */ | ||
| 198 | CREG_ADD = 0x44, /* CPU Address Register */ | ||
| 199 | CREG_CNT = 0x48, /* CPU Count Register */ | ||
| 200 | CREG_STAT = 0x4C, /* CPU Status Register */ | ||
| 201 | CREG_DATA0 = 0x50, /* CPU Data Registers */ | ||
| 202 | CREG_DATA1 = 0x54, | ||
| 203 | CREG_DATA2 = 0x58, | ||
| 204 | CREG_DATA3 = 0x5C, | ||
| 205 | CREG_DATA4 = 0x60, | ||
| 206 | CREG_DATA5 = 0x64, | ||
| 207 | CREG_DATA6 = 0x68, | ||
| 208 | CREG_DATA7 = 0x6c, | ||
| 209 | INTR_COAL = 0x70, /* Interrupt Coalescing Register */ | ||
| 210 | HW_ERROR = 0x74, /* Card Error Register */ | ||
| 211 | PCI_DEBUG0 = 0x78, /* PCI Debug Registers */ | ||
| 212 | PCI_DEBUG1 = 0x7C, | ||
| 213 | PCI_DEBUG2 = 0x80, | ||
| 214 | PCI_DEBUG3 = 0x84, | ||
| 215 | PCI_DEBUG4 = 0x88, | ||
| 216 | PCI_DEBUG5 = 0x8C, | ||
| 217 | PCI_DEBUG6 = 0x90, | ||
| 218 | PCI_DEBUG7 = 0x94, | ||
| 219 | PCI_POWER_THROTTLE = 0x98, | ||
| 220 | PERF_CTRL = 0x9c, | ||
| 221 | PERF_TIMER_LO = 0xa0, | ||
| 222 | PERF_TIMER_HI = 0xa4, | ||
| 223 | PERF_RD512_LO = 0xa8, | ||
| 224 | PERF_RD512_HI = 0xac, | ||
| 225 | PERF_WR512_LO = 0xb0, | ||
| 226 | PERF_WR512_HI = 0xb4, | ||
| 227 | }; | ||
| 228 | |||
| 229 | enum rsxx_intr { | ||
| 230 | CR_INTR_DMA0 = 0x00000001, | ||
| 231 | CR_INTR_CREG = 0x00000002, | ||
| 232 | CR_INTR_DMA1 = 0x00000004, | ||
| 233 | CR_INTR_EVENT = 0x00000008, | ||
| 234 | CR_INTR_DMA2 = 0x00000010, | ||
| 235 | CR_INTR_DMA3 = 0x00000020, | ||
| 236 | CR_INTR_DMA4 = 0x00000040, | ||
| 237 | CR_INTR_DMA5 = 0x00000080, | ||
| 238 | CR_INTR_DMA6 = 0x00000100, | ||
| 239 | CR_INTR_DMA7 = 0x00000200, | ||
| 240 | CR_INTR_DMA_ALL = 0x000003f5, | ||
| 241 | CR_INTR_ALL = 0xffffffff, | ||
| 242 | }; | ||
| 243 | |||
| 244 | static inline int CR_INTR_DMA(int N) | ||
| 245 | { | ||
| 246 | static const unsigned int _CR_INTR_DMA[] = { | ||
| 247 | CR_INTR_DMA0, CR_INTR_DMA1, CR_INTR_DMA2, CR_INTR_DMA3, | ||
| 248 | CR_INTR_DMA4, CR_INTR_DMA5, CR_INTR_DMA6, CR_INTR_DMA7 | ||
| 249 | }; | ||
| 250 | return _CR_INTR_DMA[N]; | ||
| 251 | } | ||
| 252 | enum rsxx_pci_reset { | ||
| 253 | DMA_QUEUE_RESET = 0x00000001, | ||
| 254 | }; | ||
| 255 | |||
| 256 | enum rsxx_pci_revision { | ||
| 257 | RSXX_DISCARD_SUPPORT = 2, | ||
| 258 | }; | ||
| 259 | |||
| 260 | enum rsxx_creg_cmd { | ||
| 261 | CREG_CMD_TAG_MASK = 0x0000FF00, | ||
| 262 | CREG_OP_WRITE = 0x000000C0, | ||
| 263 | CREG_OP_READ = 0x000000E0, | ||
| 264 | }; | ||
| 265 | |||
| 266 | enum rsxx_creg_addr { | ||
| 267 | CREG_ADD_CARD_CMD = 0x80001000, | ||
| 268 | CREG_ADD_CARD_STATE = 0x80001004, | ||
| 269 | CREG_ADD_CARD_SIZE = 0x8000100c, | ||
| 270 | CREG_ADD_CAPABILITIES = 0x80001050, | ||
| 271 | CREG_ADD_LOG = 0x80002000, | ||
| 272 | CREG_ADD_NUM_TARGETS = 0x80003000, | ||
| 273 | CREG_ADD_CONFIG = 0xB0000000, | ||
| 274 | }; | ||
| 275 | |||
| 276 | enum rsxx_creg_card_cmd { | ||
| 277 | CARD_CMD_STARTUP = 1, | ||
| 278 | CARD_CMD_SHUTDOWN = 2, | ||
| 279 | CARD_CMD_LOW_LEVEL_FORMAT = 3, | ||
| 280 | CARD_CMD_FPGA_RECONFIG_BR = 4, | ||
| 281 | CARD_CMD_FPGA_RECONFIG_MAIN = 5, | ||
| 282 | CARD_CMD_BACKUP = 6, | ||
| 283 | CARD_CMD_RESET = 7, | ||
| 284 | CARD_CMD_deprecated = 8, | ||
| 285 | CARD_CMD_UNINITIALIZE = 9, | ||
| 286 | CARD_CMD_DSTROY_EMERGENCY = 10, | ||
| 287 | CARD_CMD_DSTROY_NORMAL = 11, | ||
| 288 | CARD_CMD_DSTROY_EXTENDED = 12, | ||
| 289 | CARD_CMD_DSTROY_ABORT = 13, | ||
| 290 | }; | ||
| 291 | |||
| 292 | enum rsxx_card_state { | ||
| 293 | CARD_STATE_SHUTDOWN = 0x00000001, | ||
| 294 | CARD_STATE_STARTING = 0x00000002, | ||
| 295 | CARD_STATE_FORMATTING = 0x00000004, | ||
| 296 | CARD_STATE_UNINITIALIZED = 0x00000008, | ||
| 297 | CARD_STATE_GOOD = 0x00000010, | ||
| 298 | CARD_STATE_SHUTTING_DOWN = 0x00000020, | ||
| 299 | CARD_STATE_FAULT = 0x00000040, | ||
| 300 | CARD_STATE_RD_ONLY_FAULT = 0x00000080, | ||
| 301 | CARD_STATE_DSTROYING = 0x00000100, | ||
| 302 | }; | ||
| 303 | |||
| 304 | enum rsxx_led { | ||
| 305 | LED_DEFAULT = 0x0, | ||
| 306 | LED_IDENTIFY = 0x1, | ||
| 307 | LED_SOAK = 0x2, | ||
| 308 | }; | ||
| 309 | |||
| 310 | enum rsxx_creg_flash_lock { | ||
| 311 | CREG_FLASH_LOCK = 1, | ||
| 312 | CREG_FLASH_UNLOCK = 2, | ||
| 313 | }; | ||
| 314 | |||
| 315 | enum rsxx_card_capabilities { | ||
| 316 | CARD_CAP_SUBPAGE_WRITES = 0x00000080, | ||
| 317 | }; | ||
| 318 | |||
| 319 | enum rsxx_creg_stat { | ||
| 320 | CREG_STAT_STATUS_MASK = 0x00000003, | ||
| 321 | CREG_STAT_SUCCESS = 0x1, | ||
| 322 | CREG_STAT_ERROR = 0x2, | ||
| 323 | CREG_STAT_CHAR_PENDING = 0x00000004, /* Character I/O pending bit */ | ||
| 324 | CREG_STAT_LOG_PENDING = 0x00000008, /* HW log message pending bit */ | ||
| 325 | CREG_STAT_TAG_MASK = 0x0000ff00, | ||
| 326 | }; | ||
| 327 | |||
| 328 | static inline unsigned int CREG_DATA(int N) | ||
| 329 | { | ||
| 330 | return CREG_DATA0 + (N << 2); | ||
| 331 | } | ||
| 332 | |||
| 333 | /*----------------- Convenient Log Wrappers -------------------*/ | ||
| 334 | #define CARD_TO_DEV(__CARD) (&(__CARD)->dev->dev) | ||
| 335 | |||
| 336 | /***** config.c *****/ | ||
| 337 | int rsxx_load_config(struct rsxx_cardinfo *card); | ||
| 338 | |||
| 339 | /***** core.c *****/ | ||
| 340 | void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr); | ||
| 341 | void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr); | ||
| 342 | void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, | ||
| 343 | unsigned int intr); | ||
| 344 | void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card, | ||
| 345 | unsigned int intr); | ||
| 346 | |||
| 347 | /***** dev.c *****/ | ||
| 348 | int rsxx_attach_dev(struct rsxx_cardinfo *card); | ||
| 349 | void rsxx_detach_dev(struct rsxx_cardinfo *card); | ||
| 350 | int rsxx_setup_dev(struct rsxx_cardinfo *card); | ||
| 351 | void rsxx_destroy_dev(struct rsxx_cardinfo *card); | ||
| 352 | int rsxx_dev_init(void); | ||
| 353 | void rsxx_dev_cleanup(void); | ||
| 354 | |||
| 355 | /***** dma.c ****/ | ||
| 356 | typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card, | ||
| 357 | void *cb_data, | ||
| 358 | unsigned int status); | ||
| 359 | int rsxx_dma_setup(struct rsxx_cardinfo *card); | ||
| 360 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); | ||
| 361 | int rsxx_dma_init(void); | ||
| 362 | void rsxx_dma_cleanup(void); | ||
| 363 | int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | ||
| 364 | struct bio *bio, | ||
| 365 | atomic_t *n_dmas, | ||
| 366 | rsxx_dma_cb cb, | ||
| 367 | void *cb_data); | ||
| 368 | |||
| 369 | /***** cregs.c *****/ | ||
| 370 | int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, | ||
| 371 | unsigned int size8, | ||
| 372 | void *data, | ||
| 373 | int byte_stream); | ||
| 374 | int rsxx_creg_read(struct rsxx_cardinfo *card, | ||
| 375 | u32 addr, | ||
| 376 | unsigned int size8, | ||
| 377 | void *data, | ||
| 378 | int byte_stream); | ||
| 379 | int rsxx_read_hw_log(struct rsxx_cardinfo *card); | ||
| 380 | int rsxx_get_card_state(struct rsxx_cardinfo *card, | ||
| 381 | unsigned int *state); | ||
| 382 | int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8); | ||
| 383 | int rsxx_get_num_targets(struct rsxx_cardinfo *card, | ||
| 384 | unsigned int *n_targets); | ||
| 385 | int rsxx_get_card_capabilities(struct rsxx_cardinfo *card, | ||
| 386 | u32 *capabilities); | ||
| 387 | int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd); | ||
| 388 | int rsxx_creg_setup(struct rsxx_cardinfo *card); | ||
| 389 | void rsxx_creg_destroy(struct rsxx_cardinfo *card); | ||
| 390 | int rsxx_creg_init(void); | ||
| 391 | void rsxx_creg_cleanup(void); | ||
| 392 | |||
| 393 | int rsxx_reg_access(struct rsxx_cardinfo *card, | ||
| 394 | struct rsxx_reg_access __user *ucmd, | ||
| 395 | int read); | ||
| 396 | |||
| 397 | |||
| 398 | |||
| 399 | #endif /* __RSXX_PRIV_H__ */ | ||
diff --git a/drivers/block/xd.c b/drivers/block/xd.c deleted file mode 100644 index ff540520bada..000000000000 --- a/drivers/block/xd.c +++ /dev/null | |||
| @@ -1,1123 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * This file contains the driver for an XT hard disk controller | ||
| 3 | * (at least the DTC 5150X) for Linux. | ||
| 4 | * | ||
| 5 | * Author: Pat Mackinlay, pat@it.com.au | ||
| 6 | * Date: 29/09/92 | ||
| 7 | * | ||
| 8 | * Revised: 01/01/93, ... | ||
| 9 | * | ||
| 10 | * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, | ||
| 11 | * kevinf@agora.rain.com) | ||
| 12 | * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and | ||
| 13 | * Wim Van Dorst. | ||
| 14 | * | ||
| 15 | * Revised: 04/04/94 by Risto Kankkunen | ||
| 16 | * Moved the detection code from xd_init() to xd_geninit() as it needed | ||
| 17 | * interrupts enabled and Linus didn't want to enable them in that first | ||
| 18 | * phase. xd_geninit() is the place to do these kinds of things anyway, | ||
| 19 | * he says. | ||
| 20 | * | ||
| 21 | * Modularized: 04/10/96 by Todd Fries, tfries@umr.edu | ||
| 22 | * | ||
| 23 | * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl | ||
| 24 | * Fixed some problems with disk initialization and module initiation. | ||
| 25 | * Added support for manual geometry setting (except Seagate controllers) | ||
| 26 | * in form: | ||
| 27 | * xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>] | ||
| 28 | * Recovered DMA access. Abridged messages. Added support for DTC5051CX, | ||
| 29 | * WD1002-27X & XEBEC controllers. Driver uses now some jumper settings. | ||
| 30 | * Extended ioctl() support. | ||
| 31 | * | ||
| 32 | * Bugfix: 15/02/01, Paul G. - inform queue layer of tiny xd_maxsect. | ||
| 33 | * | ||
| 34 | */ | ||
| 35 | |||
| 36 | #include <linux/module.h> | ||
| 37 | #include <linux/errno.h> | ||
| 38 | #include <linux/interrupt.h> | ||
| 39 | #include <linux/mm.h> | ||
| 40 | #include <linux/fs.h> | ||
| 41 | #include <linux/kernel.h> | ||
| 42 | #include <linux/timer.h> | ||
| 43 | #include <linux/genhd.h> | ||
| 44 | #include <linux/hdreg.h> | ||
| 45 | #include <linux/ioport.h> | ||
| 46 | #include <linux/init.h> | ||
| 47 | #include <linux/wait.h> | ||
| 48 | #include <linux/blkdev.h> | ||
| 49 | #include <linux/mutex.h> | ||
| 50 | #include <linux/blkpg.h> | ||
| 51 | #include <linux/delay.h> | ||
| 52 | #include <linux/io.h> | ||
| 53 | #include <linux/gfp.h> | ||
| 54 | |||
| 55 | #include <asm/uaccess.h> | ||
| 56 | #include <asm/dma.h> | ||
| 57 | |||
| 58 | #include "xd.h" | ||
| 59 | |||
| 60 | static DEFINE_MUTEX(xd_mutex); | ||
| 61 | static void __init do_xd_setup (int *integers); | ||
| 62 | #ifdef MODULE | ||
| 63 | static int xd[5] = { -1,-1,-1,-1, }; | ||
| 64 | #endif | ||
| 65 | |||
| 66 | #define XD_DONT_USE_DMA 0 /* Initial value; may be overridden using | ||
| 67 | the "nodma" module option */ | ||
| 68 | #define XD_INIT_DISK_DELAY (30) /* 30 ms delay during disk initialization */ | ||
| 69 | |||
| 70 | /* The above may need to be increased if problems appear with 2nd drive | ||
| 71 | detection (ST11M controller) or with resetting a controller (WD) */ | ||
| 72 | |||
| 73 | static XD_INFO xd_info[XD_MAXDRIVES]; | ||
| 74 | |||
| 75 | /* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS | ||
| 76 | signature and details to the following list of signatures. A BIOS signature is a string embedded into the first | ||
| 77 | few bytes of your controller's on-board ROM BIOS. To find out what yours is, use something like MS-DOS's DEBUG | ||
| 78 | command. Run DEBUG, and then you can examine your BIOS signature with: | ||
| 79 | |||
| 80 | d xxxx:0000 | ||
| 81 | |||
| 82 | where xxxx is the segment of your controller (like C800 or D000 or something). On the ASCII dump at the right, you should | ||
| 83 | be able to see a string mentioning the manufacturer's copyright etc. Add this string into the table below. The parameters | ||
| 84 | in the table are, in order: | ||
| 85 | |||
| 86 | offset ; this is the offset (in bytes) from the start of your ROM where the signature starts | ||
| 87 | signature ; this is the actual text of the signature | ||
| 88 | xd_?_init_controller ; this is the controller init routine used by your controller | ||
| 89 | xd_?_init_drive ; this is the drive init routine used by your controller | ||
| 90 | |||
| 91 | The controllers directly supported at the moment are: DTC 5150x, WD 1004A27X, ST11M/R and override. If your controller is | ||
| 92 | made by the same manufacturer as one of these, try using the same init routines as they do. If that doesn't work, your | ||
| 93 | best bet is to use the "override" routines. These routines use a "portable" method of getting the disk's geometry, and | ||
| 94 | may work with your card. If none of these seem to work, try sending me some email and I'll see what I can do <grin>. | ||
| 95 | |||
| 96 | NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver | ||
| 97 | should be able to detect your drive's geometry from this info. (e.g. xd=0,5,0x320,3 is the "standard"). */ | ||
| 98 | |||
| 99 | #include <asm/page.h> | ||
| 100 | #define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size)) | ||
| 101 | #define xd_dma_mem_free(addr, size) free_pages(addr, get_order(size)) | ||
| 102 | static char *xd_dma_buffer; | ||
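A quick sizing sanity check for these helpers (assuming the usual 4 KiB pages):

	/* With xd_maxsectors == 0x40 (Seagate/OMTI) the bounce buffer is
	 * 0x40 * 0x200 = 32 KiB; get_order(0x8000) == 3, so __get_dma_pages()
	 * returns a physically contiguous order-3 (8-page) block from the
	 * ISA-reachable DMA zone.
	 */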
| 103 | |||
| 104 | static XD_SIGNATURE xd_sigs[] __initdata = { | ||
| 105 | { 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */ | ||
| 106 | { 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */ | ||
| 107 | { 0x000B,"CRD18A Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */ | ||
| 108 | { 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */ | ||
| 109 | { 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */ | ||
| 110 | { 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */ | ||
| 111 | { 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */ | ||
| 112 | { 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */ | ||
| 113 | { 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */ | ||
| 114 | { 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */ | ||
| 115 | { 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */ | ||
| 116 | { 0x0008,"(C) Copyright 1984 Western Digital Corp", xd_wd_init_controller, xd_wd_init_drive," Western Dig. 1002s-wx2" }, | ||
| 117 | { 0x0008,"(C) Copyright 1986 Western Digital Corporation", xd_wd_init_controller, xd_wd_init_drive," 1986 Western Digital" }, /* jfree@sovereign.org */ | ||
| 118 | }; | ||
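Following the recipe in the long comment above, a new board would get one extra row here; a purely hypothetical example (offset, signature text and name all invented for illustration):

	{ 0x0010,"(C) 1985 ACME Disk Systems",NULL,xd_override_init_drive," hypothetical ACME XT" },

Note how each name string starts with a space (or with "n "), so that the "Detected a%s controller" printk in xd_init() reads as "Detected a ..." or "Detected an ...".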
| 119 | |||
| 120 | static unsigned int xd_bases[] __initdata = | ||
| 121 | { | ||
| 122 | 0xC8000, 0xCA000, 0xCC000, | ||
| 123 | 0xCE000, 0xD0000, 0xD2000, | ||
| 124 | 0xD4000, 0xD6000, 0xD8000, | ||
| 125 | 0xDA000, 0xDC000, 0xDE000, | ||
| 126 | 0xE0000 | ||
| 127 | }; | ||
| 128 | |||
| 129 | static DEFINE_SPINLOCK(xd_lock); | ||
| 130 | |||
| 131 | static struct gendisk *xd_gendisk[2]; | ||
| 132 | |||
| 133 | static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo); | ||
| 134 | |||
| 135 | static const struct block_device_operations xd_fops = { | ||
| 136 | .owner = THIS_MODULE, | ||
| 137 | .ioctl = xd_ioctl, | ||
| 138 | .getgeo = xd_getgeo, | ||
| 139 | }; | ||
| 140 | static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int); | ||
| 141 | static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors; | ||
| 142 | static u_char xd_override __initdata = 0, xd_type __initdata = 0; | ||
| 143 | static u_short xd_iobase = 0x320; | ||
| 144 | static int xd_geo[XD_MAXDRIVES*3] __initdata = { 0, }; | ||
| 145 | |||
| 146 | static volatile int xdc_busy; | ||
| 147 | static struct timer_list xd_watchdog_int; | ||
| 148 | |||
| 149 | static volatile u_char xd_error; | ||
| 150 | static bool nodma = XD_DONT_USE_DMA; | ||
| 151 | |||
| 152 | static struct request_queue *xd_queue; | ||
| 153 | |||
| 154 | /* xd_init: register the block device number and set up pointer tables */ | ||
| 155 | static int __init xd_init(void) | ||
| 156 | { | ||
| 157 | u_char i,controller; | ||
| 158 | unsigned int address; | ||
| 159 | int err; | ||
| 160 | |||
| 161 | #ifdef MODULE | ||
| 162 | { | ||
| 163 | u_char count = 0; | ||
| 164 | for (i = 4; i > 0; i--) | ||
| 165 | if (((xd[i] = xd[i-1]) >= 0) && !count) | ||
| 166 | count = i; | ||
| 167 | if ((xd[0] = count)) | ||
| 168 | do_xd_setup(xd); | ||
| 169 | } | ||
| 170 | #endif | ||
| 171 | |||
| 172 | init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog; | ||
| 173 | |||
| 174 | err = -EBUSY; | ||
| 175 | if (register_blkdev(XT_DISK_MAJOR, "xd")) | ||
| 176 | goto out1; | ||
| 177 | |||
| 178 | err = -ENOMEM; | ||
| 179 | xd_queue = blk_init_queue(do_xd_request, &xd_lock); | ||
| 180 | if (!xd_queue) | ||
| 181 | goto out1a; | ||
| 182 | |||
| 183 | if (xd_detect(&controller,&address)) { | ||
| 184 | |||
| 185 | printk("Detected a%s controller (type %d) at address %06x\n", | ||
| 186 | xd_sigs[controller].name,controller,address); | ||
| 187 | if (!request_region(xd_iobase,4,"xd")) { | ||
| 188 | printk("xd: Ports at 0x%x are not available\n", | ||
| 189 | xd_iobase); | ||
| 190 | goto out2; | ||
| 191 | } | ||
| 192 | if (controller) | ||
| 193 | xd_sigs[controller].init_controller(address); | ||
| 194 | xd_drives = xd_initdrives(xd_sigs[controller].init_drive); | ||
| 195 | |||
| 196 | printk("Detected %d hard drive%s (using IRQ%d & DMA%d)\n", | ||
| 197 | xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma); | ||
| 198 | } | ||
| 199 | |||
| 200 | /* | ||
| 201 | * With the drive detected, xd_maxsectors should now be known. | ||
| 202 | * If xd_maxsectors is 0, nothing was detected and we fall through | ||
| 203 | * to return -ENODEV | ||
| 204 | */ | ||
| 205 | if (!xd_dma_buffer && xd_maxsectors) { | ||
| 206 | xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); | ||
| 207 | if (!xd_dma_buffer) { | ||
| 208 | printk(KERN_ERR "xd: Out of memory.\n"); | ||
| 209 | goto out3; | ||
| 210 | } | ||
| 211 | } | ||
| 212 | |||
| 213 | err = -ENODEV; | ||
| 214 | if (!xd_drives) | ||
| 215 | goto out3; | ||
| 216 | |||
| 217 | for (i = 0; i < xd_drives; i++) { | ||
| 218 | XD_INFO *p = &xd_info[i]; | ||
| 219 | struct gendisk *disk = alloc_disk(64); | ||
| 220 | if (!disk) | ||
| 221 | goto Enomem; | ||
| 222 | p->unit = i; | ||
| 223 | disk->major = XT_DISK_MAJOR; | ||
| 224 | disk->first_minor = i<<6; | ||
| 225 | sprintf(disk->disk_name, "xd%c", i+'a'); | ||
| 226 | disk->fops = &xd_fops; | ||
| 227 | disk->private_data = p; | ||
| 228 | disk->queue = xd_queue; | ||
| 229 | set_capacity(disk, p->heads * p->cylinders * p->sectors); | ||
| 230 | printk(" %s: CHS=%d/%d/%d\n", disk->disk_name, | ||
| 231 | p->cylinders, p->heads, p->sectors); | ||
| 232 | xd_gendisk[i] = disk; | ||
| 233 | } | ||
| 234 | |||
| 235 | err = -EBUSY; | ||
| 236 | if (request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) { | ||
| 237 | printk("xd: unable to get IRQ%d\n",xd_irq); | ||
| 238 | goto out4; | ||
| 239 | } | ||
| 240 | |||
| 241 | if (request_dma(xd_dma,"xd")) { | ||
| 242 | printk("xd: unable to get DMA%d\n",xd_dma); | ||
| 243 | goto out5; | ||
| 244 | } | ||
| 245 | |||
| 246 | /* xd_maxsectors depends on controller - so set after detection */ | ||
| 247 | blk_queue_max_hw_sectors(xd_queue, xd_maxsectors); | ||
| 248 | |||
| 249 | for (i = 0; i < xd_drives; i++) | ||
| 250 | add_disk(xd_gendisk[i]); | ||
| 251 | |||
| 252 | return 0; | ||
| 253 | |||
| 254 | out5: | ||
| 255 | free_irq(xd_irq, NULL); | ||
| 256 | out4: | ||
| 257 | for (i = 0; i < xd_drives; i++) | ||
| 258 | put_disk(xd_gendisk[i]); | ||
| 259 | out3: | ||
| 260 | if (xd_maxsectors) | ||
| 261 | release_region(xd_iobase,4); | ||
| 262 | |||
| 263 | if (xd_dma_buffer) | ||
| 264 | xd_dma_mem_free((unsigned long)xd_dma_buffer, | ||
| 265 | xd_maxsectors * 0x200); | ||
| 266 | out2: | ||
| 267 | blk_cleanup_queue(xd_queue); | ||
| 268 | out1a: | ||
| 269 | unregister_blkdev(XT_DISK_MAJOR, "xd"); | ||
| 270 | out1: | ||
| 271 | return err; | ||
| 272 | Enomem: | ||
| 273 | err = -ENOMEM; | ||
| 274 | while (i--) | ||
| 275 | put_disk(xd_gendisk[i]); | ||
| 276 | goto out3; | ||
| 277 | } | ||
| 278 | |||
| 279 | /* xd_detect: scan the possible BIOS ROM locations for the signature strings */ | ||
| 280 | static u_char __init xd_detect (u_char *controller, unsigned int *address) | ||
| 281 | { | ||
| 282 | int i, j; | ||
| 283 | |||
| 284 | if (xd_override) | ||
| 285 | { | ||
| 286 | *controller = xd_type; | ||
| 287 | *address = 0; | ||
| 288 | return(1); | ||
| 289 | } | ||
| 290 | |||
| 291 | for (i = 0; i < ARRAY_SIZE(xd_bases); i++) { | ||
| 292 | void __iomem *p = ioremap(xd_bases[i], 0x2000); | ||
| 293 | if (!p) | ||
| 294 | continue; | ||
| 295 | for (j = 1; j < ARRAY_SIZE(xd_sigs); j++) { | ||
| 296 | const char *s = xd_sigs[j].string; | ||
| 297 | if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) { | ||
| 298 | *controller = j; | ||
| 299 | xd_type = j; | ||
| 300 | *address = xd_bases[i]; | ||
| 301 | iounmap(p); | ||
| 302 | return 1; | ||
| 303 | } | ||
| 304 | } | ||
| 305 | iounmap(p); | ||
| 306 | } | ||
| 307 | return 0; | ||
| 308 | } | ||
| 309 | |||
| 310 | /* do_xd_request: handle an incoming request */ | ||
| 311 | static void do_xd_request (struct request_queue * q) | ||
| 312 | { | ||
| 313 | struct request *req; | ||
| 314 | |||
| 315 | if (xdc_busy) | ||
| 316 | return; | ||
| 317 | |||
| 318 | req = blk_fetch_request(q); | ||
| 319 | while (req) { | ||
| 320 | unsigned block = blk_rq_pos(req); | ||
| 321 | unsigned count = blk_rq_cur_sectors(req); | ||
| 322 | XD_INFO *disk = req->rq_disk->private_data; | ||
| 323 | int res = -EIO; | ||
| 324 | int retry; | ||
| 325 | |||
| 326 | if (req->cmd_type != REQ_TYPE_FS) | ||
| 327 | goto done; | ||
| 328 | if (block + count > get_capacity(req->rq_disk)) | ||
| 329 | goto done; | ||
| 330 | for (retry = 0; (retry < XD_RETRIES) && res; retry++) /* retry while the transfer still fails */ | ||
| 331 | res = xd_readwrite(rq_data_dir(req), disk, req->buffer, | ||
| 332 | block, count); | ||
| 333 | done: | ||
| 334 | /* wrap up, 0 = success, -errno = fail */ | ||
| 335 | if (!__blk_end_request_cur(req, res)) | ||
| 336 | req = blk_fetch_request(q); | ||
| 337 | } | ||
| 338 | } | ||
| 339 | |||
| 340 | static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | ||
| 341 | { | ||
| 342 | XD_INFO *p = bdev->bd_disk->private_data; | ||
| 343 | |||
| 344 | geo->heads = p->heads; | ||
| 345 | geo->sectors = p->sectors; | ||
| 346 | geo->cylinders = p->cylinders; | ||
| 347 | return 0; | ||
| 348 | } | ||
| 349 | |||
| 350 | /* xd_locked_ioctl: handle device ioctls (called under xd_mutex) */ | ||
| 351 | static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg) | ||
| 352 | { | ||
| 353 | switch (cmd) { | ||
| 354 | case HDIO_SET_DMA: | ||
| 355 | if (!capable(CAP_SYS_ADMIN)) return -EACCES; | ||
| 356 | if (xdc_busy) return -EBUSY; | ||
| 357 | nodma = !arg; | ||
| 358 | if (nodma && xd_dma_buffer) { | ||
| 359 | xd_dma_mem_free((unsigned long)xd_dma_buffer, | ||
| 360 | xd_maxsectors * 0x200); | ||
| 361 | xd_dma_buffer = NULL; | ||
| 362 | } else if (!nodma && !xd_dma_buffer) { | ||
| 363 | xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); | ||
| 364 | if (!xd_dma_buffer) { | ||
| 365 | nodma = XD_DONT_USE_DMA; | ||
| 366 | return -ENOMEM; | ||
| 367 | } | ||
| 368 | } | ||
| 369 | return 0; | ||
| 370 | case HDIO_GET_DMA: | ||
| 371 | return put_user(!nodma, (long __user *) arg); | ||
| 372 | case HDIO_GET_MULTCOUNT: | ||
| 373 | return put_user(xd_maxsectors, (long __user *) arg); | ||
| 374 | default: | ||
| 375 | return -EINVAL; | ||
| 376 | } | ||
| 377 | } | ||
| 378 | |||
| 379 | static int xd_ioctl(struct block_device *bdev, fmode_t mode, | ||
| 380 | unsigned int cmd, unsigned long param) | ||
| 381 | { | ||
| 382 | int ret; | ||
| 383 | |||
| 384 | mutex_lock(&xd_mutex); | ||
| 385 | ret = xd_locked_ioctl(bdev, mode, cmd, param); | ||
| 386 | mutex_unlock(&xd_mutex); | ||
| 387 | |||
| 388 | return ret; | ||
| 389 | } | ||
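For reference, the DMA toggle handled above is reachable from user space through the generic HDIO ioctls; a minimal sketch (device node and error handling assumed/omitted):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/hdreg.h>

	int fd = open("/dev/xda", O_RDONLY);
	long dma;
	ioctl(fd, HDIO_GET_DMA, &dma);	/* returns !nodma through the pointer */
	ioctl(fd, HDIO_SET_DMA, 1L);	/* any non-zero arg enables DMA (nodma = !arg) */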
| 390 | |||
| 391 | /* xd_readwrite: handle a read/write request */ | ||
| 392 | static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count) | ||
| 393 | { | ||
| 394 | int drive = p->unit; | ||
| 395 | u_char cmdblk[6],sense[4]; | ||
| 396 | u_short track,cylinder; | ||
| 397 | u_char head,sector,control,mode = PIO_MODE,temp; | ||
| 398 | char **real_buffer; | ||
| 399 | register int i; | ||
| 400 | |||
| 401 | #ifdef DEBUG_READWRITE | ||
| 402 | printk("xd_readwrite: operation = %s, drive = %d, buffer = %p, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count); | ||
| 403 | #endif /* DEBUG_READWRITE */ | ||
| 404 | |||
| 405 | spin_unlock_irq(&xd_lock); | ||
| 406 | |||
| 407 | control = p->control; | ||
| 408 | if (!xd_dma_buffer) | ||
| 409 | xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); | ||
| 410 | while (count) { | ||
| 411 | temp = count < xd_maxsectors ? count : xd_maxsectors; | ||
| 412 | |||
| 413 | track = block / p->sectors; | ||
| 414 | head = track % p->heads; | ||
| 415 | cylinder = track / p->heads; | ||
| 416 | sector = block % p->sectors; | ||
| 417 | |||
| 418 | #ifdef DEBUG_READWRITE | ||
| 419 | printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp); | ||
| 420 | #endif /* DEBUG_READWRITE */ | ||
| 421 | |||
| 422 | if (xd_dma_buffer) { | ||
| 423 | mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200); | ||
| 424 | real_buffer = &xd_dma_buffer; | ||
| 425 | for (i=0; i < (temp * 0x200); i++) | ||
| 426 | xd_dma_buffer[i] = buffer[i]; | ||
| 427 | } | ||
| 428 | else | ||
| 429 | real_buffer = &buffer; | ||
| 430 | |||
| 431 | xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control); | ||
| 432 | |||
| 433 | switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) { | ||
| 434 | case 1: | ||
| 435 | printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); | ||
| 436 | xd_recalibrate(drive); | ||
| 437 | spin_lock_irq(&xd_lock); | ||
| 438 | return -EIO; | ||
| 439 | case 2: | ||
| 440 | if (sense[0] & 0x30) { | ||
| 441 | printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing")); | ||
| 442 | switch ((sense[0] & 0x30) >> 4) { | ||
| 443 | case 0: printk("drive error, code = 0x%X",sense[0] & 0x0F); | ||
| 444 | break; | ||
| 445 | case 1: printk("controller error, code = 0x%X",sense[0] & 0x0F); | ||
| 446 | break; | ||
| 447 | case 2: printk("command error, code = 0x%X",sense[0] & 0x0F); | ||
| 448 | break; | ||
| 449 | case 3: printk("miscellaneous error, code = 0x%X",sense[0] & 0x0F); | ||
| 450 | break; | ||
| 451 | } | ||
| 452 | } | ||
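			/* Sense address layout, as decoded below: bit 7 of sense[0]
			 * flags a valid address; cylinder = ((sense[2] & 0xC0) << 2) |
			 * sense[3], head = sense[1] & 0x1F, sector = sense[2] & 0x3F,
			 * and the reporting drive sits in (sense[1] & 0xE0) >> 5. */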
| 453 | if (sense[0] & 0x80) | ||
| 454 | printk(" - CHS = %d/%d/%d\n",((sense[2] & 0xC0) << 2) | sense[3],sense[1] & 0x1F,sense[2] & 0x3F); | ||
| 455 | /* reported drive number = (sense[1] & 0xE0) >> 5 */ | ||
| 456 | else | ||
| 457 | printk(" - no valid disk address\n"); | ||
| 458 | spin_lock_irq(&xd_lock); | ||
| 459 | return -EIO; | ||
| 460 | } | ||
| 461 | if (xd_dma_buffer) | ||
| 462 | for (i=0; i < (temp * 0x200); i++) | ||
| 463 | buffer[i] = xd_dma_buffer[i]; | ||
| 464 | |||
| 465 | count -= temp; buffer += temp * 0x200; block += temp; | ||
| 466 | } | ||
| 467 | spin_lock_irq(&xd_lock); | ||
| 468 | return 0; | ||
| 469 | } | ||
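To make the CHS arithmetic in the loop above concrete, a worked example with an invented geometry of 4 heads and 17 sectors per track:

	/* block = 100:
	 *   track    = 100 / 17 = 5
	 *   head     =   5 %  4 = 1
	 *   cylinder =   5 /  4 = 1
	 *   sector   = 100 % 17 = 15
	 * so linear block 100 lives at CHS 1/1/15.
	 */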
| 470 | |||
| 471 | /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ | ||
| 472 | static void xd_recalibrate (u_char drive) | ||
| 473 | { | ||
| 474 | u_char cmdblk[6]; | ||
| 475 | |||
| 476 | xd_build(cmdblk,CMD_RECALIBRATE,drive,0,0,0,0,0); | ||
| 477 | if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 8)) | ||
| 478 | printk("xd%c: warning! error recalibrating, controller may be unstable\n", 'a'+drive); | ||
| 479 | } | ||
| 480 | |||
| 481 | /* xd_interrupt_handler: interrupt service routine */ | ||
| 482 | static irqreturn_t xd_interrupt_handler(int irq, void *dev_id) | ||
| 483 | { | ||
| 484 | if (inb(XD_STATUS) & STAT_INTERRUPT) { /* check if it was our device */ | ||
| 485 | #ifdef DEBUG_OTHER | ||
| 486 | printk("xd_interrupt_handler: interrupt detected\n"); | ||
| 487 | #endif /* DEBUG_OTHER */ | ||
| 488 | outb(0,XD_CONTROL); /* acknowledge interrupt */ | ||
| 489 | wake_up(&xd_wait_int); /* and wake up sleeping processes */ | ||
| 490 | return IRQ_HANDLED; | ||
| 491 | } | ||
| 492 | else | ||
| 493 | printk("xd: unexpected interrupt\n"); | ||
| 494 | return IRQ_NONE; | ||
| 495 | } | ||
| 496 | |||
| 497 | /* xd_setup_dma: set up the DMA controller for a data transfer */ | ||
| 498 | static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count) | ||
| 499 | { | ||
| 500 | unsigned long f; | ||
| 501 | |||
| 502 | if (nodma) | ||
| 503 | return (PIO_MODE); | ||
| 504 | if (((unsigned long) buffer & 0xFFFF0000) != (((unsigned long) buffer + count) & 0xFFFF0000)) { | ||
| 505 | #ifdef DEBUG_OTHER | ||
| 506 | printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n"); | ||
| 507 | #endif /* DEBUG_OTHER */ | ||
| 508 | return (PIO_MODE); | ||
| 509 | } | ||
| 510 | |||
| 511 | f=claim_dma_lock(); | ||
| 512 | disable_dma(xd_dma); | ||
| 513 | clear_dma_ff(xd_dma); | ||
| 514 | set_dma_mode(xd_dma,mode); | ||
| 515 | set_dma_addr(xd_dma, (unsigned long) buffer); | ||
| 516 | set_dma_count(xd_dma,count); | ||
| 517 | |||
| 518 | release_dma_lock(f); | ||
| 519 | |||
| 520 | return (DMA_MODE); /* use DMA and INT */ | ||
| 521 | } | ||
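The address test above compares the upper 16 bits of the first and last byte of the transfer: the 8237 ISA DMA controller carries only a 16-bit address counter plus a page register, so a transfer must not cross a 64 KiB boundary. A worked example (addresses invented):

	/* buffer = 0x000FFF00, count = 0x200:
	 *   0x000FFF00 & 0xFFFF0000 = 0x000F0000
	 *   0x00100100 & 0xFFFF0000 = 0x00100000   -> masks differ, use PIO
	 */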
| 522 | |||
| 523 | /* xd_build: put stuff into an array in a format suitable for the controller */ | ||
| 524 | static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control) | ||
| 525 | { | ||
| 526 | cmdblk[0] = command; | ||
| 527 | cmdblk[1] = ((drive & 0x07) << 5) | (head & 0x1F); | ||
| 528 | cmdblk[2] = ((cylinder & 0x300) >> 2) | (sector & 0x3F); | ||
| 529 | cmdblk[3] = cylinder & 0xFF; | ||
| 530 | cmdblk[4] = count; | ||
| 531 | cmdblk[5] = control; | ||
| 532 | |||
| 533 | return (cmdblk); | ||
| 534 | } | ||
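As a sanity check on the packing above, one hypothetical command (all values invented): CMD_READ (0x08), drive 1, head 3, cylinder 0x234, sector 9, count 1, control 5:

	/* cmdblk[1] = (1 << 5) | 3               = 0x23
	 * cmdblk[2] = ((0x234 & 0x300) >> 2) | 9 = 0x89
	 * cmdblk[3] = 0x234 & 0xFF               = 0x34
	 * giving the 6-byte block 08 23 89 34 01 05.
	 */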
| 535 | |||
| 536 | static void xd_watchdog (unsigned long unused) | ||
| 537 | { | ||
| 538 | xd_error = 1; | ||
| 539 | wake_up(&xd_wait_int); | ||
| 540 | } | ||
| 541 | |||
| 542 | /* xd_waitport: waits until port & mask == flags or a timeout occurs. return 1 for a timeout */ | ||
| 543 | static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout) | ||
| 544 | { | ||
| 545 | u_long expiry = jiffies + timeout; | ||
| 546 | int success; | ||
| 547 | |||
| 548 | xdc_busy = 1; | ||
| 549 | while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) | ||
| 550 | schedule_timeout_uninterruptible(1); | ||
| 551 | xdc_busy = 0; | ||
| 552 | return (success); | ||
| 553 | } | ||
| 554 | |||
| 555 | static inline u_int xd_wait_for_IRQ (void) | ||
| 556 | { | ||
| 557 | unsigned long flags; | ||
| 558 | xd_watchdog_int.expires = jiffies + 8 * HZ; | ||
| 559 | add_timer(&xd_watchdog_int); | ||
| 560 | |||
| 561 | flags=claim_dma_lock(); | ||
| 562 | enable_dma(xd_dma); | ||
| 563 | release_dma_lock(flags); | ||
| 564 | |||
| 565 | sleep_on(&xd_wait_int); | ||
| 566 | del_timer(&xd_watchdog_int); | ||
| 567 | xdc_busy = 0; | ||
| 568 | |||
| 569 | flags=claim_dma_lock(); | ||
| 570 | disable_dma(xd_dma); | ||
| 571 | release_dma_lock(flags); | ||
| 572 | |||
| 573 | if (xd_error) { | ||
| 574 | printk("xd: missed IRQ - command aborted\n"); | ||
| 575 | xd_error = 0; | ||
| 576 | return (1); | ||
| 577 | } | ||
| 578 | return (0); | ||
| 579 | } | ||
| 580 | |||
| 581 | /* xd_command: handle all data transfers necessary for a single command */ | ||
| 582 | static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout) | ||
| 583 | { | ||
| 584 | u_char cmdblk[6],csb,complete = 0; | ||
| 585 | |||
| 586 | #ifdef DEBUG_COMMAND | ||
| 587 | printk("xd_command: command = %p, mode = 0x%X, indata = %p, outdata = %p, sense = %p\n",command,mode,indata,outdata,sense); | ||
| 588 | #endif /* DEBUG_COMMAND */ | ||
| 589 | |||
| 590 | outb(0,XD_SELECT); | ||
| 591 | outb(mode,XD_CONTROL); | ||
| 592 | |||
| 593 | if (xd_waitport(XD_STATUS,STAT_SELECT,STAT_SELECT,timeout)) | ||
| 594 | return (1); | ||
| 595 | |||
| 596 | while (!complete) { | ||
| 597 | if (xd_waitport(XD_STATUS,STAT_READY,STAT_READY,timeout)) | ||
| 598 | return (1); | ||
| 599 | |||
| 600 | switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) { | ||
| 601 | case 0: | ||
| 602 | if (mode == DMA_MODE) { | ||
| 603 | if (xd_wait_for_IRQ()) | ||
| 604 | return (1); | ||
| 605 | } else | ||
| 606 | outb(outdata ? *outdata++ : 0,XD_DATA); | ||
| 607 | break; | ||
| 608 | case STAT_INPUT: | ||
| 609 | if (mode == DMA_MODE) { | ||
| 610 | if (xd_wait_for_IRQ()) | ||
| 611 | return (1); | ||
| 612 | } else | ||
| 613 | if (indata) | ||
| 614 | *indata++ = inb(XD_DATA); | ||
| 615 | else | ||
| 616 | inb(XD_DATA); | ||
| 617 | break; | ||
| 618 | case STAT_COMMAND: | ||
| 619 | outb(command ? *command++ : 0,XD_DATA); | ||
| 620 | break; | ||
| 621 | case STAT_COMMAND | STAT_INPUT: | ||
| 622 | complete = 1; | ||
| 623 | break; | ||
| 624 | } | ||
| 625 | } | ||
| 626 | csb = inb(XD_DATA); | ||
| 627 | |||
| 628 | if (xd_waitport(XD_STATUS,0,STAT_SELECT,timeout)) /* wait until deselected */ | ||
| 629 | return (1); | ||
| 630 | |||
| 631 | if (csb & CSB_ERROR) { /* read sense data if error */ | ||
| 632 | xd_build(cmdblk,CMD_SENSE,(csb & CSB_LUN) >> 5,0,0,0,0,0); | ||
| 633 | if (xd_command(cmdblk,0,sense,NULL,NULL,XD_TIMEOUT)) | ||
| 634 | printk("xd: warning! sense command failed!\n"); | ||
| 635 | } | ||
| 636 | |||
| 637 | #ifdef DEBUG_COMMAND | ||
| 638 | printk("xd_command: completed with csb = 0x%X\n",csb); | ||
| 639 | #endif /* DEBUG_COMMAND */ | ||
| 640 | |||
| 641 | return (csb & CSB_ERROR); | ||
| 642 | } | ||
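The handshake loop above is driven entirely by the (STAT_COMMAND | STAT_INPUT) pair read from the status port; the four phases decode as:

	/* STAT_COMMAND  STAT_INPUT   meaning
	 *      0            0        controller wants a data byte   (host -> ctrl)
	 *      0            1        controller offers a data byte  (ctrl -> host)
	 *      1            0        controller wants the next command byte
	 *      1            1        command complete; CSB byte follows
	 */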
| 643 | |||
| 644 | static u_char __init xd_initdrives (void (*init_drive)(u_char drive)) | ||
| 645 | { | ||
| 646 | u_char cmdblk[6],i,count = 0; | ||
| 647 | |||
| 648 | for (i = 0; i < XD_MAXDRIVES; i++) { | ||
| 649 | xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0); | ||
| 650 | if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) { | ||
| 651 | msleep_interruptible(XD_INIT_DISK_DELAY); | ||
| 652 | |||
| 653 | init_drive(count); | ||
| 654 | count++; | ||
| 655 | |||
| 656 | msleep_interruptible(XD_INIT_DISK_DELAY); | ||
| 657 | } | ||
| 658 | } | ||
| 659 | return (count); | ||
| 660 | } | ||
| 661 | |||
| 662 | static void __init xd_manual_geo_set (u_char drive) | ||
| 663 | { | ||
| 664 | xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]); | ||
| 665 | xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]); | ||
| 666 | xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]); | ||
| 667 | } | ||
| 668 | |||
| 669 | static void __init xd_dtc_init_controller (unsigned int address) | ||
| 670 | { | ||
| 671 | switch (address) { | ||
| 672 | case 0x00000: | ||
| 673 | case 0xC8000: break; /*initial: 0x320 */ | ||
| 674 | case 0xCA000: xd_iobase = 0x324; /* fall through */ | ||
| 675 | case 0xD0000: /*5150CX*/ | ||
| 676 | case 0xD8000: break; /*5150CX & 5150XL*/ | ||
| 677 | default: printk("xd_dtc_init_controller: unsupported BIOS address %06x\n",address); | ||
| 678 | break; | ||
| 679 | } | ||
| 680 | xd_maxsectors = 0x01; /* my card seems to have trouble doing multi-block transfers? */ | ||
| 681 | |||
| 682 | outb(0,XD_RESET); /* reset the controller */ | ||
| 683 | } | ||
| 684 | |||
| 685 | |||
| 686 | static void __init xd_dtc5150cx_init_drive (u_char drive) | ||
| 687 | { | ||
| 688 | /* values from controller's BIOS - BIOS chip may be removed */ | ||
| 689 | static u_short geometry_table[][4] = { | ||
| 690 | {0x200,8,0x200,0x100}, | ||
| 691 | {0x267,2,0x267,0x267}, | ||
| 692 | {0x264,4,0x264,0x80}, | ||
| 693 | {0x132,4,0x132,0x0}, | ||
| 694 | {0x132,2,0x80, 0x132}, | ||
| 695 | {0x177,8,0x177,0x0}, | ||
| 696 | {0x132,8,0x84, 0x0}, | ||
| 697 | {}, /* not used */ | ||
| 698 | {0x132,6,0x80, 0x100}, | ||
| 699 | {0x200,6,0x100,0x100}, | ||
| 700 | {0x264,2,0x264,0x80}, | ||
| 701 | {0x280,4,0x280,0x100}, | ||
| 702 | {0x2B9,3,0x2B9,0x2B9}, | ||
| 703 | {0x2B9,5,0x2B9,0x2B9}, | ||
| 704 | {0x280,6,0x280,0x100}, | ||
| 705 | {0x132,4,0x132,0x0}}; | ||
| 706 | u_char n; | ||
| 707 | |||
| 708 | n = inb(XD_JUMPER); | ||
| 709 | n = (drive ? n : (n >> 2)) & 0x33; | ||
| 710 | n = (n | (n >> 2)) & 0x0F; | ||
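	/* The two lines above pick out this drive's four jumper bits (bits
	 * 0,1,4,5 of the jumper byte for drive 1; bits 2,3,6,7 for drive 0)
	 * and compact them into a 4-bit index into geometry_table[]. */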
| 711 | if (xd_geo[3*drive]) | ||
| 712 | xd_manual_geo_set(drive); | ||
| 713 | else | ||
| 714 | if (n != 7) { | ||
| 715 | xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */ | ||
| 716 | xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */ | ||
| 717 | xd_info[drive].sectors = 17; /* sectors */ | ||
| 718 | #if 0 | ||
| 719 | xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */ | ||
| 720 | xd_info[drive].precomp = geometry_table[n][3]; /* write precomp */ | ||
| 721 | xd_info[drive].ecc = 0x0B; /* ecc length */ | ||
| 722 | #endif /* 0 */ | ||
| 723 | } | ||
| 724 | else { | ||
| 725 | printk("xd%c: undetermined drive geometry\n",'a'+drive); | ||
| 726 | return; | ||
| 727 | } | ||
| 728 | xd_info[drive].control = 5; /* control byte */ | ||
| 729 | xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B); | ||
| 730 | xd_recalibrate(drive); | ||
| 731 | } | ||
| 732 | |||
| 733 | static void __init xd_dtc_init_drive (u_char drive) | ||
| 734 | { | ||
| 735 | u_char cmdblk[6],buf[64]; | ||
| 736 | |||
| 737 | xd_build(cmdblk,CMD_DTCGETGEOM,drive,0,0,0,0,0); | ||
| 738 | if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) { | ||
| 739 | xd_info[drive].heads = buf[0x0A]; /* heads */ | ||
| 740 | xd_info[drive].cylinders = ((u_short *) (buf))[0x04]; /* cylinders */ | ||
| 741 | xd_info[drive].sectors = 17; /* sectors */ | ||
| 742 | if (xd_geo[3*drive]) | ||
| 743 | xd_manual_geo_set(drive); | ||
| 744 | #if 0 | ||
| 745 | xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05]; /* reduced write */ | ||
| 746 | xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06]; /* write precomp */ | ||
| 747 | xd_info[drive].ecc = buf[0x0F]; /* ecc length */ | ||
| 748 | #endif /* 0 */ | ||
| 749 | xd_info[drive].control = 0; /* control byte */ | ||
| 750 | |||
| 751 | xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf + 1))[0x05],((u_short *) (buf + 1))[0x06],buf[0x0F]); | ||
| 752 | xd_build(cmdblk,CMD_DTCSETSTEP,drive,0,0,0,0,7); | ||
| 753 | if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2)) | ||
| 754 | printk("xd_dtc_init_drive: error setting step rate for xd%c\n", 'a'+drive); | ||
| 755 | } | ||
| 756 | else | ||
| 757 | printk("xd_dtc_init_drive: error reading geometry for xd%c\n", 'a'+drive); | ||
| 758 | } | ||
| 759 | |||
| 760 | static void __init xd_wd_init_controller (unsigned int address) | ||
| 761 | { | ||
| 762 | switch (address) { | ||
| 763 | case 0x00000: | ||
| 764 | case 0xC8000: break; /*initial: 0x320 */ | ||
| 765 | case 0xCA000: xd_iobase = 0x324; break; | ||
| 766 | case 0xCC000: xd_iobase = 0x328; break; | ||
| 767 | case 0xCE000: xd_iobase = 0x32C; break; | ||
| 768 | case 0xD0000: xd_iobase = 0x328; break; /* ? */ | ||
| 769 | case 0xD8000: xd_iobase = 0x32C; break; /* ? */ | ||
| 770 | default: printk("xd_wd_init_controller: unsupported BIOS address %06x\n",address); | ||
| 771 | break; | ||
| 772 | } | ||
| 773 | xd_maxsectors = 0x01; /* this one doesn't wrap properly either... */ | ||
| 774 | |||
| 775 | outb(0,XD_RESET); /* reset the controller */ | ||
| 776 | |||
| 777 | msleep(XD_INIT_DISK_DELAY); | ||
| 778 | } | ||
| 779 | |||
| 780 | static void __init xd_wd_init_drive (u_char drive) | ||
| 781 | { | ||
| 782 | /* values from controller's BIOS - BIOS may be disabled */ | ||
| 783 | static u_short geometry_table[][4] = { | ||
| 784 | {0x264,4,0x1C2,0x1C2}, /* common part */ | ||
| 785 | {0x132,4,0x099,0x0}, | ||
| 786 | {0x267,2,0x1C2,0x1C2}, | ||
| 787 | {0x267,4,0x1C2,0x1C2}, | ||
| 788 | |||
| 789 | {0x334,6,0x335,0x335}, /* 1004 series RLL */ | ||
| 790 | {0x30E,4,0x30F,0x3DC}, | ||
| 791 | {0x30E,2,0x30F,0x30F}, | ||
| 792 | {0x267,4,0x268,0x268}, | ||
| 793 | |||
| 794 | {0x3D5,5,0x3D6,0x3D6}, /* 1002 series RLL */ | ||
| 795 | {0x3DB,7,0x3DC,0x3DC}, | ||
| 796 | {0x264,4,0x265,0x265}, | ||
| 797 | {0x267,4,0x268,0x268}}; | ||
| 798 | |||
| 799 | u_char cmdblk[6],buf[0x200]; | ||
| 800 | u_char n = 0,rll,jumper_state,use_jumper_geo; | ||
| 801 | u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6'); | ||
| 802 | |||
| 803 | jumper_state = ~(inb(0x322)); | ||
| 804 | if (jumper_state & 0x40) | ||
| 805 | xd_irq = 9; | ||
| 806 | rll = (jumper_state & 0x30) ? (0x04 << wd_1002) : 0; | ||
| 807 | xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0); | ||
| 808 | if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) { | ||
| 809 | xd_info[drive].heads = buf[0x1AF]; /* heads */ | ||
| 810 | xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6]; /* cylinders */ | ||
| 811 | xd_info[drive].sectors = 17; /* sectors */ | ||
| 812 | if (xd_geo[3*drive]) | ||
| 813 | xd_manual_geo_set(drive); | ||
| 814 | #if 0 | ||
| 815 | xd_info[drive].rwrite = ((u_short *) (buf))[0xD8]; /* reduced write */ | ||
| 816 | xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA]; /* write precomp */ | ||
| 817 | xd_info[drive].ecc = buf[0x1B4]; /* ecc length */ | ||
| 818 | #endif /* 0 */ | ||
| 819 | xd_info[drive].control = buf[0x1B5]; /* control byte */ | ||
| 820 | use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders); | ||
| 821 | if (xd_geo[3*drive]) { | ||
| 822 | xd_manual_geo_set(drive); | ||
| 823 | xd_info[drive].control = rll ? 7 : 5; | ||
| 824 | } | ||
| 825 | else if (use_jumper_geo) { | ||
| 826 | n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll; | ||
| 827 | xd_info[drive].cylinders = geometry_table[n][0]; | ||
| 828 | xd_info[drive].heads = (u_char)(geometry_table[n][1]); | ||
| 829 | xd_info[drive].control = rll ? 7 : 5; | ||
| 830 | #if 0 | ||
| 831 | xd_info[drive].rwrite = geometry_table[n][2]; | ||
| 832 | xd_info[drive].wprecomp = geometry_table[n][3]; | ||
| 833 | xd_info[drive].ecc = 0x0B; | ||
| 834 | #endif /* 0 */ | ||
| 835 | } | ||
| 836 | if (!wd_1002) { | ||
| 837 | if (use_jumper_geo) | ||
| 838 | xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders, | ||
| 839 | geometry_table[n][2],geometry_table[n][3],0x0B); | ||
| 840 | else | ||
| 841 | xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders, | ||
| 842 | ((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]); | ||
| 843 | } | ||
| 844 | /* 1002 based RLL controller requests converted addressing, but reports physical | ||
| 845 | (physical 26 sec., logical 17 sec.) | ||
| 846 | 1004 based ???? */ | ||
| 847 | if (rll && wd_1002) { | ||
| 848 | xd_info[drive].cylinders = xd_info[drive].cylinders * 26 / 17; | ||
| 849 | if (xd_info[drive].cylinders > 1023) | ||
| 850 | xd_info[drive].cylinders = 1023; /* 1024 ? */ | ||
| 851 | #if 0 | ||
| 852 | xd_info[drive].rwrite *= 26; | ||
| 853 | xd_info[drive].rwrite /= 17; | ||
| 854 | xd_info[drive].wprecomp *= 26; | ||
| 855 | xd_info[drive].wprecomp /= 17; | ||
| 856 | #endif /* 0 */ | ||
| 857 | } | ||
| 858 | } | ||
| 859 | else | ||
| 860 | printk("xd_wd_init_drive: error reading geometry for xd%c\n",'a'+drive); | ||
| 861 | |||
| 862 | } | ||
| 863 | |||
| 864 | static void __init xd_seagate_init_controller (unsigned int address) | ||
| 865 | { | ||
| 866 | switch (address) { | ||
| 867 | case 0x00000: | ||
| 868 | case 0xC8000: break; /*initial: 0x320 */ | ||
| 869 | case 0xD0000: xd_iobase = 0x324; break; | ||
| 870 | case 0xD8000: xd_iobase = 0x328; break; | ||
| 871 | case 0xE0000: xd_iobase = 0x32C; break; | ||
| 872 | default: printk("xd_seagate_init_controller: unsupported BIOS address %06x\n",address); | ||
| 873 | break; | ||
| 874 | } | ||
| 875 | xd_maxsectors = 0x40; | ||
| 876 | |||
| 877 | outb(0,XD_RESET); /* reset the controller */ | ||
| 878 | } | ||
| 879 | |||
| 880 | static void __init xd_seagate_init_drive (u_char drive) | ||
| 881 | { | ||
| 882 | u_char cmdblk[6],buf[0x200]; | ||
| 883 | |||
| 884 | xd_build(cmdblk,CMD_ST11GETGEOM,drive,0,0,0,1,0); | ||
| 885 | if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) { | ||
| 886 | xd_info[drive].heads = buf[0x04]; /* heads */ | ||
| 887 | xd_info[drive].cylinders = (buf[0x02] << 8) | buf[0x03]; /* cylinders */ | ||
| 888 | xd_info[drive].sectors = buf[0x05]; /* sectors */ | ||
| 889 | xd_info[drive].control = 0; /* control byte */ | ||
| 890 | } | ||
| 891 | else | ||
| 892 | printk("xd_seagate_init_drive: error reading geometry from xd%c\n", 'a'+drive); | ||
| 893 | } | ||
| 894 | |||
| 895 | /* Omti support courtesy Dirk Melchers */ | ||
| 896 | static void __init xd_omti_init_controller (unsigned int address) | ||
| 897 | { | ||
| 898 | switch (address) { | ||
| 899 | case 0x00000: | ||
| 900 | case 0xC8000: break; /*initial: 0x320 */ | ||
| 901 | case 0xD0000: xd_iobase = 0x324; break; | ||
| 902 | case 0xD8000: xd_iobase = 0x328; break; | ||
| 903 | case 0xE0000: xd_iobase = 0x32C; break; | ||
| 904 | default: printk("xd_omti_init_controller: unsupported BIOS address %06x\n",address); | ||
| 905 | break; | ||
| 906 | } | ||
| 907 | |||
| 908 | xd_maxsectors = 0x40; | ||
| 909 | |||
| 910 | outb(0,XD_RESET); /* reset the controller */ | ||
| 911 | } | ||
| 912 | |||
| 913 | static void __init xd_omti_init_drive (u_char drive) | ||
| 914 | { | ||
| 915 | /* get geometry info from the drive */ | ||
| 916 | xd_override_init_drive(drive); | ||
| 917 | |||
| 918 | /* set other parameters; hardcoded, not that nice :-) */ | ||
| 919 | xd_info[drive].control = 2; | ||
| 920 | } | ||
| 921 | |||
| 922 | /* Xebec support (AK) */ | ||
| 923 | static void __init xd_xebec_init_controller (unsigned int address) | ||
| 924 | { | ||
| 925 | /* iobase may be set manually in range 0x300 - 0x33C | ||
| 926 | irq may be set manually to 2(9),3,4,5,6,7 | ||
| 927 | dma may be set manually to 1,2,3 | ||
| 928 | (How to detect them ???) | ||
| 929 | BIOS address may be set manually in range 0x0 - 0xF8000 | ||
| 930 | If you need non-standard settings use the xd=... command */ | ||
| 931 | |||
| 932 | switch (address) { | ||
| 933 | case 0x00000: | ||
| 934 | case 0xC8000: /* initially: xd_iobase==0x320 */ | ||
| 935 | case 0xD0000: | ||
| 936 | case 0xD2000: | ||
| 937 | case 0xD4000: | ||
| 938 | case 0xD6000: | ||
| 939 | case 0xD8000: | ||
| 940 | case 0xDA000: | ||
| 941 | case 0xDC000: | ||
| 942 | case 0xDE000: | ||
| 943 | case 0xE0000: break; | ||
| 944 | default: printk("xd_xebec_init_controller: unsupported BIOS address %06x\n",address); | ||
| 945 | break; | ||
| 946 | } | ||
| 947 | |||
| 948 | xd_maxsectors = 0x01; | ||
| 949 | outb(0,XD_RESET); /* reset the controller */ | ||
| 950 | |||
| 951 | msleep(XD_INIT_DISK_DELAY); | ||
| 952 | } | ||
| 953 | |||
| 954 | static void __init xd_xebec_init_drive (u_char drive) | ||
| 955 | { | ||
| 956 | /* values from controller's BIOS - BIOS chip may be removed */ | ||
| 957 | static u_short geometry_table[][5] = { | ||
| 958 | {0x132,4,0x080,0x080,0x7}, | ||
| 959 | {0x132,4,0x080,0x080,0x17}, | ||
| 960 | {0x264,2,0x100,0x100,0x7}, | ||
| 961 | {0x264,2,0x100,0x100,0x17}, | ||
| 962 | {0x132,8,0x080,0x080,0x7}, | ||
| 963 | {0x132,8,0x080,0x080,0x17}, | ||
| 964 | {0x264,4,0x100,0x100,0x6}, | ||
| 965 | {0x264,4,0x100,0x100,0x17}, | ||
| 966 | {0x2BC,5,0x2BC,0x12C,0x6}, | ||
| 967 | {0x3A5,4,0x3A5,0x3A5,0x7}, | ||
| 968 | {0x26C,6,0x26C,0x26C,0x7}, | ||
| 969 | {0x200,8,0x200,0x100,0x17}, | ||
| 970 | {0x400,5,0x400,0x400,0x7}, | ||
| 971 | {0x400,6,0x400,0x400,0x7}, | ||
| 972 | {0x264,8,0x264,0x200,0x17}, | ||
| 973 | {0x33E,7,0x33E,0x200,0x7}}; | ||
| 974 | u_char n; | ||
| 975 | |||
| 976 | n = inb(XD_JUMPER) & 0x0F; /* BIOS's drive number: same geometry | ||
| 977 | is assumed for BOTH drives */ | ||
| 978 | if (xd_geo[3*drive]) | ||
| 979 | xd_manual_geo_set(drive); | ||
| 980 | else { | ||
| 981 | xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */ | ||
| 982 | xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */ | ||
| 983 | xd_info[drive].sectors = 17; /* sectors */ | ||
| 984 | #if 0 | ||
| 985 | xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */ | ||
| 986 | xd_info[drive].precomp = geometry_table[n][3]; /* write precomp */ | ||
| 987 | xd_info[drive].ecc = 0x0B; /* ecc length */ | ||
| 988 | #endif /* 0 */ | ||
| 989 | } | ||
| 990 | xd_info[drive].control = geometry_table[n][4]; /* control byte */ | ||
| 991 | xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B); | ||
| 992 | xd_recalibrate(drive); | ||
| 993 | } | ||
| 994 | |||
| 995 | /* xd_override_init_drive: this finds disk geometry in a "binary search" style, narrowing in on the "correct" number of heads | ||
| 996 | etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). */ | ||
| 997 | static void __init xd_override_init_drive (u_char drive) | ||
| 998 | { | ||
| 999 | u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 }; | ||
| 1000 | u_char cmdblk[6],i; | ||
| 1001 | |||
| 1002 | if (xd_geo[3*drive]) | ||
| 1003 | xd_manual_geo_set(drive); | ||
| 1004 | else { | ||
| 1005 | for (i = 0; i < 3; i++) { | ||
| 1006 | while (min[i] != max[i] - 1) { | ||
| 1007 | test[i] = (min[i] + max[i]) / 2; | ||
| 1008 | xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0); | ||
| 1009 | if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2)) | ||
| 1010 | min[i] = test[i]; | ||
| 1011 | else | ||
| 1012 | max[i] = test[i]; | ||
| 1013 | } | ||
| 1014 | test[i] = min[i]; | ||
| 1015 | } | ||
| 1016 | xd_info[drive].heads = (u_char) min[0] + 1; | ||
| 1017 | xd_info[drive].cylinders = (u_short) min[1] + 1; | ||
| 1018 | xd_info[drive].sectors = (u_char) min[2] + 1; | ||
| 1019 | } | ||
| 1020 | xd_info[drive].control = 0; | ||
| 1021 | } | ||
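The probe above is a binary search per axis: min always holds a value known to seek successfully and max one known to fail, so the loop converges on the largest addressable head/cylinder/sector. A short trace for the heads axis (seek outcomes invented):

	/* min = 0, max = 16:
	 *   test  8 -> seek ok    -> min =  8
	 *   test 12 -> seek ok    -> min = 12
	 *   test 14 -> seek fails -> max = 14
	 *   test 13 -> seek ok    -> min = 13
	 * min == max - 1 stops the loop; heads = min + 1 = 14.
	 */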
| 1022 | |||
| 1023 | /* do_xd_setup: initialise controller from command-line parameters */ | ||
| 1024 | static void __init do_xd_setup (int *integers) | ||
| 1025 | { | ||
| 1026 | switch (integers[0]) { | ||
| 1027 | case 4: if (integers[4] < 0) | ||
| 1028 | nodma = 1; | ||
| 1029 | else if (integers[4] < 8) | ||
| 1030 | xd_dma = integers[4]; | ||
| 1031 | case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC)) | ||
| 1032 | xd_iobase = integers[3]; | ||
| 1033 | case 2: if ((integers[2] > 0) && (integers[2] < 16)) | ||
| 1034 | xd_irq = integers[2]; | ||
| 1035 | case 1: xd_override = 1; | ||
| 1036 | if ((integers[1] >= 0) && (integers[1] < ARRAY_SIZE(xd_sigs))) | ||
| 1037 | xd_type = integers[1]; | ||
| 1038 | case 0: break; | ||
| 1039 | default:printk("xd: too many parameters for xd\n"); | ||
| 1040 | } | ||
| 1041 | xd_maxsectors = 0x01; | ||
| 1042 | } | ||
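Note the deliberate fall-through: each case consumes its own parameter and then drops into the cases below it. For example (values invented), xd=1,7,0x324 parses via get_options() into integers = {3, 1, 7, 0x324}, so case 3 sets xd_iobase = 0x324, case 2 sets xd_irq = 7, and case 1 sets xd_override and xd_type = 1, while xd_dma keeps its default of 3.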
| 1043 | |||
| 1044 | /* xd_setparam: set the drive characteristics */ | ||
| 1045 | static void __init xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc) | ||
| 1046 | { | ||
| 1047 | u_char cmdblk[14]; | ||
| 1048 | |||
| 1049 | xd_build(cmdblk,command,drive,0,0,0,0,0); | ||
| 1050 | cmdblk[6] = (u_char) (cylinders >> 8) & 0x03; | ||
| 1051 | cmdblk[7] = (u_char) (cylinders & 0xFF); | ||
| 1052 | cmdblk[8] = heads & 0x1F; | ||
| 1053 | cmdblk[9] = (u_char) (rwrite >> 8) & 0x03; | ||
| 1054 | cmdblk[10] = (u_char) (rwrite & 0xFF); | ||
| 1055 | cmdblk[11] = (u_char) (wprecomp >> 8) & 0x03; | ||
| 1056 | cmdblk[12] = (u_char) (wprecomp & 0xFF); | ||
| 1057 | cmdblk[13] = ecc; | ||
| 1058 | |||
| 1059 | /* Some controllers require geometry info as data, not command */ | ||
| 1060 | |||
| 1061 | if (xd_command(cmdblk,PIO_MODE,NULL,&cmdblk[6],NULL,XD_TIMEOUT * 2)) | ||
| 1062 | printk("xd: error setting characteristics for xd%c\n", 'a'+drive); | ||
| 1063 | } | ||
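For completeness, the eight geometry bytes handed over in the data phase (cmdblk[6..13]) lay out as:

	/* [6]  cylinder count, high 2 bits   [7]  cylinder count, low byte
	 * [8]  heads (5 bits)                [9]  reduced-write cyl, high 2 bits
	 * [10] reduced-write cyl, low byte   [11] write-precomp cyl, high 2 bits
	 * [12] write-precomp cyl, low byte   [13] ecc burst length
	 */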
| 1064 | |||
| 1065 | |||
| 1066 | #ifdef MODULE | ||
| 1067 | |||
| 1068 | module_param_array(xd, int, NULL, 0); | ||
| 1069 | module_param_array(xd_geo, int, NULL, 0); | ||
| 1070 | module_param(nodma, bool, 0); | ||
| 1071 | |||
| 1072 | MODULE_LICENSE("GPL"); | ||
| 1073 | |||
| 1074 | void cleanup_module(void) | ||
| 1075 | { | ||
| 1076 | int i; | ||
| 1077 | unregister_blkdev(XT_DISK_MAJOR, "xd"); | ||
| 1078 | for (i = 0; i < xd_drives; i++) { | ||
| 1079 | del_gendisk(xd_gendisk[i]); | ||
| 1080 | put_disk(xd_gendisk[i]); | ||
| 1081 | } | ||
| 1082 | blk_cleanup_queue(xd_queue); | ||
| 1083 | release_region(xd_iobase,4); | ||
| 1084 | if (xd_drives) { | ||
| 1085 | free_irq(xd_irq, NULL); | ||
| 1086 | free_dma(xd_dma); | ||
| 1087 | if (xd_dma_buffer) | ||
| 1088 | xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200); | ||
| 1089 | } | ||
| 1090 | } | ||
| 1091 | #else | ||
| 1092 | |||
| 1093 | static int __init xd_setup (char *str) | ||
| 1094 | { | ||
| 1095 | int ints[5]; | ||
| 1096 | get_options (str, ARRAY_SIZE (ints), ints); | ||
| 1097 | do_xd_setup (ints); | ||
| 1098 | return 1; | ||
| 1099 | } | ||
| 1100 | |||
| 1101 | /* xd_manual_geo_init: initialise drive geometry from command-line parameters | ||
| 1102 | (honoured by all controller types except the Seagate ones) */ | ||
| 1103 | static int __init xd_manual_geo_init (char *str) | ||
| 1104 | { | ||
| 1105 | int i, integers[1 + 3*XD_MAXDRIVES]; | ||
| 1106 | |||
| 1107 | get_options (str, ARRAY_SIZE (integers), integers); | ||
| 1108 | if (integers[0]%3 != 0) { | ||
| 1109 | printk("xd: incorrect number of parameters for xd_geo\n"); | ||
| 1110 | return 1; | ||
| 1111 | } | ||
| 1112 | for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++) | ||
| 1113 | xd_geo[i] = integers[i+1]; | ||
| 1114 | return 1; | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | __setup ("xd=", xd_setup); | ||
| 1118 | __setup ("xd_geo=", xd_manual_geo_init); | ||
| 1119 | |||
| 1120 | #endif /* MODULE */ | ||
| 1121 | |||
| 1122 | module_init(xd_init); | ||
| 1123 | MODULE_ALIAS_BLOCKDEV_MAJOR(XT_DISK_MAJOR); | ||
diff --git a/drivers/block/xd.h b/drivers/block/xd.h deleted file mode 100644 index 37cacef16e93..000000000000 --- a/drivers/block/xd.h +++ /dev/null | |||
| @@ -1,134 +0,0 @@ | |||
| 1 | #ifndef _LINUX_XD_H | ||
| 2 | #define _LINUX_XD_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * This file contains the definitions for the IO ports and errors etc. for XT hard disk controllers (at least the DTC 5150X). | ||
| 6 | * | ||
| 7 | * Author: Pat Mackinlay, pat@it.com.au | ||
| 8 | * Date: 29/09/92 | ||
| 9 | * | ||
| 10 | * Revised: 01/01/93, ... | ||
| 11 | * | ||
| 12 | * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, kevinf@agora.rain.com) | ||
| 13 | * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and Wim Van Dorst. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/interrupt.h> | ||
| 17 | |||
| 18 | /* XT hard disk controller registers */ | ||
| 19 | #define XD_DATA (xd_iobase + 0x00) /* data RW register */ | ||
| 20 | #define XD_RESET (xd_iobase + 0x01) /* reset WO register */ | ||
| 21 | #define XD_STATUS (xd_iobase + 0x01) /* status RO register */ | ||
| 22 | #define XD_SELECT (xd_iobase + 0x02) /* select WO register */ | ||
| 23 | #define XD_JUMPER (xd_iobase + 0x02) /* jumper RO register */ | ||
| 24 | #define XD_CONTROL (xd_iobase + 0x03) /* DMAE/INTE WO register */ | ||
| 25 | #define XD_RESERVED (xd_iobase + 0x03) /* reserved */ | ||
| 26 | |||
| 27 | /* XT hard disk controller commands (incomplete list) */ | ||
| 28 | #define CMD_TESTREADY 0x00 /* test drive ready */ | ||
| 29 | #define CMD_RECALIBRATE 0x01 /* recalibrate drive */ | ||
| 30 | #define CMD_SENSE 0x03 /* request sense */ | ||
| 31 | #define CMD_FORMATDRV 0x04 /* format drive */ | ||
| 32 | #define CMD_VERIFY 0x05 /* read verify */ | ||
| 33 | #define CMD_FORMATTRK 0x06 /* format track */ | ||
| 34 | #define CMD_FORMATBAD 0x07 /* format bad track */ | ||
| 35 | #define CMD_READ 0x08 /* read */ | ||
| 36 | #define CMD_WRITE 0x0A /* write */ | ||
| 37 | #define CMD_SEEK 0x0B /* seek */ | ||
| 38 | |||
| 39 | /* Controller specific commands */ | ||
| 40 | #define CMD_DTCSETPARAM 0x0C /* set drive parameters (DTC 5150X & CX only?) */ | ||
| 41 | #define CMD_DTCGETECC 0x0D /* get ecc error length (DTC 5150X only?) */ | ||
| 42 | #define CMD_DTCREADBUF 0x0E /* read sector buffer (DTC 5150X only?) */ | ||
| 43 | #define CMD_DTCWRITEBUF 0x0F /* write sector buffer (DTC 5150X only?) */ | ||
| 44 | #define CMD_DTCREMAPTRK 0x11 /* assign alternate track (DTC 5150X only?) */ | ||
| 45 | #define CMD_DTCGETPARAM 0xFB /* get drive parameters (DTC 5150X only?) */ | ||
| 46 | #define CMD_DTCSETSTEP 0xFC /* set step rate (DTC 5150X only?) */ | ||
| 47 | #define CMD_DTCSETGEOM 0xFE /* set geometry data (DTC 5150X only?) */ | ||
| 48 | #define CMD_DTCGETGEOM 0xFF /* get geometry data (DTC 5150X only?) */ | ||
| 49 | #define CMD_ST11GETGEOM 0xF8 /* get geometry data (Seagate ST11R/M only?) */ | ||
| 50 | #define CMD_WDSETPARAM 0x0C /* set drive parameters (WD 1004A27X only?) */ | ||
| 51 | #define CMD_XBSETPARAM 0x0C /* set drive parameters (XEBEC only?) */ | ||
| 52 | |||
| 53 | /* Bits for command status byte */ | ||
| 54 | #define CSB_ERROR 0x02 /* error */ | ||
| 55 | #define CSB_LUN 0x20 /* logical unit number */ | ||
| 56 | |||
| 57 | /* XT hard disk controller status bits */ | ||
| 58 | #define STAT_READY 0x01 /* controller is ready */ | ||
| 59 | #define STAT_INPUT 0x02 /* data flowing from controller to host */ | ||
| 60 | #define STAT_COMMAND 0x04 /* controller in command phase */ | ||
| 61 | #define STAT_SELECT 0x08 /* controller is selected */ | ||
| 62 | #define STAT_REQUEST 0x10 /* controller requesting data */ | ||
| 63 | #define STAT_INTERRUPT 0x20 /* controller requesting interrupt */ | ||
| 64 | |||
| 65 | /* XT hard disk controller control bits */ | ||
| 66 | #define PIO_MODE 0x00 /* control bits to set for PIO */ | ||
| 67 | #define DMA_MODE 0x03 /* control bits to set for DMA & interrupt */ | ||
| 68 | |||
| 69 | #define XD_MAXDRIVES 2 /* maximum 2 drives */ | ||
| 70 | #define XD_TIMEOUT HZ /* 1 second timeout */ | ||
| 71 | #define XD_RETRIES 4 /* maximum 4 retries */ | ||
| 72 | |||
| 73 | #undef DEBUG /* define for debugging output */ | ||
| 74 | |||
| 75 | #ifdef DEBUG | ||
| 76 | #define DEBUG_STARTUP /* debug driver initialisation */ | ||
| 77 | #define DEBUG_OVERRIDE /* debug override geometry detection */ | ||
| 78 | #define DEBUG_READWRITE /* debug each read/write command */ | ||
| 79 | #define DEBUG_OTHER /* debug misc. interrupt/DMA stuff */ | ||
| 80 | #define DEBUG_COMMAND /* debug each controller command */ | ||
| 81 | #endif /* DEBUG */ | ||
| 82 | |||
| 83 | /* this structure defines the XT drives and their types */ | ||
| 84 | typedef struct { | ||
| 85 | u_char heads; | ||
| 86 | u_short cylinders; | ||
| 87 | u_char sectors; | ||
| 88 | u_char control; | ||
| 89 | int unit; | ||
| 90 | } XD_INFO; | ||
| 91 | |||
| 92 | /* this structure defines a ROM BIOS signature */ | ||
| 93 | typedef struct { | ||
| 94 | unsigned int offset; | ||
| 95 | const char *string; | ||
| 96 | void (*init_controller)(unsigned int address); | ||
| 97 | void (*init_drive)(u_char drive); | ||
| 98 | const char *name; | ||
| 99 | } XD_SIGNATURE; | ||
| 100 | |||
| 101 | #ifndef MODULE | ||
| 102 | static int xd_manual_geo_init (char *command); | ||
| 103 | #endif /* MODULE */ | ||
| 104 | static u_char xd_detect (u_char *controller, unsigned int *address); | ||
| 105 | static u_char xd_initdrives (void (*init_drive)(u_char drive)); | ||
| 106 | |||
| 107 | static void do_xd_request (struct request_queue * q); | ||
| 108 | static int xd_ioctl (struct block_device *bdev,fmode_t mode,unsigned int cmd,unsigned long arg); | ||
| 109 | static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count); | ||
| 110 | static void xd_recalibrate (u_char drive); | ||
| 111 | |||
| 112 | static irqreturn_t xd_interrupt_handler(int irq, void *dev_id); | ||
| 113 | static u_char xd_setup_dma (u_char opcode,u_char *buffer,u_int count); | ||
| 114 | static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control); | ||
| 115 | static void xd_watchdog (unsigned long unused); | ||
| 116 | static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout); | ||
| 117 | static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout); | ||
| 118 | |||
| 119 | /* card specific setup and geometry gathering code */ | ||
| 120 | static void xd_dtc_init_controller (unsigned int address); | ||
| 121 | static void xd_dtc5150cx_init_drive (u_char drive); | ||
| 122 | static void xd_dtc_init_drive (u_char drive); | ||
| 123 | static void xd_wd_init_controller (unsigned int address); | ||
| 124 | static void xd_wd_init_drive (u_char drive); | ||
| 125 | static void xd_seagate_init_controller (unsigned int address); | ||
| 126 | static void xd_seagate_init_drive (u_char drive); | ||
| 127 | static void xd_omti_init_controller (unsigned int address); | ||
| 128 | static void xd_omti_init_drive (u_char drive); | ||
| 129 | static void xd_xebec_init_controller (unsigned int address); | ||
| 130 | static void xd_xebec_init_drive (u_char drive); | ||
| 131 | static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc); | ||
| 132 | static void xd_override_init_drive (u_char drive); | ||
| 133 | |||
| 134 | #endif /* _LINUX_XD_H */ | ||
