author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 13:28:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 13:28:45 -0500
commit		540a7c5061f10a07748c89b6741af90db1a07252 (patch)
tree		27285b973326f894980e029cb5f726e8865e1443 /drivers
parent		718749d56214aa97015fe01b76b6d6dd0c171796 (diff)
parent		9c4a6b1e42801343535ccab4c190019d9975cce8 (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
"This is the usual grab bag of driver updates (hpsa, storvsc, mp2sas,
megaraid_sas, ses) plus an assortment of minor updates.
There's also an update to ufs which adds new phy drivers and finally a
new logging infrastructure for SCSI"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (114 commits)
scsi_logging: return void for dev_printk() functions
scsi: print single-character strings with seq_putc
scsi: merge consecutive seq_puts calls
scsi: replace seq_printf with seq_puts
aha152x: replace seq_printf with seq_puts
advansys: replace seq_printf with seq_puts
scsi: remove SPRINTF macro
sg: remove an unused variable
hpsa: Use local workqueues instead of system workqueues
hpsa: add in P840ar controller model name
hpsa: add in gen9 controller model names
hpsa: detect and report failures changing controller transport modes
hpsa: shorten the wait for the CISS doorbell mode change ack
hpsa: refactor duplicated scan completion code into a new routine
hpsa: move SG descriptor set-up out of hpsa_scatter_gather()
hpsa: do not use function pointers in fast path command submission
hpsa: print CDBs instead of kernel virtual addresses for uncommon errors
hpsa: do not use a void pointer for scsi_cmd field of struct CommandList
hpsa: return failed from device reset/abort handlers
hpsa: check for ctlr lockup after command allocation in main io path
...
Diffstat (limited to 'drivers')
87 files changed, 6144 insertions, 2215 deletions
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a9f5aed32d39..d2029a462e2c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2481,7 +2481,6 @@ static void ata_eh_link_report(struct ata_link *link)
 	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
-		const u8 *cdb = qc->cdb;
 		char data_buf[20] = "";
 		char cdb_buf[70] = "";
 
@@ -2509,16 +2508,15 @@ static void ata_eh_link_report(struct ata_link *link)
 		}
 
 		if (ata_is_atapi(qc->tf.protocol)) {
-			if (qc->scsicmd)
-				scsi_print_command(qc->scsicmd);
-			else
-				snprintf(cdb_buf, sizeof(cdb_buf),
-				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
-				 "%02x %02x %02x %02x %02x %02x %02x %02x\n     ",
-				 cdb[0], cdb[1], cdb[2], cdb[3],
-				 cdb[4], cdb[5], cdb[6], cdb[7],
-				 cdb[8], cdb[9], cdb[10], cdb[11],
-				 cdb[12], cdb[13], cdb[14], cdb[15]);
+			const u8 *cdb = qc->cdb;
+			size_t cdb_len = qc->dev->cdb_len;
+
+			if (qc->scsicmd) {
+				cdb = qc->scsicmd->cmnd;
+				cdb_len = qc->scsicmd->cmd_len;
+			}
+			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
+					      cdb, cdb_len);
 		} else {
 			const char *descr = ata_get_cmd_descript(cmd->command);
 			if (descr)
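The libata change above stops open-coding a fixed 16-byte hex dump of the CDB: it now picks the real CDB buffer and its length (qc->dev->cdb_len for ATAPI, or the attached scsi_cmnd's cmnd/cmd_len) and lets __scsi_format_command() render it into cdb_buf. A rough user-space sketch of the same idea, with format_cdb() standing in for the SCSI helper (the helper's exact output format is not shown in this diff):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for __scsi_format_command(): hex-dump cdb_len bytes. */
static void format_cdb(char *buf, size_t buflen,
		       const unsigned char *cdb, size_t cdb_len)
{
	size_t i, off = 0;

	for (i = 0; i < cdb_len && off + 3 < buflen; i++)
		off += snprintf(buf + off, buflen - off, "%02x ", cdb[i]);
}

int main(void)
{
	const unsigned char test_unit_ready[6] = { 0x00, 0, 0, 0, 0, 0 };
	char buf[70] = "";

	format_cdb(buf, sizeof(buf), test_unit_ready, sizeof(test_unit_ready));
	printf("cdb %s\n", buf);	/* only the 6 real bytes, not a fixed 16 */
	return 0;
}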
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 180a5442fd4b..38552a31304a 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -145,8 +145,11 @@ enclosure_register(struct device *dev, const char *name, int components,
 	if (err)
 		goto err;
 
-	for (i = 0; i < components; i++)
+	for (i = 0; i < components; i++) {
 		edev->component[i].number = -1;
+		edev->component[i].slot = -1;
+		edev->component[i].power_status = 1;
+	}
 
 	mutex_lock(&container_list_lock);
 	list_add_tail(&edev->node, &container_list);
@@ -273,27 +276,26 @@ enclosure_component_find_by_name(struct enclosure_device *edev,
 static const struct attribute_group *enclosure_component_groups[];
 
 /**
- * enclosure_component_register - add a particular component to an enclosure
+ * enclosure_component_alloc - prepare a new enclosure component
  * @edev: the enclosure to add the component
  * @num: the device number
  * @type: the type of component being added
  * @name: an optional name to appear in sysfs (leave NULL if none)
  *
- * Registers the component.  The name is optional for enclosures that
- * give their components a unique name.  If not, leave the field NULL
- * and a name will be assigned.
+ * The name is optional for enclosures that give their components a unique
+ * name.  If not, leave the field NULL and a name will be assigned.
  *
  * Returns a pointer to the enclosure component or an error.
  */
 struct enclosure_component *
-enclosure_component_register(struct enclosure_device *edev,
+enclosure_component_alloc(struct enclosure_device *edev,
 			     unsigned int number,
 			     enum enclosure_component_type type,
 			     const char *name)
 {
 	struct enclosure_component *ecomp;
 	struct device *cdev;
-	int err, i;
+	int i;
 	char newname[COMPONENT_NAME_SIZE];
 
 	if (number >= edev->components)
@@ -327,14 +329,30 @@ enclosure_component_register(struct enclosure_device *edev,
 	cdev->release = enclosure_component_release;
 	cdev->groups = enclosure_component_groups;
 
+	return ecomp;
+}
+EXPORT_SYMBOL_GPL(enclosure_component_alloc);
+
+/**
+ * enclosure_component_register - publishes an initialized enclosure component
+ * @ecomp: component to add
+ *
+ * Returns 0 on successful registration, releases the component otherwise
+ */
+int enclosure_component_register(struct enclosure_component *ecomp)
+{
+	struct device *cdev;
+	int err;
+
+	cdev = &ecomp->cdev;
 	err = device_register(cdev);
 	if (err) {
 		ecomp->number = -1;
 		put_device(cdev);
-		return ERR_PTR(err);
+		return err;
 	}
 
-	return ecomp;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(enclosure_component_register);
 
@@ -417,8 +435,21 @@ static ssize_t components_show(struct device *cdev,
 }
 static DEVICE_ATTR_RO(components);
 
+static ssize_t id_show(struct device *cdev,
+		       struct device_attribute *attr,
+		       char *buf)
+{
+	struct enclosure_device *edev = to_enclosure_device(cdev);
+
+	if (edev->cb->show_id)
+		return edev->cb->show_id(edev, buf);
+	return -EINVAL;
+}
+static DEVICE_ATTR_RO(id);
+
 static struct attribute *enclosure_class_attrs[] = {
 	&dev_attr_components.attr,
+	&dev_attr_id.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(enclosure_class);
@@ -553,6 +584,40 @@ static ssize_t set_component_locate(struct device *cdev,
 	return count;
 }
 
+static ssize_t get_component_power_status(struct device *cdev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+	struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+	if (edev->cb->get_power_status)
+		edev->cb->get_power_status(edev, ecomp);
+	return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
+}
+
+static ssize_t set_component_power_status(struct device *cdev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t count)
+{
+	struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+	struct enclosure_component *ecomp = to_enclosure_component(cdev);
+	int val;
+
+	if (strncmp(buf, "on", 2) == 0 &&
+	    (buf[2] == '\n' || buf[2] == '\0'))
+		val = 1;
+	else if (strncmp(buf, "off", 3) == 0 &&
+		 (buf[3] == '\n' || buf[3] == '\0'))
+		val = 0;
+	else
+		return -EINVAL;
+
+	if (edev->cb->set_power_status)
+		edev->cb->set_power_status(edev, ecomp, val);
+	return count;
+}
+
 static ssize_t get_component_type(struct device *cdev,
 				  struct device_attribute *attr, char *buf)
 {
@@ -561,6 +626,20 @@ static ssize_t get_component_type(struct device *cdev,
 	return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
 }
 
+static ssize_t get_component_slot(struct device *cdev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct enclosure_component *ecomp = to_enclosure_component(cdev);
+	int slot;
+
+	/* if the enclosure does not override then use 'number' as a stand-in */
+	if (ecomp->slot >= 0)
+		slot = ecomp->slot;
+	else
+		slot = ecomp->number;
+
+	return snprintf(buf, 40, "%d\n", slot);
+}
 
 static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
 		   set_component_fault);
@@ -570,14 +649,19 @@ static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
 		   set_component_active);
 static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
 		   set_component_locate);
+static DEVICE_ATTR(power_status, S_IRUGO | S_IWUSR, get_component_power_status,
+		   set_component_power_status);
 static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
+static DEVICE_ATTR(slot, S_IRUGO, get_component_slot, NULL);
 
 static struct attribute *enclosure_component_attrs[] = {
 	&dev_attr_fault.attr,
 	&dev_attr_status.attr,
 	&dev_attr_active.attr,
 	&dev_attr_locate.attr,
+	&dev_attr_power_status.attr,
 	&dev_attr_type.attr,
+	&dev_attr_slot.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(enclosure_component);
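The enclosure rework above splits the old one-shot enclosure_component_register() into two steps: enclosure_component_alloc() builds and initializes the component, and enclosure_component_register() publishes it, returning 0 or a negative errno and releasing the component on failure. That lets a caller fill in the new slot and power_status fields before the sysfs entries appear. A minimal sketch of the new calling convention, with the enclosure device, component number, type, name and slot all assumed to come from the caller (the real user is the ses driver elsewhere in this series):

#include <linux/err.h>
#include <linux/enclosure.h>

/* Sketch only: error handling trimmed, caller-supplied values assumed. */
static int add_enclosure_slot(struct enclosure_device *edev,
			      unsigned int number,
			      enum enclosure_component_type type,
			      const char *name, int slot)
{
	struct enclosure_component *ecomp;

	ecomp = enclosure_component_alloc(edev, number, type, name);
	if (IS_ERR(ecomp))
		return PTR_ERR(ecomp);

	/* New: fields can be set up before the device is published. */
	ecomp->slot = slot;

	/* Registration now returns 0/-errno and releases ecomp on failure. */
	return enclosure_component_register(ecomp);
}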
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index ccad8809ecb1..26a7623e551e 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -277,4 +277,11 @@ config PHY_STIH41X_USB
 	  Enable this to support the USB transceiver that is part of
 	  STMicroelectronics STiH41x SoC series.
 
+config PHY_QCOM_UFS
+	tristate "Qualcomm UFS PHY driver"
+	depends on OF && ARCH_MSM
+	select GENERIC_PHY
+	help
+	  Support for UFS PHY on QCOM chipsets.
+
 endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index aa74f961e44e..cfbb72064516 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -34,3 +34,6 @@ obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY)	+= phy-spear1340-miphy.o
 obj-$(CONFIG_PHY_XGENE)			+= phy-xgene.o
 obj-$(CONFIG_PHY_STIH407_USB)		+= phy-stih407-usb.o
 obj-$(CONFIG_PHY_STIH41X_USB)		+= phy-stih41x-usb.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qmp-20nm.o
+obj-$(CONFIG_PHY_QCOM_UFS)		+= phy-qcom-ufs-qmp-14nm.o
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
new file mode 100644
index 000000000000..591a39175e8a
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_I_H_
+#define UFS_QCOM_PHY_I_H_
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/phy/phy-qcom-ufs.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+({ \
+	ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+	might_sleep_if(timeout_us); \
+	for (;;) { \
+		(val) = readl(addr); \
+		if (cond) \
+			break; \
+		if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+			(val) = readl(addr); \
+			break; \
+		} \
+		if (sleep_us) \
+			usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
+	} \
+	(cond) ? 0 : -ETIMEDOUT; \
+})
+
+#define UFS_QCOM_PHY_CAL_ENTRY(reg, val)	\
+	{				\
+		.reg_offset = reg,	\
+		.cfg_value = val,	\
+	}
+
+#define UFS_QCOM_PHY_NAME_LEN	30
+
+enum {
+	MASK_SERDES_START	= 0x1,
+	MASK_PCS_READY		= 0x1,
+};
+
+enum {
+	OFFSET_SERDES_START	= 0x0,
+};
+
+struct ufs_qcom_phy_stored_attributes {
+	u32 att;
+	u32 value;
+};
+
+
+struct ufs_qcom_phy_calibration {
+	u32 reg_offset;
+	u32 cfg_value;
+};
+
+struct ufs_qcom_phy_vreg {
+	const char *name;
+	struct regulator *reg;
+	int max_uA;
+	int min_uV;
+	int max_uV;
+	bool enabled;
+	bool is_always_on;
+};
+
+struct ufs_qcom_phy {
+	struct list_head list;
+	struct device *dev;
+	void __iomem *mmio;
+	void __iomem *dev_ref_clk_ctrl_mmio;
+	struct clk *tx_iface_clk;
+	struct clk *rx_iface_clk;
+	bool is_iface_clk_enabled;
+	struct clk *ref_clk_src;
+	struct clk *ref_clk_parent;
+	struct clk *ref_clk;
+	bool is_ref_clk_enabled;
+	bool is_dev_ref_clk_enabled;
+	struct ufs_qcom_phy_vreg vdda_pll;
+	struct ufs_qcom_phy_vreg vdda_phy;
+	struct ufs_qcom_phy_vreg vddp_ref_clk;
+	unsigned int quirks;
+
+	/*
+	 * If UFS link is put into Hibern8 and if UFS PHY analog hardware is
+	 * power collapsed (by clearing UFS_PHY_POWER_DOWN_CONTROL), Hibern8
+	 * exit might fail even after powering on UFS PHY analog hardware.
+	 * Enabling this quirk will help to solve above issue by doing
+	 * custom PHY settings just before PHY analog power collapse.
+	 */
+	#define UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE	BIT(0)
+
+	u8 host_ctrl_rev_major;
+	u16 host_ctrl_rev_minor;
+	u16 host_ctrl_rev_step;
+
+	char name[UFS_QCOM_PHY_NAME_LEN];
+	struct ufs_qcom_phy_calibration *cached_regs;
+	int cached_regs_table_size;
+	bool is_powered_on;
+	struct ufs_qcom_phy_specific_ops *phy_spec_ops;
+};
+
+/**
+ * struct ufs_qcom_phy_specific_ops - set of pointers to functions which have a
+ * specific implementation per phy. Each UFS phy, should implement
+ * those functions according to its spec and requirements
+ * @calibrate_phy: pointer to a function that calibrate the phy
+ * @start_serdes: pointer to a function that starts the serdes
+ * @is_physical_coding_sublayer_ready: pointer to a function that
+ * checks pcs readiness. returns 0 for success and non-zero for error.
+ * @set_tx_lane_enable: pointer to a function that enable tx lanes
+ * @power_control: pointer to a function that controls analog rail of phy
+ * and writes to QSERDES_RX_SIGDET_CNTRL attribute
+ */
+struct ufs_qcom_phy_specific_ops {
+	int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
+	void (*start_serdes)(struct ufs_qcom_phy *phy);
+	int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
+	void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
+	void (*power_control)(struct ufs_qcom_phy *phy, bool val);
+};
+
+struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
+int ufs_qcom_phy_power_on(struct phy *generic_phy);
+int ufs_qcom_phy_power_off(struct phy *generic_phy);
+int ufs_qcom_phy_exit(struct phy *generic_phy);
+int ufs_qcom_phy_init_clks(struct phy *generic_phy,
+			struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
+			struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_remove(struct phy *generic_phy,
+			struct ufs_qcom_phy *ufs_qcom_phy);
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+			struct ufs_qcom_phy *common_cfg,
+			struct phy_ops *ufs_qcom_phy_gen_ops,
+			struct ufs_qcom_phy_specific_ops *phy_spec_ops);
+int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+			struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
+			struct ufs_qcom_phy_calibration *tbl_B, int tbl_size_B,
+			bool is_rate_B);
+#endif
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
new file mode 100644
index 000000000000..f5fc50a9fce7
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -0,0 +1,201 @@
1 | /* | ||
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include "phy-qcom-ufs-qmp-14nm.h" | ||
16 | |||
17 | #define UFS_PHY_NAME "ufs_phy_qmp_14nm" | ||
18 | #define UFS_PHY_VDDA_PHY_UV (925000) | ||
19 | |||
20 | static | ||
21 | int ufs_qcom_phy_qmp_14nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, | ||
22 | bool is_rate_B) | ||
23 | { | ||
24 | int tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A); | ||
25 | int tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B); | ||
26 | int err; | ||
27 | |||
28 | err = ufs_qcom_phy_calibrate(ufs_qcom_phy, phy_cal_table_rate_A, | ||
29 | tbl_size_A, phy_cal_table_rate_B, tbl_size_B, is_rate_B); | ||
30 | |||
31 | if (err) | ||
32 | dev_err(ufs_qcom_phy->dev, | ||
33 | "%s: ufs_qcom_phy_calibrate() failed %d\n", | ||
34 | __func__, err); | ||
35 | return err; | ||
36 | } | ||
37 | |||
38 | static | ||
39 | void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common) | ||
40 | { | ||
41 | phy_common->quirks = | ||
42 | UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE; | ||
43 | } | ||
44 | |||
45 | static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy) | ||
46 | { | ||
47 | struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy); | ||
48 | struct ufs_qcom_phy *phy_common = &phy->common_cfg; | ||
49 | int err; | ||
50 | |||
51 | err = ufs_qcom_phy_init_clks(generic_phy, phy_common); | ||
52 | if (err) { | ||
53 | dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n", | ||
54 | __func__, err); | ||
55 | goto out; | ||
56 | } | ||
57 | |||
58 | err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common); | ||
59 | if (err) { | ||
60 | dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n", | ||
61 | __func__, err); | ||
62 | goto out; | ||
63 | } | ||
64 | phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV; | ||
65 | phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV; | ||
66 | |||
67 | ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common); | ||
68 | |||
69 | out: | ||
70 | return err; | ||
71 | } | ||
72 | |||
73 | static | ||
74 | void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val) | ||
75 | { | ||
76 | writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL); | ||
77 | /* | ||
78 | * Before any transactions involving PHY, ensure PHY knows | ||
79 | * that it's analog rail is powered ON (or OFF). | ||
80 | */ | ||
81 | mb(); | ||
82 | } | ||
83 | |||
84 | static inline | ||
85 | void ufs_qcom_phy_qmp_14nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val) | ||
86 | { | ||
87 | /* | ||
88 | * 14nm PHY does not have TX_LANE_ENABLE register. | ||
89 | * Implement this function so as not to propagate error to caller. | ||
90 | */ | ||
91 | } | ||
92 | |||
93 | static inline void ufs_qcom_phy_qmp_14nm_start_serdes(struct ufs_qcom_phy *phy) | ||
94 | { | ||
95 | u32 tmp; | ||
96 | |||
97 | tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START); | ||
98 | tmp &= ~MASK_SERDES_START; | ||
99 | tmp |= (1 << OFFSET_SERDES_START); | ||
100 | writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START); | ||
101 | /* Ensure register value is committed */ | ||
102 | mb(); | ||
103 | } | ||
104 | |||
105 | static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common) | ||
106 | { | ||
107 | int err = 0; | ||
108 | u32 val; | ||
109 | |||
110 | err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS, | ||
111 | val, (val & MASK_PCS_READY), 10, 1000000); | ||
112 | if (err) | ||
113 | dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n", | ||
114 | __func__, err); | ||
115 | return err; | ||
116 | } | ||
117 | |||
118 | static struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = { | ||
119 | .init = ufs_qcom_phy_qmp_14nm_init, | ||
120 | .exit = ufs_qcom_phy_exit, | ||
121 | .power_on = ufs_qcom_phy_power_on, | ||
122 | .power_off = ufs_qcom_phy_power_off, | ||
123 | .owner = THIS_MODULE, | ||
124 | }; | ||
125 | |||
126 | static struct ufs_qcom_phy_specific_ops phy_14nm_ops = { | ||
127 | .calibrate_phy = ufs_qcom_phy_qmp_14nm_phy_calibrate, | ||
128 | .start_serdes = ufs_qcom_phy_qmp_14nm_start_serdes, | ||
129 | .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready, | ||
130 | .set_tx_lane_enable = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable, | ||
131 | .power_control = ufs_qcom_phy_qmp_14nm_power_control, | ||
132 | }; | ||
133 | |||
134 | static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev) | ||
135 | { | ||
136 | struct device *dev = &pdev->dev; | ||
137 | struct phy *generic_phy; | ||
138 | struct ufs_qcom_phy_qmp_14nm *phy; | ||
139 | int err = 0; | ||
140 | |||
141 | phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); | ||
142 | if (!phy) { | ||
143 | dev_err(dev, "%s: failed to allocate phy\n", __func__); | ||
144 | err = -ENOMEM; | ||
145 | goto out; | ||
146 | } | ||
147 | |||
148 | generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg, | ||
149 | &ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops); | ||
150 | |||
151 | if (!generic_phy) { | ||
152 | dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n", | ||
153 | __func__); | ||
154 | err = -EIO; | ||
155 | goto out; | ||
156 | } | ||
157 | |||
158 | phy_set_drvdata(generic_phy, phy); | ||
159 | |||
160 | strlcpy(phy->common_cfg.name, UFS_PHY_NAME, | ||
161 | sizeof(phy->common_cfg.name)); | ||
162 | |||
163 | out: | ||
164 | return err; | ||
165 | } | ||
166 | |||
167 | static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev) | ||
168 | { | ||
169 | struct device *dev = &pdev->dev; | ||
170 | struct phy *generic_phy = to_phy(dev); | ||
171 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | ||
172 | int err = 0; | ||
173 | |||
174 | err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy); | ||
175 | if (err) | ||
176 | dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n", | ||
177 | __func__, err); | ||
178 | |||
179 | return err; | ||
180 | } | ||
181 | |||
182 | static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = { | ||
183 | {.compatible = "qcom,ufs-phy-qmp-14nm"}, | ||
184 | {}, | ||
185 | }; | ||
186 | MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match); | ||
187 | |||
188 | static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = { | ||
189 | .probe = ufs_qcom_phy_qmp_14nm_probe, | ||
190 | .remove = ufs_qcom_phy_qmp_14nm_remove, | ||
191 | .driver = { | ||
192 | .of_match_table = ufs_qcom_phy_qmp_14nm_of_match, | ||
193 | .name = "ufs_qcom_phy_qmp_14nm", | ||
194 | .owner = THIS_MODULE, | ||
195 | }, | ||
196 | }; | ||
197 | |||
198 | module_platform_driver(ufs_qcom_phy_qmp_14nm_driver); | ||
199 | |||
200 | MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 14nm"); | ||
201 | MODULE_LICENSE("GPL v2"); | ||
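Nothing in this diff shows the consumer side of the new PHY drivers; the UFS host controller is expected to reach them through the generic PHY framework, which invokes the phy_ops wired up above (.init, .power_on and friends). A hedged sketch of such a consumer, where the device handle and the "ufsphy" con_id are illustrative assumptions:

#include <linux/err.h>
#include <linux/phy/phy.h>

/* Sketch: how a UFS host driver might bring up the QMP PHY registered above. */
static int ufs_host_bring_up_phy(struct device *dev)
{
	struct phy *phy;
	int err;

	phy = devm_phy_get(dev, "ufsphy");	/* con_id is an assumption */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	err = phy_init(phy);		/* -> ufs_qcom_phy_qmp_14nm_init() */
	if (err)
		return err;

	err = phy_power_on(phy);	/* -> ufs_qcom_phy_power_on() */
	if (err)
		phy_exit(phy);

	return err;
}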
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.h b/drivers/phy/phy-qcom-ufs-qmp-14nm.h
new file mode 100644
index 000000000000..3aefdbacbcd0
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.h
@@ -0,0 +1,177 @@
1 | /* | ||
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef UFS_QCOM_PHY_QMP_14NM_H_ | ||
16 | #define UFS_QCOM_PHY_QMP_14NM_H_ | ||
17 | |||
18 | #include "phy-qcom-ufs-i.h" | ||
19 | |||
20 | /* QCOM UFS PHY control registers */ | ||
21 | #define COM_OFF(x) (0x000 + x) | ||
22 | #define PHY_OFF(x) (0xC00 + x) | ||
23 | #define TX_OFF(n, x) (0x400 + (0x400 * n) + x) | ||
24 | #define RX_OFF(n, x) (0x600 + (0x400 * n) + x) | ||
25 | |||
26 | /* UFS PHY QSERDES COM registers */ | ||
27 | #define QSERDES_COM_BG_TIMER COM_OFF(0x0C) | ||
28 | #define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x34) | ||
29 | #define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x3C) | ||
30 | #define QSERDES_COM_LOCK_CMP1_MODE0 COM_OFF(0x4C) | ||
31 | #define QSERDES_COM_LOCK_CMP2_MODE0 COM_OFF(0x50) | ||
32 | #define QSERDES_COM_LOCK_CMP3_MODE0 COM_OFF(0x54) | ||
33 | #define QSERDES_COM_LOCK_CMP1_MODE1 COM_OFF(0x58) | ||
34 | #define QSERDES_COM_LOCK_CMP2_MODE1 COM_OFF(0x5C) | ||
35 | #define QSERDES_COM_LOCK_CMP3_MODE1 COM_OFF(0x60) | ||
36 | #define QSERDES_COM_CP_CTRL_MODE0 COM_OFF(0x78) | ||
37 | #define QSERDES_COM_CP_CTRL_MODE1 COM_OFF(0x7C) | ||
38 | #define QSERDES_COM_PLL_RCTRL_MODE0 COM_OFF(0x84) | ||
39 | #define QSERDES_COM_PLL_RCTRL_MODE1 COM_OFF(0x88) | ||
40 | #define QSERDES_COM_PLL_CCTRL_MODE0 COM_OFF(0x90) | ||
41 | #define QSERDES_COM_PLL_CCTRL_MODE1 COM_OFF(0x94) | ||
42 | #define QSERDES_COM_SYSCLK_EN_SEL COM_OFF(0xAC) | ||
43 | #define QSERDES_COM_RESETSM_CNTRL COM_OFF(0xB4) | ||
44 | #define QSERDES_COM_LOCK_CMP_EN COM_OFF(0xC8) | ||
45 | #define QSERDES_COM_LOCK_CMP_CFG COM_OFF(0xCC) | ||
46 | #define QSERDES_COM_DEC_START_MODE0 COM_OFF(0xD0) | ||
47 | #define QSERDES_COM_DEC_START_MODE1 COM_OFF(0xD4) | ||
48 | #define QSERDES_COM_DIV_FRAC_START1_MODE0 COM_OFF(0xDC) | ||
49 | #define QSERDES_COM_DIV_FRAC_START2_MODE0 COM_OFF(0xE0) | ||
50 | #define QSERDES_COM_DIV_FRAC_START3_MODE0 COM_OFF(0xE4) | ||
51 | #define QSERDES_COM_DIV_FRAC_START1_MODE1 COM_OFF(0xE8) | ||
52 | #define QSERDES_COM_DIV_FRAC_START2_MODE1 COM_OFF(0xEC) | ||
53 | #define QSERDES_COM_DIV_FRAC_START3_MODE1 COM_OFF(0xF0) | ||
54 | #define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 COM_OFF(0x108) | ||
55 | #define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 COM_OFF(0x10C) | ||
56 | #define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 COM_OFF(0x110) | ||
57 | #define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 COM_OFF(0x114) | ||
58 | #define QSERDES_COM_VCO_TUNE_CTRL COM_OFF(0x124) | ||
59 | #define QSERDES_COM_VCO_TUNE_MAP COM_OFF(0x128) | ||
60 | #define QSERDES_COM_VCO_TUNE1_MODE0 COM_OFF(0x12C) | ||
61 | #define QSERDES_COM_VCO_TUNE2_MODE0 COM_OFF(0x130) | ||
62 | #define QSERDES_COM_VCO_TUNE1_MODE1 COM_OFF(0x134) | ||
63 | #define QSERDES_COM_VCO_TUNE2_MODE1 COM_OFF(0x138) | ||
64 | #define QSERDES_COM_VCO_TUNE_TIMER1 COM_OFF(0x144) | ||
65 | #define QSERDES_COM_VCO_TUNE_TIMER2 COM_OFF(0x148) | ||
66 | #define QSERDES_COM_CLK_SELECT COM_OFF(0x174) | ||
67 | #define QSERDES_COM_HSCLK_SEL COM_OFF(0x178) | ||
68 | #define QSERDES_COM_CORECLK_DIV COM_OFF(0x184) | ||
69 | #define QSERDES_COM_CORE_CLK_EN COM_OFF(0x18C) | ||
70 | #define QSERDES_COM_CMN_CONFIG COM_OFF(0x194) | ||
71 | #define QSERDES_COM_SVS_MODE_CLK_SEL COM_OFF(0x19C) | ||
72 | #define QSERDES_COM_CORECLK_DIV_MODE1 COM_OFF(0x1BC) | ||
73 | |||
74 | /* UFS PHY registers */ | ||
75 | #define UFS_PHY_PHY_START PHY_OFF(0x00) | ||
76 | #define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x04) | ||
77 | #define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x168) | ||
78 | |||
79 | /* UFS PHY TX registers */ | ||
80 | #define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN TX_OFF(0, 0x68) | ||
81 | #define QSERDES_TX_LANE_MODE TX_OFF(0, 0x94) | ||
82 | |||
83 | /* UFS PHY RX registers */ | ||
84 | #define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN RX_OFF(0, 0x40) | ||
85 | #define QSERDES_RX_RX_TERM_BW RX_OFF(0, 0x90) | ||
86 | #define QSERDES_RX_RX_EQ_GAIN1_LSB RX_OFF(0, 0xC4) | ||
87 | #define QSERDES_RX_RX_EQ_GAIN1_MSB RX_OFF(0, 0xC8) | ||
88 | #define QSERDES_RX_RX_EQ_GAIN2_LSB RX_OFF(0, 0xCC) | ||
89 | #define QSERDES_RX_RX_EQ_GAIN2_MSB RX_OFF(0, 0xD0) | ||
90 | #define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 RX_OFF(0, 0xD8) | ||
91 | #define QSERDES_RX_SIGDET_CNTRL RX_OFF(0, 0x114) | ||
92 | #define QSERDES_RX_SIGDET_LVL RX_OFF(0, 0x118) | ||
93 | #define QSERDES_RX_SIGDET_DEGLITCH_CNTRL RX_OFF(0, 0x11C) | ||
94 | #define QSERDES_RX_RX_INTERFACE_MODE RX_OFF(0, 0x12C) | ||
95 | |||
96 | /* | ||
97 | * This structure represents the 14nm specific phy. | ||
98 | * common_cfg MUST remain the first field in this structure | ||
99 | * in case extra fields are added. This way, when calling | ||
100 | * get_ufs_qcom_phy() of generic phy, we can extract the | ||
101 | * common phy structure (struct ufs_qcom_phy) out of it | ||
102 | * regardless of the relevant specific phy. | ||
103 | */ | ||
104 | struct ufs_qcom_phy_qmp_14nm { | ||
105 | struct ufs_qcom_phy common_cfg; | ||
106 | }; | ||
107 | |||
108 | static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = { | ||
109 | UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01), | ||
110 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e), | ||
111 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xd7), | ||
112 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30), | ||
113 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x06), | ||
114 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08), | ||
115 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a), | ||
116 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05), | ||
117 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a), | ||
118 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a), | ||
119 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01), | ||
120 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x10), | ||
121 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20), | ||
122 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00), | ||
123 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00), | ||
124 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), | ||
125 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), | ||
126 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x14), | ||
127 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05), | ||
128 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82), | ||
129 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00), | ||
130 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00), | ||
131 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00), | ||
132 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b), | ||
133 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16), | ||
134 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28), | ||
135 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), | ||
136 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00), | ||
137 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28), | ||
138 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02), | ||
139 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff), | ||
140 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c), | ||
141 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00), | ||
142 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98), | ||
143 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00), | ||
144 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00), | ||
145 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00), | ||
146 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b), | ||
147 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16), | ||
148 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28), | ||
149 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80), | ||
150 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00), | ||
151 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6), | ||
152 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00), | ||
153 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32), | ||
154 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f), | ||
155 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00), | ||
156 | |||
157 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45), | ||
158 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x02), | ||
159 | |||
160 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24), | ||
161 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x02), | ||
162 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00), | ||
163 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18), | ||
164 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B), | ||
165 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B), | ||
166 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF), | ||
167 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F), | ||
168 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF), | ||
169 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0F), | ||
170 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E), | ||
171 | }; | ||
172 | |||
173 | static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = { | ||
174 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x54), | ||
175 | }; | ||
176 | |||
177 | #endif | ||
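The comment above struct ufs_qcom_phy_qmp_14nm (repeated for the 20nm variant further down) requires common_cfg to stay the first member so that generic code can recover the shared struct ufs_qcom_phy from the wrapper stored as PHY drvdata. The helper that relies on this, get_ufs_qcom_phy(), lives in phy-qcom-ufs.c, which is not part of this diff; a plausible sketch of the idea, under that assumption:

#include <linux/phy/phy.h>
#include "phy-qcom-ufs-i.h"

/* Sketch of the assumed helper; the real one is in phy-qcom-ufs.c (not shown). */
static inline struct ufs_qcom_phy *get_ufs_qcom_phy_sketch(struct phy *generic_phy)
{
	/*
	 * phy_set_drvdata() stored a pointer to the wrapper struct
	 * (e.g. struct ufs_qcom_phy_qmp_14nm).  Because common_cfg is
	 * guaranteed to be the first member, the same address can be
	 * read back as the common struct ufs_qcom_phy.
	 */
	return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy);
}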
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
new file mode 100644
index 000000000000..8332f96b2c4a
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -0,0 +1,257 @@
1 | /* | ||
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include "phy-qcom-ufs-qmp-20nm.h" | ||
16 | |||
17 | #define UFS_PHY_NAME "ufs_phy_qmp_20nm" | ||
18 | |||
19 | static | ||
20 | int ufs_qcom_phy_qmp_20nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, | ||
21 | bool is_rate_B) | ||
22 | { | ||
23 | struct ufs_qcom_phy_calibration *tbl_A, *tbl_B; | ||
24 | int tbl_size_A, tbl_size_B; | ||
25 | u8 major = ufs_qcom_phy->host_ctrl_rev_major; | ||
26 | u16 minor = ufs_qcom_phy->host_ctrl_rev_minor; | ||
27 | u16 step = ufs_qcom_phy->host_ctrl_rev_step; | ||
28 | int err; | ||
29 | |||
30 | if ((major == 0x1) && (minor == 0x002) && (step == 0x0000)) { | ||
31 | tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_2_0); | ||
32 | tbl_A = phy_cal_table_rate_A_1_2_0; | ||
33 | } else if ((major == 0x1) && (minor == 0x003) && (step == 0x0000)) { | ||
34 | tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_3_0); | ||
35 | tbl_A = phy_cal_table_rate_A_1_3_0; | ||
36 | } else { | ||
37 | dev_err(ufs_qcom_phy->dev, "%s: Unknown UFS-PHY version, no calibration values\n", | ||
38 | __func__); | ||
39 | err = -ENODEV; | ||
40 | goto out; | ||
41 | } | ||
42 | |||
43 | tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B); | ||
44 | tbl_B = phy_cal_table_rate_B; | ||
45 | |||
46 | err = ufs_qcom_phy_calibrate(ufs_qcom_phy, tbl_A, tbl_size_A, | ||
47 | tbl_B, tbl_size_B, is_rate_B); | ||
48 | |||
49 | if (err) | ||
50 | dev_err(ufs_qcom_phy->dev, "%s: ufs_qcom_phy_calibrate() failed %d\n", | ||
51 | __func__, err); | ||
52 | |||
53 | out: | ||
54 | return err; | ||
55 | } | ||
56 | |||
57 | static | ||
58 | void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common) | ||
59 | { | ||
60 | phy_common->quirks = | ||
61 | UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE; | ||
62 | } | ||
63 | |||
64 | static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy) | ||
65 | { | ||
66 | struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy); | ||
67 | struct ufs_qcom_phy *phy_common = &phy->common_cfg; | ||
68 | int err = 0; | ||
69 | |||
70 | err = ufs_qcom_phy_init_clks(generic_phy, phy_common); | ||
71 | if (err) { | ||
72 | dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n", | ||
73 | __func__, err); | ||
74 | goto out; | ||
75 | } | ||
76 | |||
77 | err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common); | ||
78 | if (err) { | ||
79 | dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n", | ||
80 | __func__, err); | ||
81 | goto out; | ||
82 | } | ||
83 | |||
84 | ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common); | ||
85 | |||
86 | out: | ||
87 | return err; | ||
88 | } | ||
89 | |||
90 | static | ||
91 | void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val) | ||
92 | { | ||
93 | bool hibern8_exit_after_pwr_collapse = phy->quirks & | ||
94 | UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE; | ||
95 | |||
96 | if (val) { | ||
97 | writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL); | ||
98 | /* | ||
99 | * Before any transactions involving PHY, ensure PHY knows | ||
100 | * that it's analog rail is powered ON. | ||
101 | */ | ||
102 | mb(); | ||
103 | |||
104 | if (hibern8_exit_after_pwr_collapse) { | ||
105 | /* | ||
106 | * Give atleast 1us delay after restoring PHY analog | ||
107 | * power. | ||
108 | */ | ||
109 | usleep_range(1, 2); | ||
110 | writel_relaxed(0x0A, phy->mmio + | ||
111 | QSERDES_COM_SYSCLK_EN_SEL_TXBAND); | ||
112 | writel_relaxed(0x08, phy->mmio + | ||
113 | QSERDES_COM_SYSCLK_EN_SEL_TXBAND); | ||
114 | /* | ||
115 | * Make sure workaround is deactivated before proceeding | ||
116 | * with normal PHY operations. | ||
117 | */ | ||
118 | mb(); | ||
119 | } | ||
120 | } else { | ||
121 | if (hibern8_exit_after_pwr_collapse) { | ||
122 | writel_relaxed(0x0A, phy->mmio + | ||
123 | QSERDES_COM_SYSCLK_EN_SEL_TXBAND); | ||
124 | writel_relaxed(0x02, phy->mmio + | ||
125 | QSERDES_COM_SYSCLK_EN_SEL_TXBAND); | ||
126 | /* | ||
127 | * Make sure that above workaround is activated before | ||
128 | * PHY analog power collapse. | ||
129 | */ | ||
130 | mb(); | ||
131 | } | ||
132 | |||
133 | writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL); | ||
134 | /* | ||
135 | * ensure that PHY knows its PHY analog rail is going | ||
136 | * to be powered down | ||
137 | */ | ||
138 | mb(); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | static | ||
143 | void ufs_qcom_phy_qmp_20nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val) | ||
144 | { | ||
145 | writel_relaxed(val & UFS_PHY_TX_LANE_ENABLE_MASK, | ||
146 | phy->mmio + UFS_PHY_TX_LANE_ENABLE); | ||
147 | mb(); | ||
148 | } | ||
149 | |||
150 | static inline void ufs_qcom_phy_qmp_20nm_start_serdes(struct ufs_qcom_phy *phy) | ||
151 | { | ||
152 | u32 tmp; | ||
153 | |||
154 | tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START); | ||
155 | tmp &= ~MASK_SERDES_START; | ||
156 | tmp |= (1 << OFFSET_SERDES_START); | ||
157 | writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START); | ||
158 | mb(); | ||
159 | } | ||
160 | |||
161 | static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common) | ||
162 | { | ||
163 | int err = 0; | ||
164 | u32 val; | ||
165 | |||
166 | err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS, | ||
167 | val, (val & MASK_PCS_READY), 10, 1000000); | ||
168 | if (err) | ||
169 | dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n", | ||
170 | __func__, err); | ||
171 | return err; | ||
172 | } | ||
173 | |||
174 | static struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = { | ||
175 | .init = ufs_qcom_phy_qmp_20nm_init, | ||
176 | .exit = ufs_qcom_phy_exit, | ||
177 | .power_on = ufs_qcom_phy_power_on, | ||
178 | .power_off = ufs_qcom_phy_power_off, | ||
179 | .owner = THIS_MODULE, | ||
180 | }; | ||
181 | |||
182 | static struct ufs_qcom_phy_specific_ops phy_20nm_ops = { | ||
183 | .calibrate_phy = ufs_qcom_phy_qmp_20nm_phy_calibrate, | ||
184 | .start_serdes = ufs_qcom_phy_qmp_20nm_start_serdes, | ||
185 | .is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready, | ||
186 | .set_tx_lane_enable = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable, | ||
187 | .power_control = ufs_qcom_phy_qmp_20nm_power_control, | ||
188 | }; | ||
189 | |||
190 | static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev) | ||
191 | { | ||
192 | struct device *dev = &pdev->dev; | ||
193 | struct phy *generic_phy; | ||
194 | struct ufs_qcom_phy_qmp_20nm *phy; | ||
195 | int err = 0; | ||
196 | |||
197 | phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); | ||
198 | if (!phy) { | ||
199 | dev_err(dev, "%s: failed to allocate phy\n", __func__); | ||
200 | err = -ENOMEM; | ||
201 | goto out; | ||
202 | } | ||
203 | |||
204 | generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg, | ||
205 | &ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops); | ||
206 | |||
207 | if (!generic_phy) { | ||
208 | dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n", | ||
209 | __func__); | ||
210 | err = -EIO; | ||
211 | goto out; | ||
212 | } | ||
213 | |||
214 | phy_set_drvdata(generic_phy, phy); | ||
215 | |||
216 | strlcpy(phy->common_cfg.name, UFS_PHY_NAME, | ||
217 | sizeof(phy->common_cfg.name)); | ||
218 | |||
219 | out: | ||
220 | return err; | ||
221 | } | ||
222 | |||
223 | static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev) | ||
224 | { | ||
225 | struct device *dev = &pdev->dev; | ||
226 | struct phy *generic_phy = to_phy(dev); | ||
227 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | ||
228 | int err = 0; | ||
229 | |||
230 | err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy); | ||
231 | if (err) | ||
232 | dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n", | ||
233 | __func__, err); | ||
234 | |||
235 | return err; | ||
236 | } | ||
237 | |||
238 | static const struct of_device_id ufs_qcom_phy_qmp_20nm_of_match[] = { | ||
239 | {.compatible = "qcom,ufs-phy-qmp-20nm"}, | ||
240 | {}, | ||
241 | }; | ||
242 | MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match); | ||
243 | |||
244 | static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = { | ||
245 | .probe = ufs_qcom_phy_qmp_20nm_probe, | ||
246 | .remove = ufs_qcom_phy_qmp_20nm_remove, | ||
247 | .driver = { | ||
248 | .of_match_table = ufs_qcom_phy_qmp_20nm_of_match, | ||
249 | .name = "ufs_qcom_phy_qmp_20nm", | ||
250 | .owner = THIS_MODULE, | ||
251 | }, | ||
252 | }; | ||
253 | |||
254 | module_platform_driver(ufs_qcom_phy_qmp_20nm_driver); | ||
255 | |||
256 | MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 20nm"); | ||
257 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.h b/drivers/phy/phy-qcom-ufs-qmp-20nm.h
new file mode 100644
index 000000000000..4f3076bb3d71
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.h
@@ -0,0 +1,235 @@
1 | /* | ||
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef UFS_QCOM_PHY_QMP_20NM_H_ | ||
16 | #define UFS_QCOM_PHY_QMP_20NM_H_ | ||
17 | |||
18 | #include "phy-qcom-ufs-i.h" | ||
19 | |||
20 | /* QCOM UFS PHY control registers */ | ||
21 | |||
22 | #define COM_OFF(x) (0x000 + x) | ||
23 | #define PHY_OFF(x) (0xC00 + x) | ||
24 | #define TX_OFF(n, x) (0x400 + (0x400 * n) + x) | ||
25 | #define RX_OFF(n, x) (0x600 + (0x400 * n) + x) | ||
26 | |||
27 | /* UFS PHY PLL block registers */ | ||
28 | #define QSERDES_COM_SYS_CLK_CTRL COM_OFF(0x0) | ||
29 | #define QSERDES_COM_PLL_VCOTAIL_EN COM_OFF(0x04) | ||
30 | #define QSERDES_COM_PLL_CNTRL COM_OFF(0x14) | ||
31 | #define QSERDES_COM_PLL_IP_SETI COM_OFF(0x24) | ||
32 | #define QSERDES_COM_CORE_CLK_IN_SYNC_SEL COM_OFF(0x28) | ||
33 | #define QSERDES_COM_BIAS_EN_CLKBUFLR_EN COM_OFF(0x30) | ||
34 | #define QSERDES_COM_PLL_CP_SETI COM_OFF(0x34) | ||
35 | #define QSERDES_COM_PLL_IP_SETP COM_OFF(0x38) | ||
36 | #define QSERDES_COM_PLL_CP_SETP COM_OFF(0x3C) | ||
37 | #define QSERDES_COM_SYSCLK_EN_SEL_TXBAND COM_OFF(0x48) | ||
38 | #define QSERDES_COM_RESETSM_CNTRL COM_OFF(0x4C) | ||
39 | #define QSERDES_COM_RESETSM_CNTRL2 COM_OFF(0x50) | ||
40 | #define QSERDES_COM_PLLLOCK_CMP1 COM_OFF(0x90) | ||
41 | #define QSERDES_COM_PLLLOCK_CMP2 COM_OFF(0x94) | ||
42 | #define QSERDES_COM_PLLLOCK_CMP3 COM_OFF(0x98) | ||
43 | #define QSERDES_COM_PLLLOCK_CMP_EN COM_OFF(0x9C) | ||
44 | #define QSERDES_COM_BGTC COM_OFF(0xA0) | ||
45 | #define QSERDES_COM_DEC_START1 COM_OFF(0xAC) | ||
46 | #define QSERDES_COM_PLL_AMP_OS COM_OFF(0xB0) | ||
47 | #define QSERDES_COM_RES_CODE_UP_OFFSET COM_OFF(0xD8) | ||
48 | #define QSERDES_COM_RES_CODE_DN_OFFSET COM_OFF(0xDC) | ||
49 | #define QSERDES_COM_DIV_FRAC_START1 COM_OFF(0x100) | ||
50 | #define QSERDES_COM_DIV_FRAC_START2 COM_OFF(0x104) | ||
51 | #define QSERDES_COM_DIV_FRAC_START3 COM_OFF(0x108) | ||
52 | #define QSERDES_COM_DEC_START2 COM_OFF(0x10C) | ||
53 | #define QSERDES_COM_PLL_RXTXEPCLK_EN COM_OFF(0x110) | ||
54 | #define QSERDES_COM_PLL_CRCTRL COM_OFF(0x114) | ||
55 | #define QSERDES_COM_PLL_CLKEPDIV COM_OFF(0x118) | ||
56 | |||
57 | /* TX LANE n (0, 1) registers */ | ||
58 | #define QSERDES_TX_EMP_POST1_LVL(n) TX_OFF(n, 0x08) | ||
59 | #define QSERDES_TX_DRV_LVL(n) TX_OFF(n, 0x0C) | ||
60 | #define QSERDES_TX_LANE_MODE(n) TX_OFF(n, 0x54) | ||
61 | |||
62 | /* RX LANE n (0, 1) registers */ | ||
63 | #define QSERDES_RX_CDR_CONTROL1(n) RX_OFF(n, 0x0) | ||
64 | #define QSERDES_RX_CDR_CONTROL_HALF(n) RX_OFF(n, 0x8) | ||
65 | #define QSERDES_RX_RX_EQ_GAIN1_LSB(n) RX_OFF(n, 0xA8) | ||
66 | #define QSERDES_RX_RX_EQ_GAIN1_MSB(n) RX_OFF(n, 0xAC) | ||
67 | #define QSERDES_RX_RX_EQ_GAIN2_LSB(n) RX_OFF(n, 0xB0) | ||
68 | #define QSERDES_RX_RX_EQ_GAIN2_MSB(n) RX_OFF(n, 0xB4) | ||
69 | #define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(n) RX_OFF(n, 0xBC) | ||
70 | #define QSERDES_RX_CDR_CONTROL_QUARTER(n) RX_OFF(n, 0xC) | ||
71 | #define QSERDES_RX_SIGDET_CNTRL(n) RX_OFF(n, 0x100) | ||
72 | |||
73 | /* UFS PHY registers */ | ||
74 | #define UFS_PHY_PHY_START PHY_OFF(0x00) | ||
75 | #define UFS_PHY_POWER_DOWN_CONTROL PHY_OFF(0x4) | ||
76 | #define UFS_PHY_TX_LANE_ENABLE PHY_OFF(0x44) | ||
77 | #define UFS_PHY_PWM_G1_CLK_DIVIDER PHY_OFF(0x08) | ||
78 | #define UFS_PHY_PWM_G2_CLK_DIVIDER PHY_OFF(0x0C) | ||
79 | #define UFS_PHY_PWM_G3_CLK_DIVIDER PHY_OFF(0x10) | ||
80 | #define UFS_PHY_PWM_G4_CLK_DIVIDER PHY_OFF(0x14) | ||
81 | #define UFS_PHY_CORECLK_PWM_G1_CLK_DIVIDER PHY_OFF(0x34) | ||
82 | #define UFS_PHY_CORECLK_PWM_G2_CLK_DIVIDER PHY_OFF(0x38) | ||
83 | #define UFS_PHY_CORECLK_PWM_G3_CLK_DIVIDER PHY_OFF(0x3C) | ||
84 | #define UFS_PHY_CORECLK_PWM_G4_CLK_DIVIDER PHY_OFF(0x40) | ||
85 | #define UFS_PHY_OMC_STATUS_RDVAL PHY_OFF(0x68) | ||
86 | #define UFS_PHY_LINE_RESET_TIME PHY_OFF(0x28) | ||
87 | #define UFS_PHY_LINE_RESET_GRANULARITY PHY_OFF(0x2C) | ||
88 | #define UFS_PHY_TSYNC_RSYNC_CNTL PHY_OFF(0x48) | ||
89 | #define UFS_PHY_PLL_CNTL PHY_OFF(0x50) | ||
90 | #define UFS_PHY_TX_LARGE_AMP_DRV_LVL PHY_OFF(0x54) | ||
91 | #define UFS_PHY_TX_SMALL_AMP_DRV_LVL PHY_OFF(0x5C) | ||
92 | #define UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL PHY_OFF(0x58) | ||
93 | #define UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL PHY_OFF(0x60) | ||
94 | #define UFS_PHY_CFG_CHANGE_CNT_VAL PHY_OFF(0x64) | ||
95 | #define UFS_PHY_RX_SYNC_WAIT_TIME PHY_OFF(0x6C) | ||
96 | #define UFS_PHY_TX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xB4) | ||
97 | #define UFS_PHY_RX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xE0) | ||
98 | #define UFS_PHY_TX_MIN_STALL_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xB8) | ||
99 | #define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAPABILITY PHY_OFF(0xE4) | ||
100 | #define UFS_PHY_TX_MIN_SAVE_CONFIG_TIME_CAPABILITY PHY_OFF(0xBC) | ||
101 | #define UFS_PHY_RX_MIN_SAVE_CONFIG_TIME_CAPABILITY PHY_OFF(0xE8) | ||
102 | #define UFS_PHY_RX_PWM_BURST_CLOSURE_LENGTH_CAPABILITY PHY_OFF(0xFC) | ||
103 | #define UFS_PHY_RX_MIN_ACTIVATETIME_CAPABILITY PHY_OFF(0x100) | ||
104 | #define UFS_PHY_RX_SIGDET_CTRL3 PHY_OFF(0x14c) | ||
105 | #define UFS_PHY_RMMI_ATTR_CTRL PHY_OFF(0x160) | ||
106 | #define UFS_PHY_RMMI_RX_CFGUPDT_L1 (1 << 7) | ||
107 | #define UFS_PHY_RMMI_TX_CFGUPDT_L1 (1 << 6) | ||
108 | #define UFS_PHY_RMMI_CFGWR_L1 (1 << 5) | ||
109 | #define UFS_PHY_RMMI_CFGRD_L1 (1 << 4) | ||
110 | #define UFS_PHY_RMMI_RX_CFGUPDT_L0 (1 << 3) | ||
111 | #define UFS_PHY_RMMI_TX_CFGUPDT_L0 (1 << 2) | ||
112 | #define UFS_PHY_RMMI_CFGWR_L0 (1 << 1) | ||
113 | #define UFS_PHY_RMMI_CFGRD_L0 (1 << 0) | ||
114 | #define UFS_PHY_RMMI_ATTRID PHY_OFF(0x164) | ||
115 | #define UFS_PHY_RMMI_ATTRWRVAL PHY_OFF(0x168) | ||
116 | #define UFS_PHY_RMMI_ATTRRDVAL_L0_STATUS PHY_OFF(0x16C) | ||
117 | #define UFS_PHY_RMMI_ATTRRDVAL_L1_STATUS PHY_OFF(0x170) | ||
118 | #define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x174) | ||
119 | |||
120 | #define UFS_PHY_TX_LANE_ENABLE_MASK 0x3 | ||
121 | |||
122 | /* | ||
123 | * This structure represents the 20nm specific phy. | ||
124 | * common_cfg MUST remain the first field in this structure | ||
125 | * in case extra fields are added. This way, when calling | ||
126 | * get_ufs_qcom_phy() of generic phy, we can extract the | ||
127 | * common phy structure (struct ufs_qcom_phy) out of it | ||
128 | * regardless of the relevant specific phy. | ||
129 | */ | ||
130 | struct ufs_qcom_phy_qmp_20nm { | ||
131 | struct ufs_qcom_phy common_cfg; | ||
132 | }; | ||
133 | |||
134 | static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_2_0[] = { | ||
135 | UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01), | ||
136 | UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D), | ||
137 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1), | ||
138 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc), | ||
139 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08), | ||
140 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03), | ||
141 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10), | ||
142 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82), | ||
143 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03), | ||
144 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80), | ||
145 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80), | ||
146 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40), | ||
147 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff), | ||
148 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19), | ||
149 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00), | ||
150 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03), | ||
151 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90), | ||
152 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03), | ||
153 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2), | ||
154 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c), | ||
155 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12), | ||
156 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2), | ||
157 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c), | ||
158 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12), | ||
159 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff), | ||
160 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff), | ||
161 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff), | ||
162 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00), | ||
163 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff), | ||
164 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff), | ||
165 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff), | ||
166 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00), | ||
167 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x3f), | ||
168 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x1b), | ||
169 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x0f), | ||
170 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01), | ||
171 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(0), 0x2F), | ||
172 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(0), 0x20), | ||
173 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(1), 0x2F), | ||
174 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(1), 0x20), | ||
175 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68), | ||
176 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68), | ||
177 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc), | ||
178 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc), | ||
179 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3), | ||
180 | }; | ||
181 | |||
182 | static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_3_0[] = { | ||
183 | UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01), | ||
184 | UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D), | ||
185 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1), | ||
186 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc), | ||
187 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08), | ||
188 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03), | ||
189 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10), | ||
190 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82), | ||
191 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03), | ||
192 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80), | ||
193 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80), | ||
194 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40), | ||
195 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff), | ||
196 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19), | ||
197 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00), | ||
198 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03), | ||
199 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90), | ||
200 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03), | ||
201 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2), | ||
202 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c), | ||
203 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12), | ||
204 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2), | ||
205 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c), | ||
206 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12), | ||
207 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff), | ||
208 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff), | ||
209 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff), | ||
210 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00), | ||
211 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff), | ||
212 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff), | ||
213 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff), | ||
214 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00), | ||
215 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x2b), | ||
216 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x38), | ||
217 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x3c), | ||
218 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_UP_OFFSET, 0x02), | ||
219 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_DN_OFFSET, 0x02), | ||
220 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01), | ||
221 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CNTRL, 0x40), | ||
222 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68), | ||
223 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68), | ||
224 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc), | ||
225 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc), | ||
226 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3), | ||
227 | }; | ||
228 | |||
229 | static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = { | ||
230 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x98), | ||
231 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0x65), | ||
232 | UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x1e), | ||
233 | }; | ||
234 | |||
235 | #endif | ||
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c new file mode 100644 index 000000000000..44ee983d57fe --- /dev/null +++ b/drivers/phy/phy-qcom-ufs.c | |||
@@ -0,0 +1,745 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include "phy-qcom-ufs-i.h" | ||
16 | |||
17 | #define MAX_PROP_NAME 32 | ||
18 | #define VDDA_PHY_MIN_UV 1000000 | ||
19 | #define VDDA_PHY_MAX_UV 1000000 | ||
20 | #define VDDA_PLL_MIN_UV 1800000 | ||
21 | #define VDDA_PLL_MAX_UV 1800000 | ||
22 | #define VDDP_REF_CLK_MIN_UV 1200000 | ||
23 | #define VDDP_REF_CLK_MAX_UV 1200000 | ||
24 | |||
25 | static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *, | ||
26 | const char *, bool); | ||
27 | static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *, | ||
28 | const char *); | ||
29 | static int ufs_qcom_phy_base_init(struct platform_device *pdev, | ||
30 | struct ufs_qcom_phy *phy_common); | ||
31 | |||
32 | int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy, | ||
33 | struct ufs_qcom_phy_calibration *tbl_A, | ||
34 | int tbl_size_A, | ||
35 | struct ufs_qcom_phy_calibration *tbl_B, | ||
36 | int tbl_size_B, bool is_rate_B) | ||
37 | { | ||
38 | int i; | ||
39 | int ret = 0; | ||
40 | |||
41 | if (!tbl_A) { | ||
42 | dev_err(ufs_qcom_phy->dev, "%s: tbl_A is NULL\n", __func__); | ||
43 | ret = -EINVAL; | ||
44 | goto out; | ||
45 | } | ||
46 | |||
47 | for (i = 0; i < tbl_size_A; i++) | ||
48 | writel_relaxed(tbl_A[i].cfg_value, | ||
49 | ufs_qcom_phy->mmio + tbl_A[i].reg_offset); | ||
50 | |||
51 | /* | ||
52 | * In case we would like to work in rate B, we need to | ||
53 | * override the registers that were configured from the | ||
54 | * rate A table with the corresponding entries from the | ||
55 | * rate B table. | ||
56 | */ | ||
57 | if (is_rate_B) { | ||
58 | if (!tbl_B) { | ||
59 | dev_err(ufs_qcom_phy->dev, "%s: tbl_B is NULL\n", | ||
60 | __func__); | ||
61 | ret = -EINVAL; | ||
62 | goto out; | ||
63 | } | ||
64 | |||
65 | for (i = 0; i < tbl_size_B; i++) | ||
66 | writel_relaxed(tbl_B[i].cfg_value, | ||
67 | ufs_qcom_phy->mmio + tbl_B[i].reg_offset); | ||
68 | } | ||
69 | |||
70 | /* flush buffered writes */ | ||
71 | mb(); | ||
72 | |||
73 | out: | ||
74 | return ret; | ||
75 | } | ||
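As a rough sketch (not code added by this series), a platform-specific calibrate_phy callback might pick a rate-A table based on the controller revision saved by ufs_qcom_phy_save_controller_version() and hand both tables to the helper above; the function name and the exact revision check here are assumptions:

    static int ufs_20nm_phy_calibrate_sketch(struct ufs_qcom_phy *ufs_qcom_phy,
                                             bool is_rate_B)
    {
        struct ufs_qcom_phy_calibration *tbl_A;
        int tbl_size_A;

        /* Assumed policy: 1.2.0 controllers use the 1_2_0 table, others 1_3_0. */
        if (ufs_qcom_phy->host_ctrl_rev_major == 1 &&
            ufs_qcom_phy->host_ctrl_rev_minor == 2) {
                tbl_A = phy_cal_table_rate_A_1_2_0;
                tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_2_0);
        } else {
                tbl_A = phy_cal_table_rate_A_1_3_0;
                tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_3_0);
        }

        /* Rate B entries are applied on top of the rate A settings. */
        return ufs_qcom_phy_calibrate(ufs_qcom_phy, tbl_A, tbl_size_A,
                                      phy_cal_table_rate_B,
                                      ARRAY_SIZE(phy_cal_table_rate_B),
                                      is_rate_B);
    }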
76 | |||
77 | struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev, | ||
78 | struct ufs_qcom_phy *common_cfg, | ||
79 | struct phy_ops *ufs_qcom_phy_gen_ops, | ||
80 | struct ufs_qcom_phy_specific_ops *phy_spec_ops) | ||
81 | { | ||
82 | int err; | ||
83 | struct device *dev = &pdev->dev; | ||
84 | struct phy *generic_phy = NULL; | ||
85 | struct phy_provider *phy_provider; | ||
86 | |||
87 | err = ufs_qcom_phy_base_init(pdev, common_cfg); | ||
88 | if (err) { | ||
89 | dev_err(dev, "%s: phy base init failed %d\n", __func__, err); | ||
90 | goto out; | ||
91 | } | ||
92 | |||
93 | phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); | ||
94 | if (IS_ERR(phy_provider)) { | ||
95 | err = PTR_ERR(phy_provider); | ||
96 | dev_err(dev, "%s: failed to register phy %d\n", __func__, err); | ||
97 | goto out; | ||
98 | } | ||
99 | |||
100 | generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops); | ||
101 | if (IS_ERR(generic_phy)) { | ||
102 | err = PTR_ERR(generic_phy); | ||
103 | dev_err(dev, "%s: failed to create phy %d\n", __func__, err); | ||
104 | goto out; | ||
105 | } | ||
106 | |||
107 | common_cfg->phy_spec_ops = phy_spec_ops; | ||
108 | common_cfg->dev = dev; | ||
109 | |||
110 | out: | ||
111 | return generic_phy; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * This assumes the phy structure embedded in generic_phy's private data | ||
116 | * is of type struct ufs_qcom_phy. For this to work it is crucial | ||
117 | * to keep the embedded struct "struct ufs_qcom_phy common_cfg" | ||
118 | * as the first field of the specific phy structure. | ||
119 | */ | ||
120 | struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy) | ||
121 | { | ||
122 | return (struct ufs_qcom_phy *)phy_get_drvdata(generic_phy); | ||
123 | } | ||
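A minimal illustration of that layout assumption, using the 20nm wrapper from the header above; the helper name and the phy_set_drvdata() placement are assumptions, not part of this patch:

    static void ufs_20nm_phy_set_drvdata_sketch(struct phy *generic_phy,
                                                struct ufs_qcom_phy_qmp_20nm *phy)
    {
        /* Only valid because common_cfg is the first member of the wrapper. */
        BUILD_BUG_ON(offsetof(struct ufs_qcom_phy_qmp_20nm, common_cfg) != 0);

        phy_set_drvdata(generic_phy, &phy->common_cfg);
        /* get_ufs_qcom_phy(generic_phy) now yields &phy->common_cfg. */
    }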
124 | |||
125 | static | ||
126 | int ufs_qcom_phy_base_init(struct platform_device *pdev, | ||
127 | struct ufs_qcom_phy *phy_common) | ||
128 | { | ||
129 | struct device *dev = &pdev->dev; | ||
130 | struct resource *res; | ||
131 | int err = 0; | ||
132 | |||
133 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem"); | ||
134 | if (!res) { | ||
135 | dev_err(dev, "%s: phy_mem resource not found\n", __func__); | ||
136 | err = -ENOMEM; | ||
137 | goto out; | ||
138 | } | ||
139 | |||
140 | phy_common->mmio = devm_ioremap_resource(dev, res); | ||
141 | if (IS_ERR((void const *)phy_common->mmio)) { | ||
142 | err = PTR_ERR((void const *)phy_common->mmio); | ||
143 | phy_common->mmio = NULL; | ||
144 | dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n", | ||
145 | __func__, err); | ||
146 | goto out; | ||
147 | } | ||
148 | |||
149 | /* "dev_ref_clk_ctrl_mem" is an optional resource */ | ||
150 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
151 | "dev_ref_clk_ctrl_mem"); | ||
152 | if (!res) { | ||
153 | dev_dbg(dev, "%s: dev_ref_clk_ctrl_mem resource not found\n", | ||
154 | __func__); | ||
155 | goto out; | ||
156 | } | ||
157 | |||
158 | phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res); | ||
159 | if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio)) { | ||
160 | err = PTR_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio); | ||
161 | phy_common->dev_ref_clk_ctrl_mmio = NULL; | ||
162 | dev_err(dev, "%s: ioremap for dev_ref_clk_ctrl_mem resource failed %d\n", | ||
163 | __func__, err); | ||
164 | } | ||
165 | |||
166 | out: | ||
167 | return err; | ||
168 | } | ||
169 | |||
170 | static int __ufs_qcom_phy_clk_get(struct phy *phy, | ||
171 | const char *name, struct clk **clk_out, bool err_print) | ||
172 | { | ||
173 | struct clk *clk; | ||
174 | int err = 0; | ||
175 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); | ||
176 | struct device *dev = ufs_qcom_phy->dev; | ||
177 | |||
178 | clk = devm_clk_get(dev, name); | ||
179 | if (IS_ERR(clk)) { | ||
180 | err = PTR_ERR(clk); | ||
181 | if (err_print) | ||
182 | dev_err(dev, "failed to get %s, err %d\n", name, err); | ||
183 | } else { | ||
184 | *clk_out = clk; | ||
185 | } | ||
186 | |||
187 | return err; | ||
188 | } | ||
189 | |||
190 | static | ||
191 | int ufs_qcom_phy_clk_get(struct phy *phy, | ||
192 | const char *name, struct clk **clk_out) | ||
193 | { | ||
194 | return __ufs_qcom_phy_clk_get(phy, name, clk_out, true); | ||
195 | } | ||
196 | |||
197 | int | ||
198 | ufs_qcom_phy_init_clks(struct phy *generic_phy, | ||
199 | struct ufs_qcom_phy *phy_common) | ||
200 | { | ||
201 | int err; | ||
202 | |||
203 | err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk", | ||
204 | &phy_common->tx_iface_clk); | ||
205 | if (err) | ||
206 | goto out; | ||
207 | |||
208 | err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk", | ||
209 | &phy_common->rx_iface_clk); | ||
210 | if (err) | ||
211 | goto out; | ||
212 | |||
213 | err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src", | ||
214 | &phy_common->ref_clk_src); | ||
215 | if (err) | ||
216 | goto out; | ||
217 | |||
218 | /* | ||
219 | * "ref_clk_parent" is optional hence don't abort init if it's not | ||
220 | * found. | ||
221 | */ | ||
222 | __ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent", | ||
223 | &phy_common->ref_clk_parent, false); | ||
224 | |||
225 | err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk", | ||
226 | &phy_common->ref_clk); | ||
227 | |||
228 | out: | ||
229 | return err; | ||
230 | } | ||
231 | |||
232 | int | ||
233 | ufs_qcom_phy_init_vregulators(struct phy *generic_phy, | ||
234 | struct ufs_qcom_phy *phy_common) | ||
235 | { | ||
236 | int err; | ||
237 | |||
238 | err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll, | ||
239 | "vdda-pll"); | ||
240 | if (err) | ||
241 | goto out; | ||
242 | |||
243 | err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy, | ||
244 | "vdda-phy"); | ||
245 | |||
246 | if (err) | ||
247 | goto out; | ||
248 | |||
249 | /* vddp-ref-clk-* properties are optional */ | ||
250 | __ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk, | ||
251 | "vddp-ref-clk", true); | ||
252 | out: | ||
253 | return err; | ||
254 | } | ||
255 | |||
256 | static int __ufs_qcom_phy_init_vreg(struct phy *phy, | ||
257 | struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional) | ||
258 | { | ||
259 | int err = 0; | ||
260 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); | ||
261 | struct device *dev = ufs_qcom_phy->dev; | ||
262 | |||
263 | char prop_name[MAX_PROP_NAME]; | ||
264 | |||
265 | vreg->name = kstrdup(name, GFP_KERNEL); | ||
266 | if (!vreg->name) { | ||
267 | err = -ENOMEM; | ||
268 | goto out; | ||
269 | } | ||
270 | |||
271 | vreg->reg = devm_regulator_get(dev, name); | ||
272 | if (IS_ERR(vreg->reg)) { | ||
273 | err = PTR_ERR(vreg->reg); | ||
274 | vreg->reg = NULL; | ||
275 | if (!optional) | ||
276 | dev_err(dev, "failed to get %s, %d\n", name, err); | ||
277 | goto out; | ||
278 | } | ||
279 | |||
280 | if (dev->of_node) { | ||
281 | snprintf(prop_name, MAX_PROP_NAME, "%s-max-microamp", name); | ||
282 | err = of_property_read_u32(dev->of_node, | ||
283 | prop_name, &vreg->max_uA); | ||
284 | if (err && err != -EINVAL) { | ||
285 | dev_err(dev, "%s: failed to read %s\n", | ||
286 | __func__, prop_name); | ||
287 | goto out; | ||
288 | } else if (err == -EINVAL || !vreg->max_uA) { | ||
289 | if (regulator_count_voltages(vreg->reg) > 0) { | ||
290 | dev_err(dev, "%s: %s is mandatory\n", | ||
291 | __func__, prop_name); | ||
292 | goto out; | ||
293 | } | ||
294 | err = 0; | ||
295 | } | ||
296 | snprintf(prop_name, MAX_PROP_NAME, "%s-always-on", name); | ||
297 | if (of_get_property(dev->of_node, prop_name, NULL)) | ||
298 | vreg->is_always_on = true; | ||
299 | else | ||
300 | vreg->is_always_on = false; | ||
301 | } | ||
302 | |||
303 | if (!strcmp(name, "vdda-pll")) { | ||
304 | vreg->max_uV = VDDA_PLL_MAX_UV; | ||
305 | vreg->min_uV = VDDA_PLL_MIN_UV; | ||
306 | } else if (!strcmp(name, "vdda-phy")) { | ||
307 | vreg->max_uV = VDDA_PHY_MAX_UV; | ||
308 | vreg->min_uV = VDDA_PHY_MIN_UV; | ||
309 | } else if (!strcmp(name, "vddp-ref-clk")) { | ||
310 | vreg->max_uV = VDDP_REF_CLK_MAX_UV; | ||
311 | vreg->min_uV = VDDP_REF_CLK_MIN_UV; | ||
312 | } | ||
313 | |||
314 | out: | ||
315 | if (err) | ||
316 | kfree(vreg->name); | ||
317 | return err; | ||
318 | } | ||
319 | |||
320 | static int ufs_qcom_phy_init_vreg(struct phy *phy, | ||
321 | struct ufs_qcom_phy_vreg *vreg, const char *name) | ||
322 | { | ||
323 | return __ufs_qcom_phy_init_vreg(phy, vreg, name, false); | ||
324 | } | ||
325 | |||
326 | static | ||
327 | int ufs_qcom_phy_cfg_vreg(struct phy *phy, | ||
328 | struct ufs_qcom_phy_vreg *vreg, bool on) | ||
329 | { | ||
330 | int ret = 0; | ||
331 | struct regulator *reg = vreg->reg; | ||
332 | const char *name = vreg->name; | ||
333 | int min_uV; | ||
334 | int uA_load; | ||
335 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); | ||
336 | struct device *dev = ufs_qcom_phy->dev; | ||
337 | |||
338 | BUG_ON(!vreg); | ||
339 | |||
340 | if (regulator_count_voltages(reg) > 0) { | ||
341 | min_uV = on ? vreg->min_uV : 0; | ||
342 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); | ||
343 | if (ret) { | ||
344 | dev_err(dev, "%s: %s set voltage failed, err=%d\n", | ||
345 | __func__, name, ret); | ||
346 | goto out; | ||
347 | } | ||
348 | uA_load = on ? vreg->max_uA : 0; | ||
349 | ret = regulator_set_optimum_mode(reg, uA_load); | ||
350 | if (ret >= 0) { | ||
351 | /* | ||
352 | * regulator_set_optimum_mode() returns new regulator | ||
353 | * mode upon success. | ||
354 | */ | ||
355 | ret = 0; | ||
356 | } else { | ||
357 | dev_err(dev, "%s: %s set optimum mode(uA_load=%d) failed, err=%d\n", | ||
358 | __func__, name, uA_load, ret); | ||
359 | goto out; | ||
360 | } | ||
361 | } | ||
362 | out: | ||
363 | return ret; | ||
364 | } | ||
365 | |||
366 | static | ||
367 | int ufs_qcom_phy_enable_vreg(struct phy *phy, | ||
368 | struct ufs_qcom_phy_vreg *vreg) | ||
369 | { | ||
370 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); | ||
371 | struct device *dev = ufs_qcom_phy->dev; | ||
372 | int ret = 0; | ||
373 | |||
374 | if (!vreg || vreg->enabled) | ||
375 | goto out; | ||
376 | |||
377 | ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true); | ||
378 | if (ret) { | ||
379 | dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n", | ||
380 | __func__, ret); | ||
381 | goto out; | ||
382 | } | ||
383 | |||
384 | ret = regulator_enable(vreg->reg); | ||
385 | if (ret) { | ||
386 | dev_err(dev, "%s: enable failed, err=%d\n", | ||
387 | __func__, ret); | ||
388 | goto out; | ||
389 | } | ||
390 | |||
391 | vreg->enabled = true; | ||
392 | out: | ||
393 | return ret; | ||
394 | } | ||
395 | |||
396 | int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy) | ||
397 | { | ||
398 | int ret = 0; | ||
399 | struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); | ||
400 | |||
401 | if (phy->is_ref_clk_enabled) | ||
402 | goto out; | ||
403 | |||
404 | /* | ||
405 | * The reference clock is propagated in a daisy-chained manner from | ||
406 | * the source to the phy, so ungate it at each stage. | ||
407 | */ | ||
408 | ret = clk_prepare_enable(phy->ref_clk_src); | ||
409 | if (ret) { | ||
410 | dev_err(phy->dev, "%s: ref_clk_src enable failed %d\n", | ||
411 | __func__, ret); | ||
412 | goto out; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * "ref_clk_parent" is optional clock hence make sure that clk reference | ||
417 | * is available before trying to enable the clock. | ||
418 | */ | ||
419 | if (phy->ref_clk_parent) { | ||
420 | ret = clk_prepare_enable(phy->ref_clk_parent); | ||
421 | if (ret) { | ||
422 | dev_err(phy->dev, "%s: ref_clk_parent enable failed %d\n", | ||
423 | __func__, ret); | ||
424 | goto out_disable_src; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | ret = clk_prepare_enable(phy->ref_clk); | ||
429 | if (ret) { | ||
430 | dev_err(phy->dev, "%s: ref_clk enable failed %d\n", | ||
431 | __func__, ret); | ||
432 | goto out_disable_parent; | ||
433 | } | ||
434 | |||
435 | phy->is_ref_clk_enabled = true; | ||
436 | goto out; | ||
437 | |||
438 | out_disable_parent: | ||
439 | if (phy->ref_clk_parent) | ||
440 | clk_disable_unprepare(phy->ref_clk_parent); | ||
441 | out_disable_src: | ||
442 | clk_disable_unprepare(phy->ref_clk_src); | ||
443 | out: | ||
444 | return ret; | ||
445 | } | ||
446 | |||
447 | static | ||
448 | int ufs_qcom_phy_disable_vreg(struct phy *phy, | ||
449 | struct ufs_qcom_phy_vreg *vreg) | ||
450 | { | ||
451 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy); | ||
452 | struct device *dev = ufs_qcom_phy->dev; | ||
453 | int ret = 0; | ||
454 | |||
455 | if (!vreg || !vreg->enabled || vreg->is_always_on) | ||
456 | goto out; | ||
457 | |||
458 | ret = regulator_disable(vreg->reg); | ||
459 | |||
460 | if (!ret) { | ||
461 | /* ignore errors on applying disable config */ | ||
462 | ufs_qcom_phy_cfg_vreg(phy, vreg, false); | ||
463 | vreg->enabled = false; | ||
464 | } else { | ||
465 | dev_err(dev, "%s: %s disable failed, err=%d\n", | ||
466 | __func__, vreg->name, ret); | ||
467 | } | ||
468 | out: | ||
469 | return ret; | ||
470 | } | ||
471 | |||
472 | void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy) | ||
473 | { | ||
474 | struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); | ||
475 | |||
476 | if (phy->is_ref_clk_enabled) { | ||
477 | clk_disable_unprepare(phy->ref_clk); | ||
478 | /* | ||
479 | * "ref_clk_parent" is optional clock hence make sure that clk | ||
480 | * reference is available before trying to disable the clock. | ||
481 | */ | ||
482 | if (phy->ref_clk_parent) | ||
483 | clk_disable_unprepare(phy->ref_clk_parent); | ||
484 | clk_disable_unprepare(phy->ref_clk_src); | ||
485 | phy->is_ref_clk_enabled = false; | ||
486 | } | ||
487 | } | ||
488 | |||
489 | #define UFS_REF_CLK_EN (1 << 5) | ||
490 | |||
491 | static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable) | ||
492 | { | ||
493 | struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); | ||
494 | |||
495 | if (phy->dev_ref_clk_ctrl_mmio && | ||
496 | (enable ^ phy->is_dev_ref_clk_enabled)) { | ||
497 | u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio); | ||
498 | |||
499 | if (enable) | ||
500 | temp |= UFS_REF_CLK_EN; | ||
501 | else | ||
502 | temp &= ~UFS_REF_CLK_EN; | ||
503 | |||
504 | /* | ||
505 | * If we are here to disable this clock immediately after | ||
506 | * entering hibern8, we need to make sure that the device | ||
507 | * ref_clk stays active for at least 1us after the hibern8 enter. | ||
508 | */ | ||
509 | if (!enable) | ||
510 | udelay(1); | ||
511 | |||
512 | writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio); | ||
513 | /* ensure that ref_clk is enabled/disabled before we return */ | ||
514 | wmb(); | ||
515 | /* | ||
516 | * If we call hibern8 exit after this, we need to make sure that | ||
517 | * device ref_clk is stable for at least 1us before the hibern8 | ||
518 | * exit command. | ||
519 | */ | ||
520 | if (enable) | ||
521 | udelay(1); | ||
522 | |||
523 | phy->is_dev_ref_clk_enabled = enable; | ||
524 | } | ||
525 | } | ||
526 | |||
527 | void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy) | ||
528 | { | ||
529 | ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); | ||
530 | } | ||
531 | |||
532 | void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) | ||
533 | { | ||
534 | ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); | ||
535 | } | ||
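The udelay() comments above imply an ordering on the host controller side; the sketch below is illustrative only (the helper name is an assumption, and the hibern8 commands are placeholders rather than ufshcd API calls):

    static void ufs_host_dev_ref_clk_gating_sketch(struct phy *generic_phy,
                                                   bool entering_hibern8)
    {
        if (entering_hibern8) {
                /* ... host issues the hibern8 enter command first ... */
                ufs_qcom_phy_disable_dev_ref_clk(generic_phy);
        } else {
                /* drive the pad at least 1us before the hibern8 exit command */
                ufs_qcom_phy_enable_dev_ref_clk(generic_phy);
                /* ... host issues the hibern8 exit command here ... */
        }
    }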
536 | |||
537 | /* Turn ON M-PHY RMMI interface clocks */ | ||
538 | int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) | ||
539 | { | ||
540 | struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); | ||
541 | int ret = 0; | ||
542 | |||
543 | if (phy->is_iface_clk_enabled) | ||
544 | goto out; | ||
545 | |||
546 | ret = clk_prepare_enable(phy->tx_iface_clk); | ||
547 | if (ret) { | ||
548 | dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n", | ||
549 | __func__, ret); | ||
550 | goto out; | ||
551 | } | ||
552 | ret = clk_prepare_enable(phy->rx_iface_clk); | ||
553 | if (ret) { | ||
554 | clk_disable_unprepare(phy->tx_iface_clk); | ||
555 | dev_err(phy->dev, "%s: rx_iface_clk enable failed %d, also disabling tx_iface_clk\n", | ||
556 | __func__, ret); | ||
557 | goto out; | ||
558 | } | ||
559 | phy->is_iface_clk_enabled = true; | ||
560 | |||
561 | out: | ||
562 | return ret; | ||
563 | } | ||
564 | |||
565 | /* Turn OFF M-PHY RMMI interface clocks */ | ||
566 | void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) | ||
567 | { | ||
568 | struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy); | ||
569 | |||
570 | if (phy->is_iface_clk_enabled) { | ||
571 | clk_disable_unprepare(phy->tx_iface_clk); | ||
572 | clk_disable_unprepare(phy->rx_iface_clk); | ||
573 | phy->is_iface_clk_enabled = false; | ||
574 | } | ||
575 | } | ||
576 | |||
577 | int ufs_qcom_phy_start_serdes(struct phy *generic_phy) | ||
578 | { | ||
579 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | ||
580 | int ret = 0; | ||
581 | |||
582 | if (!ufs_qcom_phy->phy_spec_ops->start_serdes) { | ||
583 | dev_err(ufs_qcom_phy->dev, "%s: start_serdes() callback is not supported\n", | ||
584 | __func__); | ||
585 | ret = -ENOTSUPP; | ||
586 | } else { | ||
587 | ufs_qcom_phy->phy_spec_ops->start_serdes(ufs_qcom_phy); | ||
588 | } | ||
589 | |||
590 | return ret; | ||
591 | } | ||
592 | |||
593 | int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) | ||
594 | { | ||
595 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | ||
596 | int ret = 0; | ||
597 | |||
598 | if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) { | ||
599 | dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n", | ||
600 | __func__); | ||
601 | ret = -ENOTSUPP; | ||
602 | } else { | ||
603 | ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy, | ||
604 | tx_lanes); | ||
605 | } | ||
606 | |||
607 | return ret; | ||
608 | } | ||
609 | |||
610 | void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, | ||
611 | u8 major, u16 minor, u16 step) | ||
612 | { | ||
613 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | ||
614 | |||
615 | ufs_qcom_phy->host_ctrl_rev_major = major; | ||
616 | ufs_qcom_phy->host_ctrl_rev_minor = minor; | ||
617 | ufs_qcom_phy->host_ctrl_rev_step = step; | ||
618 | } | ||
619 | |||
620 | int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) | ||
621 | { | ||
622 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | ||
623 | int ret = 0; | ||
624 | |||
625 | if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) { | ||
626 | dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n", | ||
627 | __func__); | ||
628 | ret = -ENOTSUPP; | ||
629 | } else { | ||
630 | ret = ufs_qcom_phy->phy_spec_ops-> | ||
631 | calibrate_phy(ufs_qcom_phy, is_rate_B); | ||
632 | if (ret) | ||
633 | dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n", | ||
634 | __func__, ret); | ||
635 | } | ||
636 | |||
637 | return ret; | ||
638 | } | ||
639 | |||
640 | int ufs_qcom_phy_remove(struct phy *generic_phy, | ||
641 | struct ufs_qcom_phy *ufs_qcom_phy) | ||
642 | { | ||
643 | phy_power_off(generic_phy); | ||
644 | |||
645 | kfree(ufs_qcom_phy->vdda_pll.name); | ||
646 | kfree(ufs_qcom_phy->vdda_phy.name); | ||
647 | |||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | int ufs_qcom_phy_exit(struct phy *generic_phy) | ||
652 | { | ||
653 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | ||
654 | |||
655 | if (ufs_qcom_phy->is_powered_on) | ||
656 | phy_power_off(generic_phy); | ||
657 | |||
658 | return 0; | ||
659 | } | ||
660 | |||
661 | int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy) | ||
662 | { | ||
663 | struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy); | ||
664 | |||
665 | if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) { | ||
666 | dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n", | ||
667 | __func__); | ||
668 | return -ENOTSUPP; | ||
669 | } | ||
670 | |||
671 | return ufs_qcom_phy->phy_spec_ops-> | ||
672 | is_physical_coding_sublayer_ready(ufs_qcom_phy); | ||
673 | } | ||
674 | |||
675 | int ufs_qcom_phy_power_on(struct phy *generic_phy) | ||
676 | { | ||
677 | struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy); | ||
678 | struct device *dev = phy_common->dev; | ||
679 | int err; | ||
680 | |||
681 | err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy); | ||
682 | if (err) { | ||
683 | dev_err(dev, "%s enable vdda_phy failed, err=%d\n", | ||
684 | __func__, err); | ||
685 | goto out; | ||
686 | } | ||
687 | |||
688 | phy_common->phy_spec_ops->power_control(phy_common, true); | ||
689 | |||
690 | /* vdda_pll also enables ref clock LDOs so enable it first */ | ||
691 | err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll); | ||
692 | if (err) { | ||
693 | dev_err(dev, "%s enable vdda_pll failed, err=%d\n", | ||
694 | __func__, err); | ||
695 | goto out_disable_phy; | ||
696 | } | ||
697 | |||
698 | err = ufs_qcom_phy_enable_ref_clk(generic_phy); | ||
699 | if (err) { | ||
700 | dev_err(dev, "%s enable phy ref clock failed, err=%d\n", | ||
701 | __func__, err); | ||
702 | goto out_disable_pll; | ||
703 | } | ||
704 | |||
705 | /* enable device PHY ref_clk pad rail */ | ||
706 | if (phy_common->vddp_ref_clk.reg) { | ||
707 | err = ufs_qcom_phy_enable_vreg(generic_phy, | ||
708 | &phy_common->vddp_ref_clk); | ||
709 | if (err) { | ||
710 | dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n", | ||
711 | __func__, err); | ||
712 | goto out_disable_ref_clk; | ||
713 | } | ||
714 | } | ||
715 | |||
716 | phy_common->is_powered_on = true; | ||
717 | goto out; | ||
718 | |||
719 | out_disable_ref_clk: | ||
720 | ufs_qcom_phy_disable_ref_clk(generic_phy); | ||
721 | out_disable_pll: | ||
722 | ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll); | ||
723 | out_disable_phy: | ||
724 | ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy); | ||
725 | out: | ||
726 | return err; | ||
727 | } | ||
728 | |||
729 | int ufs_qcom_phy_power_off(struct phy *generic_phy) | ||
730 | { | ||
731 | struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy); | ||
732 | |||
733 | phy_common->phy_spec_ops->power_control(phy_common, false); | ||
734 | |||
735 | if (phy_common->vddp_ref_clk.reg) | ||
736 | ufs_qcom_phy_disable_vreg(generic_phy, | ||
737 | &phy_common->vddp_ref_clk); | ||
738 | ufs_qcom_phy_disable_ref_clk(generic_phy); | ||
739 | |||
740 | ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll); | ||
741 | ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy); | ||
742 | phy_common->is_powered_on = false; | ||
743 | |||
744 | return 0; | ||
745 | } | ||
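Taken together, the exported helpers above are meant to be plugged into a specific PHY driver's phy_ops table; a hedged sketch of such a table follows (the variable name is an assumption, and a real driver would also supply an .init callback plus a platform probe that calls ufs_qcom_phy_generic_probe() and the clock/regulator init helpers):

    static struct phy_ops ufs_qcom_phy_20nm_ops_sketch = {
        .power_on  = ufs_qcom_phy_power_on,
        .power_off = ufs_qcom_phy_power_off,
        .exit      = ufs_qcom_phy_exit,
        .owner     = THIS_MODULE,
    };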
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index cd4129ff7ae4..7600639db4c4 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -608,7 +608,8 @@ static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed) | |||
608 | } | 608 | } |
609 | 609 | ||
610 | /* Load rest of compatibility struct */ | 610 | /* Load rest of compatibility struct */ |
611 | strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION)); | 611 | strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, |
612 | sizeof(tw_dev->tw_compat_info.driver_version)); | ||
612 | tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL; | 613 | tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL; |
613 | tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH; | 614 | tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH; |
614 | tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD; | 615 | tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD; |
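For reference, the difference the hunk above relies on (illustrative values, not from the driver): strncpy() bounded by the source length never stores a terminating NUL and ignores the destination size, while strlcpy() bounded by sizeof(dest) truncates and always NUL-terminates:

    char dst[8];
    const char *src = "a longer version string";

    /* old pattern: no NUL stored, destination size never checked */
    strncpy(dst, src, strlen(src));

    /* new pattern: safe truncation, dst is always NUL-terminated */
    strlcpy(dst, src, sizeof(dst));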
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index 8d66a6469e29..c7be7bb37209 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c | |||
@@ -3485,7 +3485,7 @@ static int blogic_show_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3485 | seq_printf(m, "\n\ | 3485 | seq_printf(m, "\n\ |
3486 | Current Driver Queue Depth: %d\n\ | 3486 | Current Driver Queue Depth: %d\n\ |
3487 | Currently Allocated CCBs: %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs); | 3487 | Currently Allocated CCBs: %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs); |
3488 | seq_printf(m, "\n\n\ | 3488 | seq_puts(m, "\n\n\ |
3489 | DATA TRANSFER STATISTICS\n\ | 3489 | DATA TRANSFER STATISTICS\n\ |
3490 | \n\ | 3490 | \n\ |
3491 | Target Tagged Queuing Queue Depth Active Attempted Completed\n\ | 3491 | Target Tagged Queuing Queue Depth Active Attempted Completed\n\ |
@@ -3500,7 +3500,7 @@ Target Tagged Queuing Queue Depth Active Attempted Completed\n\ | |||
3500 | seq_printf(m, | 3500 | seq_printf(m, |
3501 | " %3d %3u %9u %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete); | 3501 | " %3d %3u %9u %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete); |
3502 | } | 3502 | } |
3503 | seq_printf(m, "\n\ | 3503 | seq_puts(m, "\n\ |
3504 | Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\ | 3504 | Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\ |
3505 | ====== ============= ============== =================== ===================\n"); | 3505 | ====== ============= ============== =================== ===================\n"); |
3506 | for (tgt = 0; tgt < adapter->maxdev; tgt++) { | 3506 | for (tgt = 0; tgt < adapter->maxdev; tgt++) { |
@@ -3517,7 +3517,7 @@ Target Read Commands Write Commands Total Bytes Read Total Bytes Written\ | |||
3517 | else | 3517 | else |
3518 | seq_printf(m, " %9u\n", tgt_stats[tgt].byteswritten.units); | 3518 | seq_printf(m, " %9u\n", tgt_stats[tgt].byteswritten.units); |
3519 | } | 3519 | } |
3520 | seq_printf(m, "\n\ | 3520 | seq_puts(m, "\n\ |
3521 | Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\ | 3521 | Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\ |
3522 | ====== ======= ========= ========= ========= ========= =========\n"); | 3522 | ====== ======= ========= ========= ========= ========= =========\n"); |
3523 | for (tgt = 0; tgt < adapter->maxdev; tgt++) { | 3523 | for (tgt = 0; tgt < adapter->maxdev; tgt++) { |
@@ -3533,7 +3533,7 @@ Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\ | |||
3533 | tgt_stats[tgt].write_sz_buckets[0], | 3533 | tgt_stats[tgt].write_sz_buckets[0], |
3534 | tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]); | 3534 | tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]); |
3535 | } | 3535 | } |
3536 | seq_printf(m, "\n\ | 3536 | seq_puts(m, "\n\ |
3537 | Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\ | 3537 | Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\ |
3538 | ====== ======= ========= ========= ========= ========= =========\n"); | 3538 | ====== ======= ========= ========= ========= ========= =========\n"); |
3539 | for (tgt = 0; tgt < adapter->maxdev; tgt++) { | 3539 | for (tgt = 0; tgt < adapter->maxdev; tgt++) { |
@@ -3549,7 +3549,7 @@ Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\ | |||
3549 | tgt_stats[tgt].write_sz_buckets[5], | 3549 | tgt_stats[tgt].write_sz_buckets[5], |
3550 | tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]); | 3550 | tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]); |
3551 | } | 3551 | } |
3552 | seq_printf(m, "\n\n\ | 3552 | seq_puts(m, "\n\n\ |
3553 | ERROR RECOVERY STATISTICS\n\ | 3553 | ERROR RECOVERY STATISTICS\n\ |
3554 | \n\ | 3554 | \n\ |
3555 | Command Aborts Bus Device Resets Host Adapter Resets\n\ | 3555 | Command Aborts Bus Device Resets Host Adapter Resets\n\ |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 9c92f415229f..b021bcb88537 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -201,12 +201,12 @@ config SCSI_ENCLOSURE | |||
201 | certain enclosure conditions to be reported and is not required. | 201 | certain enclosure conditions to be reported and is not required. |
202 | 202 | ||
203 | config SCSI_CONSTANTS | 203 | config SCSI_CONSTANTS |
204 | bool "Verbose SCSI error reporting (kernel size +=12K)" | 204 | bool "Verbose SCSI error reporting (kernel size +=75K)" |
205 | depends on SCSI | 205 | depends on SCSI |
206 | help | 206 | help |
207 | The error messages regarding your SCSI hardware will be easier to | 207 | The error messages regarding your SCSI hardware will be easier to |
208 | understand if you say Y here; it will enlarge your kernel by about | 208 | understand if you say Y here; it will enlarge your kernel by about |
209 | 12 KB. If in doubt, say Y. | 209 | 75 KB. If in doubt, say Y. |
210 | 210 | ||
211 | config SCSI_LOGGING | 211 | config SCSI_LOGGING |
212 | bool "SCSI logging facility" | 212 | bool "SCSI logging facility" |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 58158f11ed7b..dee160a4f163 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -159,15 +159,15 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/ | |||
159 | 159 | ||
160 | # This goes last, so that "real" scsi devices probe earlier | 160 | # This goes last, so that "real" scsi devices probe earlier |
161 | obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o | 161 | obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o |
162 | 162 | scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \ | |
163 | scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \ | ||
164 | scsicam.o scsi_error.o scsi_lib.o | 163 | scsicam.o scsi_error.o scsi_lib.o |
164 | scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o | ||
165 | scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o | 165 | scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o |
166 | scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o | 166 | scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o |
167 | scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o | 167 | scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o |
168 | scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o | 168 | scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o |
169 | scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o | 169 | scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o |
170 | scsi_mod-y += scsi_trace.o | 170 | scsi_mod-y += scsi_trace.o scsi_logging.o |
171 | scsi_mod-$(CONFIG_PM) += scsi_pm.o | 171 | scsi_mod-$(CONFIG_PM) += scsi_pm.o |
172 | 172 | ||
173 | hv_storvsc-y := storvsc_drv.o | 173 | hv_storvsc-y := storvsc_drv.o |
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 36244d63def2..8981701802ca 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c | |||
@@ -716,8 +716,6 @@ static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, | |||
716 | } | 716 | } |
717 | #endif | 717 | #endif |
718 | 718 | ||
719 | #undef SPRINTF | ||
720 | #define SPRINTF(args...) seq_printf(m, ## args) | ||
721 | static | 719 | static |
722 | void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m); | 720 | void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m); |
723 | static | 721 | static |
@@ -734,19 +732,19 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m, | |||
734 | hostdata = (struct NCR5380_hostdata *) instance->hostdata; | 732 | hostdata = (struct NCR5380_hostdata *) instance->hostdata; |
735 | 733 | ||
736 | #ifdef PSEUDO_DMA | 734 | #ifdef PSEUDO_DMA |
737 | SPRINTF("Highwater I/O busy spin counts: write %d, read %d\n", | 735 | seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n", |
738 | hostdata->spin_max_w, hostdata->spin_max_r); | 736 | hostdata->spin_max_w, hostdata->spin_max_r); |
739 | #endif | 737 | #endif |
740 | spin_lock_irq(instance->host_lock); | 738 | spin_lock_irq(instance->host_lock); |
741 | if (!hostdata->connected) | 739 | if (!hostdata->connected) |
742 | SPRINTF("scsi%d: no currently connected command\n", instance->host_no); | 740 | seq_printf(m, "scsi%d: no currently connected command\n", instance->host_no); |
743 | else | 741 | else |
744 | lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m); | 742 | lprint_Scsi_Cmnd((struct scsi_cmnd *) hostdata->connected, m); |
745 | SPRINTF("scsi%d: issue_queue\n", instance->host_no); | 743 | seq_printf(m, "scsi%d: issue_queue\n", instance->host_no); |
746 | for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble) | 744 | for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble) |
747 | lprint_Scsi_Cmnd(ptr, m); | 745 | lprint_Scsi_Cmnd(ptr, m); |
748 | 746 | ||
749 | SPRINTF("scsi%d: disconnected_queue\n", instance->host_no); | 747 | seq_printf(m, "scsi%d: disconnected_queue\n", instance->host_no); |
750 | for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble) | 748 | for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = (struct scsi_cmnd *) ptr->host_scribble) |
751 | lprint_Scsi_Cmnd(ptr, m); | 749 | lprint_Scsi_Cmnd(ptr, m); |
752 | spin_unlock_irq(instance->host_lock); | 750 | spin_unlock_irq(instance->host_lock); |
@@ -755,8 +753,8 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m, | |||
755 | 753 | ||
756 | static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) | 754 | static void lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) |
757 | { | 755 | { |
758 | SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun); | 756 | seq_printf(m, "scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun); |
759 | SPRINTF(" command = "); | 757 | seq_puts(m, " command = "); |
760 | lprint_command(cmd->cmnd, m); | 758 | lprint_command(cmd->cmnd, m); |
761 | } | 759 | } |
762 | 760 | ||
@@ -765,13 +763,13 @@ static void lprint_command(unsigned char *command, struct seq_file *m) | |||
765 | int i, s; | 763 | int i, s; |
766 | lprint_opcode(command[0], m); | 764 | lprint_opcode(command[0], m); |
767 | for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) | 765 | for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) |
768 | SPRINTF("%02x ", command[i]); | 766 | seq_printf(m, "%02x ", command[i]); |
769 | SPRINTF("\n"); | 767 | seq_putc(m, '\n'); |
770 | } | 768 | } |
771 | 769 | ||
772 | static void lprint_opcode(int opcode, struct seq_file *m) | 770 | static void lprint_opcode(int opcode, struct seq_file *m) |
773 | { | 771 | { |
774 | SPRINTF("%2d (0x%02x)", opcode, opcode); | 772 | seq_printf(m, "%2d (0x%02x)", opcode, opcode); |
775 | } | 773 | } |
776 | 774 | ||
777 | 775 | ||
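The same conversion pattern recurs across the SCSI drivers in this series: format strings with no conversion specifiers become seq_puts(), single characters become seq_putc(), and genuine formatting stays with seq_printf(). A minimal illustrative show() callback (not taken from any of these drivers):

    static int example_show(struct seq_file *m, void *v)
    {
        seq_puts(m, "fixed text, no conversions\n");  /* was seq_printf */
        seq_printf(m, "count = %d\n", 42);            /* still needs formatting */
        seq_putc(m, '\n');                            /* single character */
        return 0;
    }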
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 2c5ce48c8f95..ae95e347f37d 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c | |||
@@ -2880,7 +2880,7 @@ static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost) | |||
2880 | chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id; | 2880 | chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id; |
2881 | } | 2881 | } |
2882 | 2882 | ||
2883 | seq_printf(m, "Target IDs Detected:"); | 2883 | seq_puts(m, "Target IDs Detected:"); |
2884 | for (i = 0; i <= ADV_MAX_TID; i++) { | 2884 | for (i = 0; i <= ADV_MAX_TID; i++) { |
2885 | if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) | 2885 | if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) |
2886 | seq_printf(m, " %X,", i); | 2886 | seq_printf(m, " %X,", i); |
@@ -2896,18 +2896,16 @@ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost) | |||
2896 | struct asc_board *boardp = shost_priv(shost); | 2896 | struct asc_board *boardp = shost_priv(shost); |
2897 | ushort major, minor, letter; | 2897 | ushort major, minor, letter; |
2898 | 2898 | ||
2899 | seq_printf(m, "\nROM BIOS Version: "); | 2899 | seq_puts(m, "\nROM BIOS Version: "); |
2900 | 2900 | ||
2901 | /* | 2901 | /* |
2902 | * If the BIOS saved a valid signature, then fill in | 2902 | * If the BIOS saved a valid signature, then fill in |
2903 | * the BIOS code segment base address. | 2903 | * the BIOS code segment base address. |
2904 | */ | 2904 | */ |
2905 | if (boardp->bios_signature != 0x55AA) { | 2905 | if (boardp->bios_signature != 0x55AA) { |
2906 | seq_printf(m, "Disabled or Pre-3.1\n"); | 2906 | seq_puts(m, "Disabled or Pre-3.1\n" |
2907 | seq_printf(m, | 2907 | "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n" |
2908 | "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n"); | 2908 | "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n"); |
2909 | seq_printf(m, | ||
2910 | "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n"); | ||
2911 | } else { | 2909 | } else { |
2912 | major = (boardp->bios_version >> 12) & 0xF; | 2910 | major = (boardp->bios_version >> 12) & 0xF; |
2913 | minor = (boardp->bios_version >> 8) & 0xF; | 2911 | minor = (boardp->bios_version >> 8) & 0xF; |
@@ -2923,10 +2921,8 @@ static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost) | |||
2923 | */ | 2921 | */ |
2924 | if (major < 3 || (major <= 3 && minor < 1) || | 2922 | if (major < 3 || (major <= 3 && minor < 1) || |
2925 | (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) { | 2923 | (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) { |
2926 | seq_printf(m, | 2924 | seq_puts(m, "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n" |
2927 | "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n"); | 2925 | "ftp://ftp.connectcom.net/pub\n"); |
2928 | seq_printf(m, | ||
2929 | "ftp://ftp.connectcom.net/pub\n"); | ||
2930 | } | 2926 | } |
2931 | } | 2927 | } |
2932 | } | 2928 | } |
@@ -3056,11 +3052,10 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3056 | == ASC_TRUE) | 3052 | == ASC_TRUE) |
3057 | seq_printf(m, " Serial Number: %s\n", serialstr); | 3053 | seq_printf(m, " Serial Number: %s\n", serialstr); |
3058 | else if (ep->adapter_info[5] == 0xBB) | 3054 | else if (ep->adapter_info[5] == 0xBB) |
3059 | seq_printf(m, | 3055 | seq_puts(m, |
3060 | " Default Settings Used for EEPROM-less Adapter.\n"); | 3056 | " Default Settings Used for EEPROM-less Adapter.\n"); |
3061 | else | 3057 | else |
3062 | seq_printf(m, | 3058 | seq_puts(m, " Serial Number Signature Not Present.\n"); |
3063 | " Serial Number Signature Not Present.\n"); | ||
3064 | 3059 | ||
3065 | seq_printf(m, | 3060 | seq_printf(m, |
3066 | " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", | 3061 | " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", |
@@ -3070,34 +3065,30 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3070 | seq_printf(m, | 3065 | seq_printf(m, |
3071 | " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam); | 3066 | " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam); |
3072 | 3067 | ||
3073 | seq_printf(m, " Target ID: "); | 3068 | seq_puts(m, " Target ID: "); |
3074 | for (i = 0; i <= ASC_MAX_TID; i++) | 3069 | for (i = 0; i <= ASC_MAX_TID; i++) |
3075 | seq_printf(m, " %d", i); | 3070 | seq_printf(m, " %d", i); |
3076 | seq_printf(m, "\n"); | ||
3077 | 3071 | ||
3078 | seq_printf(m, " Disconnects: "); | 3072 | seq_puts(m, "\n Disconnects: "); |
3079 | for (i = 0; i <= ASC_MAX_TID; i++) | 3073 | for (i = 0; i <= ASC_MAX_TID; i++) |
3080 | seq_printf(m, " %c", | 3074 | seq_printf(m, " %c", |
3081 | (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3075 | (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3082 | seq_printf(m, "\n"); | ||
3083 | 3076 | ||
3084 | seq_printf(m, " Command Queuing: "); | 3077 | seq_puts(m, "\n Command Queuing: "); |
3085 | for (i = 0; i <= ASC_MAX_TID; i++) | 3078 | for (i = 0; i <= ASC_MAX_TID; i++) |
3086 | seq_printf(m, " %c", | 3079 | seq_printf(m, " %c", |
3087 | (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3080 | (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3088 | seq_printf(m, "\n"); | ||
3089 | 3081 | ||
3090 | seq_printf(m, " Start Motor: "); | 3082 | seq_puts(m, "\n Start Motor: "); |
3091 | for (i = 0; i <= ASC_MAX_TID; i++) | 3083 | for (i = 0; i <= ASC_MAX_TID; i++) |
3092 | seq_printf(m, " %c", | 3084 | seq_printf(m, " %c", |
3093 | (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3085 | (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3094 | seq_printf(m, "\n"); | ||
3095 | 3086 | ||
3096 | seq_printf(m, " Synchronous Transfer:"); | 3087 | seq_puts(m, "\n Synchronous Transfer:"); |
3097 | for (i = 0; i <= ASC_MAX_TID; i++) | 3088 | for (i = 0; i <= ASC_MAX_TID; i++) |
3098 | seq_printf(m, " %c", | 3089 | seq_printf(m, " %c", |
3099 | (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3090 | (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3100 | seq_printf(m, "\n"); | 3091 | seq_putc(m, '\n'); |
3101 | 3092 | ||
3102 | #ifdef CONFIG_ISA | 3093 | #ifdef CONFIG_ISA |
3103 | if (asc_dvc_varp->bus_type & ASC_IS_ISA) { | 3094 | if (asc_dvc_varp->bus_type & ASC_IS_ISA) { |
@@ -3151,7 +3142,7 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3151 | if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) | 3142 | if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) |
3152 | seq_printf(m, " Serial Number: %s\n", serialstr); | 3143 | seq_printf(m, " Serial Number: %s\n", serialstr); |
3153 | else | 3144 | else |
3154 | seq_printf(m, " Serial Number Signature Not Present.\n"); | 3145 | seq_puts(m, " Serial Number Signature Not Present.\n"); |
3155 | 3146 | ||
3156 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) | 3147 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) |
3157 | seq_printf(m, | 3148 | seq_printf(m, |
@@ -3209,10 +3200,10 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3209 | ep_38C1600->termination_lvd, termstr, | 3200 | ep_38C1600->termination_lvd, termstr, |
3210 | ep_38C1600->bios_ctrl); | 3201 | ep_38C1600->bios_ctrl); |
3211 | 3202 | ||
3212 | seq_printf(m, " Target ID: "); | 3203 | seq_puts(m, " Target ID: "); |
3213 | for (i = 0; i <= ADV_MAX_TID; i++) | 3204 | for (i = 0; i <= ADV_MAX_TID; i++) |
3214 | seq_printf(m, " %X", i); | 3205 | seq_printf(m, " %X", i); |
3215 | seq_printf(m, "\n"); | 3206 | seq_putc(m, '\n'); |
3216 | 3207 | ||
3217 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { | 3208 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { |
3218 | word = ep_3550->disc_enable; | 3209 | word = ep_3550->disc_enable; |
@@ -3221,11 +3212,11 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3221 | } else { | 3212 | } else { |
3222 | word = ep_38C1600->disc_enable; | 3213 | word = ep_38C1600->disc_enable; |
3223 | } | 3214 | } |
3224 | seq_printf(m, " Disconnects: "); | 3215 | seq_puts(m, " Disconnects: "); |
3225 | for (i = 0; i <= ADV_MAX_TID; i++) | 3216 | for (i = 0; i <= ADV_MAX_TID; i++) |
3226 | seq_printf(m, " %c", | 3217 | seq_printf(m, " %c", |
3227 | (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3218 | (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3228 | seq_printf(m, "\n"); | 3219 | seq_putc(m, '\n'); |
3229 | 3220 | ||
3230 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { | 3221 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { |
3231 | word = ep_3550->tagqng_able; | 3222 | word = ep_3550->tagqng_able; |
@@ -3234,11 +3225,11 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3234 | } else { | 3225 | } else { |
3235 | word = ep_38C1600->tagqng_able; | 3226 | word = ep_38C1600->tagqng_able; |
3236 | } | 3227 | } |
3237 | seq_printf(m, " Command Queuing: "); | 3228 | seq_puts(m, " Command Queuing: "); |
3238 | for (i = 0; i <= ADV_MAX_TID; i++) | 3229 | for (i = 0; i <= ADV_MAX_TID; i++) |
3239 | seq_printf(m, " %c", | 3230 | seq_printf(m, " %c", |
3240 | (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3231 | (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3241 | seq_printf(m, "\n"); | 3232 | seq_putc(m, '\n'); |
3242 | 3233 | ||
3243 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { | 3234 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { |
3244 | word = ep_3550->start_motor; | 3235 | word = ep_3550->start_motor; |
@@ -3247,28 +3238,28 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3247 | } else { | 3238 | } else { |
3248 | word = ep_38C1600->start_motor; | 3239 | word = ep_38C1600->start_motor; |
3249 | } | 3240 | } |
3250 | seq_printf(m, " Start Motor: "); | 3241 | seq_puts(m, " Start Motor: "); |
3251 | for (i = 0; i <= ADV_MAX_TID; i++) | 3242 | for (i = 0; i <= ADV_MAX_TID; i++) |
3252 | seq_printf(m, " %c", | 3243 | seq_printf(m, " %c", |
3253 | (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3244 | (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3254 | seq_printf(m, "\n"); | 3245 | seq_putc(m, '\n'); |
3255 | 3246 | ||
3256 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { | 3247 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { |
3257 | seq_printf(m, " Synchronous Transfer:"); | 3248 | seq_puts(m, " Synchronous Transfer:"); |
3258 | for (i = 0; i <= ADV_MAX_TID; i++) | 3249 | for (i = 0; i <= ADV_MAX_TID; i++) |
3259 | seq_printf(m, " %c", | 3250 | seq_printf(m, " %c", |
3260 | (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ? | 3251 | (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ? |
3261 | 'Y' : 'N'); | 3252 | 'Y' : 'N'); |
3262 | seq_printf(m, "\n"); | 3253 | seq_putc(m, '\n'); |
3263 | } | 3254 | } |
3264 | 3255 | ||
3265 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { | 3256 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { |
3266 | seq_printf(m, " Ultra Transfer: "); | 3257 | seq_puts(m, " Ultra Transfer: "); |
3267 | for (i = 0; i <= ADV_MAX_TID; i++) | 3258 | for (i = 0; i <= ADV_MAX_TID; i++) |
3268 | seq_printf(m, " %c", | 3259 | seq_printf(m, " %c", |
3269 | (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i)) | 3260 | (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i)) |
3270 | ? 'Y' : 'N'); | 3261 | ? 'Y' : 'N'); |
3271 | seq_printf(m, "\n"); | 3262 | seq_putc(m, '\n'); |
3272 | } | 3263 | } |
3273 | 3264 | ||
3274 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { | 3265 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { |
@@ -3278,16 +3269,15 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3278 | } else { | 3269 | } else { |
3279 | word = ep_38C1600->wdtr_able; | 3270 | word = ep_38C1600->wdtr_able; |
3280 | } | 3271 | } |
3281 | seq_printf(m, " Wide Transfer: "); | 3272 | seq_puts(m, " Wide Transfer: "); |
3282 | for (i = 0; i <= ADV_MAX_TID; i++) | 3273 | for (i = 0; i <= ADV_MAX_TID; i++) |
3283 | seq_printf(m, " %c", | 3274 | seq_printf(m, " %c", |
3284 | (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3275 | (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3285 | seq_printf(m, "\n"); | 3276 | seq_putc(m, '\n'); |
3286 | 3277 | ||
3287 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 || | 3278 | if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 || |
3288 | adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) { | 3279 | adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) { |
3289 | seq_printf(m, | 3280 | seq_puts(m, " Synchronous Transfer Speed (Mhz):\n "); |
3290 | " Synchronous Transfer Speed (Mhz):\n "); | ||
3291 | for (i = 0; i <= ADV_MAX_TID; i++) { | 3281 | for (i = 0; i <= ADV_MAX_TID; i++) { |
3292 | char *speed_str; | 3282 | char *speed_str; |
3293 | 3283 | ||
@@ -3325,10 +3315,10 @@ static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost | |||
3325 | } | 3315 | } |
3326 | seq_printf(m, "%X:%s ", i, speed_str); | 3316 | seq_printf(m, "%X:%s ", i, speed_str); |
3327 | if (i == 7) | 3317 | if (i == 7) |
3328 | seq_printf(m, "\n "); | 3318 | seq_puts(m, "\n "); |
3329 | sdtr_speed >>= 4; | 3319 | sdtr_speed >>= 4; |
3330 | } | 3320 | } |
3331 | seq_printf(m, "\n"); | 3321 | seq_putc(m, '\n'); |
3332 | } | 3322 | } |
3333 | } | 3323 | } |
3334 | 3324 | ||
@@ -3403,7 +3393,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3403 | seq_printf(m, | 3393 | seq_printf(m, |
3404 | " Total Command Pending: %d\n", v->cur_total_qng); | 3394 | " Total Command Pending: %d\n", v->cur_total_qng); |
3405 | 3395 | ||
3406 | seq_printf(m, " Command Queuing:"); | 3396 | seq_puts(m, " Command Queuing:"); |
3407 | for (i = 0; i <= ASC_MAX_TID; i++) { | 3397 | for (i = 0; i <= ASC_MAX_TID; i++) { |
3408 | if ((chip_scsi_id == i) || | 3398 | if ((chip_scsi_id == i) || |
3409 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3399 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3413,10 +3403,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3413 | i, | 3403 | i, |
3414 | (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3404 | (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3415 | } | 3405 | } |
3416 | seq_printf(m, "\n"); | ||
3417 | 3406 | ||
3418 | /* Current number of commands waiting for a device. */ | 3407 | /* Current number of commands waiting for a device. */ |
3419 | seq_printf(m, " Command Queue Pending:"); | 3408 | seq_puts(m, "\n Command Queue Pending:"); |
3420 | for (i = 0; i <= ASC_MAX_TID; i++) { | 3409 | for (i = 0; i <= ASC_MAX_TID; i++) { |
3421 | if ((chip_scsi_id == i) || | 3410 | if ((chip_scsi_id == i) || |
3422 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3411 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3424,10 +3413,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3424 | } | 3413 | } |
3425 | seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]); | 3414 | seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]); |
3426 | } | 3415 | } |
3427 | seq_printf(m, "\n"); | ||
3428 | 3416 | ||
3429 | /* Current limit on number of commands that can be sent to a device. */ | 3417 | /* Current limit on number of commands that can be sent to a device. */ |
3430 | seq_printf(m, " Command Queue Limit:"); | 3418 | seq_puts(m, "\n Command Queue Limit:"); |
3431 | for (i = 0; i <= ASC_MAX_TID; i++) { | 3419 | for (i = 0; i <= ASC_MAX_TID; i++) { |
3432 | if ((chip_scsi_id == i) || | 3420 | if ((chip_scsi_id == i) || |
3433 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3421 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3435,10 +3423,9 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3435 | } | 3423 | } |
3436 | seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]); | 3424 | seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]); |
3437 | } | 3425 | } |
3438 | seq_printf(m, "\n"); | ||
3439 | 3426 | ||
3440 | /* Indicate whether the device has returned queue full status. */ | 3427 | /* Indicate whether the device has returned queue full status. */ |
3441 | seq_printf(m, " Command Queue Full:"); | 3428 | seq_puts(m, "\n Command Queue Full:"); |
3442 | for (i = 0; i <= ASC_MAX_TID; i++) { | 3429 | for (i = 0; i <= ASC_MAX_TID; i++) { |
3443 | if ((chip_scsi_id == i) || | 3430 | if ((chip_scsi_id == i) || |
3444 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3431 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3450,9 +3437,8 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3450 | else | 3437 | else |
3451 | seq_printf(m, " %X:N", i); | 3438 | seq_printf(m, " %X:N", i); |
3452 | } | 3439 | } |
3453 | seq_printf(m, "\n"); | ||
3454 | 3440 | ||
3455 | seq_printf(m, " Synchronous Transfer:"); | 3441 | seq_puts(m, "\n Synchronous Transfer:"); |
3456 | for (i = 0; i <= ASC_MAX_TID; i++) { | 3442 | for (i = 0; i <= ASC_MAX_TID; i++) { |
3457 | if ((chip_scsi_id == i) || | 3443 | if ((chip_scsi_id == i) || |
3458 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3444 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3462,7 +3448,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3462 | i, | 3448 | i, |
3463 | (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3449 | (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3464 | } | 3450 | } |
3465 | seq_printf(m, "\n"); | 3451 | seq_putc(m, '\n'); |
3466 | 3452 | ||
3467 | for (i = 0; i <= ASC_MAX_TID; i++) { | 3453 | for (i = 0; i <= ASC_MAX_TID; i++) { |
3468 | uchar syn_period_ix; | 3454 | uchar syn_period_ix; |
@@ -3476,7 +3462,7 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3476 | seq_printf(m, " %X:", i); | 3462 | seq_printf(m, " %X:", i); |
3477 | 3463 | ||
3478 | if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) { | 3464 | if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) { |
3479 | seq_printf(m, " Asynchronous"); | 3465 | seq_puts(m, " Asynchronous"); |
3480 | } else { | 3466 | } else { |
3481 | syn_period_ix = | 3467 | syn_period_ix = |
3482 | (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index - | 3468 | (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index - |
@@ -3494,16 +3480,15 @@ static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3494 | } | 3480 | } |
3495 | 3481 | ||
3496 | if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { | 3482 | if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { |
3497 | seq_printf(m, "*\n"); | 3483 | seq_puts(m, "*\n"); |
3498 | renegotiate = 1; | 3484 | renegotiate = 1; |
3499 | } else { | 3485 | } else { |
3500 | seq_printf(m, "\n"); | 3486 | seq_putc(m, '\n'); |
3501 | } | 3487 | } |
3502 | } | 3488 | } |
3503 | 3489 | ||
3504 | if (renegotiate) { | 3490 | if (renegotiate) { |
3505 | seq_printf(m, | 3491 | seq_puts(m, " * = Re-negotiation pending before next command.\n"); |
3506 | " * = Re-negotiation pending before next command.\n"); | ||
3507 | } | 3492 | } |
3508 | } | 3493 | } |
3509 | 3494 | ||
@@ -3548,7 +3533,7 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3548 | c->mcode_date, c->mcode_version); | 3533 | c->mcode_date, c->mcode_version); |
3549 | 3534 | ||
3550 | AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); | 3535 | AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); |
3551 | seq_printf(m, " Queuing Enabled:"); | 3536 | seq_puts(m, " Queuing Enabled:"); |
3552 | for (i = 0; i <= ADV_MAX_TID; i++) { | 3537 | for (i = 0; i <= ADV_MAX_TID; i++) { |
3553 | if ((chip_scsi_id == i) || | 3538 | if ((chip_scsi_id == i) || |
3554 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3539 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3559,9 +3544,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3559 | i, | 3544 | i, |
3560 | (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3545 | (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3561 | } | 3546 | } |
3562 | seq_printf(m, "\n"); | ||
3563 | 3547 | ||
3564 | seq_printf(m, " Queue Limit:"); | 3548 | seq_puts(m, "\n Queue Limit:"); |
3565 | for (i = 0; i <= ADV_MAX_TID; i++) { | 3549 | for (i = 0; i <= ADV_MAX_TID; i++) { |
3566 | if ((chip_scsi_id == i) || | 3550 | if ((chip_scsi_id == i) || |
3567 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3551 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3573,9 +3557,8 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3573 | 3557 | ||
3574 | seq_printf(m, " %X:%d", i, lrambyte); | 3558 | seq_printf(m, " %X:%d", i, lrambyte); |
3575 | } | 3559 | } |
3576 | seq_printf(m, "\n"); | ||
3577 | 3560 | ||
3578 | seq_printf(m, " Command Pending:"); | 3561 | seq_puts(m, "\n Command Pending:"); |
3579 | for (i = 0; i <= ADV_MAX_TID; i++) { | 3562 | for (i = 0; i <= ADV_MAX_TID; i++) { |
3580 | if ((chip_scsi_id == i) || | 3563 | if ((chip_scsi_id == i) || |
3581 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3564 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3587,10 +3570,10 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3587 | 3570 | ||
3588 | seq_printf(m, " %X:%d", i, lrambyte); | 3571 | seq_printf(m, " %X:%d", i, lrambyte); |
3589 | } | 3572 | } |
3590 | seq_printf(m, "\n"); | 3573 | seq_putc(m, '\n'); |
3591 | 3574 | ||
3592 | AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); | 3575 | AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); |
3593 | seq_printf(m, " Wide Enabled:"); | 3576 | seq_puts(m, " Wide Enabled:"); |
3594 | for (i = 0; i <= ADV_MAX_TID; i++) { | 3577 | for (i = 0; i <= ADV_MAX_TID; i++) { |
3595 | if ((chip_scsi_id == i) || | 3578 | if ((chip_scsi_id == i) || |
3596 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3579 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3601,10 +3584,10 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3601 | i, | 3584 | i, |
3602 | (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3585 | (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3603 | } | 3586 | } |
3604 | seq_printf(m, "\n"); | 3587 | seq_putc(m, '\n'); |
3605 | 3588 | ||
3606 | AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done); | 3589 | AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done); |
3607 | seq_printf(m, " Transfer Bit Width:"); | 3590 | seq_puts(m, " Transfer Bit Width:"); |
3608 | for (i = 0; i <= ADV_MAX_TID; i++) { | 3591 | for (i = 0; i <= ADV_MAX_TID; i++) { |
3609 | if ((chip_scsi_id == i) || | 3592 | if ((chip_scsi_id == i) || |
3610 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3593 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3620,14 +3603,14 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3620 | 3603 | ||
3621 | if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) && | 3604 | if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) && |
3622 | (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { | 3605 | (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { |
3623 | seq_printf(m, "*"); | 3606 | seq_putc(m, '*'); |
3624 | renegotiate = 1; | 3607 | renegotiate = 1; |
3625 | } | 3608 | } |
3626 | } | 3609 | } |
3627 | seq_printf(m, "\n"); | 3610 | seq_putc(m, '\n'); |
3628 | 3611 | ||
3629 | AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); | 3612 | AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); |
3630 | seq_printf(m, " Synchronous Enabled:"); | 3613 | seq_puts(m, " Synchronous Enabled:"); |
3631 | for (i = 0; i <= ADV_MAX_TID; i++) { | 3614 | for (i = 0; i <= ADV_MAX_TID; i++) { |
3632 | if ((chip_scsi_id == i) || | 3615 | if ((chip_scsi_id == i) || |
3633 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { | 3616 | ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { |
@@ -3638,7 +3621,7 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3638 | i, | 3621 | i, |
3639 | (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); | 3622 | (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); |
3640 | } | 3623 | } |
3641 | seq_printf(m, "\n"); | 3624 | seq_putc(m, '\n'); |
3642 | 3625 | ||
3643 | AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done); | 3626 | AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done); |
3644 | for (i = 0; i <= ADV_MAX_TID; i++) { | 3627 | for (i = 0; i <= ADV_MAX_TID; i++) { |
@@ -3657,14 +3640,14 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3657 | seq_printf(m, " %X:", i); | 3640 | seq_printf(m, " %X:", i); |
3658 | 3641 | ||
3659 | if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. */ | 3642 | if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. */ |
3660 | seq_printf(m, " Asynchronous"); | 3643 | seq_puts(m, " Asynchronous"); |
3661 | } else { | 3644 | } else { |
3662 | seq_printf(m, " Transfer Period Factor: "); | 3645 | seq_puts(m, " Transfer Period Factor: "); |
3663 | 3646 | ||
3664 | if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */ | 3647 | if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */ |
3665 | seq_printf(m, "9 (80.0 Mhz),"); | 3648 | seq_puts(m, "9 (80.0 Mhz),"); |
3666 | } else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */ | 3649 | } else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */ |
3667 | seq_printf(m, "10 (40.0 Mhz),"); | 3650 | seq_puts(m, "10 (40.0 Mhz),"); |
3668 | } else { /* 20 Mhz or below. */ | 3651 | } else { /* 20 Mhz or below. */ |
3669 | 3652 | ||
3670 | period = (((lramword >> 8) * 25) + 50) / 4; | 3653 | period = (((lramword >> 8) * 25) + 50) / 4; |
@@ -3684,16 +3667,15 @@ static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) | |||
3684 | } | 3667 | } |
3685 | 3668 | ||
3686 | if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { | 3669 | if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { |
3687 | seq_printf(m, "*\n"); | 3670 | seq_puts(m, "*\n"); |
3688 | renegotiate = 1; | 3671 | renegotiate = 1; |
3689 | } else { | 3672 | } else { |
3690 | seq_printf(m, "\n"); | 3673 | seq_putc(m, '\n'); |
3691 | } | 3674 | } |
3692 | } | 3675 | } |
3693 | 3676 | ||
3694 | if (renegotiate) { | 3677 | if (renegotiate) { |
3695 | seq_printf(m, | 3678 | seq_puts(m, " * = Re-negotiation pending before next command.\n"); |
3696 | " * = Re-negotiation pending before next command.\n"); | ||
3697 | } | 3679 | } |
3698 | } | 3680 | } |
3699 | 3681 | ||
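For reference, a minimal standalone sketch of the conversion pattern applied throughout the advansys hunks above: constant strings move from seq_printf() to seq_puts(), a lone "\n" becomes seq_putc(), and a trailing bare newline is folded into the next literal. The helper name, flag mask and labels below are hypothetical, not taken from the driver.

#include <linux/seq_file.h>

/* Hypothetical helper: print one Y/N flag per target ID. */
static void show_tid_flags(struct seq_file *m, const char *label,
			   unsigned short word, int max_tid)
{
	int i;

	/* Plain string, no conversions: seq_puts() instead of seq_printf(). */
	seq_puts(m, label);
	for (i = 0; i <= max_tid; i++)
		seq_printf(m, " %c", (word & (1 << i)) ? 'Y' : 'N');
	/* A bare seq_printf(m, "\n") becomes seq_putc(m, '\n'). */
	seq_putc(m, '\n');
}

/* Where the bare newline immediately preceded the next label, it is simply
 * folded into the following literal instead, as in the hunks above, e.g.
 *	seq_puts(m, "\n Command Queue Pending:");
 */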
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 2b960b326daf..e31c460a1335 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c | |||
@@ -2490,299 +2490,296 @@ static void show_queues(struct Scsi_Host *shpnt) | |||
2490 | disp_enintr(shpnt); | 2490 | disp_enintr(shpnt); |
2491 | } | 2491 | } |
2492 | 2492 | ||
2493 | #undef SPRINTF | ||
2494 | #define SPRINTF(args...) seq_printf(m, ##args) | ||
2495 | |||
2496 | static void get_command(struct seq_file *m, Scsi_Cmnd * ptr) | 2493 | static void get_command(struct seq_file *m, Scsi_Cmnd * ptr) |
2497 | { | 2494 | { |
2498 | int i; | 2495 | int i; |
2499 | 2496 | ||
2500 | SPRINTF("%p: target=%d; lun=%d; cmnd=( ", | 2497 | seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ", |
2501 | ptr, ptr->device->id, (u8)ptr->device->lun); | 2498 | ptr, ptr->device->id, (u8)ptr->device->lun); |
2502 | 2499 | ||
2503 | for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++) | 2500 | for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++) |
2504 | SPRINTF("0x%02x ", ptr->cmnd[i]); | 2501 | seq_printf(m, "0x%02x ", ptr->cmnd[i]); |
2505 | 2502 | ||
2506 | SPRINTF("); resid=%d; residual=%d; buffers=%d; phase |", | 2503 | seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |", |
2507 | scsi_get_resid(ptr), ptr->SCp.this_residual, | 2504 | scsi_get_resid(ptr), ptr->SCp.this_residual, |
2508 | ptr->SCp.buffers_residual); | 2505 | ptr->SCp.buffers_residual); |
2509 | 2506 | ||
2510 | if (ptr->SCp.phase & not_issued) | 2507 | if (ptr->SCp.phase & not_issued) |
2511 | SPRINTF("not issued|"); | 2508 | seq_puts(m, "not issued|"); |
2512 | if (ptr->SCp.phase & selecting) | 2509 | if (ptr->SCp.phase & selecting) |
2513 | SPRINTF("selecting|"); | 2510 | seq_puts(m, "selecting|"); |
2514 | if (ptr->SCp.phase & disconnected) | 2511 | if (ptr->SCp.phase & disconnected) |
2515 | SPRINTF("disconnected|"); | 2512 | seq_puts(m, "disconnected|"); |
2516 | if (ptr->SCp.phase & aborted) | 2513 | if (ptr->SCp.phase & aborted) |
2517 | SPRINTF("aborted|"); | 2514 | seq_puts(m, "aborted|"); |
2518 | if (ptr->SCp.phase & identified) | 2515 | if (ptr->SCp.phase & identified) |
2519 | SPRINTF("identified|"); | 2516 | seq_puts(m, "identified|"); |
2520 | if (ptr->SCp.phase & completed) | 2517 | if (ptr->SCp.phase & completed) |
2521 | SPRINTF("completed|"); | 2518 | seq_puts(m, "completed|"); |
2522 | if (ptr->SCp.phase & spiordy) | 2519 | if (ptr->SCp.phase & spiordy) |
2523 | SPRINTF("spiordy|"); | 2520 | seq_puts(m, "spiordy|"); |
2524 | if (ptr->SCp.phase & syncneg) | 2521 | if (ptr->SCp.phase & syncneg) |
2525 | SPRINTF("syncneg|"); | 2522 | seq_puts(m, "syncneg|"); |
2526 | SPRINTF("; next=0x%p\n", SCNEXT(ptr)); | 2523 | seq_printf(m, "; next=0x%p\n", SCNEXT(ptr)); |
2527 | } | 2524 | } |
2528 | 2525 | ||
2529 | static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt) | 2526 | static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt) |
2530 | { | 2527 | { |
2531 | int s; | 2528 | int s; |
2532 | 2529 | ||
2533 | SPRINTF("\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name); | 2530 | seq_printf(m, "\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name); |
2534 | 2531 | ||
2535 | s = GETPORT(SCSISEQ); | 2532 | s = GETPORT(SCSISEQ); |
2536 | SPRINTF("SCSISEQ( "); | 2533 | seq_puts(m, "SCSISEQ( "); |
2537 | if (s & TEMODEO) | 2534 | if (s & TEMODEO) |
2538 | SPRINTF("TARGET MODE "); | 2535 | seq_puts(m, "TARGET MODE "); |
2539 | if (s & ENSELO) | 2536 | if (s & ENSELO) |
2540 | SPRINTF("SELO "); | 2537 | seq_puts(m, "SELO "); |
2541 | if (s & ENSELI) | 2538 | if (s & ENSELI) |
2542 | SPRINTF("SELI "); | 2539 | seq_puts(m, "SELI "); |
2543 | if (s & ENRESELI) | 2540 | if (s & ENRESELI) |
2544 | SPRINTF("RESELI "); | 2541 | seq_puts(m, "RESELI "); |
2545 | if (s & ENAUTOATNO) | 2542 | if (s & ENAUTOATNO) |
2546 | SPRINTF("AUTOATNO "); | 2543 | seq_puts(m, "AUTOATNO "); |
2547 | if (s & ENAUTOATNI) | 2544 | if (s & ENAUTOATNI) |
2548 | SPRINTF("AUTOATNI "); | 2545 | seq_puts(m, "AUTOATNI "); |
2549 | if (s & ENAUTOATNP) | 2546 | if (s & ENAUTOATNP) |
2550 | SPRINTF("AUTOATNP "); | 2547 | seq_puts(m, "AUTOATNP "); |
2551 | if (s & SCSIRSTO) | 2548 | if (s & SCSIRSTO) |
2552 | SPRINTF("SCSIRSTO "); | 2549 | seq_puts(m, "SCSIRSTO "); |
2553 | SPRINTF(");"); | 2550 | seq_puts(m, ");"); |
2554 | 2551 | ||
2555 | SPRINTF(" SCSISIG("); | 2552 | seq_puts(m, " SCSISIG("); |
2556 | s = GETPORT(SCSISIG); | 2553 | s = GETPORT(SCSISIG); |
2557 | switch (s & P_MASK) { | 2554 | switch (s & P_MASK) { |
2558 | case P_DATAO: | 2555 | case P_DATAO: |
2559 | SPRINTF("DATA OUT"); | 2556 | seq_puts(m, "DATA OUT"); |
2560 | break; | 2557 | break; |
2561 | case P_DATAI: | 2558 | case P_DATAI: |
2562 | SPRINTF("DATA IN"); | 2559 | seq_puts(m, "DATA IN"); |
2563 | break; | 2560 | break; |
2564 | case P_CMD: | 2561 | case P_CMD: |
2565 | SPRINTF("COMMAND"); | 2562 | seq_puts(m, "COMMAND"); |
2566 | break; | 2563 | break; |
2567 | case P_STATUS: | 2564 | case P_STATUS: |
2568 | SPRINTF("STATUS"); | 2565 | seq_puts(m, "STATUS"); |
2569 | break; | 2566 | break; |
2570 | case P_MSGO: | 2567 | case P_MSGO: |
2571 | SPRINTF("MESSAGE OUT"); | 2568 | seq_puts(m, "MESSAGE OUT"); |
2572 | break; | 2569 | break; |
2573 | case P_MSGI: | 2570 | case P_MSGI: |
2574 | SPRINTF("MESSAGE IN"); | 2571 | seq_puts(m, "MESSAGE IN"); |
2575 | break; | 2572 | break; |
2576 | default: | 2573 | default: |
2577 | SPRINTF("*invalid*"); | 2574 | seq_puts(m, "*invalid*"); |
2578 | break; | 2575 | break; |
2579 | } | 2576 | } |
2580 | 2577 | ||
2581 | SPRINTF("); "); | 2578 | seq_puts(m, "); "); |
2582 | 2579 | ||
2583 | SPRINTF("INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo"); | 2580 | seq_printf(m, "INTSTAT (%s); ", TESTHI(DMASTAT, INTSTAT) ? "hi" : "lo"); |
2584 | 2581 | ||
2585 | SPRINTF("SSTAT( "); | 2582 | seq_puts(m, "SSTAT( "); |
2586 | s = GETPORT(SSTAT0); | 2583 | s = GETPORT(SSTAT0); |
2587 | if (s & TARGET) | 2584 | if (s & TARGET) |
2588 | SPRINTF("TARGET "); | 2585 | seq_puts(m, "TARGET "); |
2589 | if (s & SELDO) | 2586 | if (s & SELDO) |
2590 | SPRINTF("SELDO "); | 2587 | seq_puts(m, "SELDO "); |
2591 | if (s & SELDI) | 2588 | if (s & SELDI) |
2592 | SPRINTF("SELDI "); | 2589 | seq_puts(m, "SELDI "); |
2593 | if (s & SELINGO) | 2590 | if (s & SELINGO) |
2594 | SPRINTF("SELINGO "); | 2591 | seq_puts(m, "SELINGO "); |
2595 | if (s & SWRAP) | 2592 | if (s & SWRAP) |
2596 | SPRINTF("SWRAP "); | 2593 | seq_puts(m, "SWRAP "); |
2597 | if (s & SDONE) | 2594 | if (s & SDONE) |
2598 | SPRINTF("SDONE "); | 2595 | seq_puts(m, "SDONE "); |
2599 | if (s & SPIORDY) | 2596 | if (s & SPIORDY) |
2600 | SPRINTF("SPIORDY "); | 2597 | seq_puts(m, "SPIORDY "); |
2601 | if (s & DMADONE) | 2598 | if (s & DMADONE) |
2602 | SPRINTF("DMADONE "); | 2599 | seq_puts(m, "DMADONE "); |
2603 | 2600 | ||
2604 | s = GETPORT(SSTAT1); | 2601 | s = GETPORT(SSTAT1); |
2605 | if (s & SELTO) | 2602 | if (s & SELTO) |
2606 | SPRINTF("SELTO "); | 2603 | seq_puts(m, "SELTO "); |
2607 | if (s & ATNTARG) | 2604 | if (s & ATNTARG) |
2608 | SPRINTF("ATNTARG "); | 2605 | seq_puts(m, "ATNTARG "); |
2609 | if (s & SCSIRSTI) | 2606 | if (s & SCSIRSTI) |
2610 | SPRINTF("SCSIRSTI "); | 2607 | seq_puts(m, "SCSIRSTI "); |
2611 | if (s & PHASEMIS) | 2608 | if (s & PHASEMIS) |
2612 | SPRINTF("PHASEMIS "); | 2609 | seq_puts(m, "PHASEMIS "); |
2613 | if (s & BUSFREE) | 2610 | if (s & BUSFREE) |
2614 | SPRINTF("BUSFREE "); | 2611 | seq_puts(m, "BUSFREE "); |
2615 | if (s & SCSIPERR) | 2612 | if (s & SCSIPERR) |
2616 | SPRINTF("SCSIPERR "); | 2613 | seq_puts(m, "SCSIPERR "); |
2617 | if (s & PHASECHG) | 2614 | if (s & PHASECHG) |
2618 | SPRINTF("PHASECHG "); | 2615 | seq_puts(m, "PHASECHG "); |
2619 | if (s & REQINIT) | 2616 | if (s & REQINIT) |
2620 | SPRINTF("REQINIT "); | 2617 | seq_puts(m, "REQINIT "); |
2621 | SPRINTF("); "); | 2618 | seq_puts(m, "); "); |
2622 | 2619 | ||
2623 | 2620 | ||
2624 | SPRINTF("SSTAT( "); | 2621 | seq_puts(m, "SSTAT( "); |
2625 | 2622 | ||
2626 | s = GETPORT(SSTAT0) & GETPORT(SIMODE0); | 2623 | s = GETPORT(SSTAT0) & GETPORT(SIMODE0); |
2627 | 2624 | ||
2628 | if (s & TARGET) | 2625 | if (s & TARGET) |
2629 | SPRINTF("TARGET "); | 2626 | seq_puts(m, "TARGET "); |
2630 | if (s & SELDO) | 2627 | if (s & SELDO) |
2631 | SPRINTF("SELDO "); | 2628 | seq_puts(m, "SELDO "); |
2632 | if (s & SELDI) | 2629 | if (s & SELDI) |
2633 | SPRINTF("SELDI "); | 2630 | seq_puts(m, "SELDI "); |
2634 | if (s & SELINGO) | 2631 | if (s & SELINGO) |
2635 | SPRINTF("SELINGO "); | 2632 | seq_puts(m, "SELINGO "); |
2636 | if (s & SWRAP) | 2633 | if (s & SWRAP) |
2637 | SPRINTF("SWRAP "); | 2634 | seq_puts(m, "SWRAP "); |
2638 | if (s & SDONE) | 2635 | if (s & SDONE) |
2639 | SPRINTF("SDONE "); | 2636 | seq_puts(m, "SDONE "); |
2640 | if (s & SPIORDY) | 2637 | if (s & SPIORDY) |
2641 | SPRINTF("SPIORDY "); | 2638 | seq_puts(m, "SPIORDY "); |
2642 | if (s & DMADONE) | 2639 | if (s & DMADONE) |
2643 | SPRINTF("DMADONE "); | 2640 | seq_puts(m, "DMADONE "); |
2644 | 2641 | ||
2645 | s = GETPORT(SSTAT1) & GETPORT(SIMODE1); | 2642 | s = GETPORT(SSTAT1) & GETPORT(SIMODE1); |
2646 | 2643 | ||
2647 | if (s & SELTO) | 2644 | if (s & SELTO) |
2648 | SPRINTF("SELTO "); | 2645 | seq_puts(m, "SELTO "); |
2649 | if (s & ATNTARG) | 2646 | if (s & ATNTARG) |
2650 | SPRINTF("ATNTARG "); | 2647 | seq_puts(m, "ATNTARG "); |
2651 | if (s & SCSIRSTI) | 2648 | if (s & SCSIRSTI) |
2652 | SPRINTF("SCSIRSTI "); | 2649 | seq_puts(m, "SCSIRSTI "); |
2653 | if (s & PHASEMIS) | 2650 | if (s & PHASEMIS) |
2654 | SPRINTF("PHASEMIS "); | 2651 | seq_puts(m, "PHASEMIS "); |
2655 | if (s & BUSFREE) | 2652 | if (s & BUSFREE) |
2656 | SPRINTF("BUSFREE "); | 2653 | seq_puts(m, "BUSFREE "); |
2657 | if (s & SCSIPERR) | 2654 | if (s & SCSIPERR) |
2658 | SPRINTF("SCSIPERR "); | 2655 | seq_puts(m, "SCSIPERR "); |
2659 | if (s & PHASECHG) | 2656 | if (s & PHASECHG) |
2660 | SPRINTF("PHASECHG "); | 2657 | seq_puts(m, "PHASECHG "); |
2661 | if (s & REQINIT) | 2658 | if (s & REQINIT) |
2662 | SPRINTF("REQINIT "); | 2659 | seq_puts(m, "REQINIT "); |
2663 | SPRINTF("); "); | 2660 | seq_puts(m, "); "); |
2664 | 2661 | ||
2665 | SPRINTF("SXFRCTL0( "); | 2662 | seq_puts(m, "SXFRCTL0( "); |
2666 | 2663 | ||
2667 | s = GETPORT(SXFRCTL0); | 2664 | s = GETPORT(SXFRCTL0); |
2668 | if (s & SCSIEN) | 2665 | if (s & SCSIEN) |
2669 | SPRINTF("SCSIEN "); | 2666 | seq_puts(m, "SCSIEN "); |
2670 | if (s & DMAEN) | 2667 | if (s & DMAEN) |
2671 | SPRINTF("DMAEN "); | 2668 | seq_puts(m, "DMAEN "); |
2672 | if (s & CH1) | 2669 | if (s & CH1) |
2673 | SPRINTF("CH1 "); | 2670 | seq_puts(m, "CH1 "); |
2674 | if (s & CLRSTCNT) | 2671 | if (s & CLRSTCNT) |
2675 | SPRINTF("CLRSTCNT "); | 2672 | seq_puts(m, "CLRSTCNT "); |
2676 | if (s & SPIOEN) | 2673 | if (s & SPIOEN) |
2677 | SPRINTF("SPIOEN "); | 2674 | seq_puts(m, "SPIOEN "); |
2678 | if (s & CLRCH1) | 2675 | if (s & CLRCH1) |
2679 | SPRINTF("CLRCH1 "); | 2676 | seq_puts(m, "CLRCH1 "); |
2680 | SPRINTF("); "); | 2677 | seq_puts(m, "); "); |
2681 | 2678 | ||
2682 | SPRINTF("SIGNAL( "); | 2679 | seq_puts(m, "SIGNAL( "); |
2683 | 2680 | ||
2684 | s = GETPORT(SCSISIG); | 2681 | s = GETPORT(SCSISIG); |
2685 | if (s & SIG_ATNI) | 2682 | if (s & SIG_ATNI) |
2686 | SPRINTF("ATNI "); | 2683 | seq_puts(m, "ATNI "); |
2687 | if (s & SIG_SELI) | 2684 | if (s & SIG_SELI) |
2688 | SPRINTF("SELI "); | 2685 | seq_puts(m, "SELI "); |
2689 | if (s & SIG_BSYI) | 2686 | if (s & SIG_BSYI) |
2690 | SPRINTF("BSYI "); | 2687 | seq_puts(m, "BSYI "); |
2691 | if (s & SIG_REQI) | 2688 | if (s & SIG_REQI) |
2692 | SPRINTF("REQI "); | 2689 | seq_puts(m, "REQI "); |
2693 | if (s & SIG_ACKI) | 2690 | if (s & SIG_ACKI) |
2694 | SPRINTF("ACKI "); | 2691 | seq_puts(m, "ACKI "); |
2695 | SPRINTF("); "); | 2692 | seq_puts(m, "); "); |
2696 | 2693 | ||
2697 | SPRINTF("SELID(%02x), ", GETPORT(SELID)); | 2694 | seq_printf(m, "SELID(%02x), ", GETPORT(SELID)); |
2698 | 2695 | ||
2699 | SPRINTF("STCNT(%d), ", GETSTCNT()); | 2696 | seq_printf(m, "STCNT(%d), ", GETSTCNT()); |
2700 | 2697 | ||
2701 | SPRINTF("SSTAT2( "); | 2698 | seq_puts(m, "SSTAT2( "); |
2702 | 2699 | ||
2703 | s = GETPORT(SSTAT2); | 2700 | s = GETPORT(SSTAT2); |
2704 | if (s & SOFFSET) | 2701 | if (s & SOFFSET) |
2705 | SPRINTF("SOFFSET "); | 2702 | seq_puts(m, "SOFFSET "); |
2706 | if (s & SEMPTY) | 2703 | if (s & SEMPTY) |
2707 | SPRINTF("SEMPTY "); | 2704 | seq_puts(m, "SEMPTY "); |
2708 | if (s & SFULL) | 2705 | if (s & SFULL) |
2709 | SPRINTF("SFULL "); | 2706 | seq_puts(m, "SFULL "); |
2710 | SPRINTF("); SFCNT (%d); ", s & (SFULL | SFCNT)); | 2707 | seq_printf(m, "); SFCNT (%d); ", s & (SFULL | SFCNT)); |
2711 | 2708 | ||
2712 | s = GETPORT(SSTAT3); | 2709 | s = GETPORT(SSTAT3); |
2713 | SPRINTF("SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f); | 2710 | seq_printf(m, "SCSICNT (%d), OFFCNT(%d), ", (s & 0xf0) >> 4, s & 0x0f); |
2714 | 2711 | ||
2715 | SPRINTF("SSTAT4( "); | 2712 | seq_puts(m, "SSTAT4( "); |
2716 | s = GETPORT(SSTAT4); | 2713 | s = GETPORT(SSTAT4); |
2717 | if (s & SYNCERR) | 2714 | if (s & SYNCERR) |
2718 | SPRINTF("SYNCERR "); | 2715 | seq_puts(m, "SYNCERR "); |
2719 | if (s & FWERR) | 2716 | if (s & FWERR) |
2720 | SPRINTF("FWERR "); | 2717 | seq_puts(m, "FWERR "); |
2721 | if (s & FRERR) | 2718 | if (s & FRERR) |
2722 | SPRINTF("FRERR "); | 2719 | seq_puts(m, "FRERR "); |
2723 | SPRINTF("); "); | 2720 | seq_puts(m, "); "); |
2724 | 2721 | ||
2725 | SPRINTF("DMACNTRL0( "); | 2722 | seq_puts(m, "DMACNTRL0( "); |
2726 | s = GETPORT(DMACNTRL0); | 2723 | s = GETPORT(DMACNTRL0); |
2727 | SPRINTF("%s ", s & _8BIT ? "8BIT" : "16BIT"); | 2724 | seq_printf(m, "%s ", s & _8BIT ? "8BIT" : "16BIT"); |
2728 | SPRINTF("%s ", s & DMA ? "DMA" : "PIO"); | 2725 | seq_printf(m, "%s ", s & DMA ? "DMA" : "PIO"); |
2729 | SPRINTF("%s ", s & WRITE_READ ? "WRITE" : "READ"); | 2726 | seq_printf(m, "%s ", s & WRITE_READ ? "WRITE" : "READ"); |
2730 | if (s & ENDMA) | 2727 | if (s & ENDMA) |
2731 | SPRINTF("ENDMA "); | 2728 | seq_puts(m, "ENDMA "); |
2732 | if (s & INTEN) | 2729 | if (s & INTEN) |
2733 | SPRINTF("INTEN "); | 2730 | seq_puts(m, "INTEN "); |
2734 | if (s & RSTFIFO) | 2731 | if (s & RSTFIFO) |
2735 | SPRINTF("RSTFIFO "); | 2732 | seq_puts(m, "RSTFIFO "); |
2736 | if (s & SWINT) | 2733 | if (s & SWINT) |
2737 | SPRINTF("SWINT "); | 2734 | seq_puts(m, "SWINT "); |
2738 | SPRINTF("); "); | 2735 | seq_puts(m, "); "); |
2739 | 2736 | ||
2740 | SPRINTF("DMASTAT( "); | 2737 | seq_puts(m, "DMASTAT( "); |
2741 | s = GETPORT(DMASTAT); | 2738 | s = GETPORT(DMASTAT); |
2742 | if (s & ATDONE) | 2739 | if (s & ATDONE) |
2743 | SPRINTF("ATDONE "); | 2740 | seq_puts(m, "ATDONE "); |
2744 | if (s & WORDRDY) | 2741 | if (s & WORDRDY) |
2745 | SPRINTF("WORDRDY "); | 2742 | seq_puts(m, "WORDRDY "); |
2746 | if (s & DFIFOFULL) | 2743 | if (s & DFIFOFULL) |
2747 | SPRINTF("DFIFOFULL "); | 2744 | seq_puts(m, "DFIFOFULL "); |
2748 | if (s & DFIFOEMP) | 2745 | if (s & DFIFOEMP) |
2749 | SPRINTF("DFIFOEMP "); | 2746 | seq_puts(m, "DFIFOEMP "); |
2750 | SPRINTF(")\n"); | 2747 | seq_puts(m, ")\n"); |
2751 | 2748 | ||
2752 | SPRINTF("enabled interrupts( "); | 2749 | seq_puts(m, "enabled interrupts( "); |
2753 | 2750 | ||
2754 | s = GETPORT(SIMODE0); | 2751 | s = GETPORT(SIMODE0); |
2755 | if (s & ENSELDO) | 2752 | if (s & ENSELDO) |
2756 | SPRINTF("ENSELDO "); | 2753 | seq_puts(m, "ENSELDO "); |
2757 | if (s & ENSELDI) | 2754 | if (s & ENSELDI) |
2758 | SPRINTF("ENSELDI "); | 2755 | seq_puts(m, "ENSELDI "); |
2759 | if (s & ENSELINGO) | 2756 | if (s & ENSELINGO) |
2760 | SPRINTF("ENSELINGO "); | 2757 | seq_puts(m, "ENSELINGO "); |
2761 | if (s & ENSWRAP) | 2758 | if (s & ENSWRAP) |
2762 | SPRINTF("ENSWRAP "); | 2759 | seq_puts(m, "ENSWRAP "); |
2763 | if (s & ENSDONE) | 2760 | if (s & ENSDONE) |
2764 | SPRINTF("ENSDONE "); | 2761 | seq_puts(m, "ENSDONE "); |
2765 | if (s & ENSPIORDY) | 2762 | if (s & ENSPIORDY) |
2766 | SPRINTF("ENSPIORDY "); | 2763 | seq_puts(m, "ENSPIORDY "); |
2767 | if (s & ENDMADONE) | 2764 | if (s & ENDMADONE) |
2768 | SPRINTF("ENDMADONE "); | 2765 | seq_puts(m, "ENDMADONE "); |
2769 | 2766 | ||
2770 | s = GETPORT(SIMODE1); | 2767 | s = GETPORT(SIMODE1); |
2771 | if (s & ENSELTIMO) | 2768 | if (s & ENSELTIMO) |
2772 | SPRINTF("ENSELTIMO "); | 2769 | seq_puts(m, "ENSELTIMO "); |
2773 | if (s & ENATNTARG) | 2770 | if (s & ENATNTARG) |
2774 | SPRINTF("ENATNTARG "); | 2771 | seq_puts(m, "ENATNTARG "); |
2775 | if (s & ENPHASEMIS) | 2772 | if (s & ENPHASEMIS) |
2776 | SPRINTF("ENPHASEMIS "); | 2773 | seq_puts(m, "ENPHASEMIS "); |
2777 | if (s & ENBUSFREE) | 2774 | if (s & ENBUSFREE) |
2778 | SPRINTF("ENBUSFREE "); | 2775 | seq_puts(m, "ENBUSFREE "); |
2779 | if (s & ENSCSIPERR) | 2776 | if (s & ENSCSIPERR) |
2780 | SPRINTF("ENSCSIPERR "); | 2777 | seq_puts(m, "ENSCSIPERR "); |
2781 | if (s & ENPHASECHG) | 2778 | if (s & ENPHASECHG) |
2782 | SPRINTF("ENPHASECHG "); | 2779 | seq_puts(m, "ENPHASECHG "); |
2783 | if (s & ENREQINIT) | 2780 | if (s & ENREQINIT) |
2784 | SPRINTF("ENREQINIT "); | 2781 | seq_puts(m, "ENREQINIT "); |
2785 | SPRINTF(")\n"); | 2782 | seq_puts(m, ")\n"); |
2786 | } | 2783 | } |
2787 | 2784 | ||
2788 | static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length) | 2785 | static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length) |
@@ -2825,56 +2822,56 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) | |||
2825 | Scsi_Cmnd *ptr; | 2822 | Scsi_Cmnd *ptr; |
2826 | unsigned long flags; | 2823 | unsigned long flags; |
2827 | 2824 | ||
2828 | SPRINTF(AHA152X_REVID "\n"); | 2825 | seq_puts(m, AHA152X_REVID "\n"); |
2829 | 2826 | ||
2830 | SPRINTF("ioports 0x%04lx to 0x%04lx\n", | 2827 | seq_printf(m, "ioports 0x%04lx to 0x%04lx\n", |
2831 | shpnt->io_port, shpnt->io_port + shpnt->n_io_port - 1); | 2828 | shpnt->io_port, shpnt->io_port + shpnt->n_io_port - 1); |
2832 | SPRINTF("interrupt 0x%02x\n", shpnt->irq); | 2829 | seq_printf(m, "interrupt 0x%02x\n", shpnt->irq); |
2833 | SPRINTF("disconnection/reconnection %s\n", | 2830 | seq_printf(m, "disconnection/reconnection %s\n", |
2834 | RECONNECT ? "enabled" : "disabled"); | 2831 | RECONNECT ? "enabled" : "disabled"); |
2835 | SPRINTF("parity checking %s\n", | 2832 | seq_printf(m, "parity checking %s\n", |
2836 | PARITY ? "enabled" : "disabled"); | 2833 | PARITY ? "enabled" : "disabled"); |
2837 | SPRINTF("synchronous transfers %s\n", | 2834 | seq_printf(m, "synchronous transfers %s\n", |
2838 | SYNCHRONOUS ? "enabled" : "disabled"); | 2835 | SYNCHRONOUS ? "enabled" : "disabled"); |
2839 | SPRINTF("%d commands currently queued\n", HOSTDATA(shpnt)->commands); | 2836 | seq_printf(m, "%d commands currently queued\n", HOSTDATA(shpnt)->commands); |
2840 | 2837 | ||
2841 | if(SYNCHRONOUS) { | 2838 | if(SYNCHRONOUS) { |
2842 | SPRINTF("synchronously operating targets (tick=50 ns):\n"); | 2839 | seq_puts(m, "synchronously operating targets (tick=50 ns):\n"); |
2843 | for (i = 0; i < 8; i++) | 2840 | for (i = 0; i < 8; i++) |
2844 | if (HOSTDATA(shpnt)->syncrate[i] & 0x7f) | 2841 | if (HOSTDATA(shpnt)->syncrate[i] & 0x7f) |
2845 | SPRINTF("target %d: period %dT/%dns; req/ack offset %d\n", | 2842 | seq_printf(m, "target %d: period %dT/%dns; req/ack offset %d\n", |
2846 | i, | 2843 | i, |
2847 | (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2), | 2844 | (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2), |
2848 | (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50, | 2845 | (((HOSTDATA(shpnt)->syncrate[i] & 0x70) >> 4) + 2) * 50, |
2849 | HOSTDATA(shpnt)->syncrate[i] & 0x0f); | 2846 | HOSTDATA(shpnt)->syncrate[i] & 0x0f); |
2850 | } | 2847 | } |
2851 | SPRINTF("\nqueue status:\n"); | 2848 | seq_puts(m, "\nqueue status:\n"); |
2852 | DO_LOCK(flags); | 2849 | DO_LOCK(flags); |
2853 | if (ISSUE_SC) { | 2850 | if (ISSUE_SC) { |
2854 | SPRINTF("not yet issued commands:\n"); | 2851 | seq_puts(m, "not yet issued commands:\n"); |
2855 | for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr)) | 2852 | for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr)) |
2856 | get_command(m, ptr); | 2853 | get_command(m, ptr); |
2857 | } else | 2854 | } else |
2858 | SPRINTF("no not yet issued commands\n"); | 2855 | seq_puts(m, "no not yet issued commands\n"); |
2859 | DO_UNLOCK(flags); | 2856 | DO_UNLOCK(flags); |
2860 | 2857 | ||
2861 | if (CURRENT_SC) { | 2858 | if (CURRENT_SC) { |
2862 | SPRINTF("current command:\n"); | 2859 | seq_puts(m, "current command:\n"); |
2863 | get_command(m, CURRENT_SC); | 2860 | get_command(m, CURRENT_SC); |
2864 | } else | 2861 | } else |
2865 | SPRINTF("no current command\n"); | 2862 | seq_puts(m, "no current command\n"); |
2866 | 2863 | ||
2867 | if (DISCONNECTED_SC) { | 2864 | if (DISCONNECTED_SC) { |
2868 | SPRINTF("disconnected commands:\n"); | 2865 | seq_puts(m, "disconnected commands:\n"); |
2869 | for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr)) | 2866 | for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr)) |
2870 | get_command(m, ptr); | 2867 | get_command(m, ptr); |
2871 | } else | 2868 | } else |
2872 | SPRINTF("no disconnected commands\n"); | 2869 | seq_puts(m, "no disconnected commands\n"); |
2873 | 2870 | ||
2874 | get_ports(m, shpnt); | 2871 | get_ports(m, shpnt); |
2875 | 2872 | ||
2876 | #if defined(AHA152X_STAT) | 2873 | #if defined(AHA152X_STAT) |
2877 | SPRINTF("statistics:\n" | 2874 | seq_printf(m, "statistics:\n" |
2878 | "total commands: %d\n" | 2875 | "total commands: %d\n" |
2879 | "disconnections: %d\n" | 2876 | "disconnections: %d\n" |
2880 | "busfree with check condition: %d\n" | 2877 | "busfree with check condition: %d\n" |
@@ -2894,7 +2891,7 @@ static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) | |||
2894 | HOSTDATA(shpnt)->busfree_without_done_command, | 2891 | HOSTDATA(shpnt)->busfree_without_done_command, |
2895 | HOSTDATA(shpnt)->busfree_without_any_action); | 2892 | HOSTDATA(shpnt)->busfree_without_any_action); |
2896 | for(i=0; i<maxstate; i++) { | 2893 | for(i=0; i<maxstate; i++) { |
2897 | SPRINTF("%-10s %-12d %-12d %-12ld\n", | 2894 | seq_printf(m, "%-10s %-12d %-12d %-12ld\n", |
2898 | states[i].name, | 2895 | states[i].name, |
2899 | HOSTDATA(shpnt)->count_trans[i], | 2896 | HOSTDATA(shpnt)->count_trans[i], |
2900 | HOSTDATA(shpnt)->count[i], | 2897 | HOSTDATA(shpnt)->count[i], |
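The aha152x hunk above also drops the file-local SPRINTF() wrapper in favour of calling the seq_file helpers directly. A hedged before/after sketch of that idiom; the macro body mirrors the one removed at the top of the hunk, while the surrounding functions and their arguments are illustrative only.

#include <linux/seq_file.h>

/* Old idiom removed above: a variadic macro that hid the seq_file argument. */
#define SPRINTF(args...)	seq_printf(m, ##args)

/* 'm' had to be in scope by name for the macro to compile. */
static void old_style(struct seq_file *m, int irq)
{
	SPRINTF("interrupt 0x%02x\n", irq);
	SPRINTF("queue status:\n");	/* constant string still paid for a format parse */
}

/* New idiom: call the helpers directly and pick the cheapest one. */
static void new_style(struct seq_file *m, int irq)
{
	seq_printf(m, "interrupt 0x%02x\n", irq);
	seq_puts(m, "queue status:\n");
}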
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c index 27dbfccea774..add2da581d66 100644 --- a/drivers/scsi/aic7xxx/aic79xx_proc.c +++ b/drivers/scsi/aic7xxx/aic79xx_proc.c | |||
@@ -97,7 +97,7 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo) | |||
97 | u_int mb; | 97 | u_int mb; |
98 | 98 | ||
99 | if (tinfo->period == AHD_PERIOD_UNKNOWN) { | 99 | if (tinfo->period == AHD_PERIOD_UNKNOWN) { |
100 | seq_printf(m, "Renegotiation Pending\n"); | 100 | seq_puts(m, "Renegotiation Pending\n"); |
101 | return; | 101 | return; |
102 | } | 102 | } |
103 | speed = 3300; | 103 | speed = 3300; |
@@ -119,40 +119,38 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo) | |||
119 | printed_options = 0; | 119 | printed_options = 0; |
120 | seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000); | 120 | seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000); |
121 | if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { | 121 | if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { |
122 | seq_printf(m, " RDSTRM"); | 122 | seq_puts(m, " RDSTRM"); |
123 | printed_options++; | 123 | printed_options++; |
124 | } | 124 | } |
125 | if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { | 125 | if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { |
126 | seq_printf(m, "%s", printed_options ? "|DT" : " DT"); | 126 | seq_puts(m, printed_options ? "|DT" : " DT"); |
127 | printed_options++; | 127 | printed_options++; |
128 | } | 128 | } |
129 | if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { | 129 | if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { |
130 | seq_printf(m, "%s", printed_options ? "|IU" : " IU"); | 130 | seq_puts(m, printed_options ? "|IU" : " IU"); |
131 | printed_options++; | 131 | printed_options++; |
132 | } | 132 | } |
133 | if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) { | 133 | if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) { |
134 | seq_printf(m, "%s", | 134 | seq_puts(m, printed_options ? "|RTI" : " RTI"); |
135 | printed_options ? "|RTI" : " RTI"); | ||
136 | printed_options++; | 135 | printed_options++; |
137 | } | 136 | } |
138 | if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { | 137 | if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { |
139 | seq_printf(m, "%s", | 138 | seq_puts(m, printed_options ? "|QAS" : " QAS"); |
140 | printed_options ? "|QAS" : " QAS"); | ||
141 | printed_options++; | 139 | printed_options++; |
142 | } | 140 | } |
143 | } | 141 | } |
144 | 142 | ||
145 | if (tinfo->width > 0) { | 143 | if (tinfo->width > 0) { |
146 | if (freq != 0) { | 144 | if (freq != 0) { |
147 | seq_printf(m, ", "); | 145 | seq_puts(m, ", "); |
148 | } else { | 146 | } else { |
149 | seq_printf(m, " ("); | 147 | seq_puts(m, " ("); |
150 | } | 148 | } |
151 | seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); | 149 | seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); |
152 | } else if (freq != 0) { | 150 | } else if (freq != 0) { |
153 | seq_printf(m, ")"); | 151 | seq_putc(m, ')'); |
154 | } | 152 | } |
155 | seq_printf(m, "\n"); | 153 | seq_putc(m, '\n'); |
156 | } | 154 | } |
157 | 155 | ||
158 | static void | 156 | static void |
@@ -167,15 +165,15 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m, | |||
167 | tinfo = ahd_fetch_transinfo(ahd, channel, our_id, | 165 | tinfo = ahd_fetch_transinfo(ahd, channel, our_id, |
168 | target_id, &tstate); | 166 | target_id, &tstate); |
169 | seq_printf(m, "Target %d Negotiation Settings\n", target_id); | 167 | seq_printf(m, "Target %d Negotiation Settings\n", target_id); |
170 | seq_printf(m, "\tUser: "); | 168 | seq_puts(m, "\tUser: "); |
171 | ahd_format_transinfo(m, &tinfo->user); | 169 | ahd_format_transinfo(m, &tinfo->user); |
172 | starget = ahd->platform_data->starget[target_id]; | 170 | starget = ahd->platform_data->starget[target_id]; |
173 | if (starget == NULL) | 171 | if (starget == NULL) |
174 | return; | 172 | return; |
175 | 173 | ||
176 | seq_printf(m, "\tGoal: "); | 174 | seq_puts(m, "\tGoal: "); |
177 | ahd_format_transinfo(m, &tinfo->goal); | 175 | ahd_format_transinfo(m, &tinfo->goal); |
178 | seq_printf(m, "\tCurr: "); | 176 | seq_puts(m, "\tCurr: "); |
179 | ahd_format_transinfo(m, &tinfo->curr); | 177 | ahd_format_transinfo(m, &tinfo->curr); |
180 | 178 | ||
181 | for (lun = 0; lun < AHD_NUM_LUNS; lun++) { | 179 | for (lun = 0; lun < AHD_NUM_LUNS; lun++) { |
@@ -291,19 +289,19 @@ ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) | |||
291 | max_targ = 16; | 289 | max_targ = 16; |
292 | 290 | ||
293 | if (ahd->seep_config == NULL) | 291 | if (ahd->seep_config == NULL) |
294 | seq_printf(m, "No Serial EEPROM\n"); | 292 | seq_puts(m, "No Serial EEPROM\n"); |
295 | else { | 293 | else { |
296 | seq_printf(m, "Serial EEPROM:\n"); | 294 | seq_puts(m, "Serial EEPROM:\n"); |
297 | for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) { | 295 | for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) { |
298 | if (((i % 8) == 0) && (i != 0)) { | 296 | if (((i % 8) == 0) && (i != 0)) { |
299 | seq_printf(m, "\n"); | 297 | seq_putc(m, '\n'); |
300 | } | 298 | } |
301 | seq_printf(m, "0x%.4x ", | 299 | seq_printf(m, "0x%.4x ", |
302 | ((uint16_t*)ahd->seep_config)[i]); | 300 | ((uint16_t*)ahd->seep_config)[i]); |
303 | } | 301 | } |
304 | seq_printf(m, "\n"); | 302 | seq_putc(m, '\n'); |
305 | } | 303 | } |
306 | seq_printf(m, "\n"); | 304 | seq_putc(m, '\n'); |
307 | 305 | ||
308 | if ((ahd->features & AHD_WIDE) == 0) | 306 | if ((ahd->features & AHD_WIDE) == 0) |
309 | max_targ = 8; | 307 | max_targ = 8; |
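One further variant visible in the aic79xx hunk above: when the string is chosen at run time, seq_printf(m, "%s", ...) drops to a plain seq_puts(), which accepts any NUL-terminated string, not only literals. A tiny hypothetical sketch (function and parameter names are assumptions):

#include <linux/seq_file.h>
#include <linux/types.h>

/* Hypothetical: print a separator-prefixed PPR option name. */
static void show_option(struct seq_file *m, bool printed_options, const char *name)
{
	/* Before: seq_printf(m, "%s", printed_options ? "|DT" : " DT");
	 * After:  the ternary result goes straight to seq_puts(). */
	seq_puts(m, printed_options ? "|" : " ");
	seq_puts(m, name);
}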
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c index 64eec6c07a83..18459605d991 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_proc.c +++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c | |||
@@ -119,15 +119,15 @@ ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo) | |||
119 | 119 | ||
120 | if (tinfo->width > 0) { | 120 | if (tinfo->width > 0) { |
121 | if (freq != 0) { | 121 | if (freq != 0) { |
122 | seq_printf(m, ", "); | 122 | seq_puts(m, ", "); |
123 | } else { | 123 | } else { |
124 | seq_printf(m, " ("); | 124 | seq_puts(m, " ("); |
125 | } | 125 | } |
126 | seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); | 126 | seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); |
127 | } else if (freq != 0) { | 127 | } else if (freq != 0) { |
128 | seq_printf(m, ")"); | 128 | seq_putc(m, ')'); |
129 | } | 129 | } |
130 | seq_printf(m, "\n"); | 130 | seq_putc(m, '\n'); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void | 133 | static void |
@@ -145,15 +145,15 @@ ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m, | |||
145 | if ((ahc->features & AHC_TWIN) != 0) | 145 | if ((ahc->features & AHC_TWIN) != 0) |
146 | seq_printf(m, "Channel %c ", channel); | 146 | seq_printf(m, "Channel %c ", channel); |
147 | seq_printf(m, "Target %d Negotiation Settings\n", target_id); | 147 | seq_printf(m, "Target %d Negotiation Settings\n", target_id); |
148 | seq_printf(m, "\tUser: "); | 148 | seq_puts(m, "\tUser: "); |
149 | ahc_format_transinfo(m, &tinfo->user); | 149 | ahc_format_transinfo(m, &tinfo->user); |
150 | starget = ahc->platform_data->starget[target_offset]; | 150 | starget = ahc->platform_data->starget[target_offset]; |
151 | if (!starget) | 151 | if (!starget) |
152 | return; | 152 | return; |
153 | 153 | ||
154 | seq_printf(m, "\tGoal: "); | 154 | seq_puts(m, "\tGoal: "); |
155 | ahc_format_transinfo(m, &tinfo->goal); | 155 | ahc_format_transinfo(m, &tinfo->goal); |
156 | seq_printf(m, "\tCurr: "); | 156 | seq_puts(m, "\tCurr: "); |
157 | ahc_format_transinfo(m, &tinfo->curr); | 157 | ahc_format_transinfo(m, &tinfo->curr); |
158 | 158 | ||
159 | for (lun = 0; lun < AHC_NUM_LUNS; lun++) { | 159 | for (lun = 0; lun < AHC_NUM_LUNS; lun++) { |
@@ -303,19 +303,19 @@ ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) | |||
303 | 303 | ||
304 | 304 | ||
305 | if (ahc->seep_config == NULL) | 305 | if (ahc->seep_config == NULL) |
306 | seq_printf(m, "No Serial EEPROM\n"); | 306 | seq_puts(m, "No Serial EEPROM\n"); |
307 | else { | 307 | else { |
308 | seq_printf(m, "Serial EEPROM:\n"); | 308 | seq_puts(m, "Serial EEPROM:\n"); |
309 | for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) { | 309 | for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) { |
310 | if (((i % 8) == 0) && (i != 0)) { | 310 | if (((i % 8) == 0) && (i != 0)) { |
311 | seq_printf(m, "\n"); | 311 | seq_putc(m, '\n'); |
312 | } | 312 | } |
313 | seq_printf(m, "0x%.4x ", | 313 | seq_printf(m, "0x%.4x ", |
314 | ((uint16_t*)ahc->seep_config)[i]); | 314 | ((uint16_t*)ahc->seep_config)[i]); |
315 | } | 315 | } |
316 | seq_printf(m, "\n"); | 316 | seq_putc(m, '\n'); |
317 | } | 317 | } |
318 | seq_printf(m, "\n"); | 318 | seq_putc(m, '\n'); |
319 | 319 | ||
320 | max_targ = 16; | 320 | max_targ = 16; |
321 | if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) | 321 | if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) |
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index e64c3af7c1a0..decdc71b6b86 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c | |||
@@ -2990,7 +2990,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m) | |||
2990 | struct fas216_device *dev; | 2990 | struct fas216_device *dev; |
2991 | struct scsi_device *scd; | 2991 | struct scsi_device *scd; |
2992 | 2992 | ||
2993 | seq_printf(m, "Device/Lun TaggedQ Parity Sync\n"); | 2993 | seq_puts(m, "Device/Lun TaggedQ Parity Sync\n"); |
2994 | 2994 | ||
2995 | shost_for_each_device(scd, info->host) { | 2995 | shost_for_each_device(scd, info->host) { |
2996 | dev = &info->device[scd->id]; | 2996 | dev = &info->device[scd->id]; |
@@ -3000,7 +3000,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m) | |||
3000 | scd->simple_tags ? "en" : "dis", | 3000 | scd->simple_tags ? "en" : "dis", |
3001 | scd->current_tag); | 3001 | scd->current_tag); |
3002 | else | 3002 | else |
3003 | seq_printf(m, "unsupported "); | 3003 | seq_puts(m, "unsupported "); |
3004 | 3004 | ||
3005 | seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis"); | 3005 | seq_printf(m, "%3sabled ", dev->parity_enabled ? "en" : "dis"); |
3006 | 3006 | ||
@@ -3008,7 +3008,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m) | |||
3008 | seq_printf(m, "offset %d, %d ns\n", | 3008 | seq_printf(m, "offset %d, %d ns\n", |
3009 | dev->sof, dev->period * 4); | 3009 | dev->sof, dev->period * 4); |
3010 | else | 3010 | else |
3011 | seq_printf(m, "async\n"); | 3011 | seq_puts(m, "async\n"); |
3012 | } | 3012 | } |
3013 | } | 3013 | } |
3014 | 3014 | ||
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 6daed6b386d4..a70255413e7f 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c | |||
@@ -711,12 +711,12 @@ static void show_Scsi_Cmnd(struct scsi_cmnd *cmd, struct seq_file *m) | |||
711 | unsigned char *command; | 711 | unsigned char *command; |
712 | seq_printf(m, "scsi%d: destination target %d, lun %llu\n", | 712 | seq_printf(m, "scsi%d: destination target %d, lun %llu\n", |
713 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 713 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
714 | seq_printf(m, " command = "); | 714 | seq_puts(m, " command = "); |
715 | command = cmd->cmnd; | 715 | command = cmd->cmnd; |
716 | seq_printf(m, "%2d (0x%02x)", command[0], command[0]); | 716 | seq_printf(m, "%2d (0x%02x)", command[0], command[0]); |
717 | for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) | 717 | for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) |
718 | seq_printf(m, " %02x", command[i]); | 718 | seq_printf(m, " %02x", command[i]); |
719 | seq_printf(m, "\n"); | 719 | seq_putc(m, '\n'); |
720 | } | 720 | } |
721 | 721 | ||
722 | static int __maybe_unused NCR5380_show_info(struct seq_file *m, | 722 | static int __maybe_unused NCR5380_show_info(struct seq_file *m, |
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index a795d81ef875..0836433e3a2d 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c | |||
@@ -3101,9 +3101,8 @@ static const char *atp870u_info(struct Scsi_Host *notused) | |||
3101 | 3101 | ||
3102 | static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr) | 3102 | static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr) |
3103 | { | 3103 | { |
3104 | seq_printf(m, "ACARD AEC-671X Driver Version: 2.6+ac\n"); | 3104 | seq_puts(m, "ACARD AEC-671X Driver Version: 2.6+ac\n\n" |
3105 | seq_printf(m, "\n"); | 3105 | "Adapter Configuration:\n"); |
3106 | seq_printf(m, "Adapter Configuration:\n"); | ||
3107 | seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port); | 3106 | seq_printf(m, " Base IO: %#.4lx\n", HBAptr->io_port); |
3108 | seq_printf(m, " IRQ: %d\n", HBAptr->irq); | 3107 | seq_printf(m, " IRQ: %d\n", HBAptr->irq); |
3109 | return 0; | 3108 | return 0; |
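The atp870u hunk above shows the "merge consecutive calls" variant: back-to-back constant prints collapse into a single seq_puts() using C string-literal concatenation, so only one call is made at run time. A small hypothetical show_info body, not from the driver:

#include <linux/seq_file.h>

static int example_show_info(struct seq_file *m, unsigned long io_port, int irq)
{
	/* Three adjacent constant prints become one concatenated literal. */
	seq_puts(m, "Example Driver Version: 1.0\n\n"
		    "Adapter Configuration:\n");
	seq_printf(m, "               Base IO: %#.4lx\n", io_port);
	seq_printf(m, "                   IRQ: %d\n", irq);
	return 0;
}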
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index 6bac8a746ee2..0045742fab7d 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c | |||
@@ -194,16 +194,10 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len, | |||
194 | 194 | ||
195 | retry: | 195 | retry: |
196 | errno = 0; | 196 | errno = 0; |
197 | if (debug) { | ||
198 | DPRINTK("command: "); | ||
199 | __scsi_print_command(cmd, cmd_len); | ||
200 | } | ||
201 | |||
202 | result = scsi_execute_req(ch->device, cmd, direction, buffer, | 197 | result = scsi_execute_req(ch->device, cmd, direction, buffer, |
203 | buflength, &sshdr, timeout * HZ, | 198 | buflength, &sshdr, timeout * HZ, |
204 | MAX_RETRIES, NULL); | 199 | MAX_RETRIES, NULL); |
205 | 200 | ||
206 | DPRINTK("result: 0x%x\n",result); | ||
207 | if (driver_byte(result) & DRIVER_SENSE) { | 201 | if (driver_byte(result) & DRIVER_SENSE) { |
208 | if (debug) | 202 | if (debug) |
209 | scsi_print_sense_hdr(ch->device, ch->name, &sshdr); | 203 | scsi_print_sense_hdr(ch->device, ch->name, &sshdr); |
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index e2068a2621c4..fa09d4be2b53 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c | |||
@@ -18,14 +18,10 @@ | |||
18 | #include <scsi/scsi_eh.h> | 18 | #include <scsi/scsi_eh.h> |
19 | #include <scsi/scsi_dbg.h> | 19 | #include <scsi/scsi_dbg.h> |
20 | 20 | ||
21 | |||
22 | |||
23 | /* Commands with service actions that change the command name */ | 21 | /* Commands with service actions that change the command name */ |
24 | #define THIRD_PARTY_COPY_OUT 0x83 | 22 | #define THIRD_PARTY_COPY_OUT 0x83 |
25 | #define THIRD_PARTY_COPY_IN 0x84 | 23 | #define THIRD_PARTY_COPY_IN 0x84 |
26 | 24 | ||
27 | #define VENDOR_SPECIFIC_CDB 0xc0 | ||
28 | |||
29 | struct sa_name_list { | 25 | struct sa_name_list { |
30 | int opcode; | 26 | int opcode; |
31 | const struct value_name_pair *arr; | 27 | const struct value_name_pair *arr; |
@@ -37,7 +33,6 @@ struct value_name_pair { | |||
37 | const char * name; | 33 | const char * name; |
38 | }; | 34 | }; |
39 | 35 | ||
40 | #ifdef CONFIG_SCSI_CONSTANTS | ||
41 | static const char * cdb_byte0_names[] = { | 36 | static const char * cdb_byte0_names[] = { |
42 | /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", | 37 | /* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense", |
43 | /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, | 38 | /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, |
@@ -261,28 +256,8 @@ static struct sa_name_list sa_names_arr[] = { | |||
261 | {0, NULL, 0}, | 256 | {0, NULL, 0}, |
262 | }; | 257 | }; |
263 | 258 | ||
264 | #else /* ifndef CONFIG_SCSI_CONSTANTS */ | 259 | bool scsi_opcode_sa_name(int opcode, int service_action, |
265 | static const char *cdb_byte0_names[0]; | 260 | const char **cdb_name, const char **sa_name) |
266 | |||
267 | static struct sa_name_list sa_names_arr[] = { | ||
268 | {VARIABLE_LENGTH_CMD, NULL, 0}, | ||
269 | {MAINTENANCE_IN, NULL, 0}, | ||
270 | {MAINTENANCE_OUT, NULL, 0}, | ||
271 | {PERSISTENT_RESERVE_IN, NULL, 0}, | ||
272 | {PERSISTENT_RESERVE_OUT, NULL, 0}, | ||
273 | {SERVICE_ACTION_IN_12, NULL, 0}, | ||
274 | {SERVICE_ACTION_OUT_12, NULL, 0}, | ||
275 | {SERVICE_ACTION_BIDIRECTIONAL, NULL, 0}, | ||
276 | {SERVICE_ACTION_IN_16, NULL, 0}, | ||
277 | {SERVICE_ACTION_OUT_16, NULL, 0}, | ||
278 | {THIRD_PARTY_COPY_IN, NULL, 0}, | ||
279 | {THIRD_PARTY_COPY_OUT, NULL, 0}, | ||
280 | {0, NULL, 0}, | ||
281 | }; | ||
282 | #endif /* CONFIG_SCSI_CONSTANTS */ | ||
283 | |||
284 | static bool scsi_opcode_sa_name(int opcode, int service_action, | ||
285 | const char **cdb_name, const char **sa_name) | ||
286 | { | 261 | { |
287 | struct sa_name_list *sa_name_ptr; | 262 | struct sa_name_list *sa_name_ptr; |
288 | const struct value_name_pair *arr = NULL; | 263 | const struct value_name_pair *arr = NULL; |
@@ -315,76 +290,6 @@ static bool scsi_opcode_sa_name(int opcode, int service_action, | |||
315 | return true; | 290 | return true; |
316 | } | 291 | } |
317 | 292 | ||
318 | static void print_opcode_name(const unsigned char *cdbp, size_t cdb_len) | ||
319 | { | ||
320 | int sa, cdb0; | ||
321 | const char *cdb_name = NULL, *sa_name = NULL; | ||
322 | |||
323 | cdb0 = cdbp[0]; | ||
324 | if (cdb0 == VARIABLE_LENGTH_CMD) { | ||
325 | if (cdb_len < 10) { | ||
326 | printk("short variable length command, len=%zu", | ||
327 | cdb_len); | ||
328 | return; | ||
329 | } | ||
330 | sa = (cdbp[8] << 8) + cdbp[9]; | ||
331 | } else | ||
332 | sa = cdbp[1] & 0x1f; | ||
333 | |||
334 | if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) { | ||
335 | if (cdb_name) | ||
336 | printk("%s", cdb_name); | ||
337 | else if (cdb0 >= VENDOR_SPECIFIC_CDB) | ||
338 | printk("cdb[0]=0x%x (vendor)", cdb0); | ||
339 | else if (cdb0 >= 0x60 && cdb0 < 0x7e) | ||
340 | printk("cdb[0]=0x%x (reserved)", cdb0); | ||
341 | else | ||
342 | printk("cdb[0]=0x%x", cdb0); | ||
343 | } else { | ||
344 | if (sa_name) | ||
345 | printk("%s", sa_name); | ||
346 | else if (cdb_name) | ||
347 | printk("%s, sa=0x%x", cdb_name, sa); | ||
348 | else | ||
349 | printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); | ||
350 | } | ||
351 | } | ||
352 | |||
353 | void __scsi_print_command(const unsigned char *cdb, size_t cdb_len) | ||
354 | { | ||
355 | int k, len; | ||
356 | |||
357 | print_opcode_name(cdb, cdb_len); | ||
358 | len = scsi_command_size(cdb); | ||
359 | if (cdb_len < len) | ||
360 | len = cdb_len; | ||
361 | /* print out all bytes in cdb */ | ||
362 | for (k = 0; k < len; ++k) | ||
363 | printk(" %02x", cdb[k]); | ||
364 | printk("\n"); | ||
365 | } | ||
366 | EXPORT_SYMBOL(__scsi_print_command); | ||
367 | |||
368 | void scsi_print_command(struct scsi_cmnd *cmd) | ||
369 | { | ||
370 | int k; | ||
371 | |||
372 | if (cmd->cmnd == NULL) | ||
373 | return; | ||
374 | |||
375 | scmd_printk(KERN_INFO, cmd, "CDB: "); | ||
376 | print_opcode_name(cmd->cmnd, cmd->cmd_len); | ||
377 | |||
378 | /* print out all bytes in cdb */ | ||
379 | printk(":"); | ||
380 | for (k = 0; k < cmd->cmd_len; ++k) | ||
381 | printk(" %02x", cmd->cmnd[k]); | ||
382 | printk("\n"); | ||
383 | } | ||
384 | EXPORT_SYMBOL(scsi_print_command); | ||
385 | |||
386 | #ifdef CONFIG_SCSI_CONSTANTS | ||
387 | |||
388 | struct error_info { | 293 | struct error_info { |
389 | unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */ | 294 | unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */ |
390 | const char * text; | 295 | const char * text; |
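With print_opcode_name(), __scsi_print_command() and scsi_print_command() removed from constants.c above, scsi_opcode_sa_name() loses its static qualifier so other SCSI code can resolve CDB names. The sketch below mirrors the decode logic of the removed print_opcode_name(); the include set, the snprintf-based buffer output and the function name are illustrative assumptions, not the new scsi_logging code.

#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>

/* Illustrative only: decode cdb[0] (and the service action, when the opcode
 * uses one) into a printable name. */
static void example_format_opcode(char *buf, size_t len,
				  const unsigned char *cdb, size_t cdb_len)
{
	const char *cdb_name = NULL, *sa_name = NULL;
	int sa, cdb0 = cdb[0];

	if (cdb0 == VARIABLE_LENGTH_CMD && cdb_len >= 10)
		sa = (cdb[8] << 8) + cdb[9];
	else
		sa = cdb[1] & 0x1f;

	if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) {
		/* No service-action table for this opcode. */
		if (cdb_name)
			snprintf(buf, len, "%s", cdb_name);
		else
			snprintf(buf, len, "cdb[0]=0x%x", cdb0);
	} else {
		/* Opcode whose name depends on the service action. */
		if (sa_name)
			snprintf(buf, len, "%s", sa_name);
		else
			snprintf(buf, len, "cdb[0]=0x%x, sa=0x%x", cdb0, sa);
	}
}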
@@ -392,7 +297,7 @@ struct error_info { | |||
392 | 297 | ||
393 | /* | 298 | /* |
394 | * The canonical list of T10 Additional Sense Codes is available at: | 299 | * The canonical list of T10 Additional Sense Codes is available at: |
395 | * http://www.t10.org/lists/asc-num.txt [most recent: 20130605] | 300 | * http://www.t10.org/lists/asc-num.txt [most recent: 20141221] |
396 | */ | 301 | */ |
397 | 302 | ||
398 | static const struct error_info additional[] = | 303 | static const struct error_info additional[] = |
@@ -421,6 +326,7 @@ static const struct error_info additional[] = | |||
421 | {0x001E, "Conflicting SA creation request"}, | 326 | {0x001E, "Conflicting SA creation request"}, |
422 | {0x001F, "Logical unit transitioning to another power condition"}, | 327 | {0x001F, "Logical unit transitioning to another power condition"}, |
423 | {0x0020, "Extended copy information available"}, | 328 | {0x0020, "Extended copy information available"}, |
329 | {0x0021, "Atomic command aborted due to ACA"}, | ||
424 | 330 | ||
425 | {0x0100, "No index/sector signal"}, | 331 | {0x0100, "No index/sector signal"}, |
426 | 332 | ||
@@ -446,6 +352,7 @@ static const struct error_info additional[] = | |||
446 | {0x040C, "Logical unit not accessible, target port in unavailable " | 352 | {0x040C, "Logical unit not accessible, target port in unavailable " |
447 | "state"}, | 353 | "state"}, |
448 | {0x040D, "Logical unit not ready, structure check required"}, | 354 | {0x040D, "Logical unit not ready, structure check required"}, |
355 | {0x040E, "Logical unit not ready, security session in progress"}, | ||
449 | {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, | 356 | {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, |
450 | {0x0411, "Logical unit not ready, notify (enable spinup) required"}, | 357 | {0x0411, "Logical unit not ready, notify (enable spinup) required"}, |
451 | {0x0412, "Logical unit not ready, offline"}, | 358 | {0x0412, "Logical unit not ready, offline"}, |
@@ -462,6 +369,11 @@ static const struct error_info additional[] = | |||
462 | {0x041C, "Logical unit not ready, additional power use not yet " | 369 | {0x041C, "Logical unit not ready, additional power use not yet " |
463 | "granted"}, | 370 | "granted"}, |
464 | {0x041D, "Logical unit not ready, configuration in progress"}, | 371 | {0x041D, "Logical unit not ready, configuration in progress"}, |
372 | {0x041E, "Logical unit not ready, microcode activation required"}, | ||
373 | {0x041F, "Logical unit not ready, microcode download required"}, | ||
374 | {0x0420, "Logical unit not ready, logical unit reset required"}, | ||
375 | {0x0421, "Logical unit not ready, hard reset required"}, | ||
376 | {0x0422, "Logical unit not ready, power cycle required"}, | ||
465 | 377 | ||
466 | {0x0500, "Logical unit does not respond to selection"}, | 378 | {0x0500, "Logical unit does not respond to selection"}, |
467 | 379 | ||
@@ -480,6 +392,7 @@ static const struct error_info additional[] = | |||
480 | {0x0902, "Focus servo failure"}, | 392 | {0x0902, "Focus servo failure"}, |
481 | {0x0903, "Spindle servo failure"}, | 393 | {0x0903, "Spindle servo failure"}, |
482 | {0x0904, "Head select fault"}, | 394 | {0x0904, "Head select fault"}, |
395 | {0x0905, "Vibration induced tracking error"}, | ||
483 | 396 | ||
484 | {0x0A00, "Error log overflow"}, | 397 | {0x0A00, "Error log overflow"}, |
485 | 398 | ||
@@ -510,6 +423,7 @@ static const struct error_info additional[] = | |||
510 | {0x0C0D, "Write error - not enough unsolicited data"}, | 423 | {0x0C0D, "Write error - not enough unsolicited data"}, |
511 | {0x0C0E, "Multiple write errors"}, | 424 | {0x0C0E, "Multiple write errors"}, |
512 | {0x0C0F, "Defects in error window"}, | 425 | {0x0C0F, "Defects in error window"}, |
426 | {0x0C10, "Incomplete multiple atomic write operations"}, | ||
513 | 427 | ||
514 | {0x0D00, "Error detected by third party temporary initiator"}, | 428 | {0x0D00, "Error detected by third party temporary initiator"}, |
515 | {0x0D01, "Third party device failure"}, | 429 | {0x0D01, "Third party device failure"}, |
@@ -635,6 +549,10 @@ static const struct error_info additional[] = | |||
635 | {0x2101, "Invalid element address"}, | 549 | {0x2101, "Invalid element address"}, |
636 | {0x2102, "Invalid address for write"}, | 550 | {0x2102, "Invalid address for write"}, |
637 | {0x2103, "Invalid write crossing layer jump"}, | 551 | {0x2103, "Invalid write crossing layer jump"}, |
552 | {0x2104, "Unaligned write command"}, | ||
553 | {0x2105, "Write boundary violation"}, | ||
554 | {0x2106, "Attempt to read invalid data"}, | ||
555 | {0x2107, "Read boundary violation"}, | ||
638 | 556 | ||
639 | {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, | 557 | {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, |
640 | 558 | ||
@@ -691,6 +609,7 @@ static const struct error_info additional[] = | |||
691 | {0x2705, "Permanent write protect"}, | 609 | {0x2705, "Permanent write protect"}, |
692 | {0x2706, "Conditional write protect"}, | 610 | {0x2706, "Conditional write protect"}, |
693 | {0x2707, "Space allocation failed write protect"}, | 611 | {0x2707, "Space allocation failed write protect"}, |
612 | {0x2708, "Zone is read only"}, | ||
694 | 613 | ||
695 | {0x2800, "Not ready to ready change, medium may have changed"}, | 614 | {0x2800, "Not ready to ready change, medium may have changed"}, |
696 | {0x2801, "Import or export element accessed"}, | 615 | {0x2801, "Import or export element accessed"}, |
@@ -743,10 +662,15 @@ static const struct error_info additional[] = | |||
743 | {0x2C0A, "Partition or collection contains user objects"}, | 662 | {0x2C0A, "Partition or collection contains user objects"}, |
744 | {0x2C0B, "Not reserved"}, | 663 | {0x2C0B, "Not reserved"}, |
745 | {0x2C0C, "Orwrite generation does not match"}, | 664 | {0x2C0C, "Orwrite generation does not match"}, |
665 | {0x2C0D, "Reset write pointer not allowed"}, | ||
666 | {0x2C0E, "Zone is offline"}, | ||
746 | 667 | ||
747 | {0x2D00, "Overwrite error on update in place"}, | 668 | {0x2D00, "Overwrite error on update in place"}, |
748 | 669 | ||
749 | {0x2E00, "Insufficient time for operation"}, | 670 | {0x2E00, "Insufficient time for operation"}, |
671 | {0x2E01, "Command timeout before processing"}, | ||
672 | {0x2E02, "Command timeout during processing"}, | ||
673 | {0x2E03, "Command timeout during processing due to error recovery"}, | ||
750 | 674 | ||
751 | {0x2F00, "Commands cleared by another initiator"}, | 675 | {0x2F00, "Commands cleared by another initiator"}, |
752 | {0x2F01, "Commands cleared by power loss notification"}, | 676 | {0x2F01, "Commands cleared by power loss notification"}, |
@@ -868,6 +792,7 @@ static const struct error_info additional[] = | |||
868 | {0x3F13, "iSCSI IP address removed"}, | 792 | {0x3F13, "iSCSI IP address removed"}, |
869 | {0x3F14, "iSCSI IP address changed"}, | 793 | {0x3F14, "iSCSI IP address changed"}, |
870 | {0x3F15, "Inspect referrals sense descriptors"}, | 794 | {0x3F15, "Inspect referrals sense descriptors"}, |
795 | {0x3F16, "Microcode has been changed without reset"}, | ||
871 | /* | 796 | /* |
872 | * {0x40NN, "Ram failure"}, | 797 | * {0x40NN, "Ram failure"}, |
873 | * {0x40NN, "Diagnostic failure on component nn"}, | 798 | * {0x40NN, "Diagnostic failure on component nn"}, |
@@ -946,6 +871,11 @@ static const struct error_info additional[] = | |||
946 | {0x5306, "Volume identifier missing"}, | 871 | {0x5306, "Volume identifier missing"}, |
947 | {0x5307, "Duplicate volume identifier"}, | 872 | {0x5307, "Duplicate volume identifier"}, |
948 | {0x5308, "Element status unknown"}, | 873 | {0x5308, "Element status unknown"}, |
874 | {0x5309, "Data transfer device error - load failed"}, | ||
875 | {0x530a, "Data transfer device error - unload failed"}, | ||
876 | {0x530b, "Data transfer device error - unload missing"}, | ||
877 | {0x530c, "Data transfer device error - eject failed"}, | ||
878 | {0x530d, "Data transfer device error - library communication failed"}, | ||
949 | 879 | ||
950 | {0x5400, "Scsi to host system interface failure"}, | 880 | {0x5400, "Scsi to host system interface failure"}, |
951 | 881 | ||
@@ -963,6 +893,7 @@ static const struct error_info additional[] = | |||
963 | {0x550B, "Insufficient power for operation"}, | 893 | {0x550B, "Insufficient power for operation"}, |
964 | {0x550C, "Insufficient resources to create rod"}, | 894 | {0x550C, "Insufficient resources to create rod"}, |
965 | {0x550D, "Insufficient resources to create rod token"}, | 895 | {0x550D, "Insufficient resources to create rod token"}, |
896 | {0x550E, "Insufficient zone resources"}, | ||
966 | 897 | ||
967 | {0x5700, "Unable to recover table-of-contents"}, | 898 | {0x5700, "Unable to recover table-of-contents"}, |
968 | 899 | ||
@@ -1247,15 +1178,12 @@ static const char * const snstext[] = { | |||
1247 | "Completed", /* F: command completed sense data reported, | 1178 | "Completed", /* F: command completed sense data reported, |
1248 | may occur for successful command */ | 1179 | may occur for successful command */ |
1249 | }; | 1180 | }; |
1250 | #endif | ||
1251 | 1181 | ||
1252 | /* Get sense key string or NULL if not available */ | 1182 | /* Get sense key string or NULL if not available */ |
1253 | const char * | 1183 | const char * |
1254 | scsi_sense_key_string(unsigned char key) { | 1184 | scsi_sense_key_string(unsigned char key) { |
1255 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1256 | if (key <= 0xE) | 1185 | if (key <= 0xE) |
1257 | return snstext[key]; | 1186 | return snstext[key]; |
1258 | #endif | ||
1259 | return NULL; | 1187 | return NULL; |
1260 | } | 1188 | } |
1261 | EXPORT_SYMBOL(scsi_sense_key_string); | 1189 | EXPORT_SYMBOL(scsi_sense_key_string); |
@@ -1267,7 +1195,6 @@ EXPORT_SYMBOL(scsi_sense_key_string); | |||
1267 | const char * | 1195 | const char * |
1268 | scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) | 1196 | scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) |
1269 | { | 1197 | { |
1270 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1271 | int i; | 1198 | int i; |
1272 | unsigned short code = ((asc << 8) | ascq); | 1199 | unsigned short code = ((asc << 8) | ascq); |
1273 | 1200 | ||
@@ -1283,122 +1210,10 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt) | |||
1283 | return additional2[i].str; | 1210 | return additional2[i].str; |
1284 | } | 1211 | } |
1285 | } | 1212 | } |
1286 | #else | ||
1287 | *fmt = NULL; | ||
1288 | #endif | ||
1289 | return NULL; | 1213 | return NULL; |
1290 | } | 1214 | } |
1291 | EXPORT_SYMBOL(scsi_extd_sense_format); | 1215 | EXPORT_SYMBOL(scsi_extd_sense_format); |
1292 | 1216 | ||
1293 | void | ||
1294 | scsi_show_extd_sense(const struct scsi_device *sdev, const char *name, | ||
1295 | unsigned char asc, unsigned char ascq) | ||
1296 | { | ||
1297 | const char *extd_sense_fmt = NULL; | ||
1298 | const char *extd_sense_str = scsi_extd_sense_format(asc, ascq, | ||
1299 | &extd_sense_fmt); | ||
1300 | |||
1301 | if (extd_sense_str) { | ||
1302 | if (extd_sense_fmt) | ||
1303 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1304 | "Add. Sense: %s (%s%x)", | ||
1305 | extd_sense_str, extd_sense_fmt, | ||
1306 | ascq); | ||
1307 | else | ||
1308 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1309 | "Add. Sense: %s", extd_sense_str); | ||
1310 | |||
1311 | } else { | ||
1312 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1313 | "%sASC=0x%x %sASCQ=0x%x\n", | ||
1314 | asc >= 0x80 ? "<<vendor>> " : "", asc, | ||
1315 | ascq >= 0x80 ? "<<vendor>> " : "", ascq); | ||
1316 | } | ||
1317 | } | ||
1318 | EXPORT_SYMBOL(scsi_show_extd_sense); | ||
1319 | |||
1320 | void | ||
1321 | scsi_show_sense_hdr(const struct scsi_device *sdev, const char *name, | ||
1322 | const struct scsi_sense_hdr *sshdr) | ||
1323 | { | ||
1324 | const char *sense_txt; | ||
1325 | |||
1326 | sense_txt = scsi_sense_key_string(sshdr->sense_key); | ||
1327 | if (sense_txt) | ||
1328 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1329 | "Sense Key : %s [%s]%s\n", sense_txt, | ||
1330 | scsi_sense_is_deferred(sshdr) ? | ||
1331 | "deferred" : "current", | ||
1332 | sshdr->response_code >= 0x72 ? | ||
1333 | " [descriptor]" : ""); | ||
1334 | else | ||
1335 | sdev_prefix_printk(KERN_INFO, sdev, name, | ||
1336 | "Sense Key : 0x%x [%s]%s", sshdr->sense_key, | ||
1337 | scsi_sense_is_deferred(sshdr) ? | ||
1338 | "deferred" : "current", | ||
1339 | sshdr->response_code >= 0x72 ? | ||
1340 | " [descriptor]" : ""); | ||
1341 | } | ||
1342 | EXPORT_SYMBOL(scsi_show_sense_hdr); | ||
1343 | |||
1344 | /* | ||
1345 | * Print normalized SCSI sense header with a prefix. | ||
1346 | */ | ||
1347 | void | ||
1348 | scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name, | ||
1349 | const struct scsi_sense_hdr *sshdr) | ||
1350 | { | ||
1351 | scsi_show_sense_hdr(sdev, name, sshdr); | ||
1352 | scsi_show_extd_sense(sdev, name, sshdr->asc, sshdr->ascq); | ||
1353 | } | ||
1354 | EXPORT_SYMBOL(scsi_print_sense_hdr); | ||
1355 | |||
1356 | static void | ||
1357 | scsi_dump_sense_buffer(const unsigned char *sense_buffer, int sense_len) | ||
1358 | { | ||
1359 | int k, num; | ||
1360 | |||
1361 | num = (sense_len < 32) ? sense_len : 32; | ||
1362 | printk("Unrecognized sense data (in hex):"); | ||
1363 | for (k = 0; k < num; ++k) { | ||
1364 | if (0 == (k % 16)) { | ||
1365 | printk("\n"); | ||
1366 | printk(KERN_INFO " "); | ||
1367 | } | ||
1368 | printk("%02x ", sense_buffer[k]); | ||
1369 | } | ||
1370 | printk("\n"); | ||
1371 | return; | ||
1372 | } | ||
1373 | |||
1374 | /* Normalize and print sense buffer with name prefix */ | ||
1375 | void __scsi_print_sense(const struct scsi_device *sdev, const char *name, | ||
1376 | const unsigned char *sense_buffer, int sense_len) | ||
1377 | { | ||
1378 | struct scsi_sense_hdr sshdr; | ||
1379 | |||
1380 | if (!scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) { | ||
1381 | scsi_dump_sense_buffer(sense_buffer, sense_len); | ||
1382 | return; | ||
1383 | } | ||
1384 | scsi_show_sense_hdr(sdev, name, &sshdr); | ||
1385 | scsi_show_extd_sense(sdev, name, sshdr.asc, sshdr.ascq); | ||
1386 | } | ||
1387 | EXPORT_SYMBOL(__scsi_print_sense); | ||
1388 | |||
1389 | /* Normalize and print sense buffer in SCSI command */ | ||
1390 | void scsi_print_sense(const struct scsi_cmnd *cmd) | ||
1391 | { | ||
1392 | struct gendisk *disk = cmd->request->rq_disk; | ||
1393 | const char *disk_name = disk ? disk->disk_name : NULL; | ||
1394 | |||
1395 | __scsi_print_sense(cmd->device, disk_name, cmd->sense_buffer, | ||
1396 | SCSI_SENSE_BUFFERSIZE); | ||
1397 | } | ||
1398 | EXPORT_SYMBOL(scsi_print_sense); | ||
1399 | |||
1400 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1401 | |||
1402 | static const char * const hostbyte_table[]={ | 1217 | static const char * const hostbyte_table[]={ |
1403 | "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", | 1218 | "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET", |
1404 | "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", | 1219 | "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR", |
@@ -1410,17 +1225,13 @@ static const char * const driverbyte_table[]={ | |||
1410 | "DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR", | 1225 | "DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR", |
1411 | "DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; | 1226 | "DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"}; |
1412 | 1227 | ||
1413 | #endif | ||
1414 | |||
1415 | const char *scsi_hostbyte_string(int result) | 1228 | const char *scsi_hostbyte_string(int result) |
1416 | { | 1229 | { |
1417 | const char *hb_string = NULL; | 1230 | const char *hb_string = NULL; |
1418 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1419 | int hb = host_byte(result); | 1231 | int hb = host_byte(result); |
1420 | 1232 | ||
1421 | if (hb < ARRAY_SIZE(hostbyte_table)) | 1233 | if (hb < ARRAY_SIZE(hostbyte_table)) |
1422 | hb_string = hostbyte_table[hb]; | 1234 | hb_string = hostbyte_table[hb]; |
1423 | #endif | ||
1424 | return hb_string; | 1235 | return hb_string; |
1425 | } | 1236 | } |
1426 | EXPORT_SYMBOL(scsi_hostbyte_string); | 1237 | EXPORT_SYMBOL(scsi_hostbyte_string); |
@@ -1428,17 +1239,14 @@ EXPORT_SYMBOL(scsi_hostbyte_string); | |||
1428 | const char *scsi_driverbyte_string(int result) | 1239 | const char *scsi_driverbyte_string(int result) |
1429 | { | 1240 | { |
1430 | const char *db_string = NULL; | 1241 | const char *db_string = NULL; |
1431 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1432 | int db = driver_byte(result); | 1242 | int db = driver_byte(result); |
1433 | 1243 | ||
1434 | if (db < ARRAY_SIZE(driverbyte_table)) | 1244 | if (db < ARRAY_SIZE(driverbyte_table)) |
1435 | db_string = driverbyte_table[db]; | 1245 | db_string = driverbyte_table[db]; |
1436 | #endif | ||
1437 | return db_string; | 1246 | return db_string; |
1438 | } | 1247 | } |
1439 | EXPORT_SYMBOL(scsi_driverbyte_string); | 1248 | EXPORT_SYMBOL(scsi_driverbyte_string); |
1440 | 1249 | ||
1441 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1442 | #define scsi_mlreturn_name(result) { result, #result } | 1250 | #define scsi_mlreturn_name(result) { result, #result } |
1443 | static const struct value_name_pair scsi_mlreturn_arr[] = { | 1251 | static const struct value_name_pair scsi_mlreturn_arr[] = { |
1444 | scsi_mlreturn_name(NEEDS_RETRY), | 1252 | scsi_mlreturn_name(NEEDS_RETRY), |
@@ -1451,11 +1259,9 @@ static const struct value_name_pair scsi_mlreturn_arr[] = { | |||
1451 | scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED), | 1259 | scsi_mlreturn_name(SCSI_RETURN_NOT_HANDLED), |
1452 | scsi_mlreturn_name(FAST_IO_FAIL) | 1260 | scsi_mlreturn_name(FAST_IO_FAIL) |
1453 | }; | 1261 | }; |
1454 | #endif | ||
1455 | 1262 | ||
1456 | const char *scsi_mlreturn_string(int result) | 1263 | const char *scsi_mlreturn_string(int result) |
1457 | { | 1264 | { |
1458 | #ifdef CONFIG_SCSI_CONSTANTS | ||
1459 | const struct value_name_pair *arr = scsi_mlreturn_arr; | 1265 | const struct value_name_pair *arr = scsi_mlreturn_arr; |
1460 | int k; | 1266 | int k; |
1461 | 1267 | ||
@@ -1463,29 +1269,6 @@ const char *scsi_mlreturn_string(int result) | |||
1463 | if (result == arr->value) | 1269 | if (result == arr->value) |
1464 | return arr->name; | 1270 | return arr->name; |
1465 | } | 1271 | } |
1466 | #endif | ||
1467 | return NULL; | 1272 | return NULL; |
1468 | } | 1273 | } |
1469 | EXPORT_SYMBOL(scsi_mlreturn_string); | 1274 | EXPORT_SYMBOL(scsi_mlreturn_string); |
1470 | |||
1471 | void scsi_print_result(struct scsi_cmnd *cmd, const char *msg, int disposition) | ||
1472 | { | ||
1473 | const char *mlret_string = scsi_mlreturn_string(disposition); | ||
1474 | const char *hb_string = scsi_hostbyte_string(cmd->result); | ||
1475 | const char *db_string = scsi_driverbyte_string(cmd->result); | ||
1476 | |||
1477 | if (hb_string || db_string) | ||
1478 | scmd_printk(KERN_INFO, cmd, | ||
1479 | "%s%s Result: hostbyte=%s driverbyte=%s", | ||
1480 | msg ? msg : "", | ||
1481 | mlret_string ? mlret_string : "UNKNOWN", | ||
1482 | hb_string ? hb_string : "invalid", | ||
1483 | db_string ? db_string : "invalid"); | ||
1484 | else | ||
1485 | scmd_printk(KERN_INFO, cmd, | ||
1486 | "%s%s Result: hostbyte=0x%02x driverbyte=0x%02x", | ||
1487 | msg ? msg : "", | ||
1488 | mlret_string ? mlret_string : "UNKNOWN", | ||
1489 | host_byte(cmd->result), driver_byte(cmd->result)); | ||
1490 | } | ||
1491 | EXPORT_SYMBOL(scsi_print_result); | ||
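The additional[] table above packs each ASC/ASCQ pair into a single 16-bit code12 value, and scsi_extd_sense_format() resolves it with a linear scan (the range-style additional2[] table the kernel also consults is omitted here). A minimal userspace sketch of that lookup, using a few entries copied from the hunks above:

#include <stdio.h>
#include <stddef.h>

struct error_info {
	unsigned short code12;   /* 0x0302 looks better than 0x03,0x02 */
	const char *text;
};

static const struct error_info additional[] = {
	{0x0021, "Atomic command aborted due to ACA"},
	{0x0420, "Logical unit not ready, logical unit reset required"},
	{0x2104, "Unaligned write command"},
	{0x2E01, "Command timeout before processing"},
	{0x3F16, "Microcode has been changed without reset"},
};

/* Same idea as scsi_extd_sense_format(): pack asc/ascq and scan the table. */
static const char *extd_sense_text(unsigned char asc, unsigned char ascq)
{
	unsigned short code = (asc << 8) | ascq;
	size_t i;

	for (i = 0; i < sizeof(additional) / sizeof(additional[0]); i++)
		if (additional[i].code12 == code)
			return additional[i].text;
	return NULL;   /* caller falls back to printing the raw ASC/ASCQ values */
}

int main(void)
{
	const char *txt = extd_sense_text(0x04, 0x20);

	printf("ASC 0x04 / ASCQ 0x20: %s\n", txt ? txt : "unknown");
	return 0;
}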
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 0c6be0a17f53..5ee7f44cf869 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
@@ -4610,13 +4610,10 @@ static void adapter_uninit(struct AdapterCtlBlk *acb) | |||
4610 | } | 4610 | } |
4611 | 4611 | ||
4612 | 4612 | ||
4613 | #undef SPRINTF | ||
4614 | #define SPRINTF(args...) seq_printf(m,##args) | ||
4615 | |||
4616 | #undef YESNO | 4613 | #undef YESNO |
4617 | #define YESNO(YN) \ | 4614 | #define YESNO(YN) \ |
4618 | if (YN) SPRINTF(" Yes ");\ | 4615 | if (YN) seq_printf(m, " Yes ");\ |
4619 | else SPRINTF(" No ") | 4616 | else seq_printf(m, " No ") |
4620 | 4617 | ||
4621 | static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) | 4618 | static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) |
4622 | { | 4619 | { |
@@ -4626,47 +4623,45 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
4626 | unsigned long flags; | 4623 | unsigned long flags; |
4627 | int dev; | 4624 | int dev; |
4628 | 4625 | ||
4629 | SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n"); | 4626 | seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n" |
4630 | SPRINTF(" Driver Version " DC395X_VERSION "\n"); | 4627 | " Driver Version " DC395X_VERSION "\n"); |
4631 | 4628 | ||
4632 | DC395x_LOCK_IO(acb->scsi_host, flags); | 4629 | DC395x_LOCK_IO(acb->scsi_host, flags); |
4633 | 4630 | ||
4634 | SPRINTF("SCSI Host Nr %i, ", host->host_no); | 4631 | seq_printf(m, "SCSI Host Nr %i, ", host->host_no); |
4635 | SPRINTF("DC395U/UW/F DC315/U %s\n", | 4632 | seq_printf(m, "DC395U/UW/F DC315/U %s\n", |
4636 | (acb->config & HCC_WIDE_CARD) ? "Wide" : ""); | 4633 | (acb->config & HCC_WIDE_CARD) ? "Wide" : ""); |
4637 | SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base); | 4634 | seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base); |
4638 | SPRINTF("irq_level 0x%04x, ", acb->irq_level); | 4635 | seq_printf(m, "irq_level 0x%04x, ", acb->irq_level); |
4639 | SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000); | 4636 | seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000); |
4640 | 4637 | ||
4641 | SPRINTF("MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun); | 4638 | seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun); |
4642 | SPRINTF("AdapterID %i\n", host->this_id); | 4639 | seq_printf(m, "AdapterID %i\n", host->this_id); |
4643 | 4640 | ||
4644 | SPRINTF("tag_max_num %i", acb->tag_max_num); | 4641 | seq_printf(m, "tag_max_num %i", acb->tag_max_num); |
4645 | /*SPRINTF(", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */ | 4642 | /*seq_printf(m, ", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */ |
4646 | SPRINTF(", FilterCfg 0x%02x", | 4643 | seq_printf(m, ", FilterCfg 0x%02x", |
4647 | DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1)); | 4644 | DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1)); |
4648 | SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time); | 4645 | seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time); |
4649 | /*SPRINTF("\n"); */ | 4646 | /*seq_printf(m, "\n"); */ |
4650 | 4647 | ||
4651 | SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list)); | 4648 | seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list)); |
4652 | SPRINTF | 4649 | seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n", |
4653 | ("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
4654 | acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2], | 4650 | acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2], |
4655 | acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5], | 4651 | acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5], |
4656 | acb->dcb_map[6], acb->dcb_map[7]); | 4652 | acb->dcb_map[6], acb->dcb_map[7]); |
4657 | SPRINTF | 4653 | seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n", |
4658 | (" %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
4659 | acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10], | 4654 | acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10], |
4660 | acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13], | 4655 | acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13], |
4661 | acb->dcb_map[14], acb->dcb_map[15]); | 4656 | acb->dcb_map[14], acb->dcb_map[15]); |
4662 | 4657 | ||
4663 | SPRINTF | 4658 | seq_puts(m, |
4664 | ("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n"); | 4659 | "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n"); |
4665 | 4660 | ||
4666 | dev = 0; | 4661 | dev = 0; |
4667 | list_for_each_entry(dcb, &acb->dcb_list, list) { | 4662 | list_for_each_entry(dcb, &acb->dcb_list, list) { |
4668 | int nego_period; | 4663 | int nego_period; |
4669 | SPRINTF("%02i %02i %02i ", dev, dcb->target_id, | 4664 | seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id, |
4670 | dcb->target_lun); | 4665 | dcb->target_lun); |
4671 | YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK); | 4666 | YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK); |
4672 | YESNO(dcb->sync_offset); | 4667 | YESNO(dcb->sync_offset); |
@@ -4676,53 +4671,53 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
4676 | YESNO(dcb->sync_mode & EN_TAG_QUEUEING); | 4671 | YESNO(dcb->sync_mode & EN_TAG_QUEUEING); |
4677 | nego_period = clock_period[dcb->sync_period & 0x07] << 2; | 4672 | nego_period = clock_period[dcb->sync_period & 0x07] << 2; |
4678 | if (dcb->sync_offset) | 4673 | if (dcb->sync_offset) |
4679 | SPRINTF(" %03i ns ", nego_period); | 4674 | seq_printf(m, " %03i ns ", nego_period); |
4680 | else | 4675 | else |
4681 | SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2)); | 4676 | seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2)); |
4682 | 4677 | ||
4683 | if (dcb->sync_offset & 0x0f) { | 4678 | if (dcb->sync_offset & 0x0f) { |
4684 | spd = 1000 / (nego_period); | 4679 | spd = 1000 / (nego_period); |
4685 | spd1 = 1000 % (nego_period); | 4680 | spd1 = 1000 % (nego_period); |
4686 | spd1 = (spd1 * 10 + nego_period / 2) / (nego_period); | 4681 | spd1 = (spd1 * 10 + nego_period / 2) / (nego_period); |
4687 | SPRINTF(" %2i.%1i M %02i ", spd, spd1, | 4682 | seq_printf(m, " %2i.%1i M %02i ", spd, spd1, |
4688 | (dcb->sync_offset & 0x0f)); | 4683 | (dcb->sync_offset & 0x0f)); |
4689 | } else | 4684 | } else |
4690 | SPRINTF(" "); | 4685 | seq_puts(m, " "); |
4691 | 4686 | ||
4692 | /* Add more info ... */ | 4687 | /* Add more info ... */ |
4693 | SPRINTF(" %02i\n", dcb->max_command); | 4688 | seq_printf(m, " %02i\n", dcb->max_command); |
4694 | dev++; | 4689 | dev++; |
4695 | } | 4690 | } |
4696 | 4691 | ||
4697 | if (timer_pending(&acb->waiting_timer)) | 4692 | if (timer_pending(&acb->waiting_timer)) |
4698 | SPRINTF("Waiting queue timer running\n"); | 4693 | seq_puts(m, "Waiting queue timer running\n"); |
4699 | else | 4694 | else |
4700 | SPRINTF("\n"); | 4695 | seq_putc(m, '\n'); |
4701 | 4696 | ||
4702 | list_for_each_entry(dcb, &acb->dcb_list, list) { | 4697 | list_for_each_entry(dcb, &acb->dcb_list, list) { |
4703 | struct ScsiReqBlk *srb; | 4698 | struct ScsiReqBlk *srb; |
4704 | if (!list_empty(&dcb->srb_waiting_list)) | 4699 | if (!list_empty(&dcb->srb_waiting_list)) |
4705 | SPRINTF("DCB (%02i-%i): Waiting: %i:", | 4700 | seq_printf(m, "DCB (%02i-%i): Waiting: %i:", |
4706 | dcb->target_id, dcb->target_lun, | 4701 | dcb->target_id, dcb->target_lun, |
4707 | list_size(&dcb->srb_waiting_list)); | 4702 | list_size(&dcb->srb_waiting_list)); |
4708 | list_for_each_entry(srb, &dcb->srb_waiting_list, list) | 4703 | list_for_each_entry(srb, &dcb->srb_waiting_list, list) |
4709 | SPRINTF(" %p", srb->cmd); | 4704 | seq_printf(m, " %p", srb->cmd); |
4710 | if (!list_empty(&dcb->srb_going_list)) | 4705 | if (!list_empty(&dcb->srb_going_list)) |
4711 | SPRINTF("\nDCB (%02i-%i): Going : %i:", | 4706 | seq_printf(m, "\nDCB (%02i-%i): Going : %i:", |
4712 | dcb->target_id, dcb->target_lun, | 4707 | dcb->target_id, dcb->target_lun, |
4713 | list_size(&dcb->srb_going_list)); | 4708 | list_size(&dcb->srb_going_list)); |
4714 | list_for_each_entry(srb, &dcb->srb_going_list, list) | 4709 | list_for_each_entry(srb, &dcb->srb_going_list, list) |
4715 | SPRINTF(" %p", srb->cmd); | 4710 | seq_printf(m, " %p", srb->cmd); |
4716 | if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) | 4711 | if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list)) |
4717 | SPRINTF("\n"); | 4712 | seq_putc(m, '\n'); |
4718 | } | 4713 | } |
4719 | 4714 | ||
4720 | if (debug_enabled(DBG_1)) { | 4715 | if (debug_enabled(DBG_1)) { |
4721 | SPRINTF("DCB list for ACB %p:\n", acb); | 4716 | seq_printf(m, "DCB list for ACB %p:\n", acb); |
4722 | list_for_each_entry(dcb, &acb->dcb_list, list) { | 4717 | list_for_each_entry(dcb, &acb->dcb_list, list) { |
4723 | SPRINTF("%p -> ", dcb); | 4718 | seq_printf(m, "%p -> ", dcb); |
4724 | } | 4719 | } |
4725 | SPRINTF("END\n"); | 4720 | seq_puts(m, "END\n"); |
4726 | } | 4721 | } |
4727 | 4722 | ||
4728 | DC395x_UNLOCK_IO(acb->scsi_host, flags); | 4723 | DC395x_UNLOCK_IO(acb->scsi_host, flags); |
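The dc395x conversion above follows the pattern used across this series: seq_puts() for literal strings, seq_putc() for single characters, and seq_printf() only where formatting is actually needed. A hedged userspace analogue of the same split, with stdio standing in for the seq_file API and the banner string reduced to a stand-in:

#include <stdio.h>

/* Userspace analogue of the seq_file clean-ups: puts/putc-style helpers for
 * fixed output, printf-style only when values are being formatted. */
static void show_info(FILE *m, int host_no, int wide)
{
	fputs("DC395x PCI SCSI Host Adapter\n", m);   /* was a printf-style call with a literal */
	fprintf(m, "SCSI Host Nr %i, %s\n",           /* formatting still needs printf */
		host_no, wide ? "Wide" : "");
	fputc('\n', m);                               /* was a printf-style call with "\n" */
}

int main(void)
{
	show_info(stdout, 0, 1);
	return 0;
}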
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 0bf976936a10..2806cfbec2b9 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c | |||
@@ -568,7 +568,7 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
568 | seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", | 568 | seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", |
569 | host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize); | 569 | host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize); |
570 | 570 | ||
571 | seq_printf(m, "Devices:\n"); | 571 | seq_puts(m, "Devices:\n"); |
572 | for(chan = 0; chan < MAX_CHANNEL; chan++) { | 572 | for(chan = 0; chan < MAX_CHANNEL; chan++) { |
573 | for(id = 0; id < MAX_ID; id++) { | 573 | for(id = 0; id < MAX_ID; id++) { |
574 | d = pHba->channel[chan].device[id]; | 574 | d = pHba->channel[chan].device[id]; |
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index 8319d2b417b8..ca8003f0d8a3 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c | |||
@@ -102,7 +102,7 @@ static int eata_pio_show_info(struct seq_file *m, struct Scsi_Host *shost) | |||
102 | shost->host_no, SD(shost)->name); | 102 | shost->host_no, SD(shost)->name); |
103 | seq_printf(m, "Firmware revision: v%s\n", | 103 | seq_printf(m, "Firmware revision: v%s\n", |
104 | SD(shost)->revision); | 104 | SD(shost)->revision); |
105 | seq_printf(m, "IO: PIO\n"); | 105 | seq_puts(m, "IO: PIO\n"); |
106 | seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base); | 106 | seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base); |
107 | seq_printf(m, "Host Bus: %s\n", | 107 | seq_printf(m, "Host Bus: %s\n", |
108 | (SD(shost)->bustype == 'P')?"PCI ": | 108 | (SD(shost)->bustype == 'P')?"PCI ": |
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 7e1c21e6736b..31f8966b2e03 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c | |||
@@ -749,7 +749,7 @@ int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh) | |||
749 | if (dev_count == 0) | 749 | if (dev_count == 0) |
750 | seq_puts(m, "none\n"); | 750 | seq_puts(m, "none\n"); |
751 | 751 | ||
752 | seq_puts(m, "\n"); | 752 | seq_putc(m, '\n'); |
753 | return 0; | 753 | return 0; |
754 | 754 | ||
755 | } | 755 | } |
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index ce5bd52fe692..065b25df741b 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c | |||
@@ -2396,8 +2396,6 @@ int scsi_esp_register(struct esp *esp, struct device *dev) | |||
2396 | 2396 | ||
2397 | if (!esp->num_tags) | 2397 | if (!esp->num_tags) |
2398 | esp->num_tags = ESP_DEFAULT_TAGS; | 2398 | esp->num_tags = ESP_DEFAULT_TAGS; |
2399 | else if (esp->num_tags >= ESP_MAX_TAG) | ||
2400 | esp->num_tags = ESP_MAX_TAG - 1; | ||
2401 | esp->host->transportt = esp_transport_template; | 2399 | esp->host->transportt = esp_transport_template; |
2402 | esp->host->max_lun = ESP_MAX_LUN; | 2400 | esp->host->max_lun = ESP_MAX_LUN; |
2403 | esp->host->cmd_per_lun = 2; | 2401 | esp->host->cmd_per_lun = 2; |
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index 9fb632684863..e66e997992e3 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c | |||
@@ -173,7 +173,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
173 | /* request is i.e. "cat /proc/scsi/gdth/0" */ | 173 | /* request is i.e. "cat /proc/scsi/gdth/0" */ |
174 | /* format: %-15s\t%-10s\t%-15s\t%s */ | 174 | /* format: %-15s\t%-10s\t%-15s\t%s */ |
175 | /* driver parameters */ | 175 | /* driver parameters */ |
176 | seq_printf(m, "Driver Parameters:\n"); | 176 | seq_puts(m, "Driver Parameters:\n"); |
177 | if (reserve_list[0] == 0xff) | 177 | if (reserve_list[0] == 0xff) |
178 | strcpy(hrec, "--"); | 178 | strcpy(hrec, "--"); |
179 | else { | 179 | else { |
@@ -192,7 +192,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
192 | max_ids, hdr_channel); | 192 | max_ids, hdr_channel); |
193 | 193 | ||
194 | /* controller information */ | 194 | /* controller information */ |
195 | seq_printf(m,"\nDisk Array Controller Information:\n"); | 195 | seq_puts(m, "\nDisk Array Controller Information:\n"); |
196 | seq_printf(m, | 196 | seq_printf(m, |
197 | " Number: \t%d \tName: \t%s\n", | 197 | " Number: \t%d \tName: \t%s\n", |
198 | ha->hanum, ha->binfo.type_string); | 198 | ha->hanum, ha->binfo.type_string); |
@@ -219,7 +219,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
219 | 219 | ||
220 | #ifdef GDTH_DMA_STATISTICS | 220 | #ifdef GDTH_DMA_STATISTICS |
221 | /* controller statistics */ | 221 | /* controller statistics */ |
222 | seq_printf(m,"\nController Statistics:\n"); | 222 | seq_puts(m, "\nController Statistics:\n"); |
223 | seq_printf(m, | 223 | seq_printf(m, |
224 | " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n", | 224 | " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n", |
225 | ha->dma32_cnt, ha->dma64_cnt); | 225 | ha->dma32_cnt, ha->dma64_cnt); |
@@ -227,7 +227,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
227 | 227 | ||
228 | if (ha->more_proc) { | 228 | if (ha->more_proc) { |
229 | /* more information: 2. about physical devices */ | 229 | /* more information: 2. about physical devices */ |
230 | seq_printf(m, "\nPhysical Devices:"); | 230 | seq_puts(m, "\nPhysical Devices:"); |
231 | flag = FALSE; | 231 | flag = FALSE; |
232 | 232 | ||
233 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); | 233 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); |
@@ -326,10 +326,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
326 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 326 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
327 | 327 | ||
328 | if (!flag) | 328 | if (!flag) |
329 | seq_printf(m, "\n --\n"); | 329 | seq_puts(m, "\n --\n"); |
330 | 330 | ||
331 | /* 3. about logical drives */ | 331 | /* 3. about logical drives */ |
332 | seq_printf(m,"\nLogical Drives:"); | 332 | seq_puts(m, "\nLogical Drives:"); |
333 | flag = FALSE; | 333 | flag = FALSE; |
334 | 334 | ||
335 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); | 335 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); |
@@ -411,10 +411,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
411 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 411 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
412 | 412 | ||
413 | if (!flag) | 413 | if (!flag) |
414 | seq_printf(m, "\n --\n"); | 414 | seq_puts(m, "\n --\n"); |
415 | 415 | ||
416 | /* 4. about array drives */ | 416 | /* 4. about array drives */ |
417 | seq_printf(m,"\nArray Drives:"); | 417 | seq_puts(m, "\nArray Drives:"); |
418 | flag = FALSE; | 418 | flag = FALSE; |
419 | 419 | ||
420 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); | 420 | buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); |
@@ -471,10 +471,10 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
471 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 471 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
472 | 472 | ||
473 | if (!flag) | 473 | if (!flag) |
474 | seq_printf(m, "\n --\n"); | 474 | seq_puts(m, "\n --\n"); |
475 | 475 | ||
476 | /* 5. about host drives */ | 476 | /* 5. about host drives */ |
477 | seq_printf(m,"\nHost Drives:"); | 477 | seq_puts(m, "\nHost Drives:"); |
478 | flag = FALSE; | 478 | flag = FALSE; |
479 | 479 | ||
480 | buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr); | 480 | buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr); |
@@ -527,11 +527,11 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
527 | } | 527 | } |
528 | 528 | ||
529 | if (!flag) | 529 | if (!flag) |
530 | seq_printf(m, "\n --\n"); | 530 | seq_puts(m, "\n --\n"); |
531 | } | 531 | } |
532 | 532 | ||
533 | /* controller events */ | 533 | /* controller events */ |
534 | seq_printf(m,"\nController Events:\n"); | 534 | seq_puts(m, "\nController Events:\n"); |
535 | 535 | ||
536 | for (id = -1;;) { | 536 | for (id = -1;;) { |
537 | id = gdth_read_event(ha, id, estr); | 537 | id = gdth_read_event(ha, id, estr); |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 6bb4611b238a..95d581c45413 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/jiffies.h> | 50 | #include <linux/jiffies.h> |
51 | #include <linux/percpu-defs.h> | 51 | #include <linux/percpu-defs.h> |
52 | #include <linux/percpu.h> | 52 | #include <linux/percpu.h> |
53 | #include <asm/unaligned.h> | ||
53 | #include <asm/div64.h> | 54 | #include <asm/div64.h> |
54 | #include "hpsa_cmd.h" | 55 | #include "hpsa_cmd.h" |
55 | #include "hpsa.h" | 56 | #include "hpsa.h" |
@@ -59,8 +60,11 @@ | |||
59 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" | 60 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
60 | #define HPSA "hpsa" | 61 | #define HPSA "hpsa" |
61 | 62 | ||
62 | /* How long to wait (in milliseconds) for board to go into simple mode */ | 63 | /* How long to wait for CISS doorbell communication */ |
63 | #define MAX_CONFIG_WAIT 30000 | 64 | #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ |
65 | #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ | ||
66 | #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ | ||
67 | #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ | ||
64 | #define MAX_IOCTL_CONFIG_WAIT 1000 | 68 | #define MAX_IOCTL_CONFIG_WAIT 1000 |
65 | 69 | ||
66 | /*define how many times we will try a command because of bus resets */ | 70 | /*define how many times we will try a command because of bus resets */ |
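The new wait constants above bound the doorbell polling: 2000 iterations of 10 ms for a mode change (20 s) and 30000 iterations of 20 ms for event clearing (600 s). A minimal userspace sketch of that bounded-poll shape, with the doorbell read replaced by a stub (the real hpsa_wait_for_mode_change_ack() reads a controller register and uses msleep()):

#include <stdio.h>
#include <unistd.h>

#define MODE_CHANGE_WAIT_INTERVAL 10    /* ms per poll, as in the hunk above */
#define MAX_MODE_CHANGE_WAIT      2000  /* polls: 2000 * 10 ms = 20 s total */

/* Stand-in for reading the controller doorbell register. */
static int doorbell_acked(void)
{
	static int polls;
	return ++polls > 3;   /* pretend the ack shows up on the fourth poll */
}

/* Bounded poll: sleep a fixed interval between reads and give up after a
 * maximum number of iterations instead of waiting forever. */
static int wait_for_mode_change_ack(void)
{
	int i;

	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (doorbell_acked())
			return 0;                        /* success */
		usleep(MODE_CHANGE_WAIT_INTERVAL * 1000);
	}
	return -1;                                       /* controller never acked */
}

int main(void)
{
	printf("mode change %s\n", wait_for_mode_change_ack() ? "timed out" : "acked");
	return 0;
}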
@@ -164,24 +168,24 @@ static struct board_type products[] = { | |||
164 | {0x1926103C, "Smart Array P731m", &SA5_access}, | 168 | {0x1926103C, "Smart Array P731m", &SA5_access}, |
165 | {0x1928103C, "Smart Array P230i", &SA5_access}, | 169 | {0x1928103C, "Smart Array P230i", &SA5_access}, |
166 | {0x1929103C, "Smart Array P530", &SA5_access}, | 170 | {0x1929103C, "Smart Array P530", &SA5_access}, |
167 | {0x21BD103C, "Smart Array", &SA5_access}, | 171 | {0x21BD103C, "Smart Array P244br", &SA5_access}, |
168 | {0x21BE103C, "Smart Array", &SA5_access}, | 172 | {0x21BE103C, "Smart Array P741m", &SA5_access}, |
169 | {0x21BF103C, "Smart Array", &SA5_access}, | 173 | {0x21BF103C, "Smart HBA H240ar", &SA5_access}, |
170 | {0x21C0103C, "Smart Array", &SA5_access}, | 174 | {0x21C0103C, "Smart Array P440ar", &SA5_access}, |
171 | {0x21C1103C, "Smart Array", &SA5_access}, | 175 | {0x21C1103C, "Smart Array P840ar", &SA5_access}, |
172 | {0x21C2103C, "Smart Array", &SA5_access}, | 176 | {0x21C2103C, "Smart Array P440", &SA5_access}, |
173 | {0x21C3103C, "Smart Array", &SA5_access}, | 177 | {0x21C3103C, "Smart Array P441", &SA5_access}, |
174 | {0x21C4103C, "Smart Array", &SA5_access}, | 178 | {0x21C4103C, "Smart Array", &SA5_access}, |
175 | {0x21C5103C, "Smart Array", &SA5_access}, | 179 | {0x21C5103C, "Smart Array P841", &SA5_access}, |
176 | {0x21C6103C, "Smart Array", &SA5_access}, | 180 | {0x21C6103C, "Smart HBA H244br", &SA5_access}, |
177 | {0x21C7103C, "Smart Array", &SA5_access}, | 181 | {0x21C7103C, "Smart HBA H240", &SA5_access}, |
178 | {0x21C8103C, "Smart Array", &SA5_access}, | 182 | {0x21C8103C, "Smart HBA H241", &SA5_access}, |
179 | {0x21C9103C, "Smart Array", &SA5_access}, | 183 | {0x21C9103C, "Smart Array", &SA5_access}, |
180 | {0x21CA103C, "Smart Array", &SA5_access}, | 184 | {0x21CA103C, "Smart Array P246br", &SA5_access}, |
181 | {0x21CB103C, "Smart Array", &SA5_access}, | 185 | {0x21CB103C, "Smart Array P840", &SA5_access}, |
182 | {0x21CC103C, "Smart Array", &SA5_access}, | 186 | {0x21CC103C, "Smart Array", &SA5_access}, |
183 | {0x21CD103C, "Smart Array", &SA5_access}, | 187 | {0x21CD103C, "Smart Array", &SA5_access}, |
184 | {0x21CE103C, "Smart Array", &SA5_access}, | 188 | {0x21CE103C, "Smart HBA", &SA5_access}, |
185 | {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, | 189 | {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, |
186 | {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, | 190 | {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, |
187 | {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, | 191 | {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, |
@@ -195,8 +199,6 @@ static int number_of_controllers; | |||
195 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); | 199 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); |
196 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); | 200 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); |
197 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); | 201 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); |
198 | static void lock_and_start_io(struct ctlr_info *h); | ||
199 | static void start_io(struct ctlr_info *h, unsigned long *flags); | ||
200 | 202 | ||
201 | #ifdef CONFIG_COMPAT | 203 | #ifdef CONFIG_COMPAT |
202 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, | 204 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, |
@@ -204,18 +206,18 @@ static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, | |||
204 | #endif | 206 | #endif |
205 | 207 | ||
206 | static void cmd_free(struct ctlr_info *h, struct CommandList *c); | 208 | static void cmd_free(struct ctlr_info *h, struct CommandList *c); |
207 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); | ||
208 | static struct CommandList *cmd_alloc(struct ctlr_info *h); | 209 | static struct CommandList *cmd_alloc(struct ctlr_info *h); |
209 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); | ||
210 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | 210 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
211 | void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, | 211 | void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, |
212 | int cmd_type); | 212 | int cmd_type); |
213 | static void hpsa_free_cmd_pool(struct ctlr_info *h); | ||
213 | #define VPD_PAGE (1 << 8) | 214 | #define VPD_PAGE (1 << 8) |
214 | 215 | ||
215 | static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); | 216 | static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
216 | static void hpsa_scan_start(struct Scsi_Host *); | 217 | static void hpsa_scan_start(struct Scsi_Host *); |
217 | static int hpsa_scan_finished(struct Scsi_Host *sh, | 218 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
218 | unsigned long elapsed_time); | 219 | unsigned long elapsed_time); |
220 | static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); | ||
219 | 221 | ||
220 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); | 222 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); |
221 | static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); | 223 | static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); |
@@ -229,7 +231,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, | |||
229 | struct CommandList *c); | 231 | struct CommandList *c); |
230 | /* performant mode helper functions */ | 232 | /* performant mode helper functions */ |
231 | static void calc_bucket_map(int *bucket, int num_buckets, | 233 | static void calc_bucket_map(int *bucket, int num_buckets, |
232 | int nsgs, int min_blocks, int *bucket_map); | 234 | int nsgs, int min_blocks, u32 *bucket_map); |
233 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); | 235 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); |
234 | static inline u32 next_command(struct ctlr_info *h, u8 q); | 236 | static inline u32 next_command(struct ctlr_info *h, u8 q); |
235 | static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, | 237 | static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, |
@@ -241,14 +243,15 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); | |||
241 | static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, | 243 | static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, |
242 | int wait_for_ready); | 244 | int wait_for_ready); |
243 | static inline void finish_cmd(struct CommandList *c); | 245 | static inline void finish_cmd(struct CommandList *c); |
244 | static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h); | 246 | static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h); |
245 | #define BOARD_NOT_READY 0 | 247 | #define BOARD_NOT_READY 0 |
246 | #define BOARD_READY 1 | 248 | #define BOARD_READY 1 |
247 | static void hpsa_drain_accel_commands(struct ctlr_info *h); | 249 | static void hpsa_drain_accel_commands(struct ctlr_info *h); |
248 | static void hpsa_flush_cache(struct ctlr_info *h); | 250 | static void hpsa_flush_cache(struct ctlr_info *h); |
249 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, | 251 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, |
250 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, | 252 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
251 | u8 *scsi3addr); | 253 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk); |
254 | static void hpsa_command_resubmit_worker(struct work_struct *work); | ||
252 | 255 | ||
253 | static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) | 256 | static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) |
254 | { | 257 | { |
@@ -505,8 +508,8 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) | |||
505 | return (scsi3addr[3] & 0xC0) == 0x40; | 508 | return (scsi3addr[3] & 0xC0) == 0x40; |
506 | } | 509 | } |
507 | 510 | ||
508 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 511 | static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6", |
509 | "1(ADM)", "UNKNOWN" | 512 | "1(+0)ADM", "UNKNOWN" |
510 | }; | 513 | }; |
511 | #define HPSA_RAID_0 0 | 514 | #define HPSA_RAID_0 0 |
512 | #define HPSA_RAID_4 1 | 515 | #define HPSA_RAID_4 1 |
@@ -671,7 +674,7 @@ static struct scsi_host_template hpsa_driver_template = { | |||
671 | .queuecommand = hpsa_scsi_queue_command, | 674 | .queuecommand = hpsa_scsi_queue_command, |
672 | .scan_start = hpsa_scan_start, | 675 | .scan_start = hpsa_scan_start, |
673 | .scan_finished = hpsa_scan_finished, | 676 | .scan_finished = hpsa_scan_finished, |
674 | .change_queue_depth = scsi_change_queue_depth, | 677 | .change_queue_depth = hpsa_change_queue_depth, |
675 | .this_id = -1, | 678 | .this_id = -1, |
676 | .use_clustering = ENABLE_CLUSTERING, | 679 | .use_clustering = ENABLE_CLUSTERING, |
677 | .eh_abort_handler = hpsa_eh_abort_handler, | 680 | .eh_abort_handler = hpsa_eh_abort_handler, |
@@ -688,13 +691,6 @@ static struct scsi_host_template hpsa_driver_template = { | |||
688 | .no_write_same = 1, | 691 | .no_write_same = 1, |
689 | }; | 692 | }; |
690 | 693 | ||
691 | |||
692 | /* Enqueuing and dequeuing functions for cmdlists. */ | ||
693 | static inline void addQ(struct list_head *list, struct CommandList *c) | ||
694 | { | ||
695 | list_add_tail(&c->list, list); | ||
696 | } | ||
697 | |||
698 | static inline u32 next_command(struct ctlr_info *h, u8 q) | 694 | static inline u32 next_command(struct ctlr_info *h, u8 q) |
699 | { | 695 | { |
700 | u32 a; | 696 | u32 a; |
@@ -828,31 +824,21 @@ static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h, | |||
828 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, | 824 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, |
829 | struct CommandList *c) | 825 | struct CommandList *c) |
830 | { | 826 | { |
831 | unsigned long flags; | 827 | dial_down_lockup_detection_during_fw_flash(h, c); |
832 | 828 | atomic_inc(&h->commands_outstanding); | |
833 | switch (c->cmd_type) { | 829 | switch (c->cmd_type) { |
834 | case CMD_IOACCEL1: | 830 | case CMD_IOACCEL1: |
835 | set_ioaccel1_performant_mode(h, c); | 831 | set_ioaccel1_performant_mode(h, c); |
832 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); | ||
836 | break; | 833 | break; |
837 | case CMD_IOACCEL2: | 834 | case CMD_IOACCEL2: |
838 | set_ioaccel2_performant_mode(h, c); | 835 | set_ioaccel2_performant_mode(h, c); |
836 | writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); | ||
839 | break; | 837 | break; |
840 | default: | 838 | default: |
841 | set_performant_mode(h, c); | 839 | set_performant_mode(h, c); |
840 | h->access.submit_command(h, c); | ||
842 | } | 841 | } |
843 | dial_down_lockup_detection_during_fw_flash(h, c); | ||
844 | spin_lock_irqsave(&h->lock, flags); | ||
845 | addQ(&h->reqQ, c); | ||
846 | h->Qdepth++; | ||
847 | start_io(h, &flags); | ||
848 | spin_unlock_irqrestore(&h->lock, flags); | ||
849 | } | ||
850 | |||
851 | static inline void removeQ(struct CommandList *c) | ||
852 | { | ||
853 | if (WARN_ON(list_empty(&c->list))) | ||
854 | return; | ||
855 | list_del_init(&c->list); | ||
856 | } | 842 | } |
857 | 843 | ||
858 | static inline int is_hba_lunid(unsigned char scsi3addr[]) | 844 | static inline int is_hba_lunid(unsigned char scsi3addr[]) |
@@ -919,7 +905,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, | |||
919 | 905 | ||
920 | /* If this device a non-zero lun of a multi-lun device | 906 | /* If this device a non-zero lun of a multi-lun device |
921 | * byte 4 of the 8-byte LUN addr will contain the logical | 907 | * byte 4 of the 8-byte LUN addr will contain the logical |
922 | * unit no, zero otherise. | 908 | * unit no, zero otherwise. |
923 | */ | 909 | */ |
924 | if (device->scsi3addr[4] == 0) { | 910 | if (device->scsi3addr[4] == 0) { |
925 | /* This is not a non-zero lun of a multi-lun device */ | 911 | /* This is not a non-zero lun of a multi-lun device */ |
@@ -984,12 +970,24 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, | |||
984 | /* Raid level changed. */ | 970 | /* Raid level changed. */ |
985 | h->dev[entry]->raid_level = new_entry->raid_level; | 971 | h->dev[entry]->raid_level = new_entry->raid_level; |
986 | 972 | ||
987 | /* Raid offload parameters changed. */ | 973 | /* Raid offload parameters changed. Careful about the ordering. */ |
974 | if (new_entry->offload_config && new_entry->offload_enabled) { | ||
975 | /* | ||
976 | * if drive is newly offload_enabled, we want to copy the | ||
977 | * raid map data first. If previously offload_enabled and | ||
978 | * offload_config were set, raid map data had better be | ||
979 | * the same as it was before. if raid map data is changed | ||
980 | * then it had better be the case that | ||
981 | * h->dev[entry]->offload_enabled is currently 0. | ||
982 | */ | ||
983 | h->dev[entry]->raid_map = new_entry->raid_map; | ||
984 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; | ||
985 | wmb(); /* ensure raid map updated prior to ->offload_enabled */ | ||
986 | } | ||
988 | h->dev[entry]->offload_config = new_entry->offload_config; | 987 | h->dev[entry]->offload_config = new_entry->offload_config; |
989 | h->dev[entry]->offload_enabled = new_entry->offload_enabled; | ||
990 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; | ||
991 | h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; | 988 | h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; |
992 | h->dev[entry]->raid_map = new_entry->raid_map; | 989 | h->dev[entry]->offload_enabled = new_entry->offload_enabled; |
990 | h->dev[entry]->queue_depth = new_entry->queue_depth; | ||
993 | 991 | ||
994 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", | 992 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", |
995 | scsi_device_type(new_entry->devtype), hostno, new_entry->bus, | 993 | scsi_device_type(new_entry->devtype), hostno, new_entry->bus, |
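The reordering above publishes the raid map and ioaccel handle before offload_enabled is set, with a wmb() in between, so that a reader which observes the flag also observes the map. A sketch of the same publish-then-enable idea using C11 release/acquire atomics in place of the kernel's wmb(), assuming the fast path tests offload_enabled before touching raid_map, as the comment in the hunk implies:

#include <stdatomic.h>
#include <stdio.h>

struct raid_map_data { int layout; /* ... */ };

struct dev_entry {
	struct raid_map_data raid_map;
	_Atomic int offload_enabled;   /* readers test this before using raid_map */
};

/* Publish the map data first, then flip the enable flag with release
 * semantics; an acquire load that sees the flag is guaranteed to see the
 * map stored before it. */
static void update_entry(struct dev_entry *d, const struct raid_map_data *new_map)
{
	d->raid_map = *new_map;                                   /* data first */
	atomic_store_explicit(&d->offload_enabled, 1,
			      memory_order_release);              /* flag last */
}

static int try_ioaccel(struct dev_entry *d)
{
	if (!atomic_load_explicit(&d->offload_enabled, memory_order_acquire))
		return 0;              /* fall back to the normal path */
	return d->raid_map.layout;     /* safe: map was published before the flag */
}

int main(void)
{
	struct dev_entry d;
	struct raid_map_data m = { .layout = 5 };

	atomic_init(&d.offload_enabled, 0);
	update_entry(&d, &m);
	printf("layout %d\n", try_ioaccel(&d));
	return 0;
}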
@@ -1115,6 +1113,8 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1, | |||
1115 | return 1; | 1113 | return 1; |
1116 | if (dev1->offload_enabled != dev2->offload_enabled) | 1114 | if (dev1->offload_enabled != dev2->offload_enabled) |
1117 | return 1; | 1115 | return 1; |
1116 | if (dev1->queue_depth != dev2->queue_depth) | ||
1117 | return 1; | ||
1118 | return 0; | 1118 | return 0; |
1119 | } | 1119 | } |
1120 | 1120 | ||
@@ -1260,6 +1260,85 @@ static void hpsa_show_volume_status(struct ctlr_info *h, | |||
1260 | } | 1260 | } |
1261 | } | 1261 | } |
1262 | 1262 | ||
1263 | /* | ||
1264 | * Figure the list of physical drive pointers for a logical drive with | ||
1265 | * raid offload configured. | ||
1266 | */ | ||
1267 | static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, | ||
1268 | struct hpsa_scsi_dev_t *dev[], int ndevices, | ||
1269 | struct hpsa_scsi_dev_t *logical_drive) | ||
1270 | { | ||
1271 | struct raid_map_data *map = &logical_drive->raid_map; | ||
1272 | struct raid_map_disk_data *dd = &map->data[0]; | ||
1273 | int i, j; | ||
1274 | int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + | ||
1275 | le16_to_cpu(map->metadata_disks_per_row); | ||
1276 | int nraid_map_entries = le16_to_cpu(map->row_cnt) * | ||
1277 | le16_to_cpu(map->layout_map_count) * | ||
1278 | total_disks_per_row; | ||
1279 | int nphys_disk = le16_to_cpu(map->layout_map_count) * | ||
1280 | total_disks_per_row; | ||
1281 | int qdepth; | ||
1282 | |||
1283 | if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) | ||
1284 | nraid_map_entries = RAID_MAP_MAX_ENTRIES; | ||
1285 | |||
1286 | qdepth = 0; | ||
1287 | for (i = 0; i < nraid_map_entries; i++) { | ||
1288 | logical_drive->phys_disk[i] = NULL; | ||
1289 | if (!logical_drive->offload_config) | ||
1290 | continue; | ||
1291 | for (j = 0; j < ndevices; j++) { | ||
1292 | if (dev[j]->devtype != TYPE_DISK) | ||
1293 | continue; | ||
1294 | if (is_logical_dev_addr_mode(dev[j]->scsi3addr)) | ||
1295 | continue; | ||
1296 | if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) | ||
1297 | continue; | ||
1298 | |||
1299 | logical_drive->phys_disk[i] = dev[j]; | ||
1300 | if (i < nphys_disk) | ||
1301 | qdepth = min(h->nr_cmds, qdepth + | ||
1302 | logical_drive->phys_disk[i]->queue_depth); | ||
1303 | break; | ||
1304 | } | ||
1305 | |||
1306 | /* | ||
1307 | * This can happen if a physical drive is removed and | ||
1308 | * the logical drive is degraded. In that case, the RAID | ||
1309 | * map data will refer to a physical disk which isn't actually | ||
1310 | * present. And in that case offload_enabled should already | ||
1311 | * be 0, but we'll turn it off here just in case | ||
1312 | */ | ||
1313 | if (!logical_drive->phys_disk[i]) { | ||
1314 | logical_drive->offload_enabled = 0; | ||
1315 | logical_drive->queue_depth = h->nr_cmds; | ||
1316 | } | ||
1317 | } | ||
1318 | if (nraid_map_entries) | ||
1319 | /* | ||
1320 | * This is correct for reads, too high for full stripe writes, | ||
1321 | * way too high for partial stripe writes | ||
1322 | */ | ||
1323 | logical_drive->queue_depth = qdepth; | ||
1324 | else | ||
1325 | logical_drive->queue_depth = h->nr_cmds; | ||
1326 | } | ||
1327 | |||
1328 | static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, | ||
1329 | struct hpsa_scsi_dev_t *dev[], int ndevices) | ||
1330 | { | ||
1331 | int i; | ||
1332 | |||
1333 | for (i = 0; i < ndevices; i++) { | ||
1334 | if (dev[i]->devtype != TYPE_DISK) | ||
1335 | continue; | ||
1336 | if (!is_logical_dev_addr_mode(dev[i]->scsi3addr)) | ||
1337 | continue; | ||
1338 | hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); | ||
1339 | } | ||
1340 | } | ||
1341 | |||
1263 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | 1342 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, |
1264 | struct hpsa_scsi_dev_t *sd[], int nsds) | 1343 | struct hpsa_scsi_dev_t *sd[], int nsds) |
1265 | { | 1344 | { |
@@ -1444,8 +1523,12 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) | |||
1444 | spin_lock_irqsave(&h->devlock, flags); | 1523 | spin_lock_irqsave(&h->devlock, flags); |
1445 | sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), | 1524 | sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), |
1446 | sdev_id(sdev), sdev->lun); | 1525 | sdev_id(sdev), sdev->lun); |
1447 | if (sd != NULL) | 1526 | if (sd != NULL) { |
1448 | sdev->hostdata = sd; | 1527 | sdev->hostdata = sd; |
1528 | if (sd->queue_depth) | ||
1529 | scsi_change_queue_depth(sdev, sd->queue_depth); | ||
1530 | atomic_set(&sd->ioaccel_cmds_out, 0); | ||
1531 | } | ||
1449 | spin_unlock_irqrestore(&h->devlock, flags); | 1532 | spin_unlock_irqrestore(&h->devlock, flags); |
1450 | return 0; | 1533 | return 0; |
1451 | } | 1534 | } |
@@ -1478,13 +1561,17 @@ static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) | |||
1478 | 1561 | ||
1479 | h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, | 1562 | h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, |
1480 | GFP_KERNEL); | 1563 | GFP_KERNEL); |
1481 | if (!h->cmd_sg_list) | 1564 | if (!h->cmd_sg_list) { |
1565 | dev_err(&h->pdev->dev, "Failed to allocate SG list\n"); | ||
1482 | return -ENOMEM; | 1566 | return -ENOMEM; |
1567 | } | ||
1483 | for (i = 0; i < h->nr_cmds; i++) { | 1568 | for (i = 0; i < h->nr_cmds; i++) { |
1484 | h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * | 1569 | h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * |
1485 | h->chainsize, GFP_KERNEL); | 1570 | h->chainsize, GFP_KERNEL); |
1486 | if (!h->cmd_sg_list[i]) | 1571 | if (!h->cmd_sg_list[i]) { |
1572 | dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n"); | ||
1487 | goto clean; | 1573 | goto clean; |
1574 | } | ||
1488 | } | 1575 | } |
1489 | return 0; | 1576 | return 0; |
1490 | 1577 | ||
@@ -1504,7 +1591,7 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h, | |||
1504 | chain_block = h->cmd_sg_list[c->cmdindex]; | 1591 | chain_block = h->cmd_sg_list[c->cmdindex]; |
1505 | chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); | 1592 | chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); |
1506 | chain_len = sizeof(*chain_sg) * | 1593 | chain_len = sizeof(*chain_sg) * |
1507 | (c->Header.SGTotal - h->max_cmd_sg_entries); | 1594 | (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); |
1508 | chain_sg->Len = cpu_to_le32(chain_len); | 1595 | chain_sg->Len = cpu_to_le32(chain_len); |
1509 | temp64 = pci_map_single(h->pdev, chain_block, chain_len, | 1596 | temp64 = pci_map_single(h->pdev, chain_block, chain_len, |
1510 | PCI_DMA_TODEVICE); | 1597 | PCI_DMA_TODEVICE); |
@@ -1635,7 +1722,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h, | |||
1635 | struct hpsa_scsi_dev_t *dev) | 1722 | struct hpsa_scsi_dev_t *dev) |
1636 | { | 1723 | { |
1637 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; | 1724 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; |
1638 | int raid_retry = 0; | ||
1639 | 1725 | ||
1640 | /* check for good status */ | 1726 | /* check for good status */ |
1641 | if (likely(c2->error_data.serv_response == 0 && | 1727 | if (likely(c2->error_data.serv_response == 0 && |
@@ -1652,26 +1738,22 @@ static void process_ioaccel2_completion(struct ctlr_info *h, | |||
1652 | if (is_logical_dev_addr_mode(dev->scsi3addr) && | 1738 | if (is_logical_dev_addr_mode(dev->scsi3addr) && |
1653 | c2->error_data.serv_response == | 1739 | c2->error_data.serv_response == |
1654 | IOACCEL2_SERV_RESPONSE_FAILURE) { | 1740 | IOACCEL2_SERV_RESPONSE_FAILURE) { |
1655 | dev->offload_enabled = 0; | 1741 | if (c2->error_data.status == |
1656 | h->drv_req_rescan = 1; /* schedule controller for a rescan */ | 1742 | IOACCEL2_STATUS_SR_IOACCEL_DISABLED) |
1657 | cmd->result = DID_SOFT_ERROR << 16; | 1743 | dev->offload_enabled = 0; |
1658 | cmd_free(h, c); | 1744 | goto retry_cmd; |
1659 | cmd->scsi_done(cmd); | ||
1660 | return; | ||
1661 | } | ||
1662 | raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2); | ||
1663 | /* If error found, disable Smart Path, schedule a rescan, | ||
1664 | * and force a retry on the standard path. | ||
1665 | */ | ||
1666 | if (raid_retry) { | ||
1667 | dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n", | ||
1668 | "HP SSD Smart Path"); | ||
1669 | dev->offload_enabled = 0; /* Disable Smart Path */ | ||
1670 | h->drv_req_rescan = 1; /* schedule controller rescan */ | ||
1671 | cmd->result = DID_SOFT_ERROR << 16; | ||
1672 | } | 1745 | } |
1746 | |||
1747 | if (handle_ioaccel_mode2_error(h, c, cmd, c2)) | ||
1748 | goto retry_cmd; | ||
1749 | |||
1673 | cmd_free(h, c); | 1750 | cmd_free(h, c); |
1674 | cmd->scsi_done(cmd); | 1751 | cmd->scsi_done(cmd); |
1752 | return; | ||
1753 | |||
1754 | retry_cmd: | ||
1755 | INIT_WORK(&c->work, hpsa_command_resubmit_worker); | ||
1756 | queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); | ||
1675 | } | 1757 | } |
1676 | 1758 | ||
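The retry_cmd path introduced above defers a failed ioaccel command to a driver-owned workqueue on the completing CPU instead of finishing it with DID_SOFT_ERROR. The pattern in isolation (handler body and names are illustrative assumptions):

#include <linux/workqueue.h>
#include <linux/smp.h>

static void example_resubmit_worker(struct work_struct *work)
{
	/* container_of(work, ...) back to the command, then resubmit it
	 * down the non-accelerated RAID path */
}

static void example_defer_retry(struct workqueue_struct *resubmit_wq,
				struct work_struct *work)
{
	INIT_WORK(work, example_resubmit_worker);
	/* stay on the CPU that completed the command for cache locality */
	queue_work_on(raw_smp_processor_id(), resubmit_wq, work);
}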
1677 | static void complete_scsi_command(struct CommandList *cp) | 1759 | static void complete_scsi_command(struct CommandList *cp) |
@@ -1687,18 +1769,21 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1687 | unsigned long sense_data_size; | 1769 | unsigned long sense_data_size; |
1688 | 1770 | ||
1689 | ei = cp->err_info; | 1771 | ei = cp->err_info; |
1690 | cmd = (struct scsi_cmnd *) cp->scsi_cmd; | 1772 | cmd = cp->scsi_cmd; |
1691 | h = cp->h; | 1773 | h = cp->h; |
1692 | dev = cmd->device->hostdata; | 1774 | dev = cmd->device->hostdata; |
1693 | 1775 | ||
1694 | scsi_dma_unmap(cmd); /* undo the DMA mappings */ | 1776 | scsi_dma_unmap(cmd); /* undo the DMA mappings */ |
1695 | if ((cp->cmd_type == CMD_SCSI) && | 1777 | if ((cp->cmd_type == CMD_SCSI) && |
1696 | (cp->Header.SGTotal > h->max_cmd_sg_entries)) | 1778 | (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) |
1697 | hpsa_unmap_sg_chain_block(h, cp); | 1779 | hpsa_unmap_sg_chain_block(h, cp); |
1698 | 1780 | ||
1699 | cmd->result = (DID_OK << 16); /* host byte */ | 1781 | cmd->result = (DID_OK << 16); /* host byte */ |
1700 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ | 1782 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ |
1701 | 1783 | ||
1784 | if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) | ||
1785 | atomic_dec(&cp->phys_disk->ioaccel_cmds_out); | ||
1786 | |||
1702 | if (cp->cmd_type == CMD_IOACCEL2) | 1787 | if (cp->cmd_type == CMD_IOACCEL2) |
1703 | return process_ioaccel2_completion(h, cp, cmd, dev); | 1788 | return process_ioaccel2_completion(h, cp, cmd, dev); |
1704 | 1789 | ||
@@ -1706,6 +1791,8 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1706 | 1791 | ||
1707 | scsi_set_resid(cmd, ei->ResidualCnt); | 1792 | scsi_set_resid(cmd, ei->ResidualCnt); |
1708 | if (ei->CommandStatus == 0) { | 1793 | if (ei->CommandStatus == 0) { |
1794 | if (cp->cmd_type == CMD_IOACCEL1) | ||
1795 | atomic_dec(&cp->phys_disk->ioaccel_cmds_out); | ||
1709 | cmd_free(h, cp); | 1796 | cmd_free(h, cp); |
1710 | cmd->scsi_done(cmd); | 1797 | cmd->scsi_done(cmd); |
1711 | return; | 1798 | return; |
@@ -1726,8 +1813,10 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1726 | */ | 1813 | */ |
1727 | if (cp->cmd_type == CMD_IOACCEL1) { | 1814 | if (cp->cmd_type == CMD_IOACCEL1) { |
1728 | struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; | 1815 | struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; |
1729 | cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); | 1816 | cp->Header.SGList = scsi_sg_count(cmd); |
1730 | cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; | 1817 | cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); |
1818 | cp->Request.CDBLen = le16_to_cpu(c->io_flags) & | ||
1819 | IOACCEL1_IOFLAGS_CDBLEN_MASK; | ||
1731 | cp->Header.tag = c->tag; | 1820 | cp->Header.tag = c->tag; |
1732 | memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); | 1821 | memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); |
1733 | memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); | 1822 | memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); |
@@ -1739,9 +1828,9 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1739 | if (is_logical_dev_addr_mode(dev->scsi3addr)) { | 1828 | if (is_logical_dev_addr_mode(dev->scsi3addr)) { |
1740 | if (ei->CommandStatus == CMD_IOACCEL_DISABLED) | 1829 | if (ei->CommandStatus == CMD_IOACCEL_DISABLED) |
1741 | dev->offload_enabled = 0; | 1830 | dev->offload_enabled = 0; |
1742 | cmd->result = DID_SOFT_ERROR << 16; | 1831 | INIT_WORK(&cp->work, hpsa_command_resubmit_worker); |
1743 | cmd_free(h, cp); | 1832 | queue_work_on(raw_smp_processor_id(), |
1744 | cmd->scsi_done(cmd); | 1833 | h->resubmit_wq, &cp->work); |
1745 | return; | 1834 | return; |
1746 | } | 1835 | } |
1747 | } | 1836 | } |
@@ -1798,9 +1887,8 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1798 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ | 1887 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
1799 | break; | 1888 | break; |
1800 | case CMD_DATA_OVERRUN: | 1889 | case CMD_DATA_OVERRUN: |
1801 | dev_warn(&h->pdev->dev, "cp %p has" | 1890 | dev_warn(&h->pdev->dev, |
1802 | " completed with data overrun " | 1891 | "CDB %16phN data overrun\n", cp->Request.CDB); |
1803 | "reported\n", cp); | ||
1804 | break; | 1892 | break; |
1805 | case CMD_INVALID: { | 1893 | case CMD_INVALID: { |
1806 | /* print_bytes(cp, sizeof(*cp), 1, 0); | 1894 | /* print_bytes(cp, sizeof(*cp), 1, 0); |
@@ -1816,34 +1904,38 @@ static void complete_scsi_command(struct CommandList *cp) | |||
1816 | break; | 1904 | break; |
1817 | case CMD_PROTOCOL_ERR: | 1905 | case CMD_PROTOCOL_ERR: |
1818 | cmd->result = DID_ERROR << 16; | 1906 | cmd->result = DID_ERROR << 16; |
1819 | dev_warn(&h->pdev->dev, "cp %p has " | 1907 | dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", |
1820 | "protocol error\n", cp); | 1908 | cp->Request.CDB); |
1821 | break; | 1909 | break; |
1822 | case CMD_HARDWARE_ERR: | 1910 | case CMD_HARDWARE_ERR: |
1823 | cmd->result = DID_ERROR << 16; | 1911 | cmd->result = DID_ERROR << 16; |
1824 | dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); | 1912 | dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", |
1913 | cp->Request.CDB); | ||
1825 | break; | 1914 | break; |
1826 | case CMD_CONNECTION_LOST: | 1915 | case CMD_CONNECTION_LOST: |
1827 | cmd->result = DID_ERROR << 16; | 1916 | cmd->result = DID_ERROR << 16; |
1828 | dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); | 1917 | dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", |
1918 | cp->Request.CDB); | ||
1829 | break; | 1919 | break; |
1830 | case CMD_ABORTED: | 1920 | case CMD_ABORTED: |
1831 | cmd->result = DID_ABORT << 16; | 1921 | cmd->result = DID_ABORT << 16; |
1832 | dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", | 1922 | dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", |
1833 | cp, ei->ScsiStatus); | 1923 | cp->Request.CDB, ei->ScsiStatus); |
1834 | break; | 1924 | break; |
1835 | case CMD_ABORT_FAILED: | 1925 | case CMD_ABORT_FAILED: |
1836 | cmd->result = DID_ERROR << 16; | 1926 | cmd->result = DID_ERROR << 16; |
1837 | dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); | 1927 | dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", |
1928 | cp->Request.CDB); | ||
1838 | break; | 1929 | break; |
1839 | case CMD_UNSOLICITED_ABORT: | 1930 | case CMD_UNSOLICITED_ABORT: |
1840 | cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ | 1931 | cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ |
1841 | dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited " | 1932 | dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", |
1842 | "abort\n", cp); | 1933 | cp->Request.CDB); |
1843 | break; | 1934 | break; |
1844 | case CMD_TIMEOUT: | 1935 | case CMD_TIMEOUT: |
1845 | cmd->result = DID_TIME_OUT << 16; | 1936 | cmd->result = DID_TIME_OUT << 16; |
1846 | dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); | 1937 | dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", |
1938 | cp->Request.CDB); | ||
1847 | break; | 1939 | break; |
1848 | case CMD_UNABORTABLE: | 1940 | case CMD_UNABORTABLE: |
1849 | cmd->result = DID_ERROR << 16; | 1941 | cmd->result = DID_ERROR << 16; |
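The reworked warnings above print the 16-byte CDB with the kernel's %*phN printk extension (contiguous hex, no separators) rather than a kernel virtual address. A small usage sketch:

#include <linux/types.h>
#include <linux/printk.h>

static void example_log_cdb(const u8 *cdb)
{
	/* dumps 16 bytes as 32 hex digits, e.g.
	 * "CDB 2a000000100000000800000000000000 : hardware error" */
	pr_warn("CDB %16phN : hardware error\n", cdb);
}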
@@ -2048,10 +2140,10 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2048 | struct CommandList *c; | 2140 | struct CommandList *c; |
2049 | struct ErrorInfo *ei; | 2141 | struct ErrorInfo *ei; |
2050 | 2142 | ||
2051 | c = cmd_special_alloc(h); | 2143 | c = cmd_alloc(h); |
2052 | 2144 | ||
2053 | if (c == NULL) { /* trouble... */ | 2145 | if (c == NULL) { |
2054 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2146 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2055 | return -ENOMEM; | 2147 | return -ENOMEM; |
2056 | } | 2148 | } |
2057 | 2149 | ||
@@ -2067,7 +2159,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2067 | rc = -1; | 2159 | rc = -1; |
2068 | } | 2160 | } |
2069 | out: | 2161 | out: |
2070 | cmd_special_free(h, c); | 2162 | cmd_free(h, c); |
2071 | return rc; | 2163 | return rc; |
2072 | } | 2164 | } |
2073 | 2165 | ||
@@ -2079,10 +2171,9 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, | |||
2079 | struct CommandList *c; | 2171 | struct CommandList *c; |
2080 | struct ErrorInfo *ei; | 2172 | struct ErrorInfo *ei; |
2081 | 2173 | ||
2082 | c = cmd_special_alloc(h); | 2174 | c = cmd_alloc(h); |
2083 | |||
2084 | if (c == NULL) { /* trouble... */ | 2175 | if (c == NULL) { /* trouble... */ |
2085 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2176 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2086 | return -ENOMEM; | 2177 | return -ENOMEM; |
2087 | } | 2178 | } |
2088 | 2179 | ||
@@ -2098,7 +2189,7 @@ static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, | |||
2098 | rc = -1; | 2189 | rc = -1; |
2099 | } | 2190 | } |
2100 | out: | 2191 | out: |
2101 | cmd_special_free(h, c); | 2192 | cmd_free(h, c); |
2102 | return rc; | 2193 | return rc; |
2103 | } | 2194 | } |
2104 | 2195 | ||
@@ -2109,10 +2200,10 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2109 | struct CommandList *c; | 2200 | struct CommandList *c; |
2110 | struct ErrorInfo *ei; | 2201 | struct ErrorInfo *ei; |
2111 | 2202 | ||
2112 | c = cmd_special_alloc(h); | 2203 | c = cmd_alloc(h); |
2113 | 2204 | ||
2114 | if (c == NULL) { /* trouble... */ | 2205 | if (c == NULL) { /* trouble... */ |
2115 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2206 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2116 | return -ENOMEM; | 2207 | return -ENOMEM; |
2117 | } | 2208 | } |
2118 | 2209 | ||
@@ -2128,7 +2219,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2128 | hpsa_scsi_interpret_error(h, c); | 2219 | hpsa_scsi_interpret_error(h, c); |
2129 | rc = -1; | 2220 | rc = -1; |
2130 | } | 2221 | } |
2131 | cmd_special_free(h, c); | 2222 | cmd_free(h, c); |
2132 | return rc; | 2223 | return rc; |
2133 | } | 2224 | } |
2134 | 2225 | ||
@@ -2191,15 +2282,13 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, | |||
2191 | le16_to_cpu(map_buff->row_cnt)); | 2282 | le16_to_cpu(map_buff->row_cnt)); |
2192 | dev_info(&h->pdev->dev, "layout_map_count = %u\n", | 2283 | dev_info(&h->pdev->dev, "layout_map_count = %u\n", |
2193 | le16_to_cpu(map_buff->layout_map_count)); | 2284 | le16_to_cpu(map_buff->layout_map_count)); |
2194 | dev_info(&h->pdev->dev, "flags = %u\n", | 2285 | dev_info(&h->pdev->dev, "flags = 0x%x\n", |
2195 | le16_to_cpu(map_buff->flags)); | 2286 | le16_to_cpu(map_buff->flags)); |
2196 | if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON) | 2287 | dev_info(&h->pdev->dev, "encryption = %s\n",
2197 | dev_info(&h->pdev->dev, "encryption = ON\n"); | 2288 | le16_to_cpu(map_buff->flags) &
2198 | else | 2289 | RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
2199 | dev_info(&h->pdev->dev, "encryption = OFF\n"); | ||
2200 | dev_info(&h->pdev->dev, "dekindex = %u\n", | 2290 | dev_info(&h->pdev->dev, "dekindex = %u\n", |
2201 | le16_to_cpu(map_buff->dekindex)); | 2291 | le16_to_cpu(map_buff->dekindex)); |
2202 | |||
2203 | map_cnt = le16_to_cpu(map_buff->layout_map_count); | 2292 | map_cnt = le16_to_cpu(map_buff->layout_map_count); |
2204 | for (map = 0; map < map_cnt; map++) { | 2293 | for (map = 0; map < map_cnt; map++) { |
2205 | dev_info(&h->pdev->dev, "Map%u:\n", map); | 2294 | dev_info(&h->pdev->dev, "Map%u:\n", map); |
@@ -2238,26 +2327,26 @@ static int hpsa_get_raid_map(struct ctlr_info *h, | |||
2238 | struct CommandList *c; | 2327 | struct CommandList *c; |
2239 | struct ErrorInfo *ei; | 2328 | struct ErrorInfo *ei; |
2240 | 2329 | ||
2241 | c = cmd_special_alloc(h); | 2330 | c = cmd_alloc(h); |
2242 | if (c == NULL) { | 2331 | if (c == NULL) { |
2243 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2332 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2244 | return -ENOMEM; | 2333 | return -ENOMEM; |
2245 | } | 2334 | } |
2246 | if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, | 2335 | if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, |
2247 | sizeof(this_device->raid_map), 0, | 2336 | sizeof(this_device->raid_map), 0, |
2248 | scsi3addr, TYPE_CMD)) { | 2337 | scsi3addr, TYPE_CMD)) { |
2249 | dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); | 2338 | dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); |
2250 | cmd_special_free(h, c); | 2339 | cmd_free(h, c); |
2251 | return -ENOMEM; | 2340 | return -ENOMEM; |
2252 | } | 2341 | } |
2253 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); | 2342 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); |
2254 | ei = c->err_info; | 2343 | ei = c->err_info; |
2255 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | 2344 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { |
2256 | hpsa_scsi_interpret_error(h, c); | 2345 | hpsa_scsi_interpret_error(h, c); |
2257 | cmd_special_free(h, c); | 2346 | cmd_free(h, c); |
2258 | return -1; | 2347 | return -1; |
2259 | } | 2348 | } |
2260 | cmd_special_free(h, c); | 2349 | cmd_free(h, c); |
2261 | 2350 | ||
2262 | /* @todo in the future, dynamically allocate RAID map memory */ | 2351 | /* @todo in the future, dynamically allocate RAID map memory */ |
2263 | if (le32_to_cpu(this_device->raid_map.structure_size) > | 2352 | if (le32_to_cpu(this_device->raid_map.structure_size) > |
@@ -2269,6 +2358,34 @@ static int hpsa_get_raid_map(struct ctlr_info *h, | |||
2269 | return rc; | 2358 | return rc; |
2270 | } | 2359 | } |
2271 | 2360 | ||
2361 | static int hpsa_bmic_id_physical_device(struct ctlr_info *h, | ||
2362 | unsigned char scsi3addr[], u16 bmic_device_index, | ||
2363 | struct bmic_identify_physical_device *buf, size_t bufsize) | ||
2364 | { | ||
2365 | int rc = IO_OK; | ||
2366 | struct CommandList *c; | ||
2367 | struct ErrorInfo *ei; | ||
2368 | |||
2369 | c = cmd_alloc(h); | ||
2370 | rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, | ||
2371 | 0, RAID_CTLR_LUNID, TYPE_CMD); | ||
2372 | if (rc) | ||
2373 | goto out; | ||
2374 | |||
2375 | c->Request.CDB[2] = bmic_device_index & 0xff; | ||
2376 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; | ||
2377 | |||
2378 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); | ||
2379 | ei = c->err_info; | ||
2380 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | ||
2381 | hpsa_scsi_interpret_error(h, c); | ||
2382 | rc = -1; | ||
2383 | } | ||
2384 | out: | ||
2385 | cmd_free(h, c); | ||
2386 | return rc; | ||
2387 | } | ||
2388 | |||
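hpsa_bmic_id_physical_device() above patches the 16-bit BMIC drive number into the CDB after fill_cmd(): low byte in CDB[2], high byte in CDB[9]. The byte split on its own, as a sketch:

#include <linux/types.h>

static void example_set_bmic_drive_number(u8 *cdb, u16 bmic_device_index)
{
	cdb[2] = bmic_device_index & 0xff;		/* low byte */
	cdb[9] = (bmic_device_index >> 8) & 0xff;	/* high byte */
}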
2272 | static int hpsa_vpd_page_supported(struct ctlr_info *h, | 2389 | static int hpsa_vpd_page_supported(struct ctlr_info *h, |
2273 | unsigned char scsi3addr[], u8 page) | 2390 | unsigned char scsi3addr[], u8 page) |
2274 | { | 2391 | { |
@@ -2369,7 +2486,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, | |||
2369 | } | 2486 | } |
2370 | 2487 | ||
2371 | static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | 2488 | static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, |
2372 | struct ReportLUNdata *buf, int bufsize, | 2489 | void *buf, int bufsize, |
2373 | int extended_response) | 2490 | int extended_response) |
2374 | { | 2491 | { |
2375 | int rc = IO_OK; | 2492 | int rc = IO_OK; |
@@ -2377,9 +2494,9 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
2377 | unsigned char scsi3addr[8]; | 2494 | unsigned char scsi3addr[8]; |
2378 | struct ErrorInfo *ei; | 2495 | struct ErrorInfo *ei; |
2379 | 2496 | ||
2380 | c = cmd_special_alloc(h); | 2497 | c = cmd_alloc(h); |
2381 | if (c == NULL) { /* trouble... */ | 2498 | if (c == NULL) { /* trouble... */ |
2382 | dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 2499 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
2383 | return -1; | 2500 | return -1; |
2384 | } | 2501 | } |
2385 | /* address the controller */ | 2502 | /* address the controller */ |
@@ -2398,24 +2515,26 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
2398 | hpsa_scsi_interpret_error(h, c); | 2515 | hpsa_scsi_interpret_error(h, c); |
2399 | rc = -1; | 2516 | rc = -1; |
2400 | } else { | 2517 | } else { |
2401 | if (buf->extended_response_flag != extended_response) { | 2518 | struct ReportLUNdata *rld = buf; |
2519 | |||
2520 | if (rld->extended_response_flag != extended_response) { | ||
2402 | dev_err(&h->pdev->dev, | 2521 | dev_err(&h->pdev->dev, |
2403 | "report luns requested format %u, got %u\n", | 2522 | "report luns requested format %u, got %u\n", |
2404 | extended_response, | 2523 | extended_response, |
2405 | buf->extended_response_flag); | 2524 | rld->extended_response_flag); |
2406 | rc = -1; | 2525 | rc = -1; |
2407 | } | 2526 | } |
2408 | } | 2527 | } |
2409 | out: | 2528 | out: |
2410 | cmd_special_free(h, c); | 2529 | cmd_free(h, c); |
2411 | return rc; | 2530 | return rc; |
2412 | } | 2531 | } |
2413 | 2532 | ||
2414 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, | 2533 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, |
2415 | struct ReportLUNdata *buf, | 2534 | struct ReportExtendedLUNdata *buf, int bufsize) |
2416 | int bufsize, int extended_response) | ||
2417 | { | 2535 | { |
2418 | return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); | 2536 | return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, |
2537 | HPSA_REPORT_PHYS_EXTENDED); | ||
2419 | } | 2538 | } |
2420 | 2539 | ||
2421 | static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, | 2540 | static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, |
@@ -2590,6 +2709,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
2590 | this_device->offload_config = 0; | 2709 | this_device->offload_config = 0; |
2591 | this_device->offload_enabled = 0; | 2710 | this_device->offload_enabled = 0; |
2592 | this_device->volume_offline = 0; | 2711 | this_device->volume_offline = 0; |
2712 | this_device->queue_depth = h->nr_cmds; | ||
2593 | } | 2713 | } |
2594 | 2714 | ||
2595 | if (is_OBDR_device) { | 2715 | if (is_OBDR_device) { |
@@ -2732,7 +2852,6 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2732 | { | 2852 | { |
2733 | struct ReportExtendedLUNdata *physicals = NULL; | 2853 | struct ReportExtendedLUNdata *physicals = NULL; |
2734 | int responsesize = 24; /* size of physical extended response */ | 2854 | int responsesize = 24; /* size of physical extended response */ |
2735 | int extended = 2; /* flag forces reporting 'other dev info'. */ | ||
2736 | int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; | 2855 | int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; |
2737 | u32 nphysicals = 0; /* number of reported physical devs */ | 2856 | u32 nphysicals = 0; /* number of reported physical devs */ |
2738 | int found = 0; /* found match (1) or not (0) */ | 2857 | int found = 0; /* found match (1) or not (0) */ |
@@ -2741,8 +2860,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2741 | struct scsi_cmnd *scmd; /* scsi command within request being aborted */ | 2860 | struct scsi_cmnd *scmd; /* scsi command within request being aborted */ |
2742 | struct hpsa_scsi_dev_t *d; /* device of request being aborted */ | 2861 | struct hpsa_scsi_dev_t *d; /* device of request being aborted */ |
2743 | struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ | 2862 | struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ |
2744 | u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ | 2863 | __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ |
2745 | u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ | 2864 | __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ |
2746 | 2865 | ||
2747 | if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) | 2866 | if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) |
2748 | return 0; /* no match */ | 2867 | return 0; /* no match */ |
@@ -2761,8 +2880,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2761 | return 0; /* no match */ | 2880 | return 0; /* no match */ |
2762 | 2881 | ||
2763 | it_nexus = cpu_to_le32(d->ioaccel_handle); | 2882 | it_nexus = cpu_to_le32(d->ioaccel_handle); |
2764 | scsi_nexus = cpu_to_le32(c2a->scsi_nexus); | 2883 | scsi_nexus = c2a->scsi_nexus; |
2765 | find = c2a->scsi_nexus; | 2884 | find = le32_to_cpu(c2a->scsi_nexus); |
2766 | 2885 | ||
2767 | if (h->raid_offload_debug > 0) | 2886 | if (h->raid_offload_debug > 0) |
2768 | dev_info(&h->pdev->dev, | 2887 | dev_info(&h->pdev->dev, |
@@ -2779,8 +2898,7 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2779 | physicals = kzalloc(reportsize, GFP_KERNEL); | 2898 | physicals = kzalloc(reportsize, GFP_KERNEL); |
2780 | if (physicals == NULL) | 2899 | if (physicals == NULL) |
2781 | return 0; | 2900 | return 0; |
2782 | if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, | 2901 | if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) { |
2783 | reportsize, extended)) { | ||
2784 | dev_err(&h->pdev->dev, | 2902 | dev_err(&h->pdev->dev, |
2785 | "Can't lookup %s device handle: report physical LUNs failed.\n", | 2903 | "Can't lookup %s device handle: report physical LUNs failed.\n", |
2786 | "HP SSD Smart Path"); | 2904 | "HP SSD Smart Path"); |
@@ -2821,34 +2939,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |||
2821 | * Returns 0 on success, -1 otherwise. | 2939 | * Returns 0 on success, -1 otherwise. |
2822 | */ | 2940 | */ |
2823 | static int hpsa_gather_lun_info(struct ctlr_info *h, | 2941 | static int hpsa_gather_lun_info(struct ctlr_info *h, |
2824 | int reportphyslunsize, int reportloglunsize, | 2942 | struct ReportExtendedLUNdata *physdev, u32 *nphysicals, |
2825 | struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, | ||
2826 | struct ReportLUNdata *logdev, u32 *nlogicals) | 2943 | struct ReportLUNdata *logdev, u32 *nlogicals) |
2827 | { | 2944 | { |
2828 | int physical_entry_size = 8; | 2945 | if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { |
2829 | |||
2830 | *physical_mode = 0; | ||
2831 | |||
2832 | /* For I/O accelerator mode we need to read physical device handles */ | ||
2833 | if (h->transMethod & CFGTBL_Trans_io_accel1 || | ||
2834 | h->transMethod & CFGTBL_Trans_io_accel2) { | ||
2835 | *physical_mode = HPSA_REPORT_PHYS_EXTENDED; | ||
2836 | physical_entry_size = 24; | ||
2837 | } | ||
2838 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize, | ||
2839 | *physical_mode)) { | ||
2840 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); | 2946 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); |
2841 | return -1; | 2947 | return -1; |
2842 | } | 2948 | } |
2843 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / | 2949 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; |
2844 | physical_entry_size; | ||
2845 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { | 2950 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { |
2846 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." | 2951 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", |
2847 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | 2952 | HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); |
2848 | *nphysicals - HPSA_MAX_PHYS_LUN); | ||
2849 | *nphysicals = HPSA_MAX_PHYS_LUN; | 2953 | *nphysicals = HPSA_MAX_PHYS_LUN; |
2850 | } | 2954 | } |
2851 | if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) { | 2955 | if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { |
2852 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); | 2956 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); |
2853 | return -1; | 2957 | return -1; |
2854 | } | 2958 | } |
@@ -2921,6 +3025,33 @@ static int hpsa_hba_mode_enabled(struct ctlr_info *h) | |||
2921 | return hba_mode_enabled; | 3025 | return hba_mode_enabled; |
2922 | } | 3026 | } |
2923 | 3027 | ||
3028 | /* get physical drive ioaccel handle and queue depth */ | ||
3029 | static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, | ||
3030 | struct hpsa_scsi_dev_t *dev, | ||
3031 | u8 *lunaddrbytes, | ||
3032 | struct bmic_identify_physical_device *id_phys) | ||
3033 | { | ||
3034 | int rc; | ||
3035 | struct ext_report_lun_entry *rle = | ||
3036 | (struct ext_report_lun_entry *) lunaddrbytes; | ||
3037 | |||
3038 | dev->ioaccel_handle = rle->ioaccel_handle; | ||
3039 | memset(id_phys, 0, sizeof(*id_phys)); | ||
3040 | rc = hpsa_bmic_id_physical_device(h, lunaddrbytes, | ||
3041 | GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys, | ||
3042 | sizeof(*id_phys)); | ||
3043 | if (!rc) | ||
3044 | /* Reserve space for FW operations */ | ||
3045 | #define DRIVE_CMDS_RESERVED_FOR_FW 2 | ||
3046 | #define DRIVE_QUEUE_DEPTH 7 | ||
3047 | dev->queue_depth = | ||
3048 | le16_to_cpu(id_phys->current_queue_depth_limit) - | ||
3049 | DRIVE_CMDS_RESERVED_FOR_FW; | ||
3050 | else | ||
3051 | dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ | ||
3052 | atomic_set(&dev->ioaccel_cmds_out, 0); | ||
3053 | } | ||
3054 | |||
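The helper above derives a physical drive's ioaccel queue depth from the limit reported by BMIC IDENTIFY PHYSICAL DEVICE, holding back two slots for firmware-internal commands and falling back to a conservative default when the inquiry fails. The arithmetic as a sketch, mirroring the #defines above:

#include <linux/types.h>
#include <asm/byteorder.h>

static int example_drive_queue_depth(int rc, __le16 current_queue_depth_limit)
{
	if (rc)
		return 7;	/* DRIVE_QUEUE_DEPTH: conservative fallback */
	return le16_to_cpu(current_queue_depth_limit) - 2; /* FW reserve */
}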
2924 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | 3055 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) |
2925 | { | 3056 | { |
2926 | /* the idea here is we could get notified | 3057 | /* the idea here is we could get notified |
@@ -2935,9 +3066,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
2935 | */ | 3066 | */ |
2936 | struct ReportExtendedLUNdata *physdev_list = NULL; | 3067 | struct ReportExtendedLUNdata *physdev_list = NULL; |
2937 | struct ReportLUNdata *logdev_list = NULL; | 3068 | struct ReportLUNdata *logdev_list = NULL; |
3069 | struct bmic_identify_physical_device *id_phys = NULL; | ||
2938 | u32 nphysicals = 0; | 3070 | u32 nphysicals = 0; |
2939 | u32 nlogicals = 0; | 3071 | u32 nlogicals = 0; |
2940 | int physical_mode = 0; | ||
2941 | u32 ndev_allocated = 0; | 3072 | u32 ndev_allocated = 0; |
2942 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; | 3073 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; |
2943 | int ncurrent = 0; | 3074 | int ncurrent = 0; |
@@ -2950,8 +3081,10 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
2950 | physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); | 3081 | physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); |
2951 | logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); | 3082 | logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); |
2952 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); | 3083 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); |
3084 | id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); | ||
2953 | 3085 | ||
2954 | if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) { | 3086 | if (!currentsd || !physdev_list || !logdev_list || |
3087 | !tmpdevice || !id_phys) { | ||
2955 | dev_err(&h->pdev->dev, "out of memory\n"); | 3088 | dev_err(&h->pdev->dev, "out of memory\n"); |
2956 | goto out; | 3089 | goto out; |
2957 | } | 3090 | } |
@@ -2968,10 +3101,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
2968 | 3101 | ||
2969 | h->hba_mode_enabled = rescan_hba_mode; | 3102 | h->hba_mode_enabled = rescan_hba_mode; |
2970 | 3103 | ||
2971 | if (hpsa_gather_lun_info(h, | 3104 | if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, |
2972 | sizeof(*physdev_list), sizeof(*logdev_list), | 3105 | logdev_list, &nlogicals)) |
2973 | (struct ReportLUNdata *) physdev_list, &nphysicals, | ||
2974 | &physical_mode, logdev_list, &nlogicals)) | ||
2975 | goto out; | 3106 | goto out; |
2976 | 3107 | ||
2977 | /* We might see up to the maximum number of logical and physical disks | 3108 | /* We might see up to the maximum number of logical and physical disks |
@@ -3068,10 +3199,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
3068 | ncurrent++; | 3199 | ncurrent++; |
3069 | break; | 3200 | break; |
3070 | } | 3201 | } |
3071 | if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { | 3202 | if (h->transMethod & CFGTBL_Trans_io_accel1 || |
3072 | memcpy(&this_device->ioaccel_handle, | 3203 | h->transMethod & CFGTBL_Trans_io_accel2) { |
3073 | &lunaddrbytes[20], | 3204 | hpsa_get_ioaccel_drive_info(h, this_device, |
3074 | sizeof(this_device->ioaccel_handle)); | 3205 | lunaddrbytes, id_phys); |
3206 | atomic_set(&this_device->ioaccel_cmds_out, 0); | ||
3075 | ncurrent++; | 3207 | ncurrent++; |
3076 | } | 3208 | } |
3077 | break; | 3209 | break; |
@@ -3095,6 +3227,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
3095 | if (ncurrent >= HPSA_MAX_DEVICES) | 3227 | if (ncurrent >= HPSA_MAX_DEVICES) |
3096 | break; | 3228 | break; |
3097 | } | 3229 | } |
3230 | hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent); | ||
3098 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); | 3231 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); |
3099 | out: | 3232 | out: |
3100 | kfree(tmpdevice); | 3233 | kfree(tmpdevice); |
@@ -3103,9 +3236,22 @@ out: | |||
3103 | kfree(currentsd); | 3236 | kfree(currentsd); |
3104 | kfree(physdev_list); | 3237 | kfree(physdev_list); |
3105 | kfree(logdev_list); | 3238 | kfree(logdev_list); |
3239 | kfree(id_phys); | ||
3106 | } | 3240 | } |
3107 | 3241 | ||
3108 | /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci | 3242 | static void hpsa_set_sg_descriptor(struct SGDescriptor *desc, |
3243 | struct scatterlist *sg) | ||
3244 | { | ||
3245 | u64 addr64 = (u64) sg_dma_address(sg); | ||
3246 | unsigned int len = sg_dma_len(sg); | ||
3247 | |||
3248 | desc->Addr = cpu_to_le64(addr64); | ||
3249 | desc->Len = cpu_to_le32(len); | ||
3250 | desc->Ext = 0; | ||
3251 | } | ||
3252 | |||
3253 | /* | ||
3254 | * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci | ||
3109 | * dma mapping and fills in the scatter gather entries of the | 3255 | * dma mapping and fills in the scatter gather entries of the |
3110 | * hpsa command, cp. | 3256 | * hpsa command, cp. |
3111 | */ | 3257 | */ |
@@ -3113,9 +3259,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, | |||
3113 | struct CommandList *cp, | 3259 | struct CommandList *cp, |
3114 | struct scsi_cmnd *cmd) | 3260 | struct scsi_cmnd *cmd) |
3115 | { | 3261 | { |
3116 | unsigned int len; | ||
3117 | struct scatterlist *sg; | 3262 | struct scatterlist *sg; |
3118 | u64 addr64; | ||
3119 | int use_sg, i, sg_index, chained; | 3263 | int use_sg, i, sg_index, chained; |
3120 | struct SGDescriptor *curr_sg; | 3264 | struct SGDescriptor *curr_sg; |
3121 | 3265 | ||
@@ -3138,13 +3282,11 @@ static int hpsa_scatter_gather(struct ctlr_info *h, | |||
3138 | curr_sg = h->cmd_sg_list[cp->cmdindex]; | 3282 | curr_sg = h->cmd_sg_list[cp->cmdindex]; |
3139 | sg_index = 0; | 3283 | sg_index = 0; |
3140 | } | 3284 | } |
3141 | addr64 = (u64) sg_dma_address(sg); | 3285 | hpsa_set_sg_descriptor(curr_sg, sg); |
3142 | len = sg_dma_len(sg); | ||
3143 | curr_sg->Addr = cpu_to_le64(addr64); | ||
3144 | curr_sg->Len = cpu_to_le32(len); | ||
3145 | curr_sg->Ext = cpu_to_le32(0); | ||
3146 | curr_sg++; | 3286 | curr_sg++; |
3147 | } | 3287 | } |
3288 | |||
3289 | /* Back the pointer up to the last entry and mark it as "last". */ | ||
3148 | (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); | 3290 | (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST); |
3149 | 3291 | ||
3150 | if (use_sg + chained > h->maxSG) | 3292 | if (use_sg + chained > h->maxSG) |
@@ -3163,7 +3305,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, | |||
3163 | sglist_finished: | 3305 | sglist_finished: |
3164 | 3306 | ||
3165 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ | 3307 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ |
3166 | cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in this cmd list */ | 3308 | cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ |
3167 | return 0; | 3309 | return 0; |
3168 | } | 3310 | } |
3169 | 3311 | ||
@@ -3217,7 +3359,7 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) | |||
3217 | 3359 | ||
3218 | static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, | 3360 | static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, |
3219 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, | 3361 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
3220 | u8 *scsi3addr) | 3362 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
3221 | { | 3363 | { |
3222 | struct scsi_cmnd *cmd = c->scsi_cmd; | 3364 | struct scsi_cmnd *cmd = c->scsi_cmd; |
3223 | struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; | 3365 | struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; |
@@ -3230,13 +3372,17 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, | |||
3230 | u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; | 3372 | u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; |
3231 | 3373 | ||
3232 | /* TODO: implement chaining support */ | 3374 | /* TODO: implement chaining support */ |
3233 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) | 3375 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { |
3376 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3234 | return IO_ACCEL_INELIGIBLE; | 3377 | return IO_ACCEL_INELIGIBLE; |
3378 | } | ||
3235 | 3379 | ||
3236 | BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); | 3380 | BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); |
3237 | 3381 | ||
3238 | if (fixup_ioaccel_cdb(cdb, &cdb_len)) | 3382 | if (fixup_ioaccel_cdb(cdb, &cdb_len)) { |
3383 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3239 | return IO_ACCEL_INELIGIBLE; | 3384 | return IO_ACCEL_INELIGIBLE; |
3385 | } | ||
3240 | 3386 | ||
3241 | c->cmd_type = CMD_IOACCEL1; | 3387 | c->cmd_type = CMD_IOACCEL1; |
3242 | 3388 | ||
@@ -3246,8 +3392,10 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, | |||
3246 | BUG_ON(c->busaddr & 0x0000007F); | 3392 | BUG_ON(c->busaddr & 0x0000007F); |
3247 | 3393 | ||
3248 | use_sg = scsi_dma_map(cmd); | 3394 | use_sg = scsi_dma_map(cmd); |
3249 | if (use_sg < 0) | 3395 | if (use_sg < 0) { |
3396 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3250 | return use_sg; | 3397 | return use_sg; |
3398 | } | ||
3251 | 3399 | ||
3252 | if (use_sg) { | 3400 | if (use_sg) { |
3253 | curr_sg = cp->SG; | 3401 | curr_sg = cp->SG; |
@@ -3284,11 +3432,11 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, | |||
3284 | 3432 | ||
3285 | c->Header.SGList = use_sg; | 3433 | c->Header.SGList = use_sg; |
3286 | /* Fill out the command structure to submit */ | 3434 | /* Fill out the command structure to submit */ |
3287 | cp->dev_handle = ioaccel_handle & 0xFFFF; | 3435 | cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF); |
3288 | cp->transfer_len = total_len; | 3436 | cp->transfer_len = cpu_to_le32(total_len); |
3289 | cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ | | 3437 | cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ | |
3290 | (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK); | 3438 | (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK)); |
3291 | cp->control = control; | 3439 | cp->control = cpu_to_le32(control); |
3292 | memcpy(cp->CDB, cdb, cdb_len); | 3440 | memcpy(cp->CDB, cdb, cdb_len); |
3293 | memcpy(cp->CISS_LUN, scsi3addr, 8); | 3441 | memcpy(cp->CISS_LUN, scsi3addr, 8); |
3294 | /* Tag was already set at init time. */ | 3442 | /* Tag was already set at init time. */ |
@@ -3306,8 +3454,10 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, | |||
3306 | struct scsi_cmnd *cmd = c->scsi_cmd; | 3454 | struct scsi_cmnd *cmd = c->scsi_cmd; |
3307 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; | 3455 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; |
3308 | 3456 | ||
3457 | c->phys_disk = dev; | ||
3458 | |||
3309 | return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, | 3459 | return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, |
3310 | cmd->cmnd, cmd->cmd_len, dev->scsi3addr); | 3460 | cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev); |
3311 | } | 3461 | } |
3312 | 3462 | ||
3313 | /* | 3463 | /* |
@@ -3321,10 +3471,8 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h, | |||
3321 | struct raid_map_data *map = &dev->raid_map; | 3471 | struct raid_map_data *map = &dev->raid_map; |
3322 | u64 first_block; | 3472 | u64 first_block; |
3323 | 3473 | ||
3324 | BUG_ON(!(dev->offload_config && dev->offload_enabled)); | ||
3325 | |||
3326 | /* Are we doing encryption on this device */ | 3474 | /* Are we doing encryption on this device */ |
3327 | if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON)) | 3475 | if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) |
3328 | return; | 3476 | return; |
3329 | /* Set the data encryption key index. */ | 3477 | /* Set the data encryption key index. */ |
3330 | cp->dekindex = map->dekindex; | 3478 | cp->dekindex = map->dekindex; |
@@ -3340,101 +3488,38 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h, | |||
3340 | /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ | 3488 | /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ |
3341 | case WRITE_6: | 3489 | case WRITE_6: |
3342 | case READ_6: | 3490 | case READ_6: |
3343 | if (map->volume_blk_size == 512) { | 3491 | first_block = get_unaligned_be16(&cmd->cmnd[2]); |
3344 | cp->tweak_lower = | ||
3345 | (((u32) cmd->cmnd[2]) << 8) | | ||
3346 | cmd->cmnd[3]; | ||
3347 | cp->tweak_upper = 0; | ||
3348 | } else { | ||
3349 | first_block = | ||
3350 | (((u64) cmd->cmnd[2]) << 8) | | ||
3351 | cmd->cmnd[3]; | ||
3352 | first_block = (first_block * map->volume_blk_size)/512; | ||
3353 | cp->tweak_lower = (u32)first_block; | ||
3354 | cp->tweak_upper = (u32)(first_block >> 32); | ||
3355 | } | ||
3356 | break; | 3492 | break; |
3357 | case WRITE_10: | 3493 | case WRITE_10: |
3358 | case READ_10: | 3494 | case READ_10: |
3359 | if (map->volume_blk_size == 512) { | ||
3360 | cp->tweak_lower = | ||
3361 | (((u32) cmd->cmnd[2]) << 24) | | ||
3362 | (((u32) cmd->cmnd[3]) << 16) | | ||
3363 | (((u32) cmd->cmnd[4]) << 8) | | ||
3364 | cmd->cmnd[5]; | ||
3365 | cp->tweak_upper = 0; | ||
3366 | } else { | ||
3367 | first_block = | ||
3368 | (((u64) cmd->cmnd[2]) << 24) | | ||
3369 | (((u64) cmd->cmnd[3]) << 16) | | ||
3370 | (((u64) cmd->cmnd[4]) << 8) | | ||
3371 | cmd->cmnd[5]; | ||
3372 | first_block = (first_block * map->volume_blk_size)/512; | ||
3373 | cp->tweak_lower = (u32)first_block; | ||
3374 | cp->tweak_upper = (u32)(first_block >> 32); | ||
3375 | } | ||
3376 | break; | ||
3377 | /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ | 3495 | /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ |
3378 | case WRITE_12: | 3496 | case WRITE_12: |
3379 | case READ_12: | 3497 | case READ_12: |
3380 | if (map->volume_blk_size == 512) { | 3498 | first_block = get_unaligned_be32(&cmd->cmnd[2]); |
3381 | cp->tweak_lower = | ||
3382 | (((u32) cmd->cmnd[2]) << 24) | | ||
3383 | (((u32) cmd->cmnd[3]) << 16) | | ||
3384 | (((u32) cmd->cmnd[4]) << 8) | | ||
3385 | cmd->cmnd[5]; | ||
3386 | cp->tweak_upper = 0; | ||
3387 | } else { | ||
3388 | first_block = | ||
3389 | (((u64) cmd->cmnd[2]) << 24) | | ||
3390 | (((u64) cmd->cmnd[3]) << 16) | | ||
3391 | (((u64) cmd->cmnd[4]) << 8) | | ||
3392 | cmd->cmnd[5]; | ||
3393 | first_block = (first_block * map->volume_blk_size)/512; | ||
3394 | cp->tweak_lower = (u32)first_block; | ||
3395 | cp->tweak_upper = (u32)(first_block >> 32); | ||
3396 | } | ||
3397 | break; | 3499 | break; |
3398 | case WRITE_16: | 3500 | case WRITE_16: |
3399 | case READ_16: | 3501 | case READ_16: |
3400 | if (map->volume_blk_size == 512) { | 3502 | first_block = get_unaligned_be64(&cmd->cmnd[2]); |
3401 | cp->tweak_lower = | ||
3402 | (((u32) cmd->cmnd[6]) << 24) | | ||
3403 | (((u32) cmd->cmnd[7]) << 16) | | ||
3404 | (((u32) cmd->cmnd[8]) << 8) | | ||
3405 | cmd->cmnd[9]; | ||
3406 | cp->tweak_upper = | ||
3407 | (((u32) cmd->cmnd[2]) << 24) | | ||
3408 | (((u32) cmd->cmnd[3]) << 16) | | ||
3409 | (((u32) cmd->cmnd[4]) << 8) | | ||
3410 | cmd->cmnd[5]; | ||
3411 | } else { | ||
3412 | first_block = | ||
3413 | (((u64) cmd->cmnd[2]) << 56) | | ||
3414 | (((u64) cmd->cmnd[3]) << 48) | | ||
3415 | (((u64) cmd->cmnd[4]) << 40) | | ||
3416 | (((u64) cmd->cmnd[5]) << 32) | | ||
3417 | (((u64) cmd->cmnd[6]) << 24) | | ||
3418 | (((u64) cmd->cmnd[7]) << 16) | | ||
3419 | (((u64) cmd->cmnd[8]) << 8) | | ||
3420 | cmd->cmnd[9]; | ||
3421 | first_block = (first_block * map->volume_blk_size)/512; | ||
3422 | cp->tweak_lower = (u32)first_block; | ||
3423 | cp->tweak_upper = (u32)(first_block >> 32); | ||
3424 | } | ||
3425 | break; | 3503 | break; |
3426 | default: | 3504 | default: |
3427 | dev_err(&h->pdev->dev, | 3505 | dev_err(&h->pdev->dev, |
3428 | "ERROR: %s: IOACCEL request CDB size not supported for encryption\n", | 3506 | "ERROR: %s: size (0x%x) not supported for encryption\n", |
3429 | __func__); | 3507 | __func__, cmd->cmnd[0]); |
3430 | BUG(); | 3508 | BUG(); |
3431 | break; | 3509 | break; |
3432 | } | 3510 | } |
3511 | |||
3512 | if (le32_to_cpu(map->volume_blk_size) != 512) | ||
3513 | first_block = first_block * | ||
3514 | le32_to_cpu(map->volume_blk_size)/512; | ||
3515 | |||
3516 | cp->tweak_lower = cpu_to_le32(first_block); | ||
3517 | cp->tweak_upper = cpu_to_le32(first_block >> 32); | ||
3433 | } | 3518 | } |
3434 | 3519 | ||
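The rewritten set_encrypt_ioaccel2() above reads the starting LBA straight out of the CDB with get_unaligned_be{16,32,64}() and scales it once when the volume block size is not 512 to form the encryption tweak. A standalone sketch of that derivation (names are illustrative; as the driver's comments note, 6- and 12-byte CDBs are normally rewritten earlier by fixup_ioaccel_cdb()):

#include <linux/types.h>
#include <asm/unaligned.h>

static u64 example_encrypt_tweak(const u8 *cdb, u32 volume_blk_size)
{
	u64 first_block;

	switch (cdb[0]) {
	case 0x08: case 0x0a:			/* READ_6 / WRITE_6 */
		first_block = get_unaligned_be16(&cdb[2]);
		break;
	case 0x28: case 0x2a:			/* READ_10 / WRITE_10 */
	case 0xa8: case 0xaa:			/* READ_12 / WRITE_12 */
		first_block = get_unaligned_be32(&cdb[2]);
		break;
	default:				/* READ_16 / WRITE_16 */
		first_block = get_unaligned_be64(&cdb[2]);
		break;
	}
	if (volume_blk_size != 512)
		first_block = first_block * volume_blk_size / 512;
	return first_block;
}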
3435 | static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | 3520 | static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, |
3436 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, | 3521 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
3437 | u8 *scsi3addr) | 3522 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
3438 | { | 3523 | { |
3439 | struct scsi_cmnd *cmd = c->scsi_cmd; | 3524 | struct scsi_cmnd *cmd = c->scsi_cmd; |
3440 | struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; | 3525 | struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; |
@@ -3445,11 +3530,16 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
3445 | u32 len; | 3530 | u32 len; |
3446 | u32 total_len = 0; | 3531 | u32 total_len = 0; |
3447 | 3532 | ||
3448 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) | 3533 | if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { |
3534 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3449 | return IO_ACCEL_INELIGIBLE; | 3535 | return IO_ACCEL_INELIGIBLE; |
3536 | } | ||
3450 | 3537 | ||
3451 | if (fixup_ioaccel_cdb(cdb, &cdb_len)) | 3538 | if (fixup_ioaccel_cdb(cdb, &cdb_len)) { |
3539 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3452 | return IO_ACCEL_INELIGIBLE; | 3540 | return IO_ACCEL_INELIGIBLE; |
3541 | } | ||
3542 | |||
3453 | c->cmd_type = CMD_IOACCEL2; | 3543 | c->cmd_type = CMD_IOACCEL2; |
3454 | /* Adjust the DMA address to point to the accelerated command buffer */ | 3544 | /* Adjust the DMA address to point to the accelerated command buffer */ |
3455 | c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + | 3545 | c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + |
@@ -3460,8 +3550,10 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
3460 | cp->IU_type = IOACCEL2_IU_TYPE; | 3550 | cp->IU_type = IOACCEL2_IU_TYPE; |
3461 | 3551 | ||
3462 | use_sg = scsi_dma_map(cmd); | 3552 | use_sg = scsi_dma_map(cmd); |
3463 | if (use_sg < 0) | 3553 | if (use_sg < 0) { |
3554 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3464 | return use_sg; | 3555 | return use_sg; |
3556 | } | ||
3465 | 3557 | ||
3466 | if (use_sg) { | 3558 | if (use_sg) { |
3467 | BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); | 3559 | BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); |
@@ -3506,9 +3598,8 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
3506 | /* Set encryption parameters, if necessary */ | 3598 | /* Set encryption parameters, if necessary */ |
3507 | set_encrypt_ioaccel2(h, c, cp); | 3599 | set_encrypt_ioaccel2(h, c, cp); |
3508 | 3600 | ||
3509 | cp->scsi_nexus = ioaccel_handle; | 3601 | cp->scsi_nexus = cpu_to_le32(ioaccel_handle); |
3510 | cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | | 3602 | cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT); |
3511 | DIRECT_LOOKUP_BIT; | ||
3512 | memcpy(cp->cdb, cdb, sizeof(cp->cdb)); | 3603 | memcpy(cp->cdb, cdb, sizeof(cp->cdb)); |
3513 | 3604 | ||
3514 | /* fill in sg elements */ | 3605 | /* fill in sg elements */ |
@@ -3528,14 +3619,22 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, | |||
3528 | */ | 3619 | */ |
3529 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, | 3620 | static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, |
3530 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, | 3621 | struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, |
3531 | u8 *scsi3addr) | 3622 | u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) |
3532 | { | 3623 | { |
3624 | /* Try to honor the device's queue depth */ | ||
3625 | if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > | ||
3626 | phys_disk->queue_depth) { | ||
3627 | atomic_dec(&phys_disk->ioaccel_cmds_out); | ||
3628 | return IO_ACCEL_INELIGIBLE; | ||
3629 | } | ||
3533 | if (h->transMethod & CFGTBL_Trans_io_accel1) | 3630 | if (h->transMethod & CFGTBL_Trans_io_accel1) |
3534 | return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, | 3631 | return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, |
3535 | cdb, cdb_len, scsi3addr); | 3632 | cdb, cdb_len, scsi3addr, |
3633 | phys_disk); | ||
3536 | else | 3634 | else |
3537 | return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, | 3635 | return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, |
3538 | cdb, cdb_len, scsi3addr); | 3636 | cdb, cdb_len, scsi3addr, |
3637 | phys_disk); | ||
3539 | } | 3638 | } |
3540 | 3639 | ||
3541 | static void raid_map_helper(struct raid_map_data *map, | 3640 | static void raid_map_helper(struct raid_map_data *map, |
@@ -3543,21 +3642,22 @@ static void raid_map_helper(struct raid_map_data *map, | |||
3543 | { | 3642 | { |
3544 | if (offload_to_mirror == 0) { | 3643 | if (offload_to_mirror == 0) { |
3545 | /* use physical disk in the first mirrored group. */ | 3644 | /* use physical disk in the first mirrored group. */ |
3546 | *map_index %= map->data_disks_per_row; | 3645 | *map_index %= le16_to_cpu(map->data_disks_per_row); |
3547 | return; | 3646 | return; |
3548 | } | 3647 | } |
3549 | do { | 3648 | do { |
3550 | /* determine mirror group that *map_index indicates */ | 3649 | /* determine mirror group that *map_index indicates */ |
3551 | *current_group = *map_index / map->data_disks_per_row; | 3650 | *current_group = *map_index / |
3651 | le16_to_cpu(map->data_disks_per_row); | ||
3552 | if (offload_to_mirror == *current_group) | 3652 | if (offload_to_mirror == *current_group) |
3553 | continue; | 3653 | continue; |
3554 | if (*current_group < (map->layout_map_count - 1)) { | 3654 | if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { |
3555 | /* select map index from next group */ | 3655 | /* select map index from next group */ |
3556 | *map_index += map->data_disks_per_row; | 3656 | *map_index += le16_to_cpu(map->data_disks_per_row); |
3557 | (*current_group)++; | 3657 | (*current_group)++; |
3558 | } else { | 3658 | } else { |
3559 | /* select map index from first group */ | 3659 | /* select map index from first group */ |
3560 | *map_index %= map->data_disks_per_row; | 3660 | *map_index %= le16_to_cpu(map->data_disks_per_row); |
3561 | *current_group = 0; | 3661 | *current_group = 0; |
3562 | } | 3662 | } |
3563 | } while (offload_to_mirror != *current_group); | 3663 | } while (offload_to_mirror != *current_group); |
@@ -3595,13 +3695,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3595 | u32 disk_block_cnt; | 3695 | u32 disk_block_cnt; |
3596 | u8 cdb[16]; | 3696 | u8 cdb[16]; |
3597 | u8 cdb_len; | 3697 | u8 cdb_len; |
3698 | u16 strip_size; | ||
3598 | #if BITS_PER_LONG == 32 | 3699 | #if BITS_PER_LONG == 32 |
3599 | u64 tmpdiv; | 3700 | u64 tmpdiv; |
3600 | #endif | 3701 | #endif |
3601 | int offload_to_mirror; | 3702 | int offload_to_mirror; |
3602 | 3703 | ||
3603 | BUG_ON(!(dev->offload_config && dev->offload_enabled)); | ||
3604 | |||
3605 | /* check for valid opcode, get LBA and block count */ | 3704 | /* check for valid opcode, get LBA and block count */ |
3606 | switch (cmd->cmnd[0]) { | 3705 | switch (cmd->cmnd[0]) { |
3607 | case WRITE_6: | 3706 | case WRITE_6: |
@@ -3668,11 +3767,14 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3668 | return IO_ACCEL_INELIGIBLE; | 3767 | return IO_ACCEL_INELIGIBLE; |
3669 | 3768 | ||
3670 | /* check for invalid block or wraparound */ | 3769 | /* check for invalid block or wraparound */ |
3671 | if (last_block >= map->volume_blk_cnt || last_block < first_block) | 3770 | if (last_block >= le64_to_cpu(map->volume_blk_cnt) || |
3771 | last_block < first_block) | ||
3672 | return IO_ACCEL_INELIGIBLE; | 3772 | return IO_ACCEL_INELIGIBLE; |
3673 | 3773 | ||
3674 | /* calculate stripe information for the request */ | 3774 | /* calculate stripe information for the request */ |
3675 | blocks_per_row = map->data_disks_per_row * map->strip_size; | 3775 | blocks_per_row = le16_to_cpu(map->data_disks_per_row) * |
3776 | le16_to_cpu(map->strip_size); | ||
3777 | strip_size = le16_to_cpu(map->strip_size); | ||
3676 | #if BITS_PER_LONG == 32 | 3778 | #if BITS_PER_LONG == 32 |
3677 | tmpdiv = first_block; | 3779 | tmpdiv = first_block; |
3678 | (void) do_div(tmpdiv, blocks_per_row); | 3780 | (void) do_div(tmpdiv, blocks_per_row); |
@@ -3683,18 +3785,18 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3683 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); | 3785 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); |
3684 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); | 3786 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); |
3685 | tmpdiv = first_row_offset; | 3787 | tmpdiv = first_row_offset; |
3686 | (void) do_div(tmpdiv, map->strip_size); | 3788 | (void) do_div(tmpdiv, strip_size); |
3687 | first_column = tmpdiv; | 3789 | first_column = tmpdiv; |
3688 | tmpdiv = last_row_offset; | 3790 | tmpdiv = last_row_offset; |
3689 | (void) do_div(tmpdiv, map->strip_size); | 3791 | (void) do_div(tmpdiv, strip_size); |
3690 | last_column = tmpdiv; | 3792 | last_column = tmpdiv; |
3691 | #else | 3793 | #else |
3692 | first_row = first_block / blocks_per_row; | 3794 | first_row = first_block / blocks_per_row; |
3693 | last_row = last_block / blocks_per_row; | 3795 | last_row = last_block / blocks_per_row; |
3694 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); | 3796 | first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); |
3695 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); | 3797 | last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); |
3696 | first_column = first_row_offset / map->strip_size; | 3798 | first_column = first_row_offset / strip_size; |
3697 | last_column = last_row_offset / map->strip_size; | 3799 | last_column = last_row_offset / strip_size; |
3698 | #endif | 3800 | #endif |
3699 | 3801 | ||
3700 | /* if this isn't a single row/column then give to the controller */ | 3802 | /* if this isn't a single row/column then give to the controller */ |
@@ -3702,10 +3804,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3702 | return IO_ACCEL_INELIGIBLE; | 3804 | return IO_ACCEL_INELIGIBLE; |
3703 | 3805 | ||
3704 | /* proceeding with driver mapping */ | 3806 | /* proceeding with driver mapping */ |
3705 | total_disks_per_row = map->data_disks_per_row + | 3807 | total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + |
3706 | map->metadata_disks_per_row; | 3808 | le16_to_cpu(map->metadata_disks_per_row); |
3707 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % | 3809 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % |
3708 | map->row_cnt; | 3810 | le16_to_cpu(map->row_cnt); |
3709 | map_index = (map_row * total_disks_per_row) + first_column; | 3811 | map_index = (map_row * total_disks_per_row) + first_column; |
3710 | 3812 | ||
3711 | switch (dev->raid_level) { | 3813 | switch (dev->raid_level) { |
@@ -3716,23 +3818,24 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3716 | * (2-drive R1 and R10 with even # of drives.) | 3818 | * (2-drive R1 and R10 with even # of drives.) |
3717 | * Appropriate for SSDs, not optimal for HDDs | 3819 | * Appropriate for SSDs, not optimal for HDDs |
3718 | */ | 3820 | */ |
3719 | BUG_ON(map->layout_map_count != 2); | 3821 | BUG_ON(le16_to_cpu(map->layout_map_count) != 2); |
3720 | if (dev->offload_to_mirror) | 3822 | if (dev->offload_to_mirror) |
3721 | map_index += map->data_disks_per_row; | 3823 | map_index += le16_to_cpu(map->data_disks_per_row); |
3722 | dev->offload_to_mirror = !dev->offload_to_mirror; | 3824 | dev->offload_to_mirror = !dev->offload_to_mirror; |
3723 | break; | 3825 | break; |
3724 | case HPSA_RAID_ADM: | 3826 | case HPSA_RAID_ADM: |
3725 | /* Handles N-way mirrors (R1-ADM) | 3827 | /* Handles N-way mirrors (R1-ADM) |
3726 | * and R10 with # of drives divisible by 3.) | 3828 | * and R10 with # of drives divisible by 3.) |
3727 | */ | 3829 | */ |
3728 | BUG_ON(map->layout_map_count != 3); | 3830 | BUG_ON(le16_to_cpu(map->layout_map_count) != 3); |
3729 | 3831 | ||
3730 | offload_to_mirror = dev->offload_to_mirror; | 3832 | offload_to_mirror = dev->offload_to_mirror; |
3731 | raid_map_helper(map, offload_to_mirror, | 3833 | raid_map_helper(map, offload_to_mirror, |
3732 | &map_index, &current_group); | 3834 | &map_index, &current_group); |
3733 | /* set mirror group to use next time */ | 3835 | /* set mirror group to use next time */ |
3734 | offload_to_mirror = | 3836 | offload_to_mirror = |
3735 | (offload_to_mirror >= map->layout_map_count - 1) | 3837 | (offload_to_mirror >= |
3838 | le16_to_cpu(map->layout_map_count) - 1) | ||
3736 | ? 0 : offload_to_mirror + 1; | 3839 | ? 0 : offload_to_mirror + 1; |
3737 | dev->offload_to_mirror = offload_to_mirror; | 3840 | dev->offload_to_mirror = offload_to_mirror; |
3738 | /* Avoid direct use of dev->offload_to_mirror within this | 3841 | /* Avoid direct use of dev->offload_to_mirror within this |
@@ -3742,14 +3845,16 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3742 | break; | 3845 | break; |
3743 | case HPSA_RAID_5: | 3846 | case HPSA_RAID_5: |
3744 | case HPSA_RAID_6: | 3847 | case HPSA_RAID_6: |
3745 | if (map->layout_map_count <= 1) | 3848 | if (le16_to_cpu(map->layout_map_count) <= 1) |
3746 | break; | 3849 | break; |
3747 | 3850 | ||
3748 | /* Verify first and last block are in same RAID group */ | 3851 | /* Verify first and last block are in same RAID group */ |
3749 | r5or6_blocks_per_row = | 3852 | r5or6_blocks_per_row = |
3750 | map->strip_size * map->data_disks_per_row; | 3853 | le16_to_cpu(map->strip_size) * |
3854 | le16_to_cpu(map->data_disks_per_row); | ||
3751 | BUG_ON(r5or6_blocks_per_row == 0); | 3855 | BUG_ON(r5or6_blocks_per_row == 0); |
3752 | stripesize = r5or6_blocks_per_row * map->layout_map_count; | 3856 | stripesize = r5or6_blocks_per_row * |
3857 | le16_to_cpu(map->layout_map_count); | ||
3753 | #if BITS_PER_LONG == 32 | 3858 | #if BITS_PER_LONG == 32 |
3754 | tmpdiv = first_block; | 3859 | tmpdiv = first_block; |
3755 | first_group = do_div(tmpdiv, stripesize); | 3860 | first_group = do_div(tmpdiv, stripesize); |
@@ -3812,28 +3917,35 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3812 | r5or6_blocks_per_row); | 3917 | r5or6_blocks_per_row); |
3813 | 3918 | ||
3814 | first_column = r5or6_first_column = | 3919 | first_column = r5or6_first_column = |
3815 | r5or6_first_row_offset / map->strip_size; | 3920 | r5or6_first_row_offset / le16_to_cpu(map->strip_size); |
3816 | r5or6_last_column = | 3921 | r5or6_last_column = |
3817 | r5or6_last_row_offset / map->strip_size; | 3922 | r5or6_last_row_offset / le16_to_cpu(map->strip_size); |
3818 | #endif | 3923 | #endif |
3819 | if (r5or6_first_column != r5or6_last_column) | 3924 | if (r5or6_first_column != r5or6_last_column) |
3820 | return IO_ACCEL_INELIGIBLE; | 3925 | return IO_ACCEL_INELIGIBLE; |
3821 | 3926 | ||
3822 | /* Request is eligible */ | 3927 | /* Request is eligible */ |
3823 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % | 3928 | map_row = ((u32)(first_row >> map->parity_rotation_shift)) % |
3824 | map->row_cnt; | 3929 | le16_to_cpu(map->row_cnt); |
3825 | 3930 | ||
3826 | map_index = (first_group * | 3931 | map_index = (first_group * |
3827 | (map->row_cnt * total_disks_per_row)) + | 3932 | (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + |
3828 | (map_row * total_disks_per_row) + first_column; | 3933 | (map_row * total_disks_per_row) + first_column; |
3829 | break; | 3934 | break; |
3830 | default: | 3935 | default: |
3831 | return IO_ACCEL_INELIGIBLE; | 3936 | return IO_ACCEL_INELIGIBLE; |
3832 | } | 3937 | } |
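The map_index computed in the switch above is an index into the flattened RAID map: map_row selects a row (the >> parity_rotation_shift term rotates parity placement every 2^shift rows, modulo row_cnt), and the row/column pair is then linearized across total_disks_per_row entries. A small model with invented geometry values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical RAID geometry */
    uint16_t data_disks_per_row = 4, metadata_disks_per_row = 1;
    uint16_t row_cnt = 16;
    uint8_t  parity_rotation_shift = 2;     /* rotate parity every 4 rows */
    uint64_t first_row = 9;
    uint32_t first_column = 3;

    uint16_t total_disks_per_row = data_disks_per_row + metadata_disks_per_row;
    uint32_t map_row = (uint32_t)(first_row >> parity_rotation_shift) % row_cnt;
    uint32_t map_index = map_row * total_disks_per_row + first_column;

    /* the driver then range-checks map_index before using
     * dev->phys_disk[map_index] and dd[map_index] */
    printf("map_row %u -> map_index %u\n", map_row, map_index);
    return 0;
}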
3833 | 3938 | ||
3939 | if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) | ||
3940 | return IO_ACCEL_INELIGIBLE; | ||
3941 | |||
3942 | c->phys_disk = dev->phys_disk[map_index]; | ||
3943 | |||
3834 | disk_handle = dd[map_index].ioaccel_handle; | 3944 | disk_handle = dd[map_index].ioaccel_handle; |
3835 | disk_block = map->disk_starting_blk + (first_row * map->strip_size) + | 3945 | disk_block = le64_to_cpu(map->disk_starting_blk) + |
3836 | (first_row_offset - (first_column * map->strip_size)); | 3946 | first_row * le16_to_cpu(map->strip_size) + |
3947 | (first_row_offset - first_column * | ||
3948 | le16_to_cpu(map->strip_size)); | ||
3837 | disk_block_cnt = block_cnt; | 3949 | disk_block_cnt = block_cnt; |
3838 | 3950 | ||
3839 | /* handle differing logical/physical block sizes */ | 3951 | /* handle differing logical/physical block sizes */ |
@@ -3876,78 +3988,21 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, | |||
3876 | cdb_len = 10; | 3988 | cdb_len = 10; |
3877 | } | 3989 | } |
3878 | return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, | 3990 | return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, |
3879 | dev->scsi3addr); | 3991 | dev->scsi3addr, |
3992 | dev->phys_disk[map_index]); | ||
3880 | } | 3993 | } |
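The final disk_block expression converts the logical position back into a block address on the chosen physical disk: the map's base offset, plus one whole strip per row above this one, plus the part of the row offset that falls inside this column's strip. In the driver the map fields are little-endian, hence the le16_to_cpu()/le64_to_cpu() wrappers; the sketch below uses plain host-order integers and the same hypothetical numbers as above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t disk_starting_blk = 1024;   /* hypothetical map base offset */
    uint16_t strip_size = 128;
    uint64_t first_row = 9;
    uint32_t first_row_offset = 392, first_column = 3;

    uint64_t disk_block = disk_starting_blk
        + first_row * strip_size                          /* whole rows above */
        + (first_row_offset - first_column * strip_size); /* offset in this strip */

    printf("physical disk block %llu\n", (unsigned long long)disk_block);
    return 0;
}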
3881 | 3994 | ||
3882 | /* | 3995 | /* Submit commands down the "normal" RAID stack path */ |
3883 | * Running in struct Scsi_Host->host_lock less mode using LLD internal | 3996 | static int hpsa_ciss_submit(struct ctlr_info *h, |
3884 | * struct ctlr_info *h->lock w/ spin_lock_irqsave() protection. | 3997 | struct CommandList *c, struct scsi_cmnd *cmd, |
3885 | */ | 3998 | unsigned char scsi3addr[]) |
3886 | static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | ||
3887 | { | 3999 | { |
3888 | struct ctlr_info *h; | ||
3889 | struct hpsa_scsi_dev_t *dev; | ||
3890 | unsigned char scsi3addr[8]; | ||
3891 | struct CommandList *c; | ||
3892 | int rc = 0; | ||
3893 | |||
3894 | /* Get the ptr to our adapter structure out of cmd->host. */ | ||
3895 | h = sdev_to_hba(cmd->device); | ||
3896 | dev = cmd->device->hostdata; | ||
3897 | if (!dev) { | ||
3898 | cmd->result = DID_NO_CONNECT << 16; | ||
3899 | cmd->scsi_done(cmd); | ||
3900 | return 0; | ||
3901 | } | ||
3902 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); | ||
3903 | |||
3904 | if (unlikely(lockup_detected(h))) { | ||
3905 | cmd->result = DID_ERROR << 16; | ||
3906 | cmd->scsi_done(cmd); | ||
3907 | return 0; | ||
3908 | } | ||
3909 | c = cmd_alloc(h); | ||
3910 | if (c == NULL) { /* trouble... */ | ||
3911 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); | ||
3912 | return SCSI_MLQUEUE_HOST_BUSY; | ||
3913 | } | ||
3914 | |||
3915 | /* Fill in the command list header */ | ||
3916 | /* save c in case we have to abort it */ | ||
3917 | cmd->host_scribble = (unsigned char *) c; | 4000 | cmd->host_scribble = (unsigned char *) c; |
3918 | |||
3919 | c->cmd_type = CMD_SCSI; | 4001 | c->cmd_type = CMD_SCSI; |
3920 | c->scsi_cmd = cmd; | 4002 | c->scsi_cmd = cmd; |
3921 | |||
3922 | /* Call alternate submit routine for I/O accelerated commands. | ||
3923 | * Retries always go down the normal I/O path. | ||
3924 | */ | ||
3925 | if (likely(cmd->retries == 0 && | ||
3926 | cmd->request->cmd_type == REQ_TYPE_FS && | ||
3927 | h->acciopath_status)) { | ||
3928 | if (dev->offload_enabled) { | ||
3929 | rc = hpsa_scsi_ioaccel_raid_map(h, c); | ||
3930 | if (rc == 0) | ||
3931 | return 0; /* Sent on ioaccel path */ | ||
3932 | if (rc < 0) { /* scsi_dma_map failed. */ | ||
3933 | cmd_free(h, c); | ||
3934 | return SCSI_MLQUEUE_HOST_BUSY; | ||
3935 | } | ||
3936 | } else if (dev->ioaccel_handle) { | ||
3937 | rc = hpsa_scsi_ioaccel_direct_map(h, c); | ||
3938 | if (rc == 0) | ||
3939 | return 0; /* Sent on direct map path */ | ||
3940 | if (rc < 0) { /* scsi_dma_map failed. */ | ||
3941 | cmd_free(h, c); | ||
3942 | return SCSI_MLQUEUE_HOST_BUSY; | ||
3943 | } | ||
3944 | } | ||
3945 | } | ||
3946 | |||
3947 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | 4003 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
3948 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); | 4004 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); |
3949 | c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT) | | 4005 | c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT)); |
3950 | DIRECT_LOOKUP_BIT); | ||
3951 | 4006 | ||
3952 | /* Fill in the request block... */ | 4007 | /* Fill in the request block... */ |
3953 | 4008 | ||
@@ -4003,25 +4058,108 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
4003 | return 0; | 4058 | return 0; |
4004 | } | 4059 | } |
4005 | 4060 | ||
4006 | static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) | 4061 | static void hpsa_command_resubmit_worker(struct work_struct *work) |
4007 | { | 4062 | { |
4008 | unsigned long flags; | 4063 | struct scsi_cmnd *cmd; |
4064 | struct hpsa_scsi_dev_t *dev; | ||
4065 | struct CommandList *c = | ||
4066 | container_of(work, struct CommandList, work); | ||
4067 | |||
4068 | cmd = c->scsi_cmd; | ||
4069 | dev = cmd->device->hostdata; | ||
4070 | if (!dev) { | ||
4071 | cmd->result = DID_NO_CONNECT << 16; | ||
4072 | cmd->scsi_done(cmd); | ||
4073 | return; | ||
4074 | } | ||
4075 | if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) { | ||
4076 | /* | ||
4077 | * If we get here, it means dma mapping failed. Try | ||
4078 | * again via scsi mid layer, which will then get | ||
4079 | * SCSI_MLQUEUE_HOST_BUSY. | ||
4080 | */ | ||
4081 | cmd->result = DID_IMM_RETRY << 16; | ||
4082 | cmd->scsi_done(cmd); | ||
4083 | } | ||
4084 | } | ||
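hpsa_command_resubmit_worker() receives only a pointer to the work_struct embedded in the command and recovers the enclosing CommandList with container_of(), which is just pointer arithmetic over offsetof(). A standalone userspace model of that recovery (structure names invented for the sketch):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };

struct command {                 /* stand-in for struct CommandList */
    int tag;
    struct work_item work;       /* embedded member handed to the worker */
};

static void resubmit_worker(struct work_item *w)
{
    struct command *c = container_of(w, struct command, work);
    printf("resubmitting command tag %d\n", c->tag);
}

int main(void)
{
    struct command c = { .tag = 42 };
    resubmit_worker(&c.work);    /* only the member pointer is passed around */
    return 0;
}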
4085 | |||
4086 | /* Running in struct Scsi_Host->host_lock less mode */ | ||
4087 | static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | ||
4088 | { | ||
4089 | struct ctlr_info *h; | ||
4090 | struct hpsa_scsi_dev_t *dev; | ||
4091 | unsigned char scsi3addr[8]; | ||
4092 | struct CommandList *c; | ||
4093 | int rc = 0; | ||
4094 | |||
4095 | /* Get the ptr to our adapter structure out of cmd->host. */ | ||
4096 | h = sdev_to_hba(cmd->device); | ||
4097 | dev = cmd->device->hostdata; | ||
4098 | if (!dev) { | ||
4099 | cmd->result = DID_NO_CONNECT << 16; | ||
4100 | cmd->scsi_done(cmd); | ||
4101 | return 0; | ||
4102 | } | ||
4103 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); | ||
4104 | |||
4105 | if (unlikely(lockup_detected(h))) { | ||
4106 | cmd->result = DID_ERROR << 16; | ||
4107 | cmd->scsi_done(cmd); | ||
4108 | return 0; | ||
4109 | } | ||
4110 | c = cmd_alloc(h); | ||
4111 | if (c == NULL) { /* trouble... */ | ||
4112 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); | ||
4113 | return SCSI_MLQUEUE_HOST_BUSY; | ||
4114 | } | ||
4115 | if (unlikely(lockup_detected(h))) { | ||
4116 | cmd->result = DID_ERROR << 16; | ||
4117 | cmd_free(h, c); | ||
4118 | cmd->scsi_done(cmd); | ||
4119 | return 0; | ||
4120 | } | ||
4009 | 4121 | ||
4010 | /* | 4122 | /* |
4011 | * Don't let rescans be initiated on a controller known | 4123 | * Call alternate submit routine for I/O accelerated commands. |
4012 | * to be locked up. If the controller locks up *during* | 4124 | * Retries always go down the normal I/O path. |
4013 | * a rescan, that thread is probably hosed, but at least | ||
4014 | * we can prevent new rescan threads from piling up on a | ||
4015 | * locked up controller. | ||
4016 | */ | 4125 | */ |
4017 | if (unlikely(lockup_detected(h))) { | 4126 | if (likely(cmd->retries == 0 && |
4018 | spin_lock_irqsave(&h->scan_lock, flags); | 4127 | cmd->request->cmd_type == REQ_TYPE_FS && |
4019 | h->scan_finished = 1; | 4128 | h->acciopath_status)) { |
4020 | wake_up_all(&h->scan_wait_queue); | 4129 | |
4021 | spin_unlock_irqrestore(&h->scan_lock, flags); | 4130 | cmd->host_scribble = (unsigned char *) c; |
4022 | return 1; | 4131 | c->cmd_type = CMD_SCSI; |
4132 | c->scsi_cmd = cmd; | ||
4133 | |||
4134 | if (dev->offload_enabled) { | ||
4135 | rc = hpsa_scsi_ioaccel_raid_map(h, c); | ||
4136 | if (rc == 0) | ||
4137 | return 0; /* Sent on ioaccel path */ | ||
4138 | if (rc < 0) { /* scsi_dma_map failed. */ | ||
4139 | cmd_free(h, c); | ||
4140 | return SCSI_MLQUEUE_HOST_BUSY; | ||
4141 | } | ||
4142 | } else if (dev->ioaccel_handle) { | ||
4143 | rc = hpsa_scsi_ioaccel_direct_map(h, c); | ||
4144 | if (rc == 0) | ||
4145 | return 0; /* Sent on direct map path */ | ||
4146 | if (rc < 0) { /* scsi_dma_map failed. */ | ||
4147 | cmd_free(h, c); | ||
4148 | return SCSI_MLQUEUE_HOST_BUSY; | ||
4149 | } | ||
4150 | } | ||
4023 | } | 4151 | } |
4024 | return 0; | 4152 | return hpsa_ciss_submit(h, c, cmd, scsi3addr); |
4153 | } | ||
4154 | |||
4155 | static void hpsa_scan_complete(struct ctlr_info *h) | ||
4156 | { | ||
4157 | unsigned long flags; | ||
4158 | |||
4159 | spin_lock_irqsave(&h->scan_lock, flags); | ||
4160 | h->scan_finished = 1; | ||
4161 | wake_up_all(&h->scan_wait_queue); | ||
4162 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
4025 | } | 4163 | } |
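hpsa_scan_complete() is the usual "set a flag under the lock, then wake every waiter" idiom; hpsa_scan_start() waits on the same flag before marking a new scan in progress. A pthread model of the handshake (all names invented, a condition variable standing in for the kernel wait queue):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  scan_wait = PTHREAD_COND_INITIALIZER;
static int scan_finished = 1;

static void scan_complete(void)
{
    pthread_mutex_lock(&scan_lock);
    scan_finished = 1;
    pthread_cond_broadcast(&scan_wait);   /* wake_up_all() equivalent */
    pthread_mutex_unlock(&scan_lock);
}

static void *scan_thread(void *arg)
{
    pthread_mutex_lock(&scan_lock);
    while (!scan_finished)                /* wait for any scan in progress */
        pthread_cond_wait(&scan_wait, &scan_lock);
    scan_finished = 0;                    /* mark our scan as in progress */
    pthread_mutex_unlock(&scan_lock);

    /* ... update devices ... */
    scan_complete();
    return arg;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, scan_thread, NULL);
    pthread_join(t, NULL);
    puts("scan done");
    return 0;
}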
4026 | 4164 | ||
4027 | static void hpsa_scan_start(struct Scsi_Host *sh) | 4165 | static void hpsa_scan_start(struct Scsi_Host *sh) |
@@ -4029,8 +4167,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh) | |||
4029 | struct ctlr_info *h = shost_to_hba(sh); | 4167 | struct ctlr_info *h = shost_to_hba(sh); |
4030 | unsigned long flags; | 4168 | unsigned long flags; |
4031 | 4169 | ||
4032 | if (do_not_scan_if_controller_locked_up(h)) | 4170 | /* |
4033 | return; | 4171 | * Don't let rescans be initiated on a controller known to be locked |
4172 | * up. If the controller locks up *during* a rescan, that thread is | ||
4173 | * probably hosed, but at least we can prevent new rescan threads from | ||
4174 | * piling up on a locked up controller. | ||
4175 | */ | ||
4176 | if (unlikely(lockup_detected(h))) | ||
4177 | return hpsa_scan_complete(h); | ||
4034 | 4178 | ||
4035 | /* wait until any scan already in progress is finished. */ | 4179 | /* wait until any scan already in progress is finished. */ |
4036 | while (1) { | 4180 | while (1) { |
@@ -4048,15 +4192,27 @@ static void hpsa_scan_start(struct Scsi_Host *sh) | |||
4048 | h->scan_finished = 0; /* mark scan as in progress */ | 4192 | h->scan_finished = 0; /* mark scan as in progress */ |
4049 | spin_unlock_irqrestore(&h->scan_lock, flags); | 4193 | spin_unlock_irqrestore(&h->scan_lock, flags); |
4050 | 4194 | ||
4051 | if (do_not_scan_if_controller_locked_up(h)) | 4195 | if (unlikely(lockup_detected(h))) |
4052 | return; | 4196 | return hpsa_scan_complete(h); |
4053 | 4197 | ||
4054 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); | 4198 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); |
4055 | 4199 | ||
4056 | spin_lock_irqsave(&h->scan_lock, flags); | 4200 | hpsa_scan_complete(h); |
4057 | h->scan_finished = 1; /* mark scan as finished. */ | 4201 | } |
4058 | wake_up_all(&h->scan_wait_queue); | 4202 | |
4059 | spin_unlock_irqrestore(&h->scan_lock, flags); | 4203 | static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) |
4204 | { | ||
4205 | struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata; | ||
4206 | |||
4207 | if (!logical_drive) | ||
4208 | return -ENODEV; | ||
4209 | |||
4210 | if (qdepth < 1) | ||
4211 | qdepth = 1; | ||
4212 | else if (qdepth > logical_drive->queue_depth) | ||
4213 | qdepth = logical_drive->queue_depth; | ||
4214 | |||
4215 | return scsi_change_queue_depth(sdev, qdepth); | ||
4060 | } | 4216 | } |
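The new change_queue_depth hook only clamps the requested depth to the range [1, per-device limit] before handing it to scsi_change_queue_depth(). The clamp in isolation:

#include <stdio.h>

/* clamp a requested queue depth to 1..limit, as the handler above does */
static int clamp_qdepth(int qdepth, int limit)
{
    if (qdepth < 1)
        return 1;
    if (qdepth > limit)
        return limit;
    return qdepth;
}

int main(void)
{
    printf("%d %d %d\n",
           clamp_qdepth(0, 31),    /* -> 1  */
           clamp_qdepth(16, 31),   /* -> 16 */
           clamp_qdepth(64, 31));  /* -> 31 */
    return 0;
}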
4061 | 4217 | ||
4062 | static int hpsa_scan_finished(struct Scsi_Host *sh, | 4218 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
@@ -4096,11 +4252,11 @@ static int hpsa_register_scsi(struct ctlr_info *h) | |||
4096 | sh->max_cmd_len = MAX_COMMAND_SIZE; | 4252 | sh->max_cmd_len = MAX_COMMAND_SIZE; |
4097 | sh->max_lun = HPSA_MAX_LUN; | 4253 | sh->max_lun = HPSA_MAX_LUN; |
4098 | sh->max_id = HPSA_MAX_LUN; | 4254 | sh->max_id = HPSA_MAX_LUN; |
4099 | sh->can_queue = h->nr_cmds; | 4255 | sh->can_queue = h->nr_cmds - |
4100 | if (h->hba_mode_enabled) | 4256 | HPSA_CMDS_RESERVED_FOR_ABORTS - |
4101 | sh->cmd_per_lun = 7; | 4257 | HPSA_CMDS_RESERVED_FOR_DRIVER - |
4102 | else | 4258 | HPSA_MAX_CONCURRENT_PASSTHRUS; |
4103 | sh->cmd_per_lun = h->nr_cmds; | 4259 | sh->cmd_per_lun = sh->can_queue; |
4104 | sh->sg_tablesize = h->maxsgentries; | 4260 | sh->sg_tablesize = h->maxsgentries; |
4105 | h->scsi_host = sh; | 4261 | h->scsi_host = sh; |
4106 | sh->hostdata[0] = (unsigned long) h; | 4262 | sh->hostdata[0] = (unsigned long) h; |
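can_queue is now the command pool size minus the slots the driver keeps back for aborts, internal commands, and concurrent passthru ioctls, so midlayer traffic can no longer exhaust the pool. With made-up values standing in for the reserve constants:

#include <stdio.h>

int main(void)
{
    /* hypothetical numbers; the real reserve constants are driver-defined */
    int nr_cmds = 1024;
    int reserved_for_aborts = 2;
    int reserved_for_driver = 2;
    int max_concurrent_passthrus = 20;

    int can_queue = nr_cmds - reserved_for_aborts -
                    reserved_for_driver - max_concurrent_passthrus;

    printf("can_queue = %d\n", can_queue);   /* 1000 */
    return 0;
}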
@@ -4131,7 +4287,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, | |||
4131 | int waittime = 1; /* seconds */ | 4287 | int waittime = 1; /* seconds */ |
4132 | struct CommandList *c; | 4288 | struct CommandList *c; |
4133 | 4289 | ||
4134 | c = cmd_special_alloc(h); | 4290 | c = cmd_alloc(h); |
4135 | if (!c) { | 4291 | if (!c) { |
4136 | dev_warn(&h->pdev->dev, "out of memory in " | 4292 | dev_warn(&h->pdev->dev, "out of memory in " |
4137 | "wait_for_device_to_become_ready.\n"); | 4293 | "wait_for_device_to_become_ready.\n"); |
@@ -4177,7 +4333,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, | |||
4177 | else | 4333 | else |
4178 | dev_warn(&h->pdev->dev, "device is ready.\n"); | 4334 | dev_warn(&h->pdev->dev, "device is ready.\n"); |
4179 | 4335 | ||
4180 | cmd_special_free(h, c); | 4336 | cmd_free(h, c); |
4181 | return rc; | 4337 | return rc; |
4182 | } | 4338 | } |
4183 | 4339 | ||
@@ -4194,6 +4350,10 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
4194 | h = sdev_to_hba(scsicmd->device); | 4350 | h = sdev_to_hba(scsicmd->device); |
4195 | if (h == NULL) /* paranoia */ | 4351 | if (h == NULL) /* paranoia */ |
4196 | return FAILED; | 4352 | return FAILED; |
4353 | |||
4354 | if (lockup_detected(h)) | ||
4355 | return FAILED; | ||
4356 | |||
4197 | dev = scsicmd->device->hostdata; | 4357 | dev = scsicmd->device->hostdata; |
4198 | if (!dev) { | 4358 | if (!dev) { |
4199 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " | 4359 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " |
@@ -4227,13 +4387,15 @@ static void swizzle_abort_tag(u8 *tag) | |||
4227 | } | 4387 | } |
4228 | 4388 | ||
4229 | static void hpsa_get_tag(struct ctlr_info *h, | 4389 | static void hpsa_get_tag(struct ctlr_info *h, |
4230 | struct CommandList *c, u32 *taglower, u32 *tagupper) | 4390 | struct CommandList *c, __le32 *taglower, __le32 *tagupper) |
4231 | { | 4391 | { |
4392 | u64 tag; | ||
4232 | if (c->cmd_type == CMD_IOACCEL1) { | 4393 | if (c->cmd_type == CMD_IOACCEL1) { |
4233 | struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) | 4394 | struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) |
4234 | &h->ioaccel_cmd_pool[c->cmdindex]; | 4395 | &h->ioaccel_cmd_pool[c->cmdindex]; |
4235 | *tagupper = (u32) (cm1->tag >> 32); | 4396 | tag = le64_to_cpu(cm1->tag); |
4236 | *taglower = (u32) (cm1->tag & 0x0ffffffffULL); | 4397 | *tagupper = cpu_to_le32(tag >> 32); |
4398 | *taglower = cpu_to_le32(tag); | ||
4237 | return; | 4399 | return; |
4238 | } | 4400 | } |
4239 | if (c->cmd_type == CMD_IOACCEL2) { | 4401 | if (c->cmd_type == CMD_IOACCEL2) { |
@@ -4244,8 +4406,9 @@ static void hpsa_get_tag(struct ctlr_info *h, | |||
4244 | *taglower = cm2->Tag; | 4406 | *taglower = cm2->Tag; |
4245 | return; | 4407 | return; |
4246 | } | 4408 | } |
4247 | *tagupper = (u32) (c->Header.tag >> 32); | 4409 | tag = le64_to_cpu(c->Header.tag); |
4248 | *taglower = (u32) (c->Header.tag & 0x0ffffffffULL); | 4410 | *tagupper = cpu_to_le32(tag >> 32); |
4411 | *taglower = cpu_to_le32(tag); | ||
4249 | } | 4412 | } |
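hpsa_get_tag() now converts the 64-bit little-endian tag to CPU order once and splits it into the two 32-bit halves that the abort path logs. The split itself, modeled on plain host-order integers (the le64_to_cpu()/cpu_to_le32() wrappers are elided):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t tag = 0x0000002500000040ULL;   /* hypothetical command tag */

    uint32_t tagupper = (uint32_t)(tag >> 32);
    uint32_t taglower = (uint32_t)tag;      /* low 32 bits */

    printf("Tag:0x%08x:%08x\n", tagupper, taglower);
    return 0;
}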
4250 | 4413 | ||
4251 | static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, | 4414 | static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, |
@@ -4254,11 +4417,11 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, | |||
4254 | int rc = IO_OK; | 4417 | int rc = IO_OK; |
4255 | struct CommandList *c; | 4418 | struct CommandList *c; |
4256 | struct ErrorInfo *ei; | 4419 | struct ErrorInfo *ei; |
4257 | u32 tagupper, taglower; | 4420 | __le32 tagupper, taglower; |
4258 | 4421 | ||
4259 | c = cmd_special_alloc(h); | 4422 | c = cmd_alloc(h); |
4260 | if (c == NULL) { /* trouble... */ | 4423 | if (c == NULL) { /* trouble... */ |
4261 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 4424 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
4262 | return -ENOMEM; | 4425 | return -ENOMEM; |
4263 | } | 4426 | } |
4264 | 4427 | ||
@@ -4287,62 +4450,12 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, | |||
4287 | rc = -1; | 4450 | rc = -1; |
4288 | break; | 4451 | break; |
4289 | } | 4452 | } |
4290 | cmd_special_free(h, c); | 4453 | cmd_free(h, c); |
4291 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", | 4454 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", |
4292 | __func__, tagupper, taglower); | 4455 | __func__, tagupper, taglower); |
4293 | return rc; | 4456 | return rc; |
4294 | } | 4457 | } |
4295 | 4458 | ||
4296 | /* | ||
4297 | * hpsa_find_cmd_in_queue | ||
4298 | * | ||
4299 | * Used to determine whether a command (find) is still present | ||
4300 | * in queue_head. Optionally excludes the last element of queue_head. | ||
4301 | * | ||
4302 | * This is used to avoid unnecessary aborts. Commands in h->reqQ have | ||
4303 | * not yet been submitted, and so can be aborted by the driver without | ||
4304 | * sending an abort to the hardware. | ||
4305 | * | ||
4306 | * Returns pointer to command if found in queue, NULL otherwise. | ||
4307 | */ | ||
4308 | static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h, | ||
4309 | struct scsi_cmnd *find, struct list_head *queue_head) | ||
4310 | { | ||
4311 | unsigned long flags; | ||
4312 | struct CommandList *c = NULL; /* ptr into cmpQ */ | ||
4313 | |||
4314 | if (!find) | ||
4315 | return NULL; | ||
4316 | spin_lock_irqsave(&h->lock, flags); | ||
4317 | list_for_each_entry(c, queue_head, list) { | ||
4318 | if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */ | ||
4319 | continue; | ||
4320 | if (c->scsi_cmd == find) { | ||
4321 | spin_unlock_irqrestore(&h->lock, flags); | ||
4322 | return c; | ||
4323 | } | ||
4324 | } | ||
4325 | spin_unlock_irqrestore(&h->lock, flags); | ||
4326 | return NULL; | ||
4327 | } | ||
4328 | |||
4329 | static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, | ||
4330 | u8 *tag, struct list_head *queue_head) | ||
4331 | { | ||
4332 | unsigned long flags; | ||
4333 | struct CommandList *c; | ||
4334 | |||
4335 | spin_lock_irqsave(&h->lock, flags); | ||
4336 | list_for_each_entry(c, queue_head, list) { | ||
4337 | if (memcmp(&c->Header.tag, tag, 8) != 0) | ||
4338 | continue; | ||
4339 | spin_unlock_irqrestore(&h->lock, flags); | ||
4340 | return c; | ||
4341 | } | ||
4342 | spin_unlock_irqrestore(&h->lock, flags); | ||
4343 | return NULL; | ||
4344 | } | ||
4345 | |||
4346 | /* ioaccel2 path firmware cannot handle abort task requests. | 4459 | /* ioaccel2 path firmware cannot handle abort task requests. |
4347 | * Change abort requests to physical target reset, and send to the | 4460 | * Change abort requests to physical target reset, and send to the |
4348 | * address of the physical disk used for the ioaccel 2 command. | 4461 | * address of the physical disk used for the ioaccel 2 command. |
@@ -4360,7 +4473,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, | |||
4360 | unsigned char *psa = &phys_scsi3addr[0]; | 4473 | unsigned char *psa = &phys_scsi3addr[0]; |
4361 | 4474 | ||
4362 | /* Get a pointer to the hpsa logical device. */ | 4475 | /* Get a pointer to the hpsa logical device. */ |
4363 | scmd = (struct scsi_cmnd *) abort->scsi_cmd; | 4476 | scmd = abort->scsi_cmd; |
4364 | dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); | 4477 | dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); |
4365 | if (dev == NULL) { | 4478 | if (dev == NULL) { |
4366 | dev_warn(&h->pdev->dev, | 4479 | dev_warn(&h->pdev->dev, |
@@ -4429,10 +4542,6 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, | |||
4429 | static int hpsa_send_abort_both_ways(struct ctlr_info *h, | 4542 | static int hpsa_send_abort_both_ways(struct ctlr_info *h, |
4430 | unsigned char *scsi3addr, struct CommandList *abort) | 4543 | unsigned char *scsi3addr, struct CommandList *abort) |
4431 | { | 4544 | { |
4432 | u8 swizzled_tag[8]; | ||
4433 | struct CommandList *c; | ||
4434 | int rc = 0, rc2 = 0; | ||
4435 | |||
4436 | /* io accelerator mode 2 commands should be aborted via the | 4545 | /* io accelerator mode 2 commands should be aborted via the |
4437 | * accelerated path, since RAID path is unaware of these commands, | 4546 | * accelerated path, since RAID path is unaware of these commands, |
4438 | * but underlying firmware can't handle abort TMF. | 4547 | * but underlying firmware can't handle abort TMF. |
@@ -4441,27 +4550,8 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h, | |||
4441 | if (abort->cmd_type == CMD_IOACCEL2) | 4550 | if (abort->cmd_type == CMD_IOACCEL2) |
4442 | return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); | 4551 | return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); |
4443 | 4552 | ||
4444 | /* we do not expect to find the swizzled tag in our queue, but | 4553 | return hpsa_send_abort(h, scsi3addr, abort, 0) && |
4445 | * check anyway just to be sure the assumptions which make this | 4554 | hpsa_send_abort(h, scsi3addr, abort, 1); |
4446 | * the case haven't become wrong. | ||
4447 | */ | ||
4448 | memcpy(swizzled_tag, &abort->Request.CDB[4], 8); | ||
4449 | swizzle_abort_tag(swizzled_tag); | ||
4450 | c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ); | ||
4451 | if (c != NULL) { | ||
4452 | dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n"); | ||
4453 | return hpsa_send_abort(h, scsi3addr, abort, 0); | ||
4454 | } | ||
4455 | rc = hpsa_send_abort(h, scsi3addr, abort, 0); | ||
4456 | |||
4457 | /* if the command is still in our queue, we can't conclude that it was | ||
4458 | * aborted (it might have just completed normally) but in any case | ||
4459 | * we don't need to try to abort it another way. | ||
4460 | */ | ||
4461 | c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ); | ||
4462 | if (c) | ||
4463 | rc2 = hpsa_send_abort(h, scsi3addr, abort, 1); | ||
4464 | return rc && rc2; | ||
4465 | } | 4555 | } |
4466 | 4556 | ||
4467 | /* Send an abort for the specified command. | 4557 | /* Send an abort for the specified command. |
@@ -4475,11 +4565,11 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4475 | struct ctlr_info *h; | 4565 | struct ctlr_info *h; |
4476 | struct hpsa_scsi_dev_t *dev; | 4566 | struct hpsa_scsi_dev_t *dev; |
4477 | struct CommandList *abort; /* pointer to command to be aborted */ | 4567 | struct CommandList *abort; /* pointer to command to be aborted */ |
4478 | struct CommandList *found; | ||
4479 | struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ | 4568 | struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ |
4480 | char msg[256]; /* For debug messaging. */ | 4569 | char msg[256]; /* For debug messaging. */ |
4481 | int ml = 0; | 4570 | int ml = 0; |
4482 | u32 tagupper, taglower; | 4571 | __le32 tagupper, taglower; |
4572 | int refcount; | ||
4483 | 4573 | ||
4484 | /* Find the controller of the command to be aborted */ | 4574 | /* Find the controller of the command to be aborted */ |
4485 | h = sdev_to_hba(sc->device); | 4575 | h = sdev_to_hba(sc->device); |
@@ -4487,6 +4577,9 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4487 | "ABORT REQUEST FAILED, Controller lookup failed.\n")) | 4577 | "ABORT REQUEST FAILED, Controller lookup failed.\n")) |
4488 | return FAILED; | 4578 | return FAILED; |
4489 | 4579 | ||
4580 | if (lockup_detected(h)) | ||
4581 | return FAILED; | ||
4582 | |||
4490 | /* Check that controller supports some kind of task abort */ | 4583 | /* Check that controller supports some kind of task abort */ |
4491 | if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && | 4584 | if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && |
4492 | !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) | 4585 | !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) |
@@ -4508,41 +4601,23 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4508 | /* Get SCSI command to be aborted */ | 4601 | /* Get SCSI command to be aborted */ |
4509 | abort = (struct CommandList *) sc->host_scribble; | 4602 | abort = (struct CommandList *) sc->host_scribble; |
4510 | if (abort == NULL) { | 4603 | if (abort == NULL) { |
4511 | dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n", | 4604 | /* This can happen if the command already completed. */ |
4512 | msg); | 4605 | return SUCCESS; |
4513 | return FAILED; | 4606 | } |
4607 | refcount = atomic_inc_return(&abort->refcount); | ||
4608 | if (refcount == 1) { /* Command is done already. */ | ||
4609 | cmd_free(h, abort); | ||
4610 | return SUCCESS; | ||
4514 | } | 4611 | } |
4515 | hpsa_get_tag(h, abort, &taglower, &tagupper); | 4612 | hpsa_get_tag(h, abort, &taglower, &tagupper); |
4516 | ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); | 4613 | ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); |
4517 | as = (struct scsi_cmnd *) abort->scsi_cmd; | 4614 | as = abort->scsi_cmd; |
4518 | if (as != NULL) | 4615 | if (as != NULL) |
4519 | ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", | 4616 | ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", |
4520 | as->cmnd[0], as->serial_number); | 4617 | as->cmnd[0], as->serial_number); |
4521 | dev_dbg(&h->pdev->dev, "%s\n", msg); | 4618 | dev_dbg(&h->pdev->dev, "%s\n", msg); |
4522 | dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", | 4619 | dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n", |
4523 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); | 4620 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); |
4524 | |||
4525 | /* Search reqQ to See if command is queued but not submitted, | ||
4526 | * if so, complete the command with aborted status and remove | ||
4527 | * it from the reqQ. | ||
4528 | */ | ||
4529 | found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ); | ||
4530 | if (found) { | ||
4531 | found->err_info->CommandStatus = CMD_ABORTED; | ||
4532 | finish_cmd(found); | ||
4533 | dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n", | ||
4534 | msg); | ||
4535 | return SUCCESS; | ||
4536 | } | ||
4537 | |||
4538 | /* not in reqQ, if also not in cmpQ, must have already completed */ | ||
4539 | found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); | ||
4540 | if (!found) { | ||
4541 | dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n", | ||
4542 | msg); | ||
4543 | return SUCCESS; | ||
4544 | } | ||
4545 | |||
4546 | /* | 4621 | /* |
4547 | * Command is in flight, or possibly already completed | 4622 | * Command is in flight, or possibly already completed |
4548 | * by the firmware (but not to the scsi mid layer) but we can't | 4623 | * by the firmware (but not to the scsi mid layer) but we can't |
@@ -4554,6 +4629,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4554 | dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", | 4629 | dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n", |
4555 | h->scsi_host->host_no, | 4630 | h->scsi_host->host_no, |
4556 | dev->bus, dev->target, dev->lun); | 4631 | dev->bus, dev->target, dev->lun); |
4632 | cmd_free(h, abort); | ||
4557 | return FAILED; | 4633 | return FAILED; |
4558 | } | 4634 | } |
4559 | dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); | 4635 | dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg); |
@@ -4565,32 +4641,38 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |||
4565 | */ | 4641 | */ |
4566 | #define ABORT_COMPLETE_WAIT_SECS 30 | 4642 | #define ABORT_COMPLETE_WAIT_SECS 30 |
4567 | for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { | 4643 | for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) { |
4568 | found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ); | 4644 | refcount = atomic_read(&abort->refcount); |
4569 | if (!found) | 4645 | if (refcount < 2) { |
4646 | cmd_free(h, abort); | ||
4570 | return SUCCESS; | 4647 | return SUCCESS; |
4571 | msleep(100); | 4648 | } else { |
4649 | msleep(100); | ||
4650 | } | ||
4572 | } | 4651 | } |
4573 | dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", | 4652 | dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n", |
4574 | msg, ABORT_COMPLETE_WAIT_SECS); | 4653 | msg, ABORT_COMPLETE_WAIT_SECS); |
4654 | cmd_free(h, abort); | ||
4575 | return FAILED; | 4655 | return FAILED; |
4576 | } | 4656 | } |
4577 | 4657 | ||
4578 | |||
4579 | /* | 4658 | /* |
4580 | * For operations that cannot sleep, a command block is allocated at init, | 4659 | * For operations that cannot sleep, a command block is allocated at init, |
4581 | * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track | 4660 | * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track |
4582 | * which ones are free or in use. Lock must be held when calling this. | 4661 | * which ones are free or in use. Lock must be held when calling this. |
4583 | * cmd_free() is the complement. | 4662 | * cmd_free() is the complement. |
4584 | */ | 4663 | */ |
4664 | |||
4585 | static struct CommandList *cmd_alloc(struct ctlr_info *h) | 4665 | static struct CommandList *cmd_alloc(struct ctlr_info *h) |
4586 | { | 4666 | { |
4587 | struct CommandList *c; | 4667 | struct CommandList *c; |
4588 | int i; | 4668 | int i; |
4589 | union u64bit temp64; | 4669 | union u64bit temp64; |
4590 | dma_addr_t cmd_dma_handle, err_dma_handle; | 4670 | dma_addr_t cmd_dma_handle, err_dma_handle; |
4591 | int loopcount; | 4671 | int refcount; |
4672 | unsigned long offset; | ||
4592 | 4673 | ||
4593 | /* There is some *extremely* small but non-zero chance that | 4674 | /*
4675 | * There is some *extremely* small but non-zero chance that | ||
4594 | * multiple threads could get in here, and one thread could | 4676 | * multiple threads could get in here, and one thread could |
4595 | * be scanning through the list of bits looking for a free | 4677 | * be scanning through the list of bits looking for a free |
4596 | * one, but the free ones are always behind him, and other | 4678 | * one, but the free ones are always behind him, and other |
@@ -4601,24 +4683,30 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) | |||
4601 | * infrequently as to be indistinguishable from never. | 4683 | * infrequently as to be indistinguishable from never. |
4602 | */ | 4684 | */ |
4603 | 4685 | ||
4604 | loopcount = 0; | 4686 | offset = h->last_allocation; /* benignly racy */ |
4605 | do { | 4687 | for (;;) { |
4606 | i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); | 4688 | i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset); |
4607 | if (i == h->nr_cmds) | 4689 | if (unlikely(i == h->nr_cmds)) { |
4608 | i = 0; | 4690 | offset = 0; |
4609 | loopcount++; | 4691 | continue; |
4610 | } while (test_and_set_bit(i & (BITS_PER_LONG - 1), | 4692 | } |
4611 | h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 && | 4693 | c = h->cmd_pool + i; |
4612 | loopcount < 10); | 4694 | refcount = atomic_inc_return(&c->refcount); |
4613 | 4695 | if (unlikely(refcount > 1)) { | |
4614 | /* Thread got starved? We do not expect this to ever happen. */ | 4696 | cmd_free(h, c); /* already in use */ |
4615 | if (loopcount >= 10) | 4697 | offset = (i + 1) % h->nr_cmds; |
4616 | return NULL; | 4698 | continue; |
4617 | 4699 | } | |
4618 | c = h->cmd_pool + i; | 4700 | set_bit(i & (BITS_PER_LONG - 1), |
4619 | memset(c, 0, sizeof(*c)); | 4701 | h->cmd_pool_bits + (i / BITS_PER_LONG)); |
4620 | cmd_dma_handle = h->cmd_pool_dhandle | 4702 | break; /* it's ours now. */ |
4621 | + i * sizeof(*c); | 4703 | } |
4704 | h->last_allocation = i; /* benignly racy */ | ||
4705 | |||
4706 | /* Zero out all of commandlist except the last field, refcount */ | ||
4707 | memset(c, 0, offsetof(struct CommandList, refcount)); | ||
4708 | c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT)); | ||
4709 | cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c); | ||
4622 | c->err_info = h->errinfo_pool + i; | 4710 | c->err_info = h->errinfo_pool + i; |
4623 | memset(c->err_info, 0, sizeof(*c->err_info)); | 4711 | memset(c->err_info, 0, sizeof(*c->err_info)); |
4624 | err_dma_handle = h->errinfo_pool_dhandle | 4712 | err_dma_handle = h->errinfo_pool_dhandle |
@@ -4626,45 +4714,10 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) | |||
4626 | 4714 | ||
4627 | c->cmdindex = i; | 4715 | c->cmdindex = i; |
4628 | 4716 | ||
4629 | INIT_LIST_HEAD(&c->list); | ||
4630 | c->busaddr = (u32) cmd_dma_handle; | 4717 | c->busaddr = (u32) cmd_dma_handle; |
4631 | temp64.val = (u64) err_dma_handle; | 4718 | temp64.val = (u64) err_dma_handle; |
4632 | c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); | 4719 | c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle); |
4633 | c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); | 4720 | c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info)); |
4634 | |||
4635 | c->h = h; | ||
4636 | return c; | ||
4637 | } | ||
4638 | |||
4639 | /* For operations that can wait for kmalloc to possibly sleep, | ||
4640 | * this routine can be called. Lock need not be held to call | ||
4641 | * cmd_special_alloc. cmd_special_free() is the complement. | ||
4642 | */ | ||
4643 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | ||
4644 | { | ||
4645 | struct CommandList *c; | ||
4646 | dma_addr_t cmd_dma_handle, err_dma_handle; | ||
4647 | |||
4648 | c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); | ||
4649 | if (c == NULL) | ||
4650 | return NULL; | ||
4651 | |||
4652 | c->cmd_type = CMD_SCSI; | ||
4653 | c->cmdindex = -1; | ||
4654 | |||
4655 | c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info), | ||
4656 | &err_dma_handle); | ||
4657 | |||
4658 | if (c->err_info == NULL) { | ||
4659 | pci_free_consistent(h->pdev, | ||
4660 | sizeof(*c), c, cmd_dma_handle); | ||
4661 | return NULL; | ||
4662 | } | ||
4663 | |||
4664 | INIT_LIST_HEAD(&c->list); | ||
4665 | c->busaddr = (u32) cmd_dma_handle; | ||
4666 | c->ErrDesc.Addr = cpu_to_le64(err_dma_handle); | ||
4667 | c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info)); | ||
4668 | 4721 | ||
4669 | c->h = h; | 4722 | c->h = h; |
4670 | return c; | 4723 | return c; |
@@ -4672,20 +4725,13 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | |||
4672 | 4725 | ||
4673 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) | 4726 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) |
4674 | { | 4727 | { |
4675 | int i; | 4728 | if (atomic_dec_and_test(&c->refcount)) { |
4676 | 4729 | int i; | |
4677 | i = c - h->cmd_pool; | ||
4678 | clear_bit(i & (BITS_PER_LONG - 1), | ||
4679 | h->cmd_pool_bits + (i / BITS_PER_LONG)); | ||
4680 | } | ||
4681 | 4730 | ||
4682 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) | 4731 | i = c - h->cmd_pool; |
4683 | { | 4732 | clear_bit(i & (BITS_PER_LONG - 1), |
4684 | pci_free_consistent(h->pdev, sizeof(*c->err_info), | 4733 | h->cmd_pool_bits + (i / BITS_PER_LONG)); |
4685 | c->err_info, | 4734 | } |
4686 | (dma_addr_t) le64_to_cpu(c->ErrDesc.Addr)); | ||
4687 | pci_free_consistent(h->pdev, sizeof(*c), | ||
4688 | c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); | ||
4689 | } | 4735 | } |
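cmd_alloc()/cmd_free() now manage the command pool with a per-entry reference count: taking an entry from 0 to 1 with an atomic increment claims it, and whoever drops it back to 0 releases it. The same count is what lets the abort and reset paths pin a command while they inspect it. A C11 userspace model of the idea (tiny fixed pool, invented names, no bitmap search hint):

#include <stdatomic.h>
#include <stdio.h>

#define POOL_SIZE 8

struct cmd {                     /* stand-in for struct CommandList */
    int index;
    atomic_int refcount;         /* 0 = free, >= 1 = in use */
};

static struct cmd pool[POOL_SIZE];

static struct cmd *cmd_alloc(void)
{
    for (int i = 0; i < POOL_SIZE; i++) {
        struct cmd *c = &pool[i];
        /* going from 0 to 1 means the slot was free and is now ours */
        if (atomic_fetch_add(&c->refcount, 1) == 0) {
            c->index = i;
            return c;
        }
        atomic_fetch_sub(&c->refcount, 1);   /* already in use, undo */
    }
    return NULL;                             /* pool exhausted */
}

static void cmd_free(struct cmd *c)
{
    /* the last reference to drop makes the slot allocatable again */
    if (atomic_fetch_sub(&c->refcount, 1) == 1)
        printf("slot %d released\n", c->index);
}

int main(void)
{
    struct cmd *c = cmd_alloc();             /* refcount 0 -> 1 */
    atomic_fetch_add(&c->refcount, 1);       /* e.g. an abort pinning it: -> 2 */
    cmd_free(c);                             /* completion drops its ref: -> 1 */
    cmd_free(c);                             /* abort drops the pin: -> 0, freed */
    return 0;
}

The second increment plays the role of the abort handler's atomic_inc_return(): while that extra reference is held the slot cannot be recycled, and a return value of 1 from the increment would have told the handler the command had already completed.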
4690 | 4736 | ||
4691 | #ifdef CONFIG_COMPAT | 4737 | #ifdef CONFIG_COMPAT |
@@ -4866,7 +4912,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
4866 | memset(buff, 0, iocommand.buf_size); | 4912 | memset(buff, 0, iocommand.buf_size); |
4867 | } | 4913 | } |
4868 | } | 4914 | } |
4869 | c = cmd_special_alloc(h); | 4915 | c = cmd_alloc(h); |
4870 | if (c == NULL) { | 4916 | if (c == NULL) { |
4871 | rc = -ENOMEM; | 4917 | rc = -ENOMEM; |
4872 | goto out_kfree; | 4918 | goto out_kfree; |
@@ -4883,8 +4929,6 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
4883 | c->Header.SGTotal = cpu_to_le16(0); | 4929 | c->Header.SGTotal = cpu_to_le16(0); |
4884 | } | 4930 | } |
4885 | memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); | 4931 | memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); |
4886 | /* use the kernel address the cmd block for tag */ | ||
4887 | c->Header.tag = c->busaddr; | ||
4888 | 4932 | ||
4889 | /* Fill in Request block */ | 4933 | /* Fill in Request block */ |
4890 | memcpy(&c->Request, &iocommand.Request, | 4934 | memcpy(&c->Request, &iocommand.Request, |
@@ -4925,7 +4969,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
4925 | } | 4969 | } |
4926 | } | 4970 | } |
4927 | out: | 4971 | out: |
4928 | cmd_special_free(h, c); | 4972 | cmd_free(h, c); |
4929 | out_kfree: | 4973 | out_kfree: |
4930 | kfree(buff); | 4974 | kfree(buff); |
4931 | return rc; | 4975 | return rc; |
@@ -4940,7 +4984,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
4940 | u64 temp64; | 4984 | u64 temp64; |
4941 | BYTE sg_used = 0; | 4985 | BYTE sg_used = 0; |
4942 | int status = 0; | 4986 | int status = 0; |
4943 | int i; | ||
4944 | u32 left; | 4987 | u32 left; |
4945 | u32 sz; | 4988 | u32 sz; |
4946 | BYTE __user *data_ptr; | 4989 | BYTE __user *data_ptr; |
@@ -5004,7 +5047,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
5004 | data_ptr += sz; | 5047 | data_ptr += sz; |
5005 | sg_used++; | 5048 | sg_used++; |
5006 | } | 5049 | } |
5007 | c = cmd_special_alloc(h); | 5050 | c = cmd_alloc(h); |
5008 | if (c == NULL) { | 5051 | if (c == NULL) { |
5009 | status = -ENOMEM; | 5052 | status = -ENOMEM; |
5010 | goto cleanup1; | 5053 | goto cleanup1; |
@@ -5014,7 +5057,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
5014 | c->Header.SGList = (u8) sg_used; | 5057 | c->Header.SGList = (u8) sg_used; |
5015 | c->Header.SGTotal = cpu_to_le16(sg_used); | 5058 | c->Header.SGTotal = cpu_to_le16(sg_used); |
5016 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); | 5059 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); |
5017 | c->Header.tag = c->busaddr; | ||
5018 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); | 5060 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); |
5019 | if (ioc->buf_size > 0) { | 5061 | if (ioc->buf_size > 0) { |
5020 | int i; | 5062 | int i; |
@@ -5047,6 +5089,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
5047 | goto cleanup0; | 5089 | goto cleanup0; |
5048 | } | 5090 | } |
5049 | if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { | 5091 | if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { |
5092 | int i; | ||
5093 | |||
5050 | /* Copy the data out of the buffer we created */ | 5094 | /* Copy the data out of the buffer we created */ |
5051 | BYTE __user *ptr = ioc->buf; | 5095 | BYTE __user *ptr = ioc->buf; |
5052 | for (i = 0; i < sg_used; i++) { | 5096 | for (i = 0; i < sg_used; i++) { |
@@ -5059,9 +5103,11 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
5059 | } | 5103 | } |
5060 | status = 0; | 5104 | status = 0; |
5061 | cleanup0: | 5105 | cleanup0: |
5062 | cmd_special_free(h, c); | 5106 | cmd_free(h, c); |
5063 | cleanup1: | 5107 | cleanup1: |
5064 | if (buff) { | 5108 | if (buff) { |
5109 | int i; | ||
5110 | |||
5065 | for (i = 0; i < sg_used; i++) | 5111 | for (i = 0; i < sg_used; i++) |
5066 | kfree(buff[i]); | 5112 | kfree(buff[i]); |
5067 | kfree(buff); | 5113 | kfree(buff); |
@@ -5079,35 +5125,6 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, | |||
5079 | (void) check_for_unit_attention(h, c); | 5125 | (void) check_for_unit_attention(h, c); |
5080 | } | 5126 | } |
5081 | 5127 | ||
5082 | static int increment_passthru_count(struct ctlr_info *h) | ||
5083 | { | ||
5084 | unsigned long flags; | ||
5085 | |||
5086 | spin_lock_irqsave(&h->passthru_count_lock, flags); | ||
5087 | if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { | ||
5088 | spin_unlock_irqrestore(&h->passthru_count_lock, flags); | ||
5089 | return -1; | ||
5090 | } | ||
5091 | h->passthru_count++; | ||
5092 | spin_unlock_irqrestore(&h->passthru_count_lock, flags); | ||
5093 | return 0; | ||
5094 | } | ||
5095 | |||
5096 | static void decrement_passthru_count(struct ctlr_info *h) | ||
5097 | { | ||
5098 | unsigned long flags; | ||
5099 | |||
5100 | spin_lock_irqsave(&h->passthru_count_lock, flags); | ||
5101 | if (h->passthru_count <= 0) { | ||
5102 | spin_unlock_irqrestore(&h->passthru_count_lock, flags); | ||
5103 | /* not expecting to get here. */ | ||
5104 | dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); | ||
5105 | return; | ||
5106 | } | ||
5107 | h->passthru_count--; | ||
5108 | spin_unlock_irqrestore(&h->passthru_count_lock, flags); | ||
5109 | } | ||
5110 | |||
5111 | /* | 5128 | /* |
5112 | * ioctl | 5129 | * ioctl |
5113 | */ | 5130 | */ |
@@ -5130,16 +5147,16 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) | |||
5130 | case CCISS_GETDRIVVER: | 5147 | case CCISS_GETDRIVVER: |
5131 | return hpsa_getdrivver_ioctl(h, argp); | 5148 | return hpsa_getdrivver_ioctl(h, argp); |
5132 | case CCISS_PASSTHRU: | 5149 | case CCISS_PASSTHRU: |
5133 | if (increment_passthru_count(h)) | 5150 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
5134 | return -EAGAIN; | 5151 | return -EAGAIN; |
5135 | rc = hpsa_passthru_ioctl(h, argp); | 5152 | rc = hpsa_passthru_ioctl(h, argp); |
5136 | decrement_passthru_count(h); | 5153 | atomic_inc(&h->passthru_cmds_avail); |
5137 | return rc; | 5154 | return rc; |
5138 | case CCISS_BIG_PASSTHRU: | 5155 | case CCISS_BIG_PASSTHRU: |
5139 | if (increment_passthru_count(h)) | 5156 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
5140 | return -EAGAIN; | 5157 | return -EAGAIN; |
5141 | rc = hpsa_big_passthru_ioctl(h, argp); | 5158 | rc = hpsa_big_passthru_ioctl(h, argp); |
5142 | decrement_passthru_count(h); | 5159 | atomic_inc(&h->passthru_cmds_avail); |
5143 | return rc; | 5160 | return rc; |
5144 | default: | 5161 | default: |
5145 | return -ENOTTY; | 5162 | return -ENOTTY; |
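The passthru ioctls are now throttled by a single atomic counter: atomic_dec_if_positive() takes a slot or reports that none are left (mapped to -EAGAIN), and atomic_inc() returns the slot when the ioctl finishes. C11 has no dec-if-positive primitive, so the sketch below builds one from a compare-exchange loop (counter size and names are invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int passthru_cmds_avail;

/* model of atomic_dec_if_positive(): decrement only if the result stays >= 0,
 * and return the (possibly not stored) decremented value */
static int dec_if_positive(atomic_int *v)
{
    int old = atomic_load(v);
    while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
        ;                       /* 'old' is reloaded on CAS failure */
    return old - 1;
}

static int passthru_ioctl(void)
{
    if (dec_if_positive(&passthru_cmds_avail) < 0)
        return -1;              /* the driver returns -EAGAIN here */
    /* ... issue the passthru command ... */
    atomic_fetch_add(&passthru_cmds_avail, 1);   /* give the slot back */
    return 0;
}

int main(void)
{
    atomic_store(&passthru_cmds_avail, 3);       /* hypothetical limit */
    printf("%d\n", passthru_ioctl());            /* 0: a slot was free */
    return 0;
}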
@@ -5173,7 +5190,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5173 | { | 5190 | { |
5174 | int pci_dir = XFER_NONE; | 5191 | int pci_dir = XFER_NONE; |
5175 | struct CommandList *a; /* for commands to be aborted */ | 5192 | struct CommandList *a; /* for commands to be aborted */ |
5176 | u32 tupper, tlower; | ||
5177 | 5193 | ||
5178 | c->cmd_type = CMD_IOCTL_PEND; | 5194 | c->cmd_type = CMD_IOCTL_PEND; |
5179 | c->Header.ReplyQueue = 0; | 5195 | c->Header.ReplyQueue = 0; |
@@ -5184,7 +5200,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5184 | c->Header.SGList = 0; | 5200 | c->Header.SGList = 0; |
5185 | c->Header.SGTotal = cpu_to_le16(0); | 5201 | c->Header.SGTotal = cpu_to_le16(0); |
5186 | } | 5202 | } |
5187 | c->Header.tag = c->busaddr; | ||
5188 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); | 5203 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); |
5189 | 5204 | ||
5190 | if (cmd_type == TYPE_CMD) { | 5205 | if (cmd_type == TYPE_CMD) { |
@@ -5256,6 +5271,16 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5256 | c->Request.CDB[7] = (size >> 16) & 0xFF; | 5271 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
5257 | c->Request.CDB[8] = (size >> 8) & 0xFF; | 5272 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
5258 | break; | 5273 | break; |
5274 | case BMIC_IDENTIFY_PHYSICAL_DEVICE: | ||
5275 | c->Request.CDBLen = 10; | ||
5276 | c->Request.type_attr_dir = | ||
5277 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | ||
5278 | c->Request.Timeout = 0; | ||
5279 | c->Request.CDB[0] = BMIC_READ; | ||
5280 | c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; | ||
5281 | c->Request.CDB[7] = (size >> 16) & 0xFF; | ||
5282 | c->Request.CDB[8] = (size >> 8) & 0XFF; | ||
5283 | break; | ||
5259 | default: | 5284 | default: |
5260 | dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); | 5285 | dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); |
5261 | BUG(); | 5286 | BUG(); |
@@ -5281,10 +5306,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5281 | break; | 5306 | break; |
5282 | case HPSA_ABORT_MSG: | 5307 | case HPSA_ABORT_MSG: |
5283 | a = buff; /* point to command to be aborted */ | 5308 | a = buff; /* point to command to be aborted */ |
5284 | dev_dbg(&h->pdev->dev, "Abort Tag:0x%016llx using request Tag:0x%016llx", | 5309 | dev_dbg(&h->pdev->dev, |
5310 | "Abort Tag:0x%016llx request Tag:0x%016llx", | ||
5285 | a->Header.tag, c->Header.tag); | 5311 | a->Header.tag, c->Header.tag); |
5286 | tlower = (u32) (a->Header.tag >> 32); | ||
5287 | tupper = (u32) (a->Header.tag & 0x0ffffffffULL); | ||
5288 | c->Request.CDBLen = 16; | 5312 | c->Request.CDBLen = 16; |
5289 | c->Request.type_attr_dir = | 5313 | c->Request.type_attr_dir = |
5290 | TYPE_ATTR_DIR(cmd_type, | 5314 | TYPE_ATTR_DIR(cmd_type, |
@@ -5295,14 +5319,8 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, | |||
5295 | c->Request.CDB[2] = 0x00; /* reserved */ | 5319 | c->Request.CDB[2] = 0x00; /* reserved */ |
5296 | c->Request.CDB[3] = 0x00; /* reserved */ | 5320 | c->Request.CDB[3] = 0x00; /* reserved */ |
5297 | /* Tag to abort goes in CDB[4]-CDB[11] */ | 5321 | /* Tag to abort goes in CDB[4]-CDB[11] */ |
5298 | c->Request.CDB[4] = tlower & 0xFF; | 5322 | memcpy(&c->Request.CDB[4], &a->Header.tag, |
5299 | c->Request.CDB[5] = (tlower >> 8) & 0xFF; | 5323 | sizeof(a->Header.tag)); |
5300 | c->Request.CDB[6] = (tlower >> 16) & 0xFF; | ||
5301 | c->Request.CDB[7] = (tlower >> 24) & 0xFF; | ||
5302 | c->Request.CDB[8] = tupper & 0xFF; | ||
5303 | c->Request.CDB[9] = (tupper >> 8) & 0xFF; | ||
5304 | c->Request.CDB[10] = (tupper >> 16) & 0xFF; | ||
5305 | c->Request.CDB[11] = (tupper >> 24) & 0xFF; | ||
5306 | c->Request.CDB[12] = 0x00; /* reserved */ | 5324 | c->Request.CDB[12] = 0x00; /* reserved */ |
5307 | c->Request.CDB[13] = 0x00; /* reserved */ | 5325 | c->Request.CDB[13] = 0x00; /* reserved */ |
5308 | c->Request.CDB[14] = 0x00; /* reserved */ | 5326 | c->Request.CDB[14] = 0x00; /* reserved */ |
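Because c->Header.tag is stored in the little-endian byte order the abort CDB expects, the eight shift-and-mask stores collapse into a single memcpy() into CDB[4]..CDB[11]. A userspace sketch of the copy; it only reproduces the driver's byte order on a little-endian host, since it uses a plain uint64_t rather than a __le64:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint8_t cdb[16] = {0};
    uint64_t tag = 0x1122334455667788ULL;   /* hypothetical tag value */

    /* tag to abort goes in CDB[4]..CDB[11]; memcpy preserves the in-memory
     * byte order, which on a little-endian host is low byte first */
    memcpy(&cdb[4], &tag, sizeof(tag));

    for (int i = 4; i < 12; i++)
        printf("%02x ", cdb[i]);
    printf("\n");               /* 88 77 66 55 44 33 22 11 on little-endian */
    return 0;
}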
@@ -5349,47 +5367,6 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) | |||
5349 | return page_remapped ? (page_remapped + page_offs) : NULL; | 5367 | return page_remapped ? (page_remapped + page_offs) : NULL; |
5350 | } | 5368 | } |
5351 | 5369 | ||
5352 | /* Takes cmds off the submission queue and sends them to the hardware, | ||
5353 | * then puts them on the queue of cmds waiting for completion. | ||
5354 | * Assumes h->lock is held | ||
5355 | */ | ||
5356 | static void start_io(struct ctlr_info *h, unsigned long *flags) | ||
5357 | { | ||
5358 | struct CommandList *c; | ||
5359 | |||
5360 | while (!list_empty(&h->reqQ)) { | ||
5361 | c = list_entry(h->reqQ.next, struct CommandList, list); | ||
5362 | /* can't do anything if fifo is full */ | ||
5363 | if ((h->access.fifo_full(h))) { | ||
5364 | h->fifo_recently_full = 1; | ||
5365 | dev_warn(&h->pdev->dev, "fifo full\n"); | ||
5366 | break; | ||
5367 | } | ||
5368 | h->fifo_recently_full = 0; | ||
5369 | |||
5370 | /* Get the first entry from the Request Q */ | ||
5371 | removeQ(c); | ||
5372 | h->Qdepth--; | ||
5373 | |||
5374 | /* Put job onto the completed Q */ | ||
5375 | addQ(&h->cmpQ, c); | ||
5376 | atomic_inc(&h->commands_outstanding); | ||
5377 | spin_unlock_irqrestore(&h->lock, *flags); | ||
5378 | /* Tell the controller execute command */ | ||
5379 | h->access.submit_command(h, c); | ||
5380 | spin_lock_irqsave(&h->lock, *flags); | ||
5381 | } | ||
5382 | } | ||
5383 | |||
5384 | static void lock_and_start_io(struct ctlr_info *h) | ||
5385 | { | ||
5386 | unsigned long flags; | ||
5387 | |||
5388 | spin_lock_irqsave(&h->lock, flags); | ||
5389 | start_io(h, &flags); | ||
5390 | spin_unlock_irqrestore(&h->lock, flags); | ||
5391 | } | ||
5392 | |||
5393 | static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) | 5370 | static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) |
5394 | { | 5371 | { |
5395 | return h->access.command_completed(h, q); | 5372 | return h->access.command_completed(h, q); |
@@ -5418,53 +5395,12 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index, | |||
5418 | 5395 | ||
5419 | static inline void finish_cmd(struct CommandList *c) | 5396 | static inline void finish_cmd(struct CommandList *c) |
5420 | { | 5397 | { |
5421 | unsigned long flags; | ||
5422 | int io_may_be_stalled = 0; | ||
5423 | struct ctlr_info *h = c->h; | ||
5424 | int count; | ||
5425 | |||
5426 | spin_lock_irqsave(&h->lock, flags); | ||
5427 | removeQ(c); | ||
5428 | |||
5429 | /* | ||
5430 | * Check for possibly stalled i/o. | ||
5431 | * | ||
5432 | * If a fifo_full condition is encountered, requests will back up | ||
5433 | * in h->reqQ. This queue is only emptied out by start_io which is | ||
5434 | * only called when a new i/o request comes in. If no i/o's are | ||
5435 | * forthcoming, the i/o's in h->reqQ can get stuck. So we call | ||
5436 | * start_io from here if we detect such a danger. | ||
5437 | * | ||
5438 | * Normally, we shouldn't hit this case, but pounding on the | ||
5439 | * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if | ||
5440 | * commands_outstanding is low. We want to avoid calling | ||
5441 | * start_io from in here as much as possible, and esp. don't | ||
5442 | * want to get in a cycle where we call start_io every time | ||
5443 | * through here. | ||
5444 | */ | ||
5445 | count = atomic_read(&h->commands_outstanding); | ||
5446 | spin_unlock_irqrestore(&h->lock, flags); | ||
5447 | if (unlikely(h->fifo_recently_full) && count < 5) | ||
5448 | io_may_be_stalled = 1; | ||
5449 | |||
5450 | dial_up_lockup_detection_on_fw_flash_complete(c->h, c); | 5398 | dial_up_lockup_detection_on_fw_flash_complete(c->h, c); |
5451 | if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI | 5399 | if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI |
5452 | || c->cmd_type == CMD_IOACCEL2)) | 5400 | || c->cmd_type == CMD_IOACCEL2)) |
5453 | complete_scsi_command(c); | 5401 | complete_scsi_command(c); |
5454 | else if (c->cmd_type == CMD_IOCTL_PEND) | 5402 | else if (c->cmd_type == CMD_IOCTL_PEND) |
5455 | complete(c->waiting); | 5403 | complete(c->waiting); |
5456 | if (unlikely(io_may_be_stalled)) | ||
5457 | lock_and_start_io(h); | ||
5458 | } | ||
5459 | |||
5460 | static inline u32 hpsa_tag_contains_index(u32 tag) | ||
5461 | { | ||
5462 | return tag & DIRECT_LOOKUP_BIT; | ||
5463 | } | ||
5464 | |||
5465 | static inline u32 hpsa_tag_to_index(u32 tag) | ||
5466 | { | ||
5467 | return tag >> DIRECT_LOOKUP_SHIFT; | ||
5468 | } | 5404 | } |
5469 | 5405 | ||
5470 | 5406 | ||
@@ -5484,34 +5420,13 @@ static inline void process_indexed_cmd(struct ctlr_info *h, | |||
5484 | u32 tag_index; | 5420 | u32 tag_index; |
5485 | struct CommandList *c; | 5421 | struct CommandList *c; |
5486 | 5422 | ||
5487 | tag_index = hpsa_tag_to_index(raw_tag); | 5423 | tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; |
5488 | if (!bad_tag(h, tag_index, raw_tag)) { | 5424 | if (!bad_tag(h, tag_index, raw_tag)) { |
5489 | c = h->cmd_pool + tag_index; | 5425 | c = h->cmd_pool + tag_index; |
5490 | finish_cmd(c); | 5426 | finish_cmd(c); |
5491 | } | 5427 | } |
5492 | } | 5428 | } |
5493 | 5429 | ||
5494 | /* process completion of a non-indexed command */ | ||
5495 | static inline void process_nonindexed_cmd(struct ctlr_info *h, | ||
5496 | u32 raw_tag) | ||
5497 | { | ||
5498 | u32 tag; | ||
5499 | struct CommandList *c = NULL; | ||
5500 | unsigned long flags; | ||
5501 | |||
5502 | tag = hpsa_tag_discard_error_bits(h, raw_tag); | ||
5503 | spin_lock_irqsave(&h->lock, flags); | ||
5504 | list_for_each_entry(c, &h->cmpQ, list) { | ||
5505 | if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { | ||
5506 | spin_unlock_irqrestore(&h->lock, flags); | ||
5507 | finish_cmd(c); | ||
5508 | return; | ||
5509 | } | ||
5510 | } | ||
5511 | spin_unlock_irqrestore(&h->lock, flags); | ||
5512 | bad_tag(h, h->nr_cmds + 1, raw_tag); | ||
5513 | } | ||
5514 | |||
5515 | /* Some controllers, like p400, will give us one interrupt | 5430 | /* Some controllers, like p400, will give us one interrupt |
5516 | * after a soft reset, even if we turned interrupts off. | 5431 | * after a soft reset, even if we turned interrupts off. |
5517 | * Only need to check for this in the hpsa_xxx_discard_completions | 5432 | * Only need to check for this in the hpsa_xxx_discard_completions |
@@ -5589,10 +5504,7 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) | |||
5589 | while (interrupt_pending(h)) { | 5504 | while (interrupt_pending(h)) { |
5590 | raw_tag = get_next_completion(h, q); | 5505 | raw_tag = get_next_completion(h, q); |
5591 | while (raw_tag != FIFO_EMPTY) { | 5506 | while (raw_tag != FIFO_EMPTY) { |
5592 | if (likely(hpsa_tag_contains_index(raw_tag))) | 5507 | process_indexed_cmd(h, raw_tag); |
5593 | process_indexed_cmd(h, raw_tag); | ||
5594 | else | ||
5595 | process_nonindexed_cmd(h, raw_tag); | ||
5596 | raw_tag = next_command(h, q); | 5508 | raw_tag = next_command(h, q); |
5597 | } | 5509 | } |
5598 | } | 5510 | } |
@@ -5608,10 +5520,7 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) | |||
5608 | h->last_intr_timestamp = get_jiffies_64(); | 5520 | h->last_intr_timestamp = get_jiffies_64(); |
5609 | raw_tag = get_next_completion(h, q); | 5521 | raw_tag = get_next_completion(h, q); |
5610 | while (raw_tag != FIFO_EMPTY) { | 5522 | while (raw_tag != FIFO_EMPTY) { |
5611 | if (likely(hpsa_tag_contains_index(raw_tag))) | 5523 | process_indexed_cmd(h, raw_tag); |
5612 | process_indexed_cmd(h, raw_tag); | ||
5613 | else | ||
5614 | process_nonindexed_cmd(h, raw_tag); | ||
5615 | raw_tag = next_command(h, q); | 5524 | raw_tag = next_command(h, q); |
5616 | } | 5525 | } |
5617 | return IRQ_HANDLED; | 5526 | return IRQ_HANDLED; |
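With non-indexed completions gone, every tag the controller returns encodes the command's slot in the command pool, so both interrupt handlers resolve it with shift-and-index arithmetic instead of walking a completion list under a lock. A minimal sketch of that lookup, using the DIRECT_LOOKUP_SHIFT and cmd_pool names from the hunk above (the bounds check here is illustrative; in the driver bad_tag() plays that role):

/* Sketch: resolve a completion tag straight to its command slot. */
static struct CommandList *tag_to_cmd(struct ctlr_info *h, u32 raw_tag)
{
        u32 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;

        if (tag_index >= h->nr_cmds)            /* corrupted tag: bail out */
                return NULL;
        return h->cmd_pool + tag_index;         /* O(1), no list walk, no lock */
}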
@@ -5633,7 +5542,8 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5633 | static const size_t cmd_sz = sizeof(*cmd) + | 5542 | static const size_t cmd_sz = sizeof(*cmd) + |
5634 | sizeof(cmd->ErrorDescriptor); | 5543 | sizeof(cmd->ErrorDescriptor); |
5635 | dma_addr_t paddr64; | 5544 | dma_addr_t paddr64; |
5636 | uint32_t paddr32, tag; | 5545 | __le32 paddr32; |
5546 | u32 tag; | ||
5637 | void __iomem *vaddr; | 5547 | void __iomem *vaddr; |
5638 | int i, err; | 5548 | int i, err; |
5639 | 5549 | ||
@@ -5648,7 +5558,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5648 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 5558 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
5649 | if (err) { | 5559 | if (err) { |
5650 | iounmap(vaddr); | 5560 | iounmap(vaddr); |
5651 | return -ENOMEM; | 5561 | return err; |
5652 | } | 5562 | } |
5653 | 5563 | ||
5654 | cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); | 5564 | cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); |
@@ -5661,12 +5571,12 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5661 | * although there's no guarantee, we assume that the address is at | 5571 | * although there's no guarantee, we assume that the address is at |
5662 | * least 4-byte aligned (most likely, it's page-aligned). | 5572 | * least 4-byte aligned (most likely, it's page-aligned). |
5663 | */ | 5573 | */ |
5664 | paddr32 = paddr64; | 5574 | paddr32 = cpu_to_le32(paddr64); |
5665 | 5575 | ||
5666 | cmd->CommandHeader.ReplyQueue = 0; | 5576 | cmd->CommandHeader.ReplyQueue = 0; |
5667 | cmd->CommandHeader.SGList = 0; | 5577 | cmd->CommandHeader.SGList = 0; |
5668 | cmd->CommandHeader.SGTotal = cpu_to_le16(0); | 5578 | cmd->CommandHeader.SGTotal = cpu_to_le16(0); |
5669 | cmd->CommandHeader.tag = paddr32; | 5579 | cmd->CommandHeader.tag = cpu_to_le64(paddr64); |
5670 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); | 5580 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); |
5671 | 5581 | ||
5672 | cmd->Request.CDBLen = 16; | 5582 | cmd->Request.CDBLen = 16; |
@@ -5677,14 +5587,14 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5677 | cmd->Request.CDB[1] = type; | 5587 | cmd->Request.CDB[1] = type; |
5678 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ | 5588 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ |
5679 | cmd->ErrorDescriptor.Addr = | 5589 | cmd->ErrorDescriptor.Addr = |
5680 | cpu_to_le64((paddr32 + sizeof(*cmd))); | 5590 | cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); |
5681 | cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); | 5591 | cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); |
5682 | 5592 | ||
5683 | writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); | 5593 | writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); |
5684 | 5594 | ||
5685 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { | 5595 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { |
5686 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); | 5596 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); |
5687 | if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) | 5597 | if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) |
5688 | break; | 5598 | break; |
5689 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); | 5599 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); |
5690 | } | 5600 | } |
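hpsa_message() tags the command with its own bus address, so matching the reply-port value back to the command needs no lookup table at all; the 32-bit DMA mask set just above guarantees the address fits the 32-bit request register. A rough sketch of that submit-and-poll handshake, reusing the register and retry-limit names from the hunk (the error code chosen at the end is illustrative):

/* Sketch: post one command by bus address and poll for its echo. */
static int post_and_wait(void __iomem *vaddr, dma_addr_t paddr64)
{
        u32 want = lower_32_bits(paddr64);
        int i;

        writel(want, vaddr + SA5_REQUEST_PORT_OFFSET);  /* submit by bus address */

        for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
                u32 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);

                /* the low bits carry status flags, mask them off */
                if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == want)
                        return 0;
                msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
        }
        return -ETIMEDOUT;                      /* illustrative error code */
}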
@@ -5718,8 +5628,6 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
5718 | static int hpsa_controller_hard_reset(struct pci_dev *pdev, | 5628 | static int hpsa_controller_hard_reset(struct pci_dev *pdev, |
5719 | void __iomem *vaddr, u32 use_doorbell) | 5629 | void __iomem *vaddr, u32 use_doorbell) |
5720 | { | 5630 | { |
5721 | u16 pmcsr; | ||
5722 | int pos; | ||
5723 | 5631 | ||
5724 | if (use_doorbell) { | 5632 | if (use_doorbell) { |
5725 | /* For everything after the P600, the PCI power state method | 5633 | /* For everything after the P600, the PCI power state method |
@@ -5745,26 +5653,21 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, | |||
5745 | * this causes a secondary PCI reset which will reset the | 5653 | * this causes a secondary PCI reset which will reset the |
5746 | * controller." */ | 5654 | * controller." */ |
5747 | 5655 | ||
5748 | pos = pci_find_capability(pdev, PCI_CAP_ID_PM); | 5656 | int rc = 0; |
5749 | if (pos == 0) { | 5657 | |
5750 | dev_err(&pdev->dev, | ||
5751 | "hpsa_reset_controller: " | ||
5752 | "PCI PM not supported\n"); | ||
5753 | return -ENODEV; | ||
5754 | } | ||
5755 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); | 5658 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); |
5659 | |||
5756 | /* enter the D3hot power management state */ | 5660 | /* enter the D3hot power management state */ |
5757 | pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); | 5661 | rc = pci_set_power_state(pdev, PCI_D3hot); |
5758 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 5662 | if (rc) |
5759 | pmcsr |= PCI_D3hot; | 5663 | return rc; |
5760 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | ||
5761 | 5664 | ||
5762 | msleep(500); | 5665 | msleep(500); |
5763 | 5666 | ||
5764 | /* enter the D0 power management state */ | 5667 | /* enter the D0 power management state */ |
5765 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | 5668 | rc = pci_set_power_state(pdev, PCI_D0); |
5766 | pmcsr |= PCI_D0; | 5669 | if (rc) |
5767 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | 5670 | return rc; |
5768 | 5671 | ||
5769 | /* | 5672 | /* |
5770 | * The P600 requires a small delay when changing states. | 5673 | * The P600 requires a small delay when changing states. |
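Rather than open-coding the PMCSR read-modify-write, the reset path now leans on pci_set_power_state(), which looks up the PM capability, performs the state write, and enforces the PCI-mandated transition delays. A condensed sketch of the D3hot/D0 bounce under that assumption (the 500 ms settle times mirror the hunk; the rest of the reset policy stays in the caller):

#include <linux/pci.h>
#include <linux/delay.h>

/* Sketch: reset a controller by bouncing it through D3hot. */
static int pm_bounce_reset(struct pci_dev *pdev)
{
        int rc;

        rc = pci_set_power_state(pdev, PCI_D3hot);      /* power down */
        if (rc)
                return rc;
        msleep(500);                                    /* let the board settle */

        rc = pci_set_power_state(pdev, PCI_D0);         /* power back up */
        if (rc)
                return rc;
        msleep(500);                                    /* P600-style post-D0 delay */
        return 0;
}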
@@ -5858,8 +5761,12 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
5858 | */ | 5761 | */ |
5859 | 5762 | ||
5860 | rc = hpsa_lookup_board_id(pdev, &board_id); | 5763 | rc = hpsa_lookup_board_id(pdev, &board_id); |
5861 | if (rc < 0 || !ctlr_is_resettable(board_id)) { | 5764 | if (rc < 0) { |
5862 | dev_warn(&pdev->dev, "Not resetting device.\n"); | 5765 | dev_warn(&pdev->dev, "Board ID not found\n"); |
5766 | return rc; | ||
5767 | } | ||
5768 | if (!ctlr_is_resettable(board_id)) { | ||
5769 | dev_warn(&pdev->dev, "Controller not resettable\n"); | ||
5863 | return -ENODEV; | 5770 | return -ENODEV; |
5864 | } | 5771 | } |
5865 | 5772 | ||
@@ -5892,7 +5799,7 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
5892 | } | 5799 | } |
5893 | rc = write_driver_ver_to_cfgtable(cfgtable); | 5800 | rc = write_driver_ver_to_cfgtable(cfgtable); |
5894 | if (rc) | 5801 | if (rc) |
5895 | goto unmap_vaddr; | 5802 | goto unmap_cfgtable; |
5896 | 5803 | ||
5897 | /* If reset via doorbell register is supported, use that. | 5804 | /* If reset via doorbell register is supported, use that. |
5898 | * There are two such methods. Favor the newest method. | 5805 | * There are two such methods. Favor the newest method. |
@@ -5904,8 +5811,8 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
5904 | } else { | 5811 | } else { |
5905 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | 5812 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; |
5906 | if (use_doorbell) { | 5813 | if (use_doorbell) { |
5907 | dev_warn(&pdev->dev, "Soft reset not supported. " | 5814 | dev_warn(&pdev->dev, |
5908 | "Firmware update is required.\n"); | 5815 | "Soft reset not supported. Firmware update is required.\n"); |
5909 | rc = -ENOTSUPP; /* try soft reset */ | 5816 | rc = -ENOTSUPP; /* try soft reset */ |
5910 | goto unmap_cfgtable; | 5817 | goto unmap_cfgtable; |
5911 | } | 5818 | } |
@@ -5925,8 +5832,7 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) | |||
5925 | rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); | 5832 | rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); |
5926 | if (rc) { | 5833 | if (rc) { |
5927 | dev_warn(&pdev->dev, | 5834 | dev_warn(&pdev->dev, |
5928 | "failed waiting for board to become ready " | 5835 | "Failed waiting for board to become ready after hard reset\n"); |
5929 | "after hard reset\n"); | ||
5930 | goto unmap_cfgtable; | 5836 | goto unmap_cfgtable; |
5931 | } | 5837 | } |
5932 | 5838 | ||
@@ -5977,7 +5883,7 @@ static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) | |||
5977 | readl(&(tb->HostWrite.CoalIntDelay))); | 5883 | readl(&(tb->HostWrite.CoalIntDelay))); |
5978 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", | 5884 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", |
5979 | readl(&(tb->HostWrite.CoalIntCount))); | 5885 | readl(&(tb->HostWrite.CoalIntCount))); |
5980 | dev_info(dev, " Max outstanding commands = 0x%d\n", | 5886 | dev_info(dev, " Max outstanding commands = %d\n", |
5981 | readl(&(tb->CmdsOutMax))); | 5887 | readl(&(tb->CmdsOutMax))); |
5982 | dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); | 5888 | dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); |
5983 | for (i = 0; i < 16; i++) | 5889 | for (i = 0; i < 16; i++) |
@@ -6025,7 +5931,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |||
6025 | } | 5931 | } |
6026 | 5932 | ||
6027 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on | 5933 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on |
6028 | * controllers that are capable. If not, we use IO-APIC mode. | 5934 | * controllers that are capable. If not, we use legacy INTx mode. |
6029 | */ | 5935 | */ |
6030 | 5936 | ||
6031 | static void hpsa_interrupt_mode(struct ctlr_info *h) | 5937 | static void hpsa_interrupt_mode(struct ctlr_info *h) |
@@ -6044,7 +5950,7 @@ static void hpsa_interrupt_mode(struct ctlr_info *h) | |||
6044 | (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) | 5950 | (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) |
6045 | goto default_int_mode; | 5951 | goto default_int_mode; |
6046 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { | 5952 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { |
6047 | dev_info(&h->pdev->dev, "MSIX\n"); | 5953 | dev_info(&h->pdev->dev, "MSI-X capable controller\n"); |
6048 | h->msix_vector = MAX_REPLY_QUEUES; | 5954 | h->msix_vector = MAX_REPLY_QUEUES; |
6049 | if (h->msix_vector > num_online_cpus()) | 5955 | if (h->msix_vector > num_online_cpus()) |
6050 | h->msix_vector = num_online_cpus(); | 5956 | h->msix_vector = num_online_cpus(); |
@@ -6065,7 +5971,7 @@ static void hpsa_interrupt_mode(struct ctlr_info *h) | |||
6065 | } | 5971 | } |
6066 | single_msi_mode: | 5972 | single_msi_mode: |
6067 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { | 5973 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { |
6068 | dev_info(&h->pdev->dev, "MSI\n"); | 5974 | dev_info(&h->pdev->dev, "MSI capable controller\n"); |
6069 | if (!pci_enable_msi(h->pdev)) | 5975 | if (!pci_enable_msi(h->pdev)) |
6070 | h->msi_vector = 1; | 5976 | h->msi_vector = 1; |
6071 | else | 5977 | else |
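The probe still walks the same ladder, only with clearer log lines: MSI-X with one vector per reply queue capped at the online CPU count, then single-vector MSI, then legacy INTx. A simplified, hypothetical sketch of that fallback using the generic PCI helpers of this era; the entries array and the return convention here are illustrative, not the driver's actual bookkeeping:

#include <linux/pci.h>
#include <linux/kernel.h>

/* Hypothetical sketch: pick the best available interrupt mode. */
static int pick_irq_mode(struct pci_dev *pdev, struct msix_entry *entries,
                         int max_queues)
{
        int nvec = min_t(int, max_queues, num_online_cpus());
        int i, rc;

        if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
                for (i = 0; i < nvec; i++)
                        entries[i].entry = i;
                rc = pci_enable_msix_range(pdev, entries, 1, nvec);
                if (rc > 0)
                        return rc;              /* number of vectors granted */
        }
        if (pci_find_capability(pdev, PCI_CAP_ID_MSI) && !pci_enable_msi(pdev))
                return 1;                       /* one MSI vector */
        return 0;                               /* fall back to legacy INTx */
}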
@@ -6172,8 +6078,10 @@ static int hpsa_find_cfgtables(struct ctlr_info *h) | |||
6172 | return rc; | 6078 | return rc; |
6173 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, | 6079 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, |
6174 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); | 6080 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); |
6175 | if (!h->cfgtable) | 6081 | if (!h->cfgtable) { |
6082 | dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); | ||
6176 | return -ENOMEM; | 6083 | return -ENOMEM; |
6084 | } | ||
6177 | rc = write_driver_ver_to_cfgtable(h->cfgtable); | 6085 | rc = write_driver_ver_to_cfgtable(h->cfgtable); |
6178 | if (rc) | 6086 | if (rc) |
6179 | return rc; | 6087 | return rc; |
@@ -6204,6 +6112,15 @@ static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) | |||
6204 | } | 6112 | } |
6205 | } | 6113 | } |
6206 | 6114 | ||
6115 | /* If the controller reports that the total max sg entries is greater than 512, | ||
6116 | * then we know that chained SG blocks work. (Original smart arrays did not | ||
6117 | * support chained SG blocks and would return zero for max sg entries.) | ||
6118 | */ | ||
6119 | static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) | ||
6120 | { | ||
6121 | return h->maxsgentries > 512; | ||
6122 | } | ||
6123 | |||
6207 | /* Interrogate the hardware for some limits: | 6124 | /* Interrogate the hardware for some limits: |
6208 | * max commands, max SG elements without chaining, and with chaining, | 6125 | * max commands, max SG elements without chaining, and with chaining, |
6209 | * SG chain block size, etc. | 6126 | * SG chain block size, etc. |
@@ -6211,21 +6128,23 @@ static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) | |||
6211 | static void hpsa_find_board_params(struct ctlr_info *h) | 6128 | static void hpsa_find_board_params(struct ctlr_info *h) |
6212 | { | 6129 | { |
6213 | hpsa_get_max_perf_mode_cmds(h); | 6130 | hpsa_get_max_perf_mode_cmds(h); |
6214 | h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ | 6131 | h->nr_cmds = h->max_commands; |
6215 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); | 6132 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); |
6216 | h->fw_support = readl(&(h->cfgtable->misc_fw_support)); | 6133 | h->fw_support = readl(&(h->cfgtable->misc_fw_support)); |
6217 | /* | 6134 | if (hpsa_supports_chained_sg_blocks(h)) { |
6218 | * Limit in-command s/g elements to 32 save dma'able memory. | 6135 | /* Limit in-command s/g elements to 32 save dma'able memory. */ |
6219 | * Howvever spec says if 0, use 31 | ||
6220 | */ | ||
6221 | h->max_cmd_sg_entries = 31; | ||
6222 | if (h->maxsgentries > 512) { | ||
6223 | h->max_cmd_sg_entries = 32; | 6136 | h->max_cmd_sg_entries = 32; |
6224 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; | 6137 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; |
6225 | h->maxsgentries--; /* save one for chain pointer */ | 6138 | h->maxsgentries--; /* save one for chain pointer */ |
6226 | } else { | 6139 | } else { |
6227 | h->chainsize = 0; | 6140 | /* |
6141 | * Original smart arrays supported at most 31 s/g entries | ||
6142 | * embedded inline in the command (trying to use more | ||
6143 | * would lock up the controller) | ||
6144 | */ | ||
6145 | h->max_cmd_sg_entries = 31; | ||
6228 | h->maxsgentries = 31; /* default to traditional values */ | 6146 | h->maxsgentries = 31; /* default to traditional values */ |
6147 | h->chainsize = 0; | ||
6229 | } | 6148 | } |
6230 | 6149 | ||
6231 | /* Find out what task management functions are supported and cache */ | 6150 | /* Find out what task management functions are supported and cache */ |
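The SG sizing now hangs off a single predicate: a controller advertising more than 512 scatter-gather entries supports chained SG blocks, anything older is held to 31 inline descriptors. The same decision in isolation, with the constants taken from the hunk and field names following struct ctlr_info:

/* Sketch: derive SG limits from what the config table advertises. */
static void size_sg_limits(struct ctlr_info *h)
{
        if (h->maxsgentries > 512) {            /* chained SG blocks supported */
                h->max_cmd_sg_entries = 32;     /* keep the inline part small */
                h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
                h->maxsgentries--;              /* reserve one for the chain pointer */
        } else {                                /* original Smart Array limits */
                h->max_cmd_sg_entries = 31;
                h->maxsgentries = 31;
                h->chainsize = 0;               /* no chain blocks at all */
        }
}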
@@ -6239,7 +6158,7 @@ static void hpsa_find_board_params(struct ctlr_info *h) | |||
6239 | static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) | 6158 | static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) |
6240 | { | 6159 | { |
6241 | if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { | 6160 | if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { |
6242 | dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); | 6161 | dev_err(&h->pdev->dev, "not a valid CISS config table\n"); |
6243 | return false; | 6162 | return false; |
6244 | } | 6163 | } |
6245 | return true; | 6164 | return true; |
@@ -6272,24 +6191,27 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) | |||
6272 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); | 6191 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); |
6273 | } | 6192 | } |
6274 | 6193 | ||
6275 | static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) | 6194 | static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) |
6276 | { | 6195 | { |
6277 | int i; | 6196 | int i; |
6278 | u32 doorbell_value; | 6197 | u32 doorbell_value; |
6279 | unsigned long flags; | 6198 | unsigned long flags; |
6280 | /* wait until the clear_event_notify bit 6 is cleared by controller. */ | 6199 | /* wait until the clear_event_notify bit 6 is cleared by controller. */ |
6281 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | 6200 | for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) { |
6282 | spin_lock_irqsave(&h->lock, flags); | 6201 | spin_lock_irqsave(&h->lock, flags); |
6283 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); | 6202 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); |
6284 | spin_unlock_irqrestore(&h->lock, flags); | 6203 | spin_unlock_irqrestore(&h->lock, flags); |
6285 | if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) | 6204 | if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) |
6286 | break; | 6205 | goto done; |
6287 | /* delay and try again */ | 6206 | /* delay and try again */ |
6288 | msleep(20); | 6207 | msleep(CLEAR_EVENT_WAIT_INTERVAL); |
6289 | } | 6208 | } |
6209 | return -ENODEV; | ||
6210 | done: | ||
6211 | return 0; | ||
6290 | } | 6212 | } |
6291 | 6213 | ||
6292 | static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) | 6214 | static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) |
6293 | { | 6215 | { |
6294 | int i; | 6216 | int i; |
6295 | u32 doorbell_value; | 6217 | u32 doorbell_value; |
@@ -6299,17 +6221,21 @@ static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) | |||
6299 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | 6221 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right |
6300 | * as we enter this code.) | 6222 | * as we enter this code.) |
6301 | */ | 6223 | */ |
6302 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | 6224 | for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { |
6303 | spin_lock_irqsave(&h->lock, flags); | 6225 | spin_lock_irqsave(&h->lock, flags); |
6304 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); | 6226 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); |
6305 | spin_unlock_irqrestore(&h->lock, flags); | 6227 | spin_unlock_irqrestore(&h->lock, flags); |
6306 | if (!(doorbell_value & CFGTBL_ChangeReq)) | 6228 | if (!(doorbell_value & CFGTBL_ChangeReq)) |
6307 | break; | 6229 | goto done; |
6308 | /* delay and try again */ | 6230 | /* delay and try again */ |
6309 | usleep_range(10000, 20000); | 6231 | msleep(MODE_CHANGE_WAIT_INTERVAL); |
6310 | } | 6232 | } |
6233 | return -ENODEV; | ||
6234 | done: | ||
6235 | return 0; | ||
6311 | } | 6236 | } |
6312 | 6237 | ||
6238 | /* return -ENODEV or other reason on error, 0 on success */ | ||
6313 | static int hpsa_enter_simple_mode(struct ctlr_info *h) | 6239 | static int hpsa_enter_simple_mode(struct ctlr_info *h) |
6314 | { | 6240 | { |
6315 | u32 trans_support; | 6241 | u32 trans_support; |
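Both ack waiters now return an error instead of timing out silently, so a caller such as hpsa_enter_simple_mode can abandon a transport change cleanly. The underlying pattern is a bounded poll of the doorbell register, sampled under the lock, with a sleep between attempts; a generic sketch parameterised over the bit and limits (the hunk uses separate constants for each waiter):

/* Sketch: poll a doorbell bit until the controller clears it. */
static int wait_for_doorbell_clear(struct ctlr_info *h, u32 bit,
                                   int max_iter, unsigned int interval_ms)
{
        unsigned long flags;
        u32 doorbell;
        int i;

        for (i = 0; i < max_iter; i++) {
                spin_lock_irqsave(&h->lock, flags);
                doorbell = readl(h->vaddr + SA5_DOORBELL);
                spin_unlock_irqrestore(&h->lock, flags);
                if (!(doorbell & bit))
                        return 0;               /* controller acked */
                msleep(interval_ms);            /* back off and retry */
        }
        return -ENODEV;                         /* firmware never answered */
}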
@@ -6324,14 +6250,15 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h) | |||
6324 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); | 6250 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); |
6325 | writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); | 6251 | writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); |
6326 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | 6252 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
6327 | hpsa_wait_for_mode_change_ack(h); | 6253 | if (hpsa_wait_for_mode_change_ack(h)) |
6254 | goto error; | ||
6328 | print_cfg_table(&h->pdev->dev, h->cfgtable); | 6255 | print_cfg_table(&h->pdev->dev, h->cfgtable); |
6329 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) | 6256 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) |
6330 | goto error; | 6257 | goto error; |
6331 | h->transMethod = CFGTBL_Trans_Simple; | 6258 | h->transMethod = CFGTBL_Trans_Simple; |
6332 | return 0; | 6259 | return 0; |
6333 | error: | 6260 | error: |
6334 | dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); | 6261 | dev_err(&h->pdev->dev, "failed to enter simple mode\n"); |
6335 | return -ENODEV; | 6262 | return -ENODEV; |
6336 | } | 6263 | } |
6337 | 6264 | ||
@@ -6341,7 +6268,7 @@ static int hpsa_pci_init(struct ctlr_info *h) | |||
6341 | 6268 | ||
6342 | prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); | 6269 | prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); |
6343 | if (prod_index < 0) | 6270 | if (prod_index < 0) |
6344 | return -ENODEV; | 6271 | return prod_index; |
6345 | h->product_name = products[prod_index].product_name; | 6272 | h->product_name = products[prod_index].product_name; |
6346 | h->access = *(products[prod_index].access); | 6273 | h->access = *(products[prod_index].access); |
6347 | 6274 | ||
@@ -6422,6 +6349,7 @@ static void hpsa_hba_inquiry(struct ctlr_info *h) | |||
6422 | static int hpsa_init_reset_devices(struct pci_dev *pdev) | 6349 | static int hpsa_init_reset_devices(struct pci_dev *pdev) |
6423 | { | 6350 | { |
6424 | int rc, i; | 6351 | int rc, i; |
6352 | void __iomem *vaddr; | ||
6425 | 6353 | ||
6426 | if (!reset_devices) | 6354 | if (!reset_devices) |
6427 | return 0; | 6355 | return 0; |
@@ -6445,6 +6373,14 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) | |||
6445 | 6373 | ||
6446 | pci_set_master(pdev); | 6374 | pci_set_master(pdev); |
6447 | 6375 | ||
6376 | vaddr = pci_ioremap_bar(pdev, 0); | ||
6377 | if (vaddr == NULL) { | ||
6378 | rc = -ENOMEM; | ||
6379 | goto out_disable; | ||
6380 | } | ||
6381 | writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); | ||
6382 | iounmap(vaddr); | ||
6383 | |||
6448 | /* Reset the controller with a PCI power-cycle or via doorbell */ | 6384 | /* Reset the controller with a PCI power-cycle or via doorbell */ |
6449 | rc = hpsa_kdump_hard_reset_controller(pdev); | 6385 | rc = hpsa_kdump_hard_reset_controller(pdev); |
6450 | 6386 | ||
@@ -6453,14 +6389,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev) | |||
6453 | * "performant mode". Or, it might be 640x, which can't reset | 6389 | * "performant mode". Or, it might be 640x, which can't reset |
6454 | * due to concerns about shared bbwc between 6402/6404 pair. | 6390 | * due to concerns about shared bbwc between 6402/6404 pair. |
6455 | */ | 6391 | */ |
6456 | if (rc) { | 6392 | if (rc) |
6457 | if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */ | ||
6458 | rc = -ENODEV; | ||
6459 | goto out_disable; | 6393 | goto out_disable; |
6460 | } | ||
6461 | 6394 | ||
6462 | /* Now try to get the controller to respond to a no-op */ | 6395 | /* Now try to get the controller to respond to a no-op */ |
6463 | dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); | 6396 | dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); |
6464 | for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { | 6397 | for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { |
6465 | if (hpsa_noop(pdev) == 0) | 6398 | if (hpsa_noop(pdev) == 0) |
6466 | break; | 6399 | break; |
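For kdump, the board's interrupts are now masked through a temporary BAR 0 mapping before the hard reset, so a wedged controller cannot keep interrupting the crash kernel while it probes. A minimal sketch of that pre-reset quiesce, using the register names from the hunk and dropping the caller's error plumbing:

#include <linux/pci.h>
#include <linux/io.h>

/* Sketch: quiesce controller interrupts before resetting it. */
static int mask_ctlr_interrupts(struct pci_dev *pdev)
{
        void __iomem *vaddr = pci_ioremap_bar(pdev, 0);

        if (!vaddr)
                return -ENOMEM;
        /* SA5_INTR_OFF fills the interrupt mask register */
        writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
        iounmap(vaddr);         /* the mapping was only needed for this poke */
        return 0;
}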
@@ -6490,9 +6423,12 @@ static int hpsa_allocate_cmd_pool(struct ctlr_info *h) | |||
6490 | || (h->cmd_pool == NULL) | 6423 | || (h->cmd_pool == NULL) |
6491 | || (h->errinfo_pool == NULL)) { | 6424 | || (h->errinfo_pool == NULL)) { |
6492 | dev_err(&h->pdev->dev, "out of memory in %s", __func__); | 6425 | dev_err(&h->pdev->dev, "out of memory in %s", __func__); |
6493 | return -ENOMEM; | 6426 | goto clean_up; |
6494 | } | 6427 | } |
6495 | return 0; | 6428 | return 0; |
6429 | clean_up: | ||
6430 | hpsa_free_cmd_pool(h); | ||
6431 | return -ENOMEM; | ||
6496 | } | 6432 | } |
6497 | 6433 | ||
6498 | static void hpsa_free_cmd_pool(struct ctlr_info *h) | 6434 | static void hpsa_free_cmd_pool(struct ctlr_info *h) |
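The command-pool allocator now unwinds whatever it did manage to allocate before failing, so a half-built pool never escapes the error path. The allocation calls themselves sit above the context shown here; a sketch of the all-or-nothing shape under that assumption (the *_dhandle field names are patterned after the driver's but are assumptions here, and the real pool also carries an allocation bitmap that this sketch omits):

/* Sketch: free whichever halves of the pool exist. */
static void free_cmd_pools(struct ctlr_info *h)
{
        if (h->cmd_pool)
                pci_free_consistent(h->pdev,
                                    h->nr_cmds * sizeof(*h->cmd_pool),
                                    h->cmd_pool, h->cmd_pool_dhandle);
        if (h->errinfo_pool)
                pci_free_consistent(h->pdev,
                                    h->nr_cmds * sizeof(*h->errinfo_pool),
                                    h->errinfo_pool, h->errinfo_pool_dhandle);
}

/* Sketch: coherent-DMA pool allocation with all-or-nothing cleanup. */
static int alloc_cmd_pools(struct ctlr_info *h)
{
        h->cmd_pool = pci_alloc_consistent(h->pdev,
                        h->nr_cmds * sizeof(*h->cmd_pool),
                        &h->cmd_pool_dhandle);
        h->errinfo_pool = pci_alloc_consistent(h->pdev,
                        h->nr_cmds * sizeof(*h->errinfo_pool),
                        &h->errinfo_pool_dhandle);
        if (!h->cmd_pool || !h->errinfo_pool) {
                free_cmd_pools(h);      /* hand back whichever half succeeded */
                return -ENOMEM;
        }
        return 0;
}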
@@ -6519,16 +6455,38 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h) | |||
6519 | 6455 | ||
6520 | static void hpsa_irq_affinity_hints(struct ctlr_info *h) | 6456 | static void hpsa_irq_affinity_hints(struct ctlr_info *h) |
6521 | { | 6457 | { |
6522 | int i, cpu, rc; | 6458 | int i, cpu; |
6523 | 6459 | ||
6524 | cpu = cpumask_first(cpu_online_mask); | 6460 | cpu = cpumask_first(cpu_online_mask); |
6525 | for (i = 0; i < h->msix_vector; i++) { | 6461 | for (i = 0; i < h->msix_vector; i++) { |
6526 | rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); | 6462 | irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); |
6527 | cpu = cpumask_next(cpu, cpu_online_mask); | 6463 | cpu = cpumask_next(cpu, cpu_online_mask); |
6528 | } | 6464 | } |
6529 | } | 6465 | } |
6530 | 6466 | ||
6531 | static int hpsa_request_irq(struct ctlr_info *h, | 6467 | /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ |
6468 | static void hpsa_free_irqs(struct ctlr_info *h) | ||
6469 | { | ||
6470 | int i; | ||
6471 | |||
6472 | if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { | ||
6473 | /* Single reply queue, only one irq to free */ | ||
6474 | i = h->intr_mode; | ||
6475 | irq_set_affinity_hint(h->intr[i], NULL); | ||
6476 | free_irq(h->intr[i], &h->q[i]); | ||
6477 | return; | ||
6478 | } | ||
6479 | |||
6480 | for (i = 0; i < h->msix_vector; i++) { | ||
6481 | irq_set_affinity_hint(h->intr[i], NULL); | ||
6482 | free_irq(h->intr[i], &h->q[i]); | ||
6483 | } | ||
6484 | for (; i < MAX_REPLY_QUEUES; i++) | ||
6485 | h->q[i] = 0; | ||
6486 | } | ||
6487 | |||
6488 | /* returns 0 on success; cleans up and returns -Enn on error */ | ||
6489 | static int hpsa_request_irqs(struct ctlr_info *h, | ||
6532 | irqreturn_t (*msixhandler)(int, void *), | 6490 | irqreturn_t (*msixhandler)(int, void *), |
6533 | irqreturn_t (*intxhandler)(int, void *)) | 6491 | irqreturn_t (*intxhandler)(int, void *)) |
6534 | { | 6492 | { |
@@ -6543,10 +6501,25 @@ static int hpsa_request_irq(struct ctlr_info *h, | |||
6543 | 6501 | ||
6544 | if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { | 6502 | if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { |
6545 | /* If performant mode and MSI-X, use multiple reply queues */ | 6503 | /* If performant mode and MSI-X, use multiple reply queues */ |
6546 | for (i = 0; i < h->msix_vector; i++) | 6504 | for (i = 0; i < h->msix_vector; i++) { |
6547 | rc = request_irq(h->intr[i], msixhandler, | 6505 | rc = request_irq(h->intr[i], msixhandler, |
6548 | 0, h->devname, | 6506 | 0, h->devname, |
6549 | &h->q[i]); | 6507 | &h->q[i]); |
6508 | if (rc) { | ||
6509 | int j; | ||
6510 | |||
6511 | dev_err(&h->pdev->dev, | ||
6512 | "failed to get irq %d for %s\n", | ||
6513 | h->intr[i], h->devname); | ||
6514 | for (j = 0; j < i; j++) { | ||
6515 | free_irq(h->intr[j], &h->q[j]); | ||
6516 | h->q[j] = 0; | ||
6517 | } | ||
6518 | for (; j < MAX_REPLY_QUEUES; j++) | ||
6519 | h->q[j] = 0; | ||
6520 | return rc; | ||
6521 | } | ||
6522 | } | ||
6550 | hpsa_irq_affinity_hints(h); | 6523 | hpsa_irq_affinity_hints(h); |
6551 | } else { | 6524 | } else { |
6552 | /* Use single reply pool */ | 6525 | /* Use single reply pool */ |
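With MSI-X, each reply queue owns a vector, and a request_irq() failure partway through now releases the vectors already acquired instead of leaving them held. A compact sketch of the request-with-rollback loop, using the intr[]/q[] bookkeeping from the hunk:

/* Sketch: request one IRQ per reply queue, rolling back on failure. */
static int request_reply_queue_irqs(struct ctlr_info *h,
                                    irqreturn_t (*handler)(int, void *))
{
        int i, rc;

        for (i = 0; i < h->msix_vector; i++) {
                rc = request_irq(h->intr[i], handler, 0, h->devname,
                                 &h->q[i]);
                if (rc) {
                        int j;

                        /* give back every vector we already own */
                        for (j = 0; j < i; j++) {
                                free_irq(h->intr[j], &h->q[j]);
                                h->q[j] = 0;
                        }
                        return rc;
                }
        }
        return 0;
}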
@@ -6592,27 +6565,9 @@ static int hpsa_kdump_soft_reset(struct ctlr_info *h) | |||
6592 | return 0; | 6565 | return 0; |
6593 | } | 6566 | } |
6594 | 6567 | ||
6595 | static void free_irqs(struct ctlr_info *h) | ||
6596 | { | ||
6597 | int i; | ||
6598 | |||
6599 | if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { | ||
6600 | /* Single reply queue, only one irq to free */ | ||
6601 | i = h->intr_mode; | ||
6602 | irq_set_affinity_hint(h->intr[i], NULL); | ||
6603 | free_irq(h->intr[i], &h->q[i]); | ||
6604 | return; | ||
6605 | } | ||
6606 | |||
6607 | for (i = 0; i < h->msix_vector; i++) { | ||
6608 | irq_set_affinity_hint(h->intr[i], NULL); | ||
6609 | free_irq(h->intr[i], &h->q[i]); | ||
6610 | } | ||
6611 | } | ||
6612 | |||
6613 | static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) | 6568 | static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) |
6614 | { | 6569 | { |
6615 | free_irqs(h); | 6570 | hpsa_free_irqs(h); |
6616 | #ifdef CONFIG_PCI_MSI | 6571 | #ifdef CONFIG_PCI_MSI |
6617 | if (h->msix_vector) { | 6572 | if (h->msix_vector) { |
6618 | if (h->pdev->msix_enabled) | 6573 | if (h->pdev->msix_enabled) |
@@ -6658,16 +6613,20 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) | |||
6658 | } | 6613 | } |
6659 | 6614 | ||
6660 | /* Called when controller lockup detected. */ | 6615 | /* Called when controller lockup detected. */ |
6661 | static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) | 6616 | static void fail_all_outstanding_cmds(struct ctlr_info *h) |
6662 | { | 6617 | { |
6663 | struct CommandList *c = NULL; | 6618 | int i, refcount; |
6619 | struct CommandList *c; | ||
6664 | 6620 | ||
6665 | assert_spin_locked(&h->lock); | 6621 | flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ |
6666 | /* Mark all outstanding commands as failed and complete them. */ | 6622 | for (i = 0; i < h->nr_cmds; i++) { |
6667 | while (!list_empty(list)) { | 6623 | c = h->cmd_pool + i; |
6668 | c = list_entry(list->next, struct CommandList, list); | 6624 | refcount = atomic_inc_return(&c->refcount); |
6669 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; | 6625 | if (refcount > 1) { |
6670 | finish_cmd(c); | 6626 | c->err_info->CommandStatus = CMD_HARDWARE_ERR; |
6627 | finish_cmd(c); | ||
6628 | } | ||
6629 | cmd_free(h, c); | ||
6671 | } | 6630 | } |
6672 | } | 6631 | } |
6673 | 6632 | ||
@@ -6704,10 +6663,7 @@ static void controller_lockup_detected(struct ctlr_info *h) | |||
6704 | dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", | 6663 | dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", |
6705 | lockup_detected); | 6664 | lockup_detected); |
6706 | pci_disable_device(h->pdev); | 6665 | pci_disable_device(h->pdev); |
6707 | spin_lock_irqsave(&h->lock, flags); | 6666 | fail_all_outstanding_cmds(h); |
6708 | fail_all_cmds_on_list(h, &h->cmpQ); | ||
6709 | fail_all_cmds_on_list(h, &h->reqQ); | ||
6710 | spin_unlock_irqrestore(&h->lock, flags); | ||
6711 | } | 6667 | } |
6712 | 6668 | ||
6713 | static void detect_controller_lockup(struct ctlr_info *h) | 6669 | static void detect_controller_lockup(struct ctlr_info *h) |
@@ -6750,8 +6706,8 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h) | |||
6750 | int i; | 6706 | int i; |
6751 | char *event_type; | 6707 | char *event_type; |
6752 | 6708 | ||
6753 | /* Clear the driver-requested rescan flag */ | 6709 | if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) |
6754 | h->drv_req_rescan = 0; | 6710 | return; |
6755 | 6711 | ||
6756 | /* Ask the controller to clear the events we're handling. */ | 6712 | /* Ask the controller to clear the events we're handling. */ |
6757 | if ((h->transMethod & (CFGTBL_Trans_io_accel1 | 6713 | if ((h->transMethod & (CFGTBL_Trans_io_accel1 |
@@ -6798,9 +6754,6 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h) | |||
6798 | */ | 6754 | */ |
6799 | static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) | 6755 | static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) |
6800 | { | 6756 | { |
6801 | if (h->drv_req_rescan) | ||
6802 | return 1; | ||
6803 | |||
6804 | if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) | 6757 | if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) |
6805 | return 0; | 6758 | return 0; |
6806 | 6759 | ||
@@ -6834,34 +6787,60 @@ static int hpsa_offline_devices_ready(struct ctlr_info *h) | |||
6834 | return 0; | 6787 | return 0; |
6835 | } | 6788 | } |
6836 | 6789 | ||
6837 | 6790 | static void hpsa_rescan_ctlr_worker(struct work_struct *work) | |
6838 | static void hpsa_monitor_ctlr_worker(struct work_struct *work) | ||
6839 | { | 6791 | { |
6840 | unsigned long flags; | 6792 | unsigned long flags; |
6841 | struct ctlr_info *h = container_of(to_delayed_work(work), | 6793 | struct ctlr_info *h = container_of(to_delayed_work(work), |
6842 | struct ctlr_info, monitor_ctlr_work); | 6794 | struct ctlr_info, rescan_ctlr_work); |
6843 | detect_controller_lockup(h); | 6795 | |
6844 | if (lockup_detected(h)) | 6796 | |
6797 | if (h->remove_in_progress) | ||
6845 | return; | 6798 | return; |
6846 | 6799 | ||
6847 | if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { | 6800 | if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { |
6848 | scsi_host_get(h->scsi_host); | 6801 | scsi_host_get(h->scsi_host); |
6849 | h->drv_req_rescan = 0; | ||
6850 | hpsa_ack_ctlr_events(h); | 6802 | hpsa_ack_ctlr_events(h); |
6851 | hpsa_scan_start(h->scsi_host); | 6803 | hpsa_scan_start(h->scsi_host); |
6852 | scsi_host_put(h->scsi_host); | 6804 | scsi_host_put(h->scsi_host); |
6853 | } | 6805 | } |
6854 | |||
6855 | spin_lock_irqsave(&h->lock, flags); | 6806 | spin_lock_irqsave(&h->lock, flags); |
6856 | if (h->remove_in_progress) { | 6807 | if (!h->remove_in_progress) |
6857 | spin_unlock_irqrestore(&h->lock, flags); | 6808 | queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, |
6809 | h->heartbeat_sample_interval); | ||
6810 | spin_unlock_irqrestore(&h->lock, flags); | ||
6811 | } | ||
6812 | |||
6813 | static void hpsa_monitor_ctlr_worker(struct work_struct *work) | ||
6814 | { | ||
6815 | unsigned long flags; | ||
6816 | struct ctlr_info *h = container_of(to_delayed_work(work), | ||
6817 | struct ctlr_info, monitor_ctlr_work); | ||
6818 | |||
6819 | detect_controller_lockup(h); | ||
6820 | if (lockup_detected(h)) | ||
6858 | return; | 6821 | return; |
6859 | } | 6822 | |
6860 | schedule_delayed_work(&h->monitor_ctlr_work, | 6823 | spin_lock_irqsave(&h->lock, flags); |
6824 | if (!h->remove_in_progress) | ||
6825 | schedule_delayed_work(&h->monitor_ctlr_work, | ||
6861 | h->heartbeat_sample_interval); | 6826 | h->heartbeat_sample_interval); |
6862 | spin_unlock_irqrestore(&h->lock, flags); | 6827 | spin_unlock_irqrestore(&h->lock, flags); |
6863 | } | 6828 | } |
6864 | 6829 | ||
6830 | static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, | ||
6831 | char *name) | ||
6832 | { | ||
6833 | struct workqueue_struct *wq = NULL; | ||
6834 | char wq_name[20]; | ||
6835 | |||
6836 | snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr); | ||
6837 | wq = alloc_ordered_workqueue(wq_name, 0); | ||
6838 | if (!wq) | ||
6839 | dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); | ||
6840 | |||
6841 | return wq; | ||
6842 | } | ||
6843 | |||
6865 | static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 6844 | static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
6866 | { | 6845 | { |
6867 | int dac, rc; | 6846 | int dac, rc; |
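Controller monitoring is split in two: lockup detection keeps using the system workqueue, while rescans run on a dedicated ordered workqueue per controller, and each worker rearms itself only while the controller is not being removed. A sketch of the create-and-rearm pattern with the names from the hunk (the actual rescan work is elided):

#include <linux/workqueue.h>

/* Sketch: one ordered workqueue per controller. */
static struct workqueue_struct *make_ctlr_wq(struct ctlr_info *h,
                                             const char *name)
{
        char wq_name[20];

        snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
        return alloc_ordered_workqueue(wq_name, 0);     /* may return NULL */
}

/* Sketch: self-rearming rescan worker. */
static void rescan_worker(struct work_struct *work)
{
        unsigned long flags;
        struct ctlr_info *h = container_of(to_delayed_work(work),
                                           struct ctlr_info, rescan_ctlr_work);

        if (h->remove_in_progress)
                return;                         /* teardown already under way */

        /* ... rescan the controller here ... */

        spin_lock_irqsave(&h->lock, flags);
        if (!h->remove_in_progress)             /* don't rearm during removal */
                queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
                                   h->heartbeat_sample_interval);
        spin_unlock_irqrestore(&h->lock, flags);
}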
@@ -6898,13 +6877,23 @@ reinit_after_soft_reset: | |||
6898 | 6877 | ||
6899 | h->pdev = pdev; | 6878 | h->pdev = pdev; |
6900 | h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; | 6879 | h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; |
6901 | INIT_LIST_HEAD(&h->cmpQ); | ||
6902 | INIT_LIST_HEAD(&h->reqQ); | ||
6903 | INIT_LIST_HEAD(&h->offline_device_list); | 6880 | INIT_LIST_HEAD(&h->offline_device_list); |
6904 | spin_lock_init(&h->lock); | 6881 | spin_lock_init(&h->lock); |
6905 | spin_lock_init(&h->offline_device_lock); | 6882 | spin_lock_init(&h->offline_device_lock); |
6906 | spin_lock_init(&h->scan_lock); | 6883 | spin_lock_init(&h->scan_lock); |
6907 | spin_lock_init(&h->passthru_count_lock); | 6884 | atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); |
6885 | |||
6886 | h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); | ||
6887 | if (!h->rescan_ctlr_wq) { | ||
6888 | rc = -ENOMEM; | ||
6889 | goto clean1; | ||
6890 | } | ||
6891 | |||
6892 | h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); | ||
6893 | if (!h->resubmit_wq) { | ||
6894 | rc = -ENOMEM; | ||
6895 | goto clean1; | ||
6896 | } | ||
6908 | 6897 | ||
6909 | /* Allocate and clear per-cpu variable lockup_detected */ | 6898 | /* Allocate and clear per-cpu variable lockup_detected */ |
6910 | h->lockup_detected = alloc_percpu(u32); | 6899 | h->lockup_detected = alloc_percpu(u32); |
@@ -6939,13 +6928,14 @@ reinit_after_soft_reset: | |||
6939 | /* make sure the board interrupts are off */ | 6928 | /* make sure the board interrupts are off */ |
6940 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | 6929 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
6941 | 6930 | ||
6942 | if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) | 6931 | if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) |
6943 | goto clean2; | 6932 | goto clean2; |
6944 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", | 6933 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", |
6945 | h->devname, pdev->device, | 6934 | h->devname, pdev->device, |
6946 | h->intr[h->intr_mode], dac ? "" : " not"); | 6935 | h->intr[h->intr_mode], dac ? "" : " not"); |
6947 | if (hpsa_allocate_cmd_pool(h)) | 6936 | rc = hpsa_allocate_cmd_pool(h); |
6948 | goto clean4; | 6937 | if (rc) |
6938 | goto clean2_and_free_irqs; | ||
6949 | if (hpsa_allocate_sg_chain_blocks(h)) | 6939 | if (hpsa_allocate_sg_chain_blocks(h)) |
6950 | goto clean4; | 6940 | goto clean4; |
6951 | init_waitqueue_head(&h->scan_wait_queue); | 6941 | init_waitqueue_head(&h->scan_wait_queue); |
@@ -6974,12 +6964,12 @@ reinit_after_soft_reset: | |||
6974 | spin_lock_irqsave(&h->lock, flags); | 6964 | spin_lock_irqsave(&h->lock, flags); |
6975 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | 6965 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
6976 | spin_unlock_irqrestore(&h->lock, flags); | 6966 | spin_unlock_irqrestore(&h->lock, flags); |
6977 | free_irqs(h); | 6967 | hpsa_free_irqs(h); |
6978 | rc = hpsa_request_irq(h, hpsa_msix_discard_completions, | 6968 | rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, |
6979 | hpsa_intx_discard_completions); | 6969 | hpsa_intx_discard_completions); |
6980 | if (rc) { | 6970 | if (rc) { |
6981 | dev_warn(&h->pdev->dev, "Failed to request_irq after " | 6971 | dev_warn(&h->pdev->dev, |
6982 | "soft reset.\n"); | 6972 | "Failed to request_irq after soft reset.\n"); |
6983 | goto clean4; | 6973 | goto clean4; |
6984 | } | 6974 | } |
6985 | 6975 | ||
@@ -7016,7 +7006,6 @@ reinit_after_soft_reset: | |||
7016 | /* Enable Accelerated IO path at driver layer */ | 7006 | /* Enable Accelerated IO path at driver layer */ |
7017 | h->acciopath_status = 1; | 7007 | h->acciopath_status = 1; |
7018 | 7008 | ||
7019 | h->drv_req_rescan = 0; | ||
7020 | 7009 | ||
7021 | /* Turn the interrupts on so we can service requests */ | 7010 | /* Turn the interrupts on so we can service requests */ |
7022 | h->access.set_intr_mask(h, HPSA_INTR_ON); | 7011 | h->access.set_intr_mask(h, HPSA_INTR_ON); |
@@ -7029,14 +7018,22 @@ reinit_after_soft_reset: | |||
7029 | INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); | 7018 | INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); |
7030 | schedule_delayed_work(&h->monitor_ctlr_work, | 7019 | schedule_delayed_work(&h->monitor_ctlr_work, |
7031 | h->heartbeat_sample_interval); | 7020 | h->heartbeat_sample_interval); |
7021 | INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); | ||
7022 | queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, | ||
7023 | h->heartbeat_sample_interval); | ||
7032 | return 0; | 7024 | return 0; |
7033 | 7025 | ||
7034 | clean4: | 7026 | clean4: |
7035 | hpsa_free_sg_chain_blocks(h); | 7027 | hpsa_free_sg_chain_blocks(h); |
7036 | hpsa_free_cmd_pool(h); | 7028 | hpsa_free_cmd_pool(h); |
7037 | free_irqs(h); | 7029 | clean2_and_free_irqs: |
7030 | hpsa_free_irqs(h); | ||
7038 | clean2: | 7031 | clean2: |
7039 | clean1: | 7032 | clean1: |
7033 | if (h->resubmit_wq) | ||
7034 | destroy_workqueue(h->resubmit_wq); | ||
7035 | if (h->rescan_ctlr_wq) | ||
7036 | destroy_workqueue(h->rescan_ctlr_wq); | ||
7040 | if (h->lockup_detected) | 7037 | if (h->lockup_detected) |
7041 | free_percpu(h->lockup_detected); | 7038 | free_percpu(h->lockup_detected); |
7042 | kfree(h); | 7039 | kfree(h); |
@@ -7055,9 +7052,9 @@ static void hpsa_flush_cache(struct ctlr_info *h) | |||
7055 | if (!flush_buf) | 7052 | if (!flush_buf) |
7056 | return; | 7053 | return; |
7057 | 7054 | ||
7058 | c = cmd_special_alloc(h); | 7055 | c = cmd_alloc(h); |
7059 | if (!c) { | 7056 | if (!c) { |
7060 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 7057 | dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
7061 | goto out_of_memory; | 7058 | goto out_of_memory; |
7062 | } | 7059 | } |
7063 | if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, | 7060 | if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, |
@@ -7069,7 +7066,7 @@ static void hpsa_flush_cache(struct ctlr_info *h) | |||
7069 | out: | 7066 | out: |
7070 | dev_warn(&h->pdev->dev, | 7067 | dev_warn(&h->pdev->dev, |
7071 | "error flushing cache on controller\n"); | 7068 | "error flushing cache on controller\n"); |
7072 | cmd_special_free(h, c); | 7069 | cmd_free(h, c); |
7073 | out_of_memory: | 7070 | out_of_memory: |
7074 | kfree(flush_buf); | 7071 | kfree(flush_buf); |
7075 | } | 7072 | } |
@@ -7110,9 +7107,11 @@ static void hpsa_remove_one(struct pci_dev *pdev) | |||
7110 | /* Get rid of any controller monitoring work items */ | 7107 | /* Get rid of any controller monitoring work items */ |
7111 | spin_lock_irqsave(&h->lock, flags); | 7108 | spin_lock_irqsave(&h->lock, flags); |
7112 | h->remove_in_progress = 1; | 7109 | h->remove_in_progress = 1; |
7113 | cancel_delayed_work(&h->monitor_ctlr_work); | ||
7114 | spin_unlock_irqrestore(&h->lock, flags); | 7110 | spin_unlock_irqrestore(&h->lock, flags); |
7115 | 7111 | cancel_delayed_work_sync(&h->monitor_ctlr_work); | |
7112 | cancel_delayed_work_sync(&h->rescan_ctlr_work); | ||
7113 | destroy_workqueue(h->rescan_ctlr_wq); | ||
7114 | destroy_workqueue(h->resubmit_wq); | ||
7116 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ | 7115 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ |
7117 | hpsa_shutdown(pdev); | 7116 | hpsa_shutdown(pdev); |
7118 | iounmap(h->vaddr); | 7117 | iounmap(h->vaddr); |
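Removal now sets remove_in_progress under the lock first, then waits out any worker that is already running with the _sync cancels, and only then destroys the workqueues; this closes the window in which a worker could rearm itself after a plain cancel_delayed_work(). The ordering in isolation, with the same primitives as the hunk:

/* Sketch: stop self-rearming workers safely during remove. */
static void stop_ctlr_workers(struct ctlr_info *h)
{
        unsigned long flags;

        spin_lock_irqsave(&h->lock, flags);
        h->remove_in_progress = 1;              /* workers will refuse to rearm */
        spin_unlock_irqrestore(&h->lock, flags);

        /* wait for any instance that is already running */
        cancel_delayed_work_sync(&h->monitor_ctlr_work);
        cancel_delayed_work_sync(&h->rescan_ctlr_work);

        /* nothing can queue new work now, so the queues can go */
        destroy_workqueue(h->rescan_ctlr_wq);
        destroy_workqueue(h->resubmit_wq);
}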
@@ -7172,7 +7171,7 @@ static struct pci_driver hpsa_pci_driver = { | |||
7172 | * bits of the command address. | 7171 | * bits of the command address. |
7173 | */ | 7172 | */ |
7174 | static void calc_bucket_map(int bucket[], int num_buckets, | 7173 | static void calc_bucket_map(int bucket[], int num_buckets, |
7175 | int nsgs, int min_blocks, int *bucket_map) | 7174 | int nsgs, int min_blocks, u32 *bucket_map) |
7176 | { | 7175 | { |
7177 | int i, j, b, size; | 7176 | int i, j, b, size; |
7178 | 7177 | ||
@@ -7193,7 +7192,8 @@ static void calc_bucket_map(int bucket[], int num_buckets, | |||
7193 | } | 7192 | } |
7194 | } | 7193 | } |
7195 | 7194 | ||
7196 | static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | 7195 | /* return -ENODEV or other reason on error, 0 on success */ |
7196 | static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | ||
7197 | { | 7197 | { |
7198 | int i; | 7198 | int i; |
7199 | unsigned long register_value; | 7199 | unsigned long register_value; |
@@ -7285,12 +7285,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | |||
7285 | } | 7285 | } |
7286 | } | 7286 | } |
7287 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | 7287 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
7288 | hpsa_wait_for_mode_change_ack(h); | 7288 | if (hpsa_wait_for_mode_change_ack(h)) { |
7289 | dev_err(&h->pdev->dev, | ||
7290 | "performant mode problem - doorbell timeout\n"); | ||
7291 | return -ENODEV; | ||
7292 | } | ||
7289 | register_value = readl(&(h->cfgtable->TransportActive)); | 7293 | register_value = readl(&(h->cfgtable->TransportActive)); |
7290 | if (!(register_value & CFGTBL_Trans_Performant)) { | 7294 | if (!(register_value & CFGTBL_Trans_Performant)) { |
7291 | dev_warn(&h->pdev->dev, "unable to get board into" | 7295 | dev_err(&h->pdev->dev, |
7292 | " performant mode\n"); | 7296 | "performant mode problem - transport not active\n"); |
7293 | return; | 7297 | return -ENODEV; |
7294 | } | 7298 | } |
7295 | /* Change the access methods to the performant access methods */ | 7299 | /* Change the access methods to the performant access methods */ |
7296 | h->access = access; | 7300 | h->access = access; |
@@ -7298,7 +7302,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | |||
7298 | 7302 | ||
7299 | if (!((trans_support & CFGTBL_Trans_io_accel1) || | 7303 | if (!((trans_support & CFGTBL_Trans_io_accel1) || |
7300 | (trans_support & CFGTBL_Trans_io_accel2))) | 7304 | (trans_support & CFGTBL_Trans_io_accel2))) |
7301 | return; | 7305 | return 0; |
7302 | 7306 | ||
7303 | if (trans_support & CFGTBL_Trans_io_accel1) { | 7307 | if (trans_support & CFGTBL_Trans_io_accel1) { |
7304 | /* Set up I/O accelerator mode */ | 7308 | /* Set up I/O accelerator mode */ |
@@ -7328,12 +7332,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | |||
7328 | (i * sizeof(struct ErrorInfo))); | 7332 | (i * sizeof(struct ErrorInfo))); |
7329 | cp->err_info_len = sizeof(struct ErrorInfo); | 7333 | cp->err_info_len = sizeof(struct ErrorInfo); |
7330 | cp->sgl_offset = IOACCEL1_SGLOFFSET; | 7334 | cp->sgl_offset = IOACCEL1_SGLOFFSET; |
7331 | cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; | 7335 | cp->host_context_flags = |
7336 | cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); | ||
7332 | cp->timeout_sec = 0; | 7337 | cp->timeout_sec = 0; |
7333 | cp->ReplyQueue = 0; | 7338 | cp->ReplyQueue = 0; |
7334 | cp->tag = | 7339 | cp->tag = |
7335 | cpu_to_le64((i << DIRECT_LOOKUP_SHIFT) | | 7340 | cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); |
7336 | DIRECT_LOOKUP_BIT); | ||
7337 | cp->host_addr = | 7341 | cp->host_addr = |
7338 | cpu_to_le64(h->ioaccel_cmd_pool_dhandle + | 7342 | cpu_to_le64(h->ioaccel_cmd_pool_dhandle + |
7339 | (i * sizeof(struct io_accel1_cmd))); | 7343 | (i * sizeof(struct io_accel1_cmd))); |
@@ -7362,7 +7366,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) | |||
7362 | writel(bft2[i], &h->ioaccel2_bft2_regs[i]); | 7366 | writel(bft2[i], &h->ioaccel2_bft2_regs[i]); |
7363 | } | 7367 | } |
7364 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | 7368 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
7365 | hpsa_wait_for_mode_change_ack(h); | 7369 | if (hpsa_wait_for_mode_change_ack(h)) { |
7370 | dev_err(&h->pdev->dev, | ||
7371 | "performant mode problem - enabling ioaccel mode\n"); | ||
7372 | return -ENODEV; | ||
7373 | } | ||
7374 | return 0; | ||
7366 | } | 7375 | } |
7367 | 7376 | ||
7368 | static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) | 7377 | static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) |
@@ -7508,17 +7517,18 @@ static int is_accelerated_cmd(struct CommandList *c) | |||
7508 | static void hpsa_drain_accel_commands(struct ctlr_info *h) | 7517 | static void hpsa_drain_accel_commands(struct ctlr_info *h) |
7509 | { | 7518 | { |
7510 | struct CommandList *c = NULL; | 7519 | struct CommandList *c = NULL; |
7511 | unsigned long flags; | 7520 | int i, accel_cmds_out; |
7512 | int accel_cmds_out; | 7521 | int refcount; |
7513 | 7522 | ||
7514 | do { /* wait for all outstanding commands to drain out */ | 7523 | do { /* wait for all outstanding ioaccel commands to drain out */ |
7515 | accel_cmds_out = 0; | 7524 | accel_cmds_out = 0; |
7516 | spin_lock_irqsave(&h->lock, flags); | 7525 | for (i = 0; i < h->nr_cmds; i++) { |
7517 | list_for_each_entry(c, &h->cmpQ, list) | 7526 | c = h->cmd_pool + i; |
7518 | accel_cmds_out += is_accelerated_cmd(c); | 7527 | refcount = atomic_inc_return(&c->refcount); |
7519 | list_for_each_entry(c, &h->reqQ, list) | 7528 | if (refcount > 1) /* Command is allocated */ |
7520 | accel_cmds_out += is_accelerated_cmd(c); | 7529 | accel_cmds_out += is_accelerated_cmd(c); |
7521 | spin_unlock_irqrestore(&h->lock, flags); | 7530 | cmd_free(h, c); |
7531 | } | ||
7522 | if (accel_cmds_out <= 0) | 7532 | if (accel_cmds_out <= 0) |
7523 | break; | 7533 | break; |
7524 | msleep(100); | 7534 | msleep(100); |
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 8e06d9e280ec..657713050349 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
@@ -32,7 +32,6 @@ struct access_method { | |||
32 | void (*submit_command)(struct ctlr_info *h, | 32 | void (*submit_command)(struct ctlr_info *h, |
33 | struct CommandList *c); | 33 | struct CommandList *c); |
34 | void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); | 34 | void (*set_intr_mask)(struct ctlr_info *h, unsigned long val); |
35 | unsigned long (*fifo_full)(struct ctlr_info *h); | ||
36 | bool (*intr_pending)(struct ctlr_info *h); | 35 | bool (*intr_pending)(struct ctlr_info *h); |
37 | unsigned long (*command_completed)(struct ctlr_info *h, u8 q); | 36 | unsigned long (*command_completed)(struct ctlr_info *h, u8 q); |
38 | }; | 37 | }; |
@@ -47,6 +46,11 @@ struct hpsa_scsi_dev_t { | |||
47 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ | 46 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ |
48 | unsigned char raid_level; /* from inquiry page 0xC1 */ | 47 | unsigned char raid_level; /* from inquiry page 0xC1 */ |
49 | unsigned char volume_offline; /* discovered via TUR or VPD */ | 48 | unsigned char volume_offline; /* discovered via TUR or VPD */ |
49 | u16 queue_depth; /* max queue_depth for this device */ | ||
50 | atomic_t ioaccel_cmds_out; /* Only used for physical devices | ||
51 | * counts commands sent to physical | ||
52 | * device via "ioaccel" path. | ||
53 | */ | ||
50 | u32 ioaccel_handle; | 54 | u32 ioaccel_handle; |
51 | int offload_config; /* I/O accel RAID offload configured */ | 55 | int offload_config; /* I/O accel RAID offload configured */ |
52 | int offload_enabled; /* I/O accel RAID offload enabled */ | 56 | int offload_enabled; /* I/O accel RAID offload enabled */ |
@@ -55,6 +59,15 @@ struct hpsa_scsi_dev_t { | |||
55 | */ | 59 | */ |
56 | struct raid_map_data raid_map; /* I/O accelerator RAID map */ | 60 | struct raid_map_data raid_map; /* I/O accelerator RAID map */ |
57 | 61 | ||
62 | /* | ||
63 | * Pointers from logical drive map indices to the phys drives that | ||
64 | * make those logical drives. Note, multiple logical drives may | ||
65 | * share physical drives. You can have for instance 5 physical | ||
66 | * drives with 3 logical drives each using those same 5 physical | ||
67 | * disks. We need these pointers for counting i/o's out to physical | ||
68 | * devices in order to honor physical device queue depth limits. | ||
69 | */ | ||
70 | struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES]; | ||
58 | }; | 71 | }; |
59 | 72 | ||
60 | struct reply_queue_buffer { | 73 | struct reply_queue_buffer { |
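The new per-device fields give the ioaccel path what it needs to respect physical-drive limits: each logical volume records the physical disks behind it, and ioaccel_cmds_out counts commands in flight to each disk. The consuming code is outside this hunk; the intended use is presumably a check-and-increment against queue_depth before taking the accelerated path, as in this hypothetical sketch:

/* Hypothetical sketch: gate ioaccel submission on phys-disk queue depth. */
static bool phys_disk_has_room(struct hpsa_scsi_dev_t *phys_disk)
{
        if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
                        phys_disk->queue_depth) {
                /* over the limit: back off and let the RAID path handle it */
                atomic_dec(&phys_disk->ioaccel_cmds_out);
                return false;
        }
        return true;    /* caller must atomic_dec() when the command completes */
}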
@@ -115,9 +128,12 @@ struct ctlr_info { | |||
115 | void __iomem *vaddr; | 128 | void __iomem *vaddr; |
116 | unsigned long paddr; | 129 | unsigned long paddr; |
117 | int nr_cmds; /* Number of commands allowed on this controller */ | 130 | int nr_cmds; /* Number of commands allowed on this controller */ |
131 | #define HPSA_CMDS_RESERVED_FOR_ABORTS 2 | ||
132 | #define HPSA_CMDS_RESERVED_FOR_DRIVER 1 | ||
118 | struct CfgTable __iomem *cfgtable; | 133 | struct CfgTable __iomem *cfgtable; |
119 | int interrupts_enabled; | 134 | int interrupts_enabled; |
120 | int max_commands; | 135 | int max_commands; |
136 | int last_allocation; | ||
121 | atomic_t commands_outstanding; | 137 | atomic_t commands_outstanding; |
122 | # define PERF_MODE_INT 0 | 138 | # define PERF_MODE_INT 0 |
123 | # define DOORBELL_INT 1 | 139 | # define DOORBELL_INT 1 |
@@ -131,8 +147,6 @@ struct ctlr_info { | |||
131 | char hba_mode_enabled; | 147 | char hba_mode_enabled; |
132 | 148 | ||
133 | /* queue and queue Info */ | 149 | /* queue and queue Info */ |
134 | struct list_head reqQ; | ||
135 | struct list_head cmpQ; | ||
136 | unsigned int Qdepth; | 150 | unsigned int Qdepth; |
137 | unsigned int maxSG; | 151 | unsigned int maxSG; |
138 | spinlock_t lock; | 152 | spinlock_t lock; |
@@ -168,9 +182,8 @@ struct ctlr_info { | |||
168 | unsigned long transMethod; | 182 | unsigned long transMethod; |
169 | 183 | ||
170 | /* cap concurrent passthrus at some reasonable maximum */ | 184 | /* cap concurrent passthrus at some reasonable maximum */ |
171 | #define HPSA_MAX_CONCURRENT_PASSTHRUS (20) | 185 | #define HPSA_MAX_CONCURRENT_PASSTHRUS (10) |
172 | spinlock_t passthru_count_lock; /* protects passthru_count */ | 186 | atomic_t passthru_cmds_avail; |
173 | int passthru_count; | ||
174 | 187 | ||
175 | /* | 188 | /* |
176 | * Performant mode completion buffers | 189 | * Performant mode completion buffers |
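The passthru cap becomes a lock-free budget: an atomic initialised to HPSA_MAX_CONCURRENT_PASSTHRUS replaces the spinlock-protected counter. The consuming ioctl code is not in this hunk; a hypothetical reserve/release pair in the usual shape:

/* Hypothetical sketch: lock-free budget for concurrent passthru ioctls. */
static int passthru_reserve(struct ctlr_info *h)
{
        if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
                return -EAGAIN;         /* budget exhausted, try again later */
        return 0;
}

static void passthru_release(struct ctlr_info *h)
{
        atomic_inc(&h->passthru_cmds_avail);    /* give the slot back */
}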
@@ -194,8 +207,8 @@ struct ctlr_info { | |||
194 | atomic_t firmware_flash_in_progress; | 207 | atomic_t firmware_flash_in_progress; |
195 | u32 __percpu *lockup_detected; | 208 | u32 __percpu *lockup_detected; |
196 | struct delayed_work monitor_ctlr_work; | 209 | struct delayed_work monitor_ctlr_work; |
210 | struct delayed_work rescan_ctlr_work; | ||
197 | int remove_in_progress; | 211 | int remove_in_progress; |
198 | u32 fifo_recently_full; | ||
199 | /* Address of h->q[x] is passed to intr handler to know which queue */ | 212 | /* Address of h->q[x] is passed to intr handler to know which queue */ |
200 | u8 q[MAX_REPLY_QUEUES]; | 213 | u8 q[MAX_REPLY_QUEUES]; |
201 | u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ | 214 | u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ |
@@ -237,8 +250,9 @@ struct ctlr_info { | |||
237 | spinlock_t offline_device_lock; | 250 | spinlock_t offline_device_lock; |
238 | struct list_head offline_device_list; | 251 | struct list_head offline_device_list; |
239 | int acciopath_status; | 252 | int acciopath_status; |
240 | int drv_req_rescan; /* flag for driver to request rescan event */ | ||
241 | int raid_offload_debug; | 253 | int raid_offload_debug; |
254 | struct workqueue_struct *resubmit_wq; | ||
255 | struct workqueue_struct *rescan_ctlr_wq; | ||
242 | }; | 256 | }; |
243 | 257 | ||
244 | struct offline_device_entry { | 258 | struct offline_device_entry { |
@@ -297,6 +311,8 @@ struct offline_device_entry { | |||
297 | */ | 311 | */ |
298 | #define SA5_DOORBELL 0x20 | 312 | #define SA5_DOORBELL 0x20 |
299 | #define SA5_REQUEST_PORT_OFFSET 0x40 | 313 | #define SA5_REQUEST_PORT_OFFSET 0x40 |
314 | #define SA5_REQUEST_PORT64_LO_OFFSET 0xC0 | ||
315 | #define SA5_REQUEST_PORT64_HI_OFFSET 0xC4 | ||
300 | #define SA5_REPLY_INTR_MASK_OFFSET 0x34 | 316 | #define SA5_REPLY_INTR_MASK_OFFSET 0x34 |
301 | #define SA5_REPLY_PORT_OFFSET 0x44 | 317 | #define SA5_REPLY_PORT_OFFSET 0x44 |
302 | #define SA5_INTR_STATUS 0x30 | 318 | #define SA5_INTR_STATUS 0x30 |
@@ -353,10 +369,7 @@ static void SA5_submit_command_no_read(struct ctlr_info *h, | |||
353 | static void SA5_submit_command_ioaccel2(struct ctlr_info *h, | 369 | static void SA5_submit_command_ioaccel2(struct ctlr_info *h, |
354 | struct CommandList *c) | 370 | struct CommandList *c) |
355 | { | 371 | { |
356 | if (c->cmd_type == CMD_IOACCEL2) | 372 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); |
357 | writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); | ||
358 | else | ||
359 | writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); | ||
360 | } | 373 | } |
361 | 374 | ||
362 | /* | 375 | /* |
@@ -398,19 +411,19 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) | |||
398 | unsigned long register_value = FIFO_EMPTY; | 411 | unsigned long register_value = FIFO_EMPTY; |
399 | 412 | ||
400 | /* msi auto clears the interrupt pending bit. */ | 413 | /* msi auto clears the interrupt pending bit. */ |
401 | if (!(h->msi_vector || h->msix_vector)) { | 414 | if (unlikely(!(h->msi_vector || h->msix_vector))) { |
402 | /* flush the controller write of the reply queue by reading | 415 | /* flush the controller write of the reply queue by reading |
403 | * outbound doorbell status register. | 416 | * outbound doorbell status register. |
404 | */ | 417 | */ |
405 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | 418 | (void) readl(h->vaddr + SA5_OUTDB_STATUS); |
406 | writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); | 419 | writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); |
407 | /* Do a read in order to flush the write to the controller | 420 | /* Do a read in order to flush the write to the controller |
408 | * (as per spec.) | 421 | * (as per spec.) |
409 | */ | 422 | */ |
410 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | 423 | (void) readl(h->vaddr + SA5_OUTDB_STATUS); |
411 | } | 424 | } |
412 | 425 | ||
413 | if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { | 426 | if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) { |
414 | register_value = rq->head[rq->current_entry]; | 427 | register_value = rq->head[rq->current_entry]; |
415 | rq->current_entry++; | 428 | rq->current_entry++; |
416 | atomic_dec(&h->commands_outstanding); | 429 | atomic_dec(&h->commands_outstanding); |
@@ -426,14 +439,6 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) | |||
426 | } | 439 | } |
427 | 440 | ||
428 | /* | 441 | /* |
429 | * Returns true if fifo is full. | ||
430 | * | ||
431 | */ | ||
432 | static unsigned long SA5_fifo_full(struct ctlr_info *h) | ||
433 | { | ||
434 | return atomic_read(&h->commands_outstanding) >= h->max_commands; | ||
435 | } | ||
436 | /* | ||
437 | * returns value read from hardware. | 442 | * returns value read from hardware. |
438 | * returns FIFO_EMPTY if there is nothing to read | 443 | * returns FIFO_EMPTY if there is nothing to read |
439 | */ | 444 | */ |
@@ -473,9 +478,6 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h) | |||
473 | if (!register_value) | 478 | if (!register_value) |
474 | return false; | 479 | return false; |
475 | 480 | ||
476 | if (h->msi_vector || h->msix_vector) | ||
477 | return true; | ||
478 | |||
479 | /* Read outbound doorbell to flush */ | 481 | /* Read outbound doorbell to flush */ |
480 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); | 482 | register_value = readl(h->vaddr + SA5_OUTDB_STATUS); |
481 | return register_value & SA5_OUTDB_STATUS_PERF_BIT; | 483 | return register_value & SA5_OUTDB_STATUS_PERF_BIT; |
@@ -525,7 +527,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) | |||
525 | static struct access_method SA5_access = { | 527 | static struct access_method SA5_access = { |
526 | SA5_submit_command, | 528 | SA5_submit_command, |
527 | SA5_intr_mask, | 529 | SA5_intr_mask, |
528 | SA5_fifo_full, | ||
529 | SA5_intr_pending, | 530 | SA5_intr_pending, |
530 | SA5_completed, | 531 | SA5_completed, |
531 | }; | 532 | }; |
@@ -533,7 +534,6 @@ static struct access_method SA5_access = { | |||
533 | static struct access_method SA5_ioaccel_mode1_access = { | 534 | static struct access_method SA5_ioaccel_mode1_access = { |
534 | SA5_submit_command, | 535 | SA5_submit_command, |
535 | SA5_performant_intr_mask, | 536 | SA5_performant_intr_mask, |
536 | SA5_fifo_full, | ||
537 | SA5_ioaccel_mode1_intr_pending, | 537 | SA5_ioaccel_mode1_intr_pending, |
538 | SA5_ioaccel_mode1_completed, | 538 | SA5_ioaccel_mode1_completed, |
539 | }; | 539 | }; |
@@ -541,7 +541,6 @@ static struct access_method SA5_ioaccel_mode1_access = { | |||
541 | static struct access_method SA5_ioaccel_mode2_access = { | 541 | static struct access_method SA5_ioaccel_mode2_access = { |
542 | SA5_submit_command_ioaccel2, | 542 | SA5_submit_command_ioaccel2, |
543 | SA5_performant_intr_mask, | 543 | SA5_performant_intr_mask, |
544 | SA5_fifo_full, | ||
545 | SA5_performant_intr_pending, | 544 | SA5_performant_intr_pending, |
546 | SA5_performant_completed, | 545 | SA5_performant_completed, |
547 | }; | 546 | }; |
@@ -549,7 +548,6 @@ static struct access_method SA5_ioaccel_mode2_access = { | |||
549 | static struct access_method SA5_performant_access = { | 548 | static struct access_method SA5_performant_access = { |
550 | SA5_submit_command, | 549 | SA5_submit_command, |
551 | SA5_performant_intr_mask, | 550 | SA5_performant_intr_mask, |
552 | SA5_fifo_full, | ||
553 | SA5_performant_intr_pending, | 551 | SA5_performant_intr_pending, |
554 | SA5_performant_completed, | 552 | SA5_performant_completed, |
555 | }; | 553 | }; |
@@ -557,7 +555,6 @@ static struct access_method SA5_performant_access = { | |||
557 | static struct access_method SA5_performant_access_no_read = { | 555 | static struct access_method SA5_performant_access_no_read = { |
558 | SA5_submit_command_no_read, | 556 | SA5_submit_command_no_read, |
559 | SA5_performant_intr_mask, | 557 | SA5_performant_intr_mask, |
560 | SA5_fifo_full, | ||
561 | SA5_performant_intr_pending, | 558 | SA5_performant_intr_pending, |
562 | SA5_performant_completed, | 559 | SA5_performant_completed, |
563 | }; | 560 | }; |
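The hpsa.h hunks above replace the spinlock-protected passthru_count with an atomic_t passthru_cmds_avail (and lower the cap to 10). A minimal sketch of the lockless slot-reservation pattern such a counter supports; the helper names below are illustrative only, not functions from the driver:

	#include <linux/atomic.h>
	#include <linux/errno.h>

	/* Try to claim one of the HPSA_MAX_CONCURRENT_PASSTHRUS slots. */
	static int example_passthru_get(atomic_t *avail)
	{
		/*
		 * atomic_dec_if_positive() only decrements when the result
		 * stays >= 0, so a negative return means no slot was taken.
		 */
		if (atomic_dec_if_positive(avail) < 0)
			return -EBUSY;
		return 0;
	}

	/* Give the slot back once the passthru command completes. */
	static void example_passthru_put(atomic_t *avail)
	{
		atomic_inc(avail);
	}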
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index cb988c41cad9..3a621c74b76f 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h | |||
@@ -206,27 +206,27 @@ struct raid_map_disk_data { | |||
206 | }; | 206 | }; |
207 | 207 | ||
208 | struct raid_map_data { | 208 | struct raid_map_data { |
209 | u32 structure_size; /* Size of entire structure in bytes */ | 209 | __le32 structure_size; /* Size of entire structure in bytes */ |
210 | u32 volume_blk_size; /* bytes / block in the volume */ | 210 | __le32 volume_blk_size; /* bytes / block in the volume */ |
211 | u64 volume_blk_cnt; /* logical blocks on the volume */ | 211 | __le64 volume_blk_cnt; /* logical blocks on the volume */ |
212 | u8 phys_blk_shift; /* Shift factor to convert between | 212 | u8 phys_blk_shift; /* Shift factor to convert between |
213 | * units of logical blocks and physical | 213 | * units of logical blocks and physical |
214 | * disk blocks */ | 214 | * disk blocks */ |
215 | u8 parity_rotation_shift; /* Shift factor to convert between units | 215 | u8 parity_rotation_shift; /* Shift factor to convert between units |
216 | * of logical stripes and physical | 216 | * of logical stripes and physical |
217 | * stripes */ | 217 | * stripes */ |
218 | u16 strip_size; /* blocks used on each disk / stripe */ | 218 | __le16 strip_size; /* blocks used on each disk / stripe */ |
219 | u64 disk_starting_blk; /* First disk block used in volume */ | 219 | __le64 disk_starting_blk; /* First disk block used in volume */ |
220 | u64 disk_blk_cnt; /* disk blocks used by volume / disk */ | 220 | __le64 disk_blk_cnt; /* disk blocks used by volume / disk */ |
221 | u16 data_disks_per_row; /* data disk entries / row in the map */ | 221 | __le16 data_disks_per_row; /* data disk entries / row in the map */ |
222 | u16 metadata_disks_per_row; /* mirror/parity disk entries / row | 222 | __le16 metadata_disks_per_row;/* mirror/parity disk entries / row |
223 | * in the map */ | 223 | * in the map */ |
224 | u16 row_cnt; /* rows in each layout map */ | 224 | __le16 row_cnt; /* rows in each layout map */ |
225 | u16 layout_map_count; /* layout maps (1 map per mirror/parity | 225 | __le16 layout_map_count; /* layout maps (1 map per mirror/parity |
226 | * group) */ | 226 | * group) */ |
227 | u16 flags; /* Bit 0 set if encryption enabled */ | 227 | __le16 flags; /* Bit 0 set if encryption enabled */ |
228 | #define RAID_MAP_FLAG_ENCRYPT_ON 0x01 | 228 | #define RAID_MAP_FLAG_ENCRYPT_ON 0x01 |
229 | u16 dekindex; /* Data encryption key index. */ | 229 | __le16 dekindex; /* Data encryption key index. */ |
230 | u8 reserved[16]; | 230 | u8 reserved[16]; |
231 | struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; | 231 | struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; |
232 | }; | 232 | }; |
@@ -240,6 +240,10 @@ struct ReportLUNdata { | |||
240 | 240 | ||
241 | struct ext_report_lun_entry { | 241 | struct ext_report_lun_entry { |
242 | u8 lunid[8]; | 242 | u8 lunid[8]; |
243 | #define GET_BMIC_BUS(lunid) ((lunid)[7] & 0x3F) | ||
244 | #define GET_BMIC_LEVEL_TWO_TARGET(lunid) ((lunid)[6]) | ||
245 | #define GET_BMIC_DRIVE_NUMBER(lunid) (((GET_BMIC_BUS((lunid)) - 1) << 8) + \ | ||
246 | GET_BMIC_LEVEL_TWO_TARGET((lunid))) | ||
243 | u8 wwid[8]; | 247 | u8 wwid[8]; |
244 | u8 device_type; | 248 | u8 device_type; |
245 | u8 device_flags; | 249 | u8 device_flags; |
@@ -268,6 +272,7 @@ struct SenseSubsystem_info { | |||
268 | #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ | 272 | #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ |
269 | #define BMIC_FLASH_FIRMWARE 0xF7 | 273 | #define BMIC_FLASH_FIRMWARE 0xF7 |
270 | #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 | 274 | #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 |
275 | #define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15 | ||
271 | 276 | ||
272 | /* Command List Structure */ | 277 | /* Command List Structure */ |
273 | union SCSI3Addr { | 278 | union SCSI3Addr { |
@@ -313,8 +318,8 @@ union LUNAddr { | |||
313 | struct CommandListHeader { | 318 | struct CommandListHeader { |
314 | u8 ReplyQueue; | 319 | u8 ReplyQueue; |
315 | u8 SGList; | 320 | u8 SGList; |
316 | u16 SGTotal; | 321 | __le16 SGTotal; |
317 | u64 tag; | 322 | __le64 tag; |
318 | union LUNAddr LUN; | 323 | union LUNAddr LUN; |
319 | }; | 324 | }; |
320 | 325 | ||
@@ -338,14 +343,14 @@ struct RequestBlock { | |||
338 | }; | 343 | }; |
339 | 344 | ||
340 | struct ErrDescriptor { | 345 | struct ErrDescriptor { |
341 | u64 Addr; | 346 | __le64 Addr; |
342 | u32 Len; | 347 | __le32 Len; |
343 | }; | 348 | }; |
344 | 349 | ||
345 | struct SGDescriptor { | 350 | struct SGDescriptor { |
346 | u64 Addr; | 351 | __le64 Addr; |
347 | u32 Len; | 352 | __le32 Len; |
348 | u32 Ext; | 353 | __le32 Ext; |
349 | }; | 354 | }; |
350 | 355 | ||
351 | union MoreErrInfo { | 356 | union MoreErrInfo { |
@@ -375,22 +380,19 @@ struct ErrorInfo { | |||
375 | #define CMD_IOACCEL1 0x04 | 380 | #define CMD_IOACCEL1 0x04 |
376 | #define CMD_IOACCEL2 0x05 | 381 | #define CMD_IOACCEL2 0x05 |
377 | 382 | ||
378 | #define DIRECT_LOOKUP_SHIFT 5 | 383 | #define DIRECT_LOOKUP_SHIFT 4 |
379 | #define DIRECT_LOOKUP_BIT 0x10 | ||
380 | #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) | 384 | #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) |
381 | 385 | ||
382 | #define HPSA_ERROR_BIT 0x02 | 386 | #define HPSA_ERROR_BIT 0x02 |
383 | struct ctlr_info; /* defined in hpsa.h */ | 387 | struct ctlr_info; /* defined in hpsa.h */ |
384 | /* The size of this structure needs to be divisible by 32 | 388 | /* The size of this structure needs to be divisible by 128 |
385 | * on all architectures because low 5 bits of the addresses | 389 | * on all architectures. The low 4 bits of the addresses |
386 | * are used as follows: | 390 | * are used as follows: |
387 | * | 391 | * |
388 | * bit 0: to device, used to indicate "performant mode" command | 392 | * bit 0: to device, used to indicate "performant mode" command |
389 | * from device, indicates error status. | 393 | * from device, indicates error status. |
390 | * bit 1-3: to device, indicates block fetch table entry for | 394 | * bit 1-3: to device, indicates block fetch table entry for |
391 | * reducing DMA in fetching commands from host memory. | 395 | * reducing DMA in fetching commands from host memory. |
392 | * bit 4: used to indicate whether tag is "direct lookup" (index), | ||
393 | * or a bus address. | ||
394 | */ | 396 | */ |
395 | 397 | ||
396 | #define COMMANDLIST_ALIGNMENT 128 | 398 | #define COMMANDLIST_ALIGNMENT 128 |
@@ -405,9 +407,21 @@ struct CommandList { | |||
405 | struct ctlr_info *h; | 407 | struct ctlr_info *h; |
406 | int cmd_type; | 408 | int cmd_type; |
407 | long cmdindex; | 409 | long cmdindex; |
408 | struct list_head list; | ||
409 | struct completion *waiting; | 410 | struct completion *waiting; |
410 | void *scsi_cmd; | 411 | struct scsi_cmnd *scsi_cmd; |
412 | struct work_struct work; | ||
413 | |||
414 | /* | ||
415 | * For commands using either of the two "ioaccel" paths to | ||
416 | * bypass the RAID stack and go directly to the physical disk | ||
417 | * phys_disk is a pointer to the hpsa_scsi_dev_t to which the | ||
418 | * i/o is destined. We need to store that here because the command | ||
419 | * may potentially encounter TASK SET FULL and need to be resubmitted | ||
420 | * For "normal" i/o's not using the "ioaccel" paths, phys_disk is | ||
421 | * not used. | ||
422 | */ | ||
423 | struct hpsa_scsi_dev_t *phys_disk; | ||
424 | atomic_t refcount; /* Must be last to avoid memset in cmd_alloc */ | ||
411 | } __aligned(COMMANDLIST_ALIGNMENT); | 425 | } __aligned(COMMANDLIST_ALIGNMENT); |
412 | 426 | ||
413 | /* Max S/G elements in I/O accelerator command */ | 427 | /* Max S/G elements in I/O accelerator command */ |
@@ -420,7 +434,7 @@ struct CommandList { | |||
420 | */ | 434 | */ |
421 | #define IOACCEL1_COMMANDLIST_ALIGNMENT 128 | 435 | #define IOACCEL1_COMMANDLIST_ALIGNMENT 128 |
422 | struct io_accel1_cmd { | 436 | struct io_accel1_cmd { |
423 | u16 dev_handle; /* 0x00 - 0x01 */ | 437 | __le16 dev_handle; /* 0x00 - 0x01 */ |
424 | u8 reserved1; /* 0x02 */ | 438 | u8 reserved1; /* 0x02 */ |
425 | u8 function; /* 0x03 */ | 439 | u8 function; /* 0x03 */ |
426 | u8 reserved2[8]; /* 0x04 - 0x0B */ | 440 | u8 reserved2[8]; /* 0x04 - 0x0B */ |
@@ -430,20 +444,20 @@ struct io_accel1_cmd { | |||
430 | u8 reserved4; /* 0x13 */ | 444 | u8 reserved4; /* 0x13 */ |
431 | u8 sgl_offset; /* 0x14 */ | 445 | u8 sgl_offset; /* 0x14 */ |
432 | u8 reserved5[7]; /* 0x15 - 0x1B */ | 446 | u8 reserved5[7]; /* 0x15 - 0x1B */ |
433 | u32 transfer_len; /* 0x1C - 0x1F */ | 447 | __le32 transfer_len; /* 0x1C - 0x1F */ |
434 | u8 reserved6[4]; /* 0x20 - 0x23 */ | 448 | u8 reserved6[4]; /* 0x20 - 0x23 */ |
435 | u16 io_flags; /* 0x24 - 0x25 */ | 449 | __le16 io_flags; /* 0x24 - 0x25 */ |
436 | u8 reserved7[14]; /* 0x26 - 0x33 */ | 450 | u8 reserved7[14]; /* 0x26 - 0x33 */ |
437 | u8 LUN[8]; /* 0x34 - 0x3B */ | 451 | u8 LUN[8]; /* 0x34 - 0x3B */ |
438 | u32 control; /* 0x3C - 0x3F */ | 452 | __le32 control; /* 0x3C - 0x3F */ |
439 | u8 CDB[16]; /* 0x40 - 0x4F */ | 453 | u8 CDB[16]; /* 0x40 - 0x4F */ |
440 | u8 reserved8[16]; /* 0x50 - 0x5F */ | 454 | u8 reserved8[16]; /* 0x50 - 0x5F */ |
441 | u16 host_context_flags; /* 0x60 - 0x61 */ | 455 | __le16 host_context_flags; /* 0x60 - 0x61 */ |
442 | u16 timeout_sec; /* 0x62 - 0x63 */ | 456 | __le16 timeout_sec; /* 0x62 - 0x63 */ |
443 | u8 ReplyQueue; /* 0x64 */ | 457 | u8 ReplyQueue; /* 0x64 */ |
444 | u8 reserved9[3]; /* 0x65 - 0x67 */ | 458 | u8 reserved9[3]; /* 0x65 - 0x67 */ |
445 | u64 tag; /* 0x68 - 0x6F */ | 459 | __le64 tag; /* 0x68 - 0x6F */ |
446 | u64 host_addr; /* 0x70 - 0x77 */ | 460 | __le64 host_addr; /* 0x70 - 0x77 */ |
447 | u8 CISS_LUN[8]; /* 0x78 - 0x7F */ | 461 | u8 CISS_LUN[8]; /* 0x78 - 0x7F */ |
448 | struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; | 462 | struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; |
449 | } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); | 463 | } __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); |
@@ -470,8 +484,8 @@ struct io_accel1_cmd { | |||
470 | #define IOACCEL1_BUSADDR_CMDTYPE 0x00000060 | 484 | #define IOACCEL1_BUSADDR_CMDTYPE 0x00000060 |
471 | 485 | ||
472 | struct ioaccel2_sg_element { | 486 | struct ioaccel2_sg_element { |
473 | u64 address; | 487 | __le64 address; |
474 | u32 length; | 488 | __le32 length; |
475 | u8 reserved[3]; | 489 | u8 reserved[3]; |
476 | u8 chain_indicator; | 490 | u8 chain_indicator; |
477 | #define IOACCEL2_CHAIN 0x80 | 491 | #define IOACCEL2_CHAIN 0x80 |
@@ -526,20 +540,20 @@ struct io_accel2_cmd { | |||
526 | /* 0=off, 1=on */ | 540 | /* 0=off, 1=on */ |
527 | u8 reply_queue; /* Reply Queue ID */ | 541 | u8 reply_queue; /* Reply Queue ID */ |
528 | u8 reserved1; /* Reserved */ | 542 | u8 reserved1; /* Reserved */ |
529 | u32 scsi_nexus; /* Device Handle */ | 543 | __le32 scsi_nexus; /* Device Handle */ |
530 | u32 Tag; /* cciss tag, lower 4 bytes only */ | 544 | __le32 Tag; /* cciss tag, lower 4 bytes only */ |
531 | u32 tweak_lower; /* Encryption tweak, lower 4 bytes */ | 545 | __le32 tweak_lower; /* Encryption tweak, lower 4 bytes */ |
532 | u8 cdb[16]; /* SCSI Command Descriptor Block */ | 546 | u8 cdb[16]; /* SCSI Command Descriptor Block */ |
533 | u8 cciss_lun[8]; /* 8 byte SCSI address */ | 547 | u8 cciss_lun[8]; /* 8 byte SCSI address */ |
534 | u32 data_len; /* Total bytes to transfer */ | 548 | __le32 data_len; /* Total bytes to transfer */ |
535 | u8 cmd_priority_task_attr; /* priority and task attrs */ | 549 | u8 cmd_priority_task_attr; /* priority and task attrs */ |
536 | #define IOACCEL2_PRIORITY_MASK 0x78 | 550 | #define IOACCEL2_PRIORITY_MASK 0x78 |
537 | #define IOACCEL2_ATTR_MASK 0x07 | 551 | #define IOACCEL2_ATTR_MASK 0x07 |
538 | u8 sg_count; /* Number of sg elements */ | 552 | u8 sg_count; /* Number of sg elements */ |
539 | u16 dekindex; /* Data encryption key index */ | 553 | __le16 dekindex; /* Data encryption key index */ |
540 | u64 err_ptr; /* Error Pointer */ | 554 | __le64 err_ptr; /* Error Pointer */ |
541 | u32 err_len; /* Error Length*/ | 555 | __le32 err_len; /* Error Length*/ |
542 | u32 tweak_upper; /* Encryption tweak, upper 4 bytes */ | 556 | __le32 tweak_upper; /* Encryption tweak, upper 4 bytes */ |
543 | struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; | 557 | struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; |
544 | struct io_accel2_scsi_response error_data; | 558 | struct io_accel2_scsi_response error_data; |
545 | } __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); | 559 | } __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); |
@@ -563,18 +577,18 @@ struct hpsa_tmf_struct { | |||
563 | u8 reserved1; /* byte 3 Reserved */ | 577 | u8 reserved1; /* byte 3 Reserved */ |
564 | u32 it_nexus; /* SCSI I-T Nexus */ | 578 | u32 it_nexus; /* SCSI I-T Nexus */ |
565 | u8 lun_id[8]; /* LUN ID for TMF request */ | 579 | u8 lun_id[8]; /* LUN ID for TMF request */ |
566 | u64 tag; /* cciss tag associated w/ request */ | 580 | __le64 tag; /* cciss tag associated w/ request */ |
567 | u64 abort_tag; /* cciss tag of SCSI cmd or task to abort */ | 581 | __le64 abort_tag; /* cciss tag of SCSI cmd or TMF to abort */ |
568 | u64 error_ptr; /* Error Pointer */ | 582 | __le64 error_ptr; /* Error Pointer */ |
569 | u32 error_len; /* Error Length */ | 583 | __le32 error_len; /* Error Length */ |
570 | }; | 584 | }; |
571 | 585 | ||
572 | /* Configuration Table Structure */ | 586 | /* Configuration Table Structure */ |
573 | struct HostWrite { | 587 | struct HostWrite { |
574 | u32 TransportRequest; | 588 | __le32 TransportRequest; |
575 | u32 command_pool_addr_hi; | 589 | __le32 command_pool_addr_hi; |
576 | u32 CoalIntDelay; | 590 | __le32 CoalIntDelay; |
577 | u32 CoalIntCount; | 591 | __le32 CoalIntCount; |
578 | }; | 592 | }; |
579 | 593 | ||
580 | #define SIMPLE_MODE 0x02 | 594 | #define SIMPLE_MODE 0x02 |
@@ -585,54 +599,54 @@ struct HostWrite { | |||
585 | #define DRIVER_SUPPORT_UA_ENABLE 0x00000001 | 599 | #define DRIVER_SUPPORT_UA_ENABLE 0x00000001 |
586 | 600 | ||
587 | struct CfgTable { | 601 | struct CfgTable { |
588 | u8 Signature[4]; | 602 | u8 Signature[4]; |
589 | u32 SpecValence; | 603 | __le32 SpecValence; |
590 | u32 TransportSupport; | 604 | __le32 TransportSupport; |
591 | u32 TransportActive; | 605 | __le32 TransportActive; |
592 | struct HostWrite HostWrite; | 606 | struct HostWrite HostWrite; |
593 | u32 CmdsOutMax; | 607 | __le32 CmdsOutMax; |
594 | u32 BusTypes; | 608 | __le32 BusTypes; |
595 | u32 TransMethodOffset; | 609 | __le32 TransMethodOffset; |
596 | u8 ServerName[16]; | 610 | u8 ServerName[16]; |
597 | u32 HeartBeat; | 611 | __le32 HeartBeat; |
598 | u32 driver_support; | 612 | __le32 driver_support; |
599 | #define ENABLE_SCSI_PREFETCH 0x100 | 613 | #define ENABLE_SCSI_PREFETCH 0x100 |
600 | #define ENABLE_UNIT_ATTN 0x01 | 614 | #define ENABLE_UNIT_ATTN 0x01 |
601 | u32 MaxScatterGatherElements; | 615 | __le32 MaxScatterGatherElements; |
602 | u32 MaxLogicalUnits; | 616 | __le32 MaxLogicalUnits; |
603 | u32 MaxPhysicalDevices; | 617 | __le32 MaxPhysicalDevices; |
604 | u32 MaxPhysicalDrivesPerLogicalUnit; | 618 | __le32 MaxPhysicalDrivesPerLogicalUnit; |
605 | u32 MaxPerformantModeCommands; | 619 | __le32 MaxPerformantModeCommands; |
606 | u32 MaxBlockFetch; | 620 | __le32 MaxBlockFetch; |
607 | u32 PowerConservationSupport; | 621 | __le32 PowerConservationSupport; |
608 | u32 PowerConservationEnable; | 622 | __le32 PowerConservationEnable; |
609 | u32 TMFSupportFlags; | 623 | __le32 TMFSupportFlags; |
610 | u8 TMFTagMask[8]; | 624 | u8 TMFTagMask[8]; |
611 | u8 reserved[0x78 - 0x70]; | 625 | u8 reserved[0x78 - 0x70]; |
612 | u32 misc_fw_support; /* offset 0x78 */ | 626 | __le32 misc_fw_support; /* offset 0x78 */ |
613 | #define MISC_FW_DOORBELL_RESET (0x02) | 627 | #define MISC_FW_DOORBELL_RESET 0x02 |
614 | #define MISC_FW_DOORBELL_RESET2 (0x010) | 628 | #define MISC_FW_DOORBELL_RESET2 0x010 |
615 | #define MISC_FW_RAID_OFFLOAD_BASIC (0x020) | 629 | #define MISC_FW_RAID_OFFLOAD_BASIC 0x020 |
616 | #define MISC_FW_EVENT_NOTIFY (0x080) | 630 | #define MISC_FW_EVENT_NOTIFY 0x080 |
617 | u8 driver_version[32]; | 631 | u8 driver_version[32]; |
618 | u32 max_cached_write_size; | 632 | __le32 max_cached_write_size; |
619 | u8 driver_scratchpad[16]; | 633 | u8 driver_scratchpad[16]; |
620 | u32 max_error_info_length; | 634 | __le32 max_error_info_length; |
621 | u32 io_accel_max_embedded_sg_count; | 635 | __le32 io_accel_max_embedded_sg_count; |
622 | u32 io_accel_request_size_offset; | 636 | __le32 io_accel_request_size_offset; |
623 | u32 event_notify; | 637 | __le32 event_notify; |
624 | #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) | 638 | #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) |
625 | #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) | 639 | #define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) |
626 | u32 clear_event_notify; | 640 | __le32 clear_event_notify; |
627 | }; | 641 | }; |
628 | 642 | ||
629 | #define NUM_BLOCKFETCH_ENTRIES 8 | 643 | #define NUM_BLOCKFETCH_ENTRIES 8 |
630 | struct TransTable_struct { | 644 | struct TransTable_struct { |
631 | u32 BlockFetch[NUM_BLOCKFETCH_ENTRIES]; | 645 | __le32 BlockFetch[NUM_BLOCKFETCH_ENTRIES]; |
632 | u32 RepQSize; | 646 | __le32 RepQSize; |
633 | u32 RepQCount; | 647 | __le32 RepQCount; |
634 | u32 RepQCtrAddrLow32; | 648 | __le32 RepQCtrAddrLow32; |
635 | u32 RepQCtrAddrHigh32; | 649 | __le32 RepQCtrAddrHigh32; |
636 | #define MAX_REPLY_QUEUES 64 | 650 | #define MAX_REPLY_QUEUES 64 |
637 | struct vals32 RepQAddr[MAX_REPLY_QUEUES]; | 651 | struct vals32 RepQAddr[MAX_REPLY_QUEUES]; |
638 | }; | 652 | }; |
@@ -644,5 +658,137 @@ struct hpsa_pci_info { | |||
644 | u32 board_id; | 658 | u32 board_id; |
645 | }; | 659 | }; |
646 | 660 | ||
661 | struct bmic_identify_physical_device { | ||
662 | u8 scsi_bus; /* SCSI Bus number on controller */ | ||
663 | u8 scsi_id; /* SCSI ID on this bus */ | ||
664 | __le16 block_size; /* sector size in bytes */ | ||
665 | __le32 total_blocks; /* number of sectors on drive */ | ||
666 | __le32 reserved_blocks; /* controller reserved (RIS) */ | ||
667 | u8 model[40]; /* Physical Drive Model */ | ||
668 | u8 serial_number[40]; /* Drive Serial Number */ | ||
669 | u8 firmware_revision[8]; /* drive firmware revision */ | ||
670 | u8 scsi_inquiry_bits; /* inquiry byte 7 bits */ | ||
671 | u8 compaq_drive_stamp; /* 0 means drive not stamped */ | ||
672 | u8 last_failure_reason; | ||
673 | #define BMIC_LAST_FAILURE_TOO_SMALL_IN_LOAD_CONFIG 0x01 | ||
674 | #define BMIC_LAST_FAILURE_ERROR_ERASING_RIS 0x02 | ||
675 | #define BMIC_LAST_FAILURE_ERROR_SAVING_RIS 0x03 | ||
676 | #define BMIC_LAST_FAILURE_FAIL_DRIVE_COMMAND 0x04 | ||
677 | #define BMIC_LAST_FAILURE_MARK_BAD_FAILED 0x05 | ||
678 | #define BMIC_LAST_FAILURE_MARK_BAD_FAILED_IN_FINISH_REMAP 0x06 | ||
679 | #define BMIC_LAST_FAILURE_TIMEOUT 0x07 | ||
680 | #define BMIC_LAST_FAILURE_AUTOSENSE_FAILED 0x08 | ||
681 | #define BMIC_LAST_FAILURE_MEDIUM_ERROR_1 0x09 | ||
682 | #define BMIC_LAST_FAILURE_MEDIUM_ERROR_2 0x0a | ||
683 | #define BMIC_LAST_FAILURE_NOT_READY_BAD_SENSE 0x0b | ||
684 | #define BMIC_LAST_FAILURE_NOT_READY 0x0c | ||
685 | #define BMIC_LAST_FAILURE_HARDWARE_ERROR 0x0d | ||
686 | #define BMIC_LAST_FAILURE_ABORTED_COMMAND 0x0e | ||
687 | #define BMIC_LAST_FAILURE_WRITE_PROTECTED 0x0f | ||
688 | #define BMIC_LAST_FAILURE_SPIN_UP_FAILURE_IN_RECOVER 0x10 | ||
689 | #define BMIC_LAST_FAILURE_REBUILD_WRITE_ERROR 0x11 | ||
690 | #define BMIC_LAST_FAILURE_TOO_SMALL_IN_HOT_PLUG 0x12 | ||
691 | #define BMIC_LAST_FAILURE_BUS_RESET_RECOVERY_ABORTED 0x13 | ||
692 | #define BMIC_LAST_FAILURE_REMOVED_IN_HOT_PLUG 0x14 | ||
693 | #define BMIC_LAST_FAILURE_INIT_REQUEST_SENSE_FAILED 0x15 | ||
694 | #define BMIC_LAST_FAILURE_INIT_START_UNIT_FAILED 0x16 | ||
695 | #define BMIC_LAST_FAILURE_INQUIRY_FAILED 0x17 | ||
696 | #define BMIC_LAST_FAILURE_NON_DISK_DEVICE 0x18 | ||
697 | #define BMIC_LAST_FAILURE_READ_CAPACITY_FAILED 0x19 | ||
698 | #define BMIC_LAST_FAILURE_INVALID_BLOCK_SIZE 0x1a | ||
699 | #define BMIC_LAST_FAILURE_HOT_PLUG_REQUEST_SENSE_FAILED 0x1b | ||
700 | #define BMIC_LAST_FAILURE_HOT_PLUG_START_UNIT_FAILED 0x1c | ||
701 | #define BMIC_LAST_FAILURE_WRITE_ERROR_AFTER_REMAP 0x1d | ||
702 | #define BMIC_LAST_FAILURE_INIT_RESET_RECOVERY_ABORTED 0x1e | ||
703 | #define BMIC_LAST_FAILURE_DEFERRED_WRITE_ERROR 0x1f | ||
704 | #define BMIC_LAST_FAILURE_MISSING_IN_SAVE_RIS 0x20 | ||
705 | #define BMIC_LAST_FAILURE_WRONG_REPLACE 0x21 | ||
706 | #define BMIC_LAST_FAILURE_GDP_VPD_INQUIRY_FAILED 0x22 | ||
707 | #define BMIC_LAST_FAILURE_GDP_MODE_SENSE_FAILED 0x23 | ||
708 | #define BMIC_LAST_FAILURE_DRIVE_NOT_IN_48BIT_MODE 0x24 | ||
709 | #define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_HOT_PLUG 0x25 | ||
710 | #define BMIC_LAST_FAILURE_DRIVE_TYPE_MIX_IN_LOAD_CFG 0x26 | ||
711 | #define BMIC_LAST_FAILURE_PROTOCOL_ADAPTER_FAILED 0x27 | ||
712 | #define BMIC_LAST_FAILURE_FAULTY_ID_BAY_EMPTY 0x28 | ||
713 | #define BMIC_LAST_FAILURE_FAULTY_ID_BAY_OCCUPIED 0x29 | ||
714 | #define BMIC_LAST_FAILURE_FAULTY_ID_INVALID_BAY 0x2a | ||
715 | #define BMIC_LAST_FAILURE_WRITE_RETRIES_FAILED 0x2b | ||
716 | |||
717 | #define BMIC_LAST_FAILURE_SMART_ERROR_REPORTED 0x37 | ||
718 | #define BMIC_LAST_FAILURE_PHY_RESET_FAILED 0x38 | ||
719 | #define BMIC_LAST_FAILURE_ONLY_ONE_CTLR_CAN_SEE_DRIVE 0x40 | ||
720 | #define BMIC_LAST_FAILURE_KC_VOLUME_FAILED 0x41 | ||
721 | #define BMIC_LAST_FAILURE_UNEXPECTED_REPLACEMENT 0x42 | ||
722 | #define BMIC_LAST_FAILURE_OFFLINE_ERASE 0x80 | ||
723 | #define BMIC_LAST_FAILURE_OFFLINE_TOO_SMALL 0x81 | ||
724 | #define BMIC_LAST_FAILURE_OFFLINE_DRIVE_TYPE_MIX 0x82 | ||
725 | #define BMIC_LAST_FAILURE_OFFLINE_ERASE_COMPLETE 0x83 | ||
726 | |||
727 | u8 flags; | ||
728 | u8 more_flags; | ||
729 | u8 scsi_lun; /* SCSI LUN for phys drive */ | ||
730 | u8 yet_more_flags; | ||
731 | u8 even_more_flags; | ||
732 | __le32 spi_speed_rules;/* SPI Speed data:Ultra disable diagnose */ | ||
733 | u8 phys_connector[2]; /* connector number on controller */ | ||
734 | u8 phys_box_on_bus; /* phys enclosure this drive resides */ | ||
735 | u8 phys_bay_in_box; /* phys drv bay this drive resides */ | ||
736 | __le32 rpm; /* Drive rotational speed in rpm */ | ||
737 | u8 device_type; /* type of drive */ | ||
738 | u8 sata_version; /* only valid when drive_type is SATA */ | ||
739 | __le64 big_total_block_count; | ||
740 | __le64 ris_starting_lba; | ||
741 | __le32 ris_size; | ||
742 | u8 wwid[20]; | ||
743 | u8 controller_phy_map[32]; | ||
744 | __le16 phy_count; | ||
745 | u8 phy_connected_dev_type[256]; | ||
746 | u8 phy_to_drive_bay_num[256]; | ||
747 | __le16 phy_to_attached_dev_index[256]; | ||
748 | u8 box_index; | ||
749 | u8 reserved; | ||
750 | __le16 extra_physical_drive_flags; | ||
751 | #define BMIC_PHYS_DRIVE_SUPPORTS_GAS_GAUGE(idphydrv) \ | ||
752 | (idphydrv->extra_physical_drive_flags & (1 << 10)) | ||
753 | u8 negotiated_link_rate[256]; | ||
754 | u8 phy_to_phy_map[256]; | ||
755 | u8 redundant_path_present_map; | ||
756 | u8 redundant_path_failure_map; | ||
757 | u8 active_path_number; | ||
758 | __le16 alternate_paths_phys_connector[8]; | ||
759 | u8 alternate_paths_phys_box_on_port[8]; | ||
760 | u8 multi_lun_device_lun_count; | ||
761 | u8 minimum_good_fw_revision[8]; | ||
762 | u8 unique_inquiry_bytes[20]; | ||
763 | u8 current_temperature_degreesC; | ||
764 | u8 temperature_threshold_degreesC; | ||
765 | u8 max_temperature_degreesC; | ||
766 | u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */ | ||
767 | __le16 current_queue_depth_limit; | ||
768 | u8 switch_name[10]; | ||
769 | __le16 switch_port; | ||
770 | u8 alternate_paths_switch_name[40]; | ||
771 | u8 alternate_paths_switch_port[8]; | ||
772 | __le16 power_on_hours; /* valid only if gas gauge supported */ | ||
773 | __le16 percent_endurance_used; /* valid only if gas gauge supported. */ | ||
774 | #define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \ | ||
775 | ((idphydrv->percent_endurance_used & 0x80) || \ | ||
776 | (idphydrv->percent_endurance_used > 10000)) | ||
777 | u8 drive_authentication; | ||
778 | #define BMIC_PHYS_DRIVE_AUTHENTICATED(idphydrv) \ | ||
779 | (idphydrv->drive_authentication == 0x80) | ||
780 | u8 smart_carrier_authentication; | ||
781 | #define BMIC_SMART_CARRIER_AUTHENTICATION_SUPPORTED(idphydrv) \ | ||
782 | (idphydrv->smart_carrier_authentication != 0x0) | ||
783 | #define BMIC_SMART_CARRIER_AUTHENTICATED(idphydrv) \ | ||
784 | (idphydrv->smart_carrier_authentication == 0x01) | ||
785 | u8 smart_carrier_app_fw_version; | ||
786 | u8 smart_carrier_bootloader_fw_version; | ||
787 | u8 encryption_key_name[64]; | ||
788 | __le32 misc_drive_flags; | ||
789 | __le16 dek_index; | ||
790 | u8 padding[112]; | ||
791 | }; | ||
792 | |||
647 | #pragma pack() | 793 | #pragma pack() |
648 | #endif /* HPSA_CMD_H */ | 794 | #endif /* HPSA_CMD_H */ |
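The u16/u32/u64 to __le16/__le32/__le64 conversions throughout hpsa_cmd.h mark fields that the controller reads and writes in little-endian byte order; with the annotations in place, sparse (make C=1 CF=-D__CHECK_ENDIAN__) can flag code that mixes CPU-order and device-order values. A short sketch of the accessor pattern the annotations expect, with hypothetical helper names and assuming the structures above are in scope:

	#include <linux/types.h>
	#include <asm/byteorder.h>
	#include "hpsa_cmd.h"

	/* Convert an annotated field to CPU order before using it. */
	static u32 example_volume_blk_size(const struct raid_map_data *map)
	{
		return le32_to_cpu(map->volume_blk_size);
	}

	/* Convert to device order when filling a descriptor. */
	static void example_fill_sg(struct SGDescriptor *sg, dma_addr_t addr, u32 len)
	{
		sg->Addr = cpu_to_le64(addr);
		sg->Len  = cpu_to_le32(len);
		sg->Ext  = cpu_to_le32(0);
	}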
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index ddf0694d87f0..3882d9f519c8 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c | |||
@@ -2226,36 +2226,36 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2226 | 2226 | ||
2227 | if (hd->proc & PR_INFO) { | 2227 | if (hd->proc & PR_INFO) { |
2228 | seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No"); | 2228 | seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No"); |
2229 | seq_printf(m, "\nsync_xfer[] = "); | 2229 | seq_puts(m, "\nsync_xfer[] = "); |
2230 | for (x = 0; x < 7; x++) | 2230 | for (x = 0; x < 7; x++) |
2231 | seq_printf(m, "\t%02x", hd->sync_xfer[x]); | 2231 | seq_printf(m, "\t%02x", hd->sync_xfer[x]); |
2232 | seq_printf(m, "\nsync_stat[] = "); | 2232 | seq_puts(m, "\nsync_stat[] = "); |
2233 | for (x = 0; x < 7; x++) | 2233 | for (x = 0; x < 7; x++) |
2234 | seq_printf(m, "\t%02x", hd->sync_stat[x]); | 2234 | seq_printf(m, "\t%02x", hd->sync_stat[x]); |
2235 | } | 2235 | } |
2236 | #ifdef PROC_STATISTICS | 2236 | #ifdef PROC_STATISTICS |
2237 | if (hd->proc & PR_STATISTICS) { | 2237 | if (hd->proc & PR_STATISTICS) { |
2238 | seq_printf(m, "\ncommands issued: "); | 2238 | seq_puts(m, "\ncommands issued: "); |
2239 | for (x = 0; x < 7; x++) | 2239 | for (x = 0; x < 7; x++) |
2240 | seq_printf(m, "\t%ld", hd->cmd_cnt[x]); | 2240 | seq_printf(m, "\t%ld", hd->cmd_cnt[x]); |
2241 | seq_printf(m, "\ndisconnects allowed:"); | 2241 | seq_puts(m, "\ndisconnects allowed:"); |
2242 | for (x = 0; x < 7; x++) | 2242 | for (x = 0; x < 7; x++) |
2243 | seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); | 2243 | seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); |
2244 | seq_printf(m, "\ndisconnects done: "); | 2244 | seq_puts(m, "\ndisconnects done: "); |
2245 | for (x = 0; x < 7; x++) | 2245 | for (x = 0; x < 7; x++) |
2246 | seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); | 2246 | seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); |
2247 | seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt); | 2247 | seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt); |
2248 | } | 2248 | } |
2249 | #endif | 2249 | #endif |
2250 | if (hd->proc & PR_CONNECTED) { | 2250 | if (hd->proc & PR_CONNECTED) { |
2251 | seq_printf(m, "\nconnected: "); | 2251 | seq_puts(m, "\nconnected: "); |
2252 | if (hd->connected) { | 2252 | if (hd->connected) { |
2253 | cmd = (Scsi_Cmnd *) hd->connected; | 2253 | cmd = (Scsi_Cmnd *) hd->connected; |
2254 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2254 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
2255 | } | 2255 | } |
2256 | } | 2256 | } |
2257 | if (hd->proc & PR_INPUTQ) { | 2257 | if (hd->proc & PR_INPUTQ) { |
2258 | seq_printf(m, "\ninput_Q: "); | 2258 | seq_puts(m, "\ninput_Q: "); |
2259 | cmd = (Scsi_Cmnd *) hd->input_Q; | 2259 | cmd = (Scsi_Cmnd *) hd->input_Q; |
2260 | while (cmd) { | 2260 | while (cmd) { |
2261 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2261 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
@@ -2263,7 +2263,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2263 | } | 2263 | } |
2264 | } | 2264 | } |
2265 | if (hd->proc & PR_DISCQ) { | 2265 | if (hd->proc & PR_DISCQ) { |
2266 | seq_printf(m, "\ndisconnected_Q:"); | 2266 | seq_puts(m, "\ndisconnected_Q:"); |
2267 | cmd = (Scsi_Cmnd *) hd->disconnected_Q; | 2267 | cmd = (Scsi_Cmnd *) hd->disconnected_Q; |
2268 | while (cmd) { | 2268 | while (cmd) { |
2269 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); | 2269 | seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); |
@@ -2273,7 +2273,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2273 | if (hd->proc & PR_TEST) { | 2273 | if (hd->proc & PR_TEST) { |
2274 | ; /* insert your own custom function here */ | 2274 | ; /* insert your own custom function here */ |
2275 | } | 2275 | } |
2276 | seq_printf(m, "\n"); | 2276 | seq_putc(m, '\n'); |
2277 | spin_unlock_irqrestore(instance->host_lock, flags); | 2277 | spin_unlock_irqrestore(instance->host_lock, flags); |
2278 | #endif /* PROC_INTERFACE */ | 2278 | #endif /* PROC_INTERFACE */ |
2279 | return 0; | 2279 | return 0; |
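The seq_file conversions in this file (and in the ips and megaraid hunks below) all apply the same rule: seq_printf() with a constant format string and no arguments becomes seq_puts(), printing a single character becomes seq_putc(), and calls that actually format arguments keep seq_printf(). A small illustrative sketch, not code from any of these drivers:

	#include <linux/seq_file.h>

	static void example_show(struct seq_file *m, const long *cnt, int n, int irq)
	{
		int x;

		seq_puts(m, "\ncommands issued: ");	/* constant string */
		for (x = 0; x < n; x++)
			seq_printf(m, "\t%ld", cnt[x]);	/* has format arguments */
		seq_printf(m, "\ninterrupts: \t%d", irq);
		seq_putc(m, '\n');			/* single character */
	}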
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index e5c28435d768..7542f11d3fcd 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c | |||
@@ -2038,15 +2038,14 @@ ips_host_info(ips_ha_t *ha, struct seq_file *m) | |||
2038 | { | 2038 | { |
2039 | METHOD_TRACE("ips_host_info", 1); | 2039 | METHOD_TRACE("ips_host_info", 1); |
2040 | 2040 | ||
2041 | seq_printf(m, "\nIBM ServeRAID General Information:\n\n"); | 2041 | seq_puts(m, "\nIBM ServeRAID General Information:\n\n"); |
2042 | 2042 | ||
2043 | if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && | 2043 | if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && |
2044 | (le16_to_cpu(ha->nvram->adapter_type) != 0)) | 2044 | (le16_to_cpu(ha->nvram->adapter_type) != 0)) |
2045 | seq_printf(m, "\tController Type : %s\n", | 2045 | seq_printf(m, "\tController Type : %s\n", |
2046 | ips_adapter_name[ha->ad_type - 1]); | 2046 | ips_adapter_name[ha->ad_type - 1]); |
2047 | else | 2047 | else |
2048 | seq_printf(m, | 2048 | seq_puts(m, "\tController Type : Unknown\n"); |
2049 | "\tController Type : Unknown\n"); | ||
2050 | 2049 | ||
2051 | if (ha->io_addr) | 2050 | if (ha->io_addr) |
2052 | seq_printf(m, | 2051 | seq_printf(m, |
@@ -2138,7 +2137,7 @@ ips_host_info(ips_ha_t *ha, struct seq_file *m) | |||
2138 | seq_printf(m, "\tCurrent Active PT Commands : %d\n", | 2137 | seq_printf(m, "\tCurrent Active PT Commands : %d\n", |
2139 | ha->num_ioctl); | 2138 | ha->num_ioctl); |
2140 | 2139 | ||
2141 | seq_printf(m, "\n"); | 2140 | seq_putc(m, '\n'); |
2142 | 2141 | ||
2143 | return 0; | 2142 | return 0; |
2144 | } | 2143 | } |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4c25485aa934..c66088d0fd2a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -2225,6 +2225,15 @@ lpfc_adisc_done(struct lpfc_vport *vport) | |||
2225 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | 2225 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
2226 | !(vport->fc_flag & FC_RSCN_MODE) && | 2226 | !(vport->fc_flag & FC_RSCN_MODE) && |
2227 | (phba->sli_rev < LPFC_SLI_REV4)) { | 2227 | (phba->sli_rev < LPFC_SLI_REV4)) { |
2228 | /* The ADISCs are complete. Doesn't matter if they | ||
2229 | * succeeded or failed because the ADISC completion | ||
2230 | * routine guarantees to call the state machine and | ||
2231 | * the RPI is either unregistered (failed ADISC response) | ||
2232 | * or the RPI is still valid and the node is marked | ||
2233 | * mapped for a target. The exchanges should be in the | ||
2234 | * correct state. This code is specific to SLI3. | ||
2235 | */ | ||
2236 | lpfc_issue_clear_la(phba, vport); | ||
2228 | lpfc_issue_reg_vpi(phba, vport); | 2237 | lpfc_issue_reg_vpi(phba, vport); |
2229 | return; | 2238 | return; |
2230 | } | 2239 | } |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 2485255f3414..bc7b34c02723 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -2240,7 +2240,7 @@ proc_show_battery(struct seq_file *m, void *v) | |||
2240 | goto free_pdev; | 2240 | goto free_pdev; |
2241 | 2241 | ||
2242 | if( mega_adapinq(adapter, dma_handle) != 0 ) { | 2242 | if( mega_adapinq(adapter, dma_handle) != 0 ) { |
2243 | seq_printf(m, "Adapter inquiry failed.\n"); | 2243 | seq_puts(m, "Adapter inquiry failed.\n"); |
2244 | printk(KERN_WARNING "megaraid: inquiry failed.\n"); | 2244 | printk(KERN_WARNING "megaraid: inquiry failed.\n"); |
2245 | goto free_inquiry; | 2245 | goto free_inquiry; |
2246 | } | 2246 | } |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 0d44d91c2fce..14e5c7cea929 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -35,7 +35,7 @@ | |||
35 | /* | 35 | /* |
36 | * MegaRAID SAS Driver meta data | 36 | * MegaRAID SAS Driver meta data |
37 | */ | 37 | */ |
38 | #define MEGASAS_VERSION "06.805.06.01-rc1" | 38 | #define MEGASAS_VERSION "06.806.08.00-rc1" |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * Device IDs | 41 | * Device IDs |
@@ -969,7 +969,20 @@ struct megasas_ctrl_info { | |||
969 | 969 | ||
970 | struct { | 970 | struct { |
971 | #if defined(__BIG_ENDIAN_BITFIELD) | 971 | #if defined(__BIG_ENDIAN_BITFIELD) |
972 | u32 reserved:25; | 972 | u32 reserved:12; |
973 | u32 discardCacheDuringLDDelete:1; | ||
974 | u32 supportSecurityonJBOD:1; | ||
975 | u32 supportCacheBypassModes:1; | ||
976 | u32 supportDisableSESMonitoring:1; | ||
977 | u32 supportForceFlash:1; | ||
978 | u32 supportNVDRAM:1; | ||
979 | u32 supportDrvActivityLEDSetting:1; | ||
980 | u32 supportAllowedOpsforDrvRemoval:1; | ||
981 | u32 supportHOQRebuild:1; | ||
982 | u32 supportForceTo512e:1; | ||
983 | u32 supportNVCacheErase:1; | ||
984 | u32 supportDebugQueue:1; | ||
985 | u32 supportSwZone:1; | ||
973 | u32 supportCrashDump:1; | 986 | u32 supportCrashDump:1; |
974 | u32 supportMaxExtLDs:1; | 987 | u32 supportMaxExtLDs:1; |
975 | u32 supportT10RebuildAssist:1; | 988 | u32 supportT10RebuildAssist:1; |
@@ -981,9 +994,22 @@ struct megasas_ctrl_info { | |||
981 | u32 supportThermalPollInterval:1; | 994 | u32 supportThermalPollInterval:1; |
982 | u32 supportDisableImmediateIO:1; | 995 | u32 supportDisableImmediateIO:1; |
983 | u32 supportT10RebuildAssist:1; | 996 | u32 supportT10RebuildAssist:1; |
984 | u32 supportMaxExtLDs:1; | 997 | u32 supportMaxExtLDs:1; |
985 | u32 supportCrashDump:1; | 998 | u32 supportCrashDump:1; |
986 | u32 reserved:25; | 999 | u32 supportSwZone:1; |
1000 | u32 supportDebugQueue:1; | ||
1001 | u32 supportNVCacheErase:1; | ||
1002 | u32 supportForceTo512e:1; | ||
1003 | u32 supportHOQRebuild:1; | ||
1004 | u32 supportAllowedOpsforDrvRemoval:1; | ||
1005 | u32 supportDrvActivityLEDSetting:1; | ||
1006 | u32 supportNVDRAM:1; | ||
1007 | u32 supportForceFlash:1; | ||
1008 | u32 supportDisableSESMonitoring:1; | ||
1009 | u32 supportCacheBypassModes:1; | ||
1010 | u32 supportSecurityonJBOD:1; | ||
1011 | u32 discardCacheDuringLDDelete:1; | ||
1012 | u32 reserved:12; | ||
987 | #endif | 1013 | #endif |
988 | } adapterOperations3; | 1014 | } adapterOperations3; |
989 | 1015 | ||
@@ -1022,6 +1048,13 @@ enum MR_MFI_MPT_PTHR_FLAGS { | |||
1022 | MFI_MPT_ATTACHED = 2, | 1048 | MFI_MPT_ATTACHED = 2, |
1023 | }; | 1049 | }; |
1024 | 1050 | ||
1051 | enum MR_SCSI_CMD_TYPE { | ||
1052 | READ_WRITE_LDIO = 0, | ||
1053 | NON_READ_WRITE_LDIO = 1, | ||
1054 | READ_WRITE_SYSPDIO = 2, | ||
1055 | NON_READ_WRITE_SYSPDIO = 3, | ||
1056 | }; | ||
1057 | |||
1025 | /* Frame Type */ | 1058 | /* Frame Type */ |
1026 | #define IO_FRAME 0 | 1059 | #define IO_FRAME 0 |
1027 | #define PTHRU_FRAME 1 | 1060 | #define PTHRU_FRAME 1 |
@@ -1049,6 +1082,8 @@ enum MR_MFI_MPT_PTHR_FLAGS { | |||
1049 | */ | 1082 | */ |
1050 | #define MEGASAS_INT_CMDS 32 | 1083 | #define MEGASAS_INT_CMDS 32 |
1051 | #define MEGASAS_SKINNY_INT_CMDS 5 | 1084 | #define MEGASAS_SKINNY_INT_CMDS 5 |
1085 | #define MEGASAS_FUSION_INTERNAL_CMDS 5 | ||
1086 | #define MEGASAS_FUSION_IOCTL_CMDS 3 | ||
1052 | 1087 | ||
1053 | #define MEGASAS_MAX_MSIX_QUEUES 128 | 1088 | #define MEGASAS_MAX_MSIX_QUEUES 128 |
1054 | /* | 1089 | /* |
@@ -1194,19 +1229,23 @@ union megasas_sgl_frame { | |||
1194 | typedef union _MFI_CAPABILITIES { | 1229 | typedef union _MFI_CAPABILITIES { |
1195 | struct { | 1230 | struct { |
1196 | #if defined(__BIG_ENDIAN_BITFIELD) | 1231 | #if defined(__BIG_ENDIAN_BITFIELD) |
1197 | u32 reserved:27; | 1232 | u32 reserved:25; |
1233 | u32 security_protocol_cmds_fw:1; | ||
1234 | u32 support_core_affinity:1; | ||
1198 | u32 support_ndrive_r1_lb:1; | 1235 | u32 support_ndrive_r1_lb:1; |
1199 | u32 support_max_255lds:1; | 1236 | u32 support_max_255lds:1; |
1200 | u32 reserved1:1; | 1237 | u32 support_fastpath_wb:1; |
1201 | u32 support_additional_msix:1; | 1238 | u32 support_additional_msix:1; |
1202 | u32 support_fp_remote_lun:1; | 1239 | u32 support_fp_remote_lun:1; |
1203 | #else | 1240 | #else |
1204 | u32 support_fp_remote_lun:1; | 1241 | u32 support_fp_remote_lun:1; |
1205 | u32 support_additional_msix:1; | 1242 | u32 support_additional_msix:1; |
1206 | u32 reserved1:1; | 1243 | u32 support_fastpath_wb:1; |
1207 | u32 support_max_255lds:1; | 1244 | u32 support_max_255lds:1; |
1208 | u32 support_ndrive_r1_lb:1; | 1245 | u32 support_ndrive_r1_lb:1; |
1209 | u32 reserved:27; | 1246 | u32 support_core_affinity:1; |
1247 | u32 security_protocol_cmds_fw:1; | ||
1248 | u32 reserved:25; | ||
1210 | #endif | 1249 | #endif |
1211 | } mfi_capabilities; | 1250 | } mfi_capabilities; |
1212 | u32 reg; | 1251 | u32 reg; |
@@ -1638,20 +1677,20 @@ struct megasas_instance { | |||
1638 | u32 crash_dump_fw_support; | 1677 | u32 crash_dump_fw_support; |
1639 | u32 crash_dump_drv_support; | 1678 | u32 crash_dump_drv_support; |
1640 | u32 crash_dump_app_support; | 1679 | u32 crash_dump_app_support; |
1680 | u32 secure_jbod_support; | ||
1641 | spinlock_t crashdump_lock; | 1681 | spinlock_t crashdump_lock; |
1642 | 1682 | ||
1643 | struct megasas_register_set __iomem *reg_set; | 1683 | struct megasas_register_set __iomem *reg_set; |
1644 | u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; | 1684 | u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; |
1645 | struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; | 1685 | struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; |
1646 | struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; | 1686 | struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; |
1647 | u8 ld_ids[MEGASAS_MAX_LD_IDS]; | 1687 | u8 ld_ids[MEGASAS_MAX_LD_IDS]; |
1648 | s8 init_id; | 1688 | s8 init_id; |
1649 | 1689 | ||
1650 | u16 max_num_sge; | 1690 | u16 max_num_sge; |
1651 | u16 max_fw_cmds; | 1691 | u16 max_fw_cmds; |
1652 | /* For Fusion its num IOCTL cmds, for others MFI based its | ||
1653 | max_fw_cmds */ | ||
1654 | u16 max_mfi_cmds; | 1692 | u16 max_mfi_cmds; |
1693 | u16 max_scsi_cmds; | ||
1655 | u32 max_sectors_per_req; | 1694 | u32 max_sectors_per_req; |
1656 | struct megasas_aen_event *ev; | 1695 | struct megasas_aen_event *ev; |
1657 | 1696 | ||
@@ -1727,7 +1766,7 @@ struct megasas_instance { | |||
1727 | u8 requestorId; | 1766 | u8 requestorId; |
1728 | char PlasmaFW111; | 1767 | char PlasmaFW111; |
1729 | char mpio; | 1768 | char mpio; |
1730 | int throttlequeuedepth; | 1769 | u16 throttlequeuedepth; |
1731 | u8 mask_interrupts; | 1770 | u8 mask_interrupts; |
1732 | u8 is_imr; | 1771 | u8 is_imr; |
1733 | }; | 1772 | }; |
@@ -1946,5 +1985,6 @@ void __megasas_return_cmd(struct megasas_instance *instance, | |||
1946 | 1985 | ||
1947 | void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, | 1986 | void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, |
1948 | struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion); | 1987 | struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion); |
1988 | int megasas_cmd_type(struct scsi_cmnd *cmd); | ||
1949 | 1989 | ||
1950 | #endif /*LSI_MEGARAID_SAS_H */ | 1990 | #endif /*LSI_MEGARAID_SAS_H */ |
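The adapterOperations3 and MFI_CAPABILITIES edits above follow the usual rule for dual-layout bitfields: each bit appended to the little-endian list is inserted at the mirrored position in the __BIG_ENDIAN_BITFIELD list, and the reserved width shrinks by the number of bits added (25 - 13 = 12 for adapterOperations3, 27 - 2 = 25 for MFI_CAPABILITIES). A toy example of the rule; the struct and field names are made up:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct example_caps {
	#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved:30;
		u32 new_feature:1;	/* mirrored: sits just below reserved */
		u32 old_feature:1;
	#else
		u32 old_feature:1;
		u32 new_feature:1;	/* appended after the existing bit */
		u32 reserved:30;
	#endif
	};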
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index ff283d23788a..890637fdd61e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -78,7 +78,7 @@ static int allow_vf_ioctls; | |||
78 | module_param(allow_vf_ioctls, int, S_IRUGO); | 78 | module_param(allow_vf_ioctls, int, S_IRUGO); |
79 | MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); | 79 | MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); |
80 | 80 | ||
81 | static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; | 81 | static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; |
82 | module_param(throttlequeuedepth, int, S_IRUGO); | 82 | module_param(throttlequeuedepth, int, S_IRUGO); |
83 | MODULE_PARM_DESC(throttlequeuedepth, | 83 | MODULE_PARM_DESC(throttlequeuedepth, |
84 | "Adapter queue depth when throttled due to I/O timeout. Default: 16"); | 84 | "Adapter queue depth when throttled due to I/O timeout. Default: 16"); |
@@ -1417,16 +1417,15 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1417 | } | 1417 | } |
1418 | 1418 | ||
1419 | /** | 1419 | /** |
1420 | * megasas_is_ldio - Checks if the cmd is for logical drive | 1420 | * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD |
1421 | * and whether it's RW or non RW | ||
1421 | * @scmd: SCSI command | 1422 | * @scmd: SCSI command |
1422 | * | 1423 | * |
1423 | * Called by megasas_queue_command to find out if the command to be queued | ||
1424 | * is a logical drive command | ||
1425 | */ | 1424 | */ |
1426 | inline int megasas_is_ldio(struct scsi_cmnd *cmd) | 1425 | inline int megasas_cmd_type(struct scsi_cmnd *cmd) |
1427 | { | 1426 | { |
1428 | if (!MEGASAS_IS_LOGICAL(cmd)) | 1427 | int ret; |
1429 | return 0; | 1428 | |
1430 | switch (cmd->cmnd[0]) { | 1429 | switch (cmd->cmnd[0]) { |
1431 | case READ_10: | 1430 | case READ_10: |
1432 | case WRITE_10: | 1431 | case WRITE_10: |
@@ -1436,10 +1435,14 @@ inline int megasas_is_ldio(struct scsi_cmnd *cmd) | |||
1436 | case WRITE_6: | 1435 | case WRITE_6: |
1437 | case READ_16: | 1436 | case READ_16: |
1438 | case WRITE_16: | 1437 | case WRITE_16: |
1439 | return 1; | 1438 | ret = (MEGASAS_IS_LOGICAL(cmd)) ? |
1439 | READ_WRITE_LDIO : READ_WRITE_SYSPDIO; | ||
1440 | break; | ||
1440 | default: | 1441 | default: |
1441 | return 0; | 1442 | ret = (MEGASAS_IS_LOGICAL(cmd)) ? |
1443 | NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; | ||
1442 | } | 1444 | } |
1445 | return ret; | ||
1443 | } | 1446 | } |
1444 | 1447 | ||
1445 | /** | 1448 | /** |
@@ -1471,7 +1474,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance) | |||
1471 | if(!cmd->scmd) | 1474 | if(!cmd->scmd) |
1472 | continue; | 1475 | continue; |
1473 | printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); | 1476 | printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr); |
1474 | if (megasas_is_ldio(cmd->scmd)){ | 1477 | if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) { |
1475 | ldio = (struct megasas_io_frame *)cmd->frame; | 1478 | ldio = (struct megasas_io_frame *)cmd->frame; |
1476 | mfi_sgl = &ldio->sgl; | 1479 | mfi_sgl = &ldio->sgl; |
1477 | sgcount = ldio->sge_count; | 1480 | sgcount = ldio->sge_count; |
@@ -1531,7 +1534,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance, | |||
1531 | /* | 1534 | /* |
1532 | * Logical drive command | 1535 | * Logical drive command |
1533 | */ | 1536 | */ |
1534 | if (megasas_is_ldio(scmd)) | 1537 | if (megasas_cmd_type(scmd) == READ_WRITE_LDIO) |
1535 | frame_count = megasas_build_ldio(instance, scmd, cmd); | 1538 | frame_count = megasas_build_ldio(instance, scmd, cmd); |
1536 | else | 1539 | else |
1537 | frame_count = megasas_build_dcdb(instance, scmd, cmd); | 1540 | frame_count = megasas_build_dcdb(instance, scmd, cmd); |
@@ -1689,22 +1692,66 @@ static int megasas_slave_alloc(struct scsi_device *sdev) | |||
1689 | return 0; | 1692 | return 0; |
1690 | } | 1693 | } |
1691 | 1694 | ||
1695 | /* | ||
1696 | * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a | ||
1697 | * kill adapter | ||
1698 | * @instance: Adapter soft state | ||
1699 | * | ||
1700 | */ | ||
1701 | void megasas_complete_outstanding_ioctls(struct megasas_instance *instance) | ||
1702 | { | ||
1703 | int i; | ||
1704 | struct megasas_cmd *cmd_mfi; | ||
1705 | struct megasas_cmd_fusion *cmd_fusion; | ||
1706 | struct fusion_context *fusion = instance->ctrl_context; | ||
1707 | |||
1708 | /* Find all outstanding ioctls */ | ||
1709 | if (fusion) { | ||
1710 | for (i = 0; i < instance->max_fw_cmds; i++) { | ||
1711 | cmd_fusion = fusion->cmd_list[i]; | ||
1712 | if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) { | ||
1713 | cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; | ||
1714 | if (cmd_mfi->sync_cmd && | ||
1715 | cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) | ||
1716 | megasas_complete_cmd(instance, | ||
1717 | cmd_mfi, DID_OK); | ||
1718 | } | ||
1719 | } | ||
1720 | } else { | ||
1721 | for (i = 0; i < instance->max_fw_cmds; i++) { | ||
1722 | cmd_mfi = instance->cmd_list[i]; | ||
1723 | if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != | ||
1724 | MFI_CMD_ABORT) | ||
1725 | megasas_complete_cmd(instance, cmd_mfi, DID_OK); | ||
1726 | } | ||
1727 | } | ||
1728 | } | ||
1729 | |||
1730 | |||
1692 | void megaraid_sas_kill_hba(struct megasas_instance *instance) | 1731 | void megaraid_sas_kill_hba(struct megasas_instance *instance) |
1693 | { | 1732 | { |
1733 | /* Set critical error to block I/O & ioctls in case caller didn't */ | ||
1734 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
1735 | /* Wait 1 second to ensure IO or ioctls in build have posted */ | ||
1736 | msleep(1000); | ||
1694 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | 1737 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
1695 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || | 1738 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || |
1696 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || | 1739 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || |
1697 | (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || | 1740 | (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || |
1698 | (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || | 1741 | (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || |
1699 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { | 1742 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { |
1700 | writel(MFI_STOP_ADP, &instance->reg_set->doorbell); | 1743 | writel(MFI_STOP_ADP, |
1744 | &instance->reg_set->doorbell); | ||
1701 | /* Flush */ | 1745 | /* Flush */ |
1702 | readl(&instance->reg_set->doorbell); | 1746 | readl(&instance->reg_set->doorbell); |
1703 | if (instance->mpio && instance->requestorId) | 1747 | if (instance->mpio && instance->requestorId) |
1704 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); | 1748 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); |
1705 | } else { | 1749 | } else { |
1706 | writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); | 1750 | writel(MFI_STOP_ADP, |
1751 | &instance->reg_set->inbound_doorbell); | ||
1707 | } | 1752 | } |
1753 | /* Complete outstanding ioctls when adapter is killed */ | ||
1754 | megasas_complete_outstanding_ioctls(instance); | ||
1708 | } | 1755 | } |
1709 | 1756 | ||
1710 | /** | 1757 | /** |
@@ -1717,6 +1764,7 @@ void | |||
1717 | megasas_check_and_restore_queue_depth(struct megasas_instance *instance) | 1764 | megasas_check_and_restore_queue_depth(struct megasas_instance *instance) |
1718 | { | 1765 | { |
1719 | unsigned long flags; | 1766 | unsigned long flags; |
1767 | |||
1720 | if (instance->flag & MEGASAS_FW_BUSY | 1768 | if (instance->flag & MEGASAS_FW_BUSY |
1721 | && time_after(jiffies, instance->last_time + 5 * HZ) | 1769 | && time_after(jiffies, instance->last_time + 5 * HZ) |
1722 | && atomic_read(&instance->fw_outstanding) < | 1770 | && atomic_read(&instance->fw_outstanding) < |
@@ -1724,13 +1772,8 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance) | |||
1724 | 1772 | ||
1725 | spin_lock_irqsave(instance->host->host_lock, flags); | 1773 | spin_lock_irqsave(instance->host->host_lock, flags); |
1726 | instance->flag &= ~MEGASAS_FW_BUSY; | 1774 | instance->flag &= ~MEGASAS_FW_BUSY; |
1727 | if (instance->is_imr) { | ||
1728 | instance->host->can_queue = | ||
1729 | instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; | ||
1730 | } else | ||
1731 | instance->host->can_queue = | ||
1732 | instance->max_fw_cmds - MEGASAS_INT_CMDS; | ||
1733 | 1775 | ||
1776 | instance->host->can_queue = instance->max_scsi_cmds; | ||
1734 | spin_unlock_irqrestore(instance->host->host_lock, flags); | 1777 | spin_unlock_irqrestore(instance->host->host_lock, flags); |
1735 | } | 1778 | } |
1736 | } | 1779 | } |
@@ -3028,10 +3071,9 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance) | |||
3028 | "was tried multiple times during reset." | 3071 | "was tried multiple times during reset." |
3029 | "Shutting down the HBA\n", | 3072 | "Shutting down the HBA\n", |
3030 | cmd, cmd->scmd, cmd->sync_cmd); | 3073 | cmd, cmd->scmd, cmd->sync_cmd); |
3074 | instance->instancet->disable_intr(instance); | ||
3075 | atomic_set(&instance->fw_reset_no_pci_access, 1); | ||
3031 | megaraid_sas_kill_hba(instance); | 3076 | megaraid_sas_kill_hba(instance); |
3032 | |||
3033 | instance->adprecovery = | ||
3034 | MEGASAS_HW_CRITICAL_ERROR; | ||
3035 | return; | 3077 | return; |
3036 | } | 3078 | } |
3037 | } | 3079 | } |
@@ -3165,8 +3207,8 @@ process_fw_state_change_wq(struct work_struct *work) | |||
3165 | if (megasas_transition_to_ready(instance, 1)) { | 3207 | if (megasas_transition_to_ready(instance, 1)) { |
3166 | printk(KERN_NOTICE "megaraid_sas:adapter not ready\n"); | 3208 | printk(KERN_NOTICE "megaraid_sas:adapter not ready\n"); |
3167 | 3209 | ||
3210 | atomic_set(&instance->fw_reset_no_pci_access, 1); | ||
3168 | megaraid_sas_kill_hba(instance); | 3211 | megaraid_sas_kill_hba(instance); |
3169 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
3170 | return ; | 3212 | return ; |
3171 | } | 3213 | } |
3172 | 3214 | ||
@@ -3547,7 +3589,6 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
3547 | int i; | 3589 | int i; |
3548 | u32 max_cmd; | 3590 | u32 max_cmd; |
3549 | u32 sge_sz; | 3591 | u32 sge_sz; |
3550 | u32 sgl_sz; | ||
3551 | u32 total_sz; | 3592 | u32 total_sz; |
3552 | u32 frame_count; | 3593 | u32 frame_count; |
3553 | struct megasas_cmd *cmd; | 3594 | struct megasas_cmd *cmd; |
@@ -3566,24 +3607,23 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
3566 | } | 3607 | } |
3567 | 3608 | ||
3568 | /* | 3609 | /* |
3569 | * Calculated the number of 64byte frames required for SGL | 3610 | * For MFI controllers. |
3570 | */ | 3611 | * max_num_sge = 60 |
3571 | sgl_sz = sge_sz * instance->max_num_sge; | 3612 | * max_sge_sz = 16 byte (sizeof megasas_sge_skinny) |
3572 | frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE; | 3613 | * Total 960 byte (15 MFI frame of 64 byte) |
3573 | frame_count = 15; | 3614 | * |
3574 | 3615 | * Fusion adapters require only 3 extra frames. | |
3575 | /* | 3616 | * max_num_sge = 16 (defined as MAX_IOCTL_SGE) |
3576 | * We need one extra frame for the MFI command | 3617 | * max_sge_sz = 12 byte (sizeof megasas_sge64) |
3618 | * Total 192 byte (3 MFI frame of 64 byte) | ||
3577 | */ | 3619 | */ |
3578 | frame_count++; | 3620 | frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1); |
3579 | |||
3580 | total_sz = MEGAMFI_FRAME_SIZE * frame_count; | 3621 | total_sz = MEGAMFI_FRAME_SIZE * frame_count; |
3581 | /* | 3622 | /* |
3582 | * Use DMA pool facility provided by PCI layer | 3623 | * Use DMA pool facility provided by PCI layer |
3583 | */ | 3624 | */ |
3584 | instance->frame_dma_pool = pci_pool_create("megasas frame pool", | 3625 | instance->frame_dma_pool = pci_pool_create("megasas frame pool", |
3585 | instance->pdev, total_sz, 64, | 3626 | instance->pdev, total_sz, 256, 0); |
3586 | 0); | ||
3587 | 3627 | ||
3588 | if (!instance->frame_dma_pool) { | 3628 | if (!instance->frame_dma_pool) { |
3589 | printk(KERN_DEBUG "megasas: failed to setup frame pool\n"); | 3629 | printk(KERN_DEBUG "megasas: failed to setup frame pool\n"); |
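The frame-pool sizing above reduces to a fixed count of 64-byte MFI frames per adapter family: 3 SGL frames plus one command frame on Fusion, 15 plus one on legacy MFI. A minimal standalone sketch of that arithmetic, assuming MEGAMFI_FRAME_SIZE is 64 as the hunk's comments state (the helper and main() are illustrative, not driver code):

    #include <stdio.h>
    #include <stdbool.h>

    #define MEGAMFI_FRAME_SIZE 64  /* one MFI frame, per the comment above */

    /* Fusion (ctrl_context set) reserves 3 SGL frames, legacy MFI 15,
     * plus one frame for the MFI command itself in both cases. */
    static unsigned int frame_pool_element_size(bool is_fusion)
    {
        unsigned int frame_count = is_fusion ? (3 + 1) : (15 + 1);

        return MEGAMFI_FRAME_SIZE * frame_count;
    }

    int main(void)
    {
        printf("fusion element: %u bytes\n", frame_pool_element_size(true));   /* 256 */
        printf("MFI element:    %u bytes\n", frame_pool_element_size(false));  /* 1024 */
        return 0;
    }

The 256-byte alignment now passed to pci_pool_create() in the hunk matches the Fusion element size.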
@@ -4631,28 +4671,48 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
4631 | instance->crash_dump_h); | 4671 | instance->crash_dump_h); |
4632 | instance->crash_dump_buf = NULL; | 4672 | instance->crash_dump_buf = NULL; |
4633 | } | 4673 | } |
4674 | |||
4675 | instance->secure_jbod_support = | ||
4676 | ctrl_info->adapterOperations3.supportSecurityonJBOD; | ||
4677 | if (instance->secure_jbod_support) | ||
4678 | dev_info(&instance->pdev->dev, "Firmware supports Secure JBOD\n"); | ||
4634 | instance->max_sectors_per_req = instance->max_num_sge * | 4679 | instance->max_sectors_per_req = instance->max_num_sge * |
4635 | PAGE_SIZE / 512; | 4680 | PAGE_SIZE / 512; |
4636 | if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) | 4681 | if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) |
4637 | instance->max_sectors_per_req = tmp_sectors; | 4682 | instance->max_sectors_per_req = tmp_sectors; |
4638 | 4683 | ||
4639 | /* Check for valid throttlequeuedepth module parameter */ | 4684 | /* |
4640 | if (instance->is_imr) { | 4685 | * 1. For fusion adapters, 3 commands for IOCTL and 5 commands |
4641 | if (throttlequeuedepth > (instance->max_fw_cmds - | 4686 | * for driver's internal DCMDs. |
4642 | MEGASAS_SKINNY_INT_CMDS)) | 4687 | * 2. For MFI skinny adapters, 5 commands for IOCTL + driver's |
4643 | instance->throttlequeuedepth = | 4688 | * internal DCMDs. |
4644 | MEGASAS_THROTTLE_QUEUE_DEPTH; | 4689 | * 3. For rest of MFI adapters, 27 commands reserved for IOCTLs |
4645 | else | 4690 | * and 5 commands for drivers's internal DCMD. |
4646 | instance->throttlequeuedepth = throttlequeuedepth; | 4691 | */ |
4692 | if (instance->ctrl_context) { | ||
4693 | instance->max_scsi_cmds = instance->max_fw_cmds - | ||
4694 | (MEGASAS_FUSION_INTERNAL_CMDS + | ||
4695 | MEGASAS_FUSION_IOCTL_CMDS); | ||
4696 | sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS); | ||
4697 | } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
4698 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
4699 | instance->max_scsi_cmds = instance->max_fw_cmds - | ||
4700 | MEGASAS_SKINNY_INT_CMDS; | ||
4701 | sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); | ||
4647 | } else { | 4702 | } else { |
4648 | if (throttlequeuedepth > (instance->max_fw_cmds - | 4703 | instance->max_scsi_cmds = instance->max_fw_cmds - |
4649 | MEGASAS_INT_CMDS)) | 4704 | MEGASAS_INT_CMDS; |
4650 | instance->throttlequeuedepth = | 4705 | sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5)); |
4651 | MEGASAS_THROTTLE_QUEUE_DEPTH; | ||
4652 | else | ||
4653 | instance->throttlequeuedepth = throttlequeuedepth; | ||
4654 | } | 4706 | } |
4655 | 4707 | ||
4708 | /* Check for valid throttlequeuedepth module parameter */ | ||
4709 | if (throttlequeuedepth && | ||
4710 | throttlequeuedepth <= instance->max_scsi_cmds) | ||
4711 | instance->throttlequeuedepth = throttlequeuedepth; | ||
4712 | else | ||
4713 | instance->throttlequeuedepth = | ||
4714 | MEGASAS_THROTTLE_QUEUE_DEPTH; | ||
4715 | |||
4656 | /* | 4716 | /* |
4657 | * Setup tasklet for cmd completion | 4717 | * Setup tasklet for cmd completion |
4658 | */ | 4718 | */ |
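The reservation and throttle logic above splits into two small steps: subtract the per-family reserved commands from max_fw_cmds, then accept the throttlequeuedepth module parameter only if it fits. A sketch using the counts quoted in the hunk's comment; the default of 16 for MEGASAS_THROTTLE_QUEUE_DEPTH is an assumption, not taken from this diff:

    #include <stdio.h>

    #define FUSION_INTERNAL_CMDS 5   /* per the comment: 5 internal DCMDs */
    #define FUSION_IOCTL_CMDS    3   /* per the comment: 3 IOCTL commands */
    #define SKINNY_INT_CMDS      5   /* skinny: 5 for IOCTL + internal DCMDs */
    #define MFI_INT_CMDS         32  /* other MFI: 27 IOCTL + 5 internal */
    #define THROTTLE_QUEUE_DEPTH 16  /* assumed default */

    enum adapter_kind { FUSION, MFI_SKINNY, MFI_OTHER };

    static unsigned int max_scsi_cmds(enum adapter_kind kind, unsigned int max_fw_cmds)
    {
        switch (kind) {
        case FUSION:     return max_fw_cmds - (FUSION_INTERNAL_CMDS + FUSION_IOCTL_CMDS);
        case MFI_SKINNY: return max_fw_cmds - SKINNY_INT_CMDS;
        default:         return max_fw_cmds - MFI_INT_CMDS;
        }
    }

    static unsigned int throttle_depth(unsigned int module_param, unsigned int scsi_cmds)
    {
        /* A zero or oversized module parameter falls back to the default. */
        return (module_param && module_param <= scsi_cmds) ?
                module_param : THROTTLE_QUEUE_DEPTH;
    }

    int main(void)
    {
        unsigned int cmds = max_scsi_cmds(FUSION, 1008);

        printf("max_scsi_cmds=%u throttle=%u\n", cmds, throttle_depth(0, cmds));
        return 0;
    }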
@@ -4947,12 +5007,7 @@ static int megasas_io_attach(struct megasas_instance *instance) | |||
4947 | */ | 5007 | */ |
4948 | host->irq = instance->pdev->irq; | 5008 | host->irq = instance->pdev->irq; |
4949 | host->unique_id = instance->unique_id; | 5009 | host->unique_id = instance->unique_id; |
4950 | if (instance->is_imr) { | 5010 | host->can_queue = instance->max_scsi_cmds; |
4951 | host->can_queue = | ||
4952 | instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; | ||
4953 | } else | ||
4954 | host->can_queue = | ||
4955 | instance->max_fw_cmds - MEGASAS_INT_CMDS; | ||
4956 | host->this_id = instance->init_id; | 5011 | host->this_id = instance->init_id; |
4957 | host->sg_tablesize = instance->max_num_sge; | 5012 | host->sg_tablesize = instance->max_num_sge; |
4958 | 5013 | ||
@@ -5130,8 +5185,6 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
5130 | ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); | 5185 | ((1 << PAGE_SHIFT) << instance->ctrl_context_pages)); |
5131 | INIT_LIST_HEAD(&fusion->cmd_pool); | 5186 | INIT_LIST_HEAD(&fusion->cmd_pool); |
5132 | spin_lock_init(&fusion->mpt_pool_lock); | 5187 | spin_lock_init(&fusion->mpt_pool_lock); |
5133 | memset(fusion->load_balance_info, 0, | ||
5134 | sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); | ||
5135 | } | 5188 | } |
5136 | break; | 5189 | break; |
5137 | default: /* For all other supported controllers */ | 5190 | default: /* For all other supported controllers */ |
@@ -5215,12 +5268,10 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
5215 | instance->init_id = MEGASAS_DEFAULT_INIT_ID; | 5268 | instance->init_id = MEGASAS_DEFAULT_INIT_ID; |
5216 | instance->ctrl_info = NULL; | 5269 | instance->ctrl_info = NULL; |
5217 | 5270 | ||
5271 | |||
5218 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | 5272 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
5219 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | 5273 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) |
5220 | instance->flag_ieee = 1; | 5274 | instance->flag_ieee = 1; |
5221 | sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); | ||
5222 | } else | ||
5223 | sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5)); | ||
5224 | 5275 | ||
5225 | megasas_dbg_lvl = 0; | 5276 | megasas_dbg_lvl = 0; |
5226 | instance->flag = 0; | 5277 | instance->flag = 0; |
@@ -6215,9 +6266,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) | |||
6215 | goto out_kfree_ioc; | 6266 | goto out_kfree_ioc; |
6216 | } | 6267 | } |
6217 | 6268 | ||
6218 | /* | ||
6219 | * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds | ||
6220 | */ | ||
6221 | if (down_interruptible(&instance->ioctl_sem)) { | 6269 | if (down_interruptible(&instance->ioctl_sem)) { |
6222 | error = -ERESTARTSYS; | 6270 | error = -ERESTARTSYS; |
6223 | goto out_kfree_ioc; | 6271 | goto out_kfree_ioc; |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 460c6a3d4ade..4f72287860ee 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c | |||
@@ -172,6 +172,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) | |||
172 | struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; | 172 | struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL; |
173 | struct MR_FW_RAID_MAP *pFwRaidMap = NULL; | 173 | struct MR_FW_RAID_MAP *pFwRaidMap = NULL; |
174 | int i; | 174 | int i; |
175 | u16 ld_count; | ||
175 | 176 | ||
176 | 177 | ||
177 | struct MR_DRV_RAID_MAP_ALL *drv_map = | 178 | struct MR_DRV_RAID_MAP_ALL *drv_map = |
@@ -191,9 +192,10 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) | |||
191 | fw_map_old = (struct MR_FW_RAID_MAP_ALL *) | 192 | fw_map_old = (struct MR_FW_RAID_MAP_ALL *) |
192 | fusion->ld_map[(instance->map_id & 1)]; | 193 | fusion->ld_map[(instance->map_id & 1)]; |
193 | pFwRaidMap = &fw_map_old->raidMap; | 194 | pFwRaidMap = &fw_map_old->raidMap; |
195 | ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount); | ||
194 | 196 | ||
195 | #if VD_EXT_DEBUG | 197 | #if VD_EXT_DEBUG |
196 | for (i = 0; i < le16_to_cpu(pFwRaidMap->ldCount); i++) { | 198 | for (i = 0; i < ld_count; i++) { |
197 | dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " | 199 | dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x " |
198 | "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", | 200 | "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n", |
199 | instance->unique_id, i, | 201 | instance->unique_id, i, |
@@ -205,12 +207,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance) | |||
205 | 207 | ||
206 | memset(drv_map, 0, fusion->drv_map_sz); | 208 | memset(drv_map, 0, fusion->drv_map_sz); |
207 | pDrvRaidMap->totalSize = pFwRaidMap->totalSize; | 209 | pDrvRaidMap->totalSize = pFwRaidMap->totalSize; |
208 | pDrvRaidMap->ldCount = (__le16)pFwRaidMap->ldCount; | 210 | pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count); |
209 | pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; | 211 | pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec; |
210 | for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) | 212 | for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++) |
211 | pDrvRaidMap->ldTgtIdToLd[i] = | 213 | pDrvRaidMap->ldTgtIdToLd[i] = |
212 | (u8)pFwRaidMap->ldTgtIdToLd[i]; | 214 | (u8)pFwRaidMap->ldTgtIdToLd[i]; |
213 | for (i = 0; i < le16_to_cpu(pDrvRaidMap->ldCount); i++) { | 215 | for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS); |
216 | i < MAX_LOGICAL_DRIVES_EXT; i++) | ||
217 | pDrvRaidMap->ldTgtIdToLd[i] = 0xff; | ||
218 | for (i = 0; i < ld_count; i++) { | ||
214 | pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; | 219 | pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i]; |
215 | #if VD_EXT_DEBUG | 220 | #if VD_EXT_DEBUG |
216 | dev_dbg(&instance->pdev->dev, | 221 | dev_dbg(&instance->pdev->dev, |
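The target-ID table handling above copies the legacy firmware entries and then marks every extended slot as unmapped so a stale zero is never mistaken for LD 0. A sketch of that fill; 136 (standing in for MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS) and 256 (standing in for MAX_LOGICAL_DRIVES_EXT) are placeholder values, not quoted from this diff:

    #include <stdio.h>
    #include <string.h>

    #define LEGACY_ENTRIES 136   /* placeholder for the legacy map bound */
    #define EXT_ENTRIES    256   /* placeholder for MAX_LOGICAL_DRIVES_EXT */

    static void populate_tgt_map(unsigned char drv[EXT_ENTRIES],
                                 const unsigned char fw[LEGACY_ENTRIES])
    {
        int i;

        for (i = 0; i < LEGACY_ENTRIES; i++)
            drv[i] = fw[i];
        /* Slots the legacy firmware map never describes are explicitly
         * marked invalid (0xff) instead of being left as zero. */
        for (i = LEGACY_ENTRIES; i < EXT_ENTRIES; i++)
            drv[i] = 0xff;
    }

    int main(void)
    {
        unsigned char fw[LEGACY_ENTRIES], drv[EXT_ENTRIES];

        memset(fw, 0, sizeof(fw));
        populate_tgt_map(drv, fw);
        printf("drv[0]=%u drv[200]=0x%x\n", drv[0], drv[200]);
        return 0;
    }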
@@ -252,7 +257,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) | |||
252 | struct LD_LOAD_BALANCE_INFO *lbInfo; | 257 | struct LD_LOAD_BALANCE_INFO *lbInfo; |
253 | PLD_SPAN_INFO ldSpanInfo; | 258 | PLD_SPAN_INFO ldSpanInfo; |
254 | struct MR_LD_RAID *raid; | 259 | struct MR_LD_RAID *raid; |
255 | int ldCount, num_lds; | 260 | u16 ldCount, num_lds; |
256 | u16 ld; | 261 | u16 ld; |
257 | u32 expected_size; | 262 | u32 expected_size; |
258 | 263 | ||
@@ -356,7 +361,7 @@ static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map, | |||
356 | 361 | ||
357 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { | 362 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { |
358 | ld = MR_TargetIdToLdGet(ldCount, map); | 363 | ld = MR_TargetIdToLdGet(ldCount, map); |
359 | if (ld >= MAX_LOGICAL_DRIVES_EXT) | 364 | if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) |
360 | continue; | 365 | continue; |
361 | raid = MR_LdRaidGet(ld, map); | 366 | raid = MR_LdRaidGet(ld, map); |
362 | dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", | 367 | dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", |
@@ -1157,7 +1162,7 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, | |||
1157 | 1162 | ||
1158 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { | 1163 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) { |
1159 | ld = MR_TargetIdToLdGet(ldCount, map); | 1164 | ld = MR_TargetIdToLdGet(ldCount, map); |
1160 | if (ld >= MAX_LOGICAL_DRIVES_EXT) | 1165 | if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) |
1161 | continue; | 1166 | continue; |
1162 | raid = MR_LdRaidGet(ld, map); | 1167 | raid = MR_LdRaidGet(ld, map); |
1163 | for (element = 0; element < MAX_QUAD_DEPTH; element++) { | 1168 | for (element = 0; element < MAX_QUAD_DEPTH; element++) { |
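Both loops above now skip one more value: with the driver map prefilled to 0xff, an unmapped target ID decodes to MAX_LOGICAL_DRIVES_EXT - 1 or above and must not be dereferenced. A toy version of the check, assuming MAX_LOGICAL_DRIVES_EXT is 256:

    #include <stdio.h>

    #define MAX_LOGICAL_DRIVES_EXT 256   /* assumed macro value */

    static int ld_is_configured(unsigned int ld)
    {
        return ld < (MAX_LOGICAL_DRIVES_EXT - 1);
    }

    int main(void)
    {
        printf("ld 12   -> %d\n", ld_is_configured(12));    /* 1: walk this LD */
        printf("ld 0xff -> %d\n", ld_is_configured(0xff));  /* 0: skip it */
        return 0;
    }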
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 71557f64bb5e..675b5e7aba94 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -63,7 +63,6 @@ extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance | |||
63 | extern void | 63 | extern void |
64 | megasas_complete_cmd(struct megasas_instance *instance, | 64 | megasas_complete_cmd(struct megasas_instance *instance, |
65 | struct megasas_cmd *cmd, u8 alt_status); | 65 | struct megasas_cmd *cmd, u8 alt_status); |
66 | int megasas_is_ldio(struct scsi_cmnd *cmd); | ||
67 | int | 66 | int |
68 | wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, | 67 | wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, |
69 | int seconds); | 68 | int seconds); |
@@ -103,6 +102,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance) | |||
103 | { | 102 | { |
104 | struct megasas_register_set __iomem *regs; | 103 | struct megasas_register_set __iomem *regs; |
105 | regs = instance->reg_set; | 104 | regs = instance->reg_set; |
105 | |||
106 | instance->mask_interrupts = 0; | ||
106 | /* For Thunderbolt/Invader also clear intr on enable */ | 107 | /* For Thunderbolt/Invader also clear intr on enable */ |
107 | writel(~0, ®s->outbound_intr_status); | 108 | writel(~0, ®s->outbound_intr_status); |
108 | readl(®s->outbound_intr_status); | 109 | readl(®s->outbound_intr_status); |
@@ -111,7 +112,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance) | |||
111 | 112 | ||
112 | /* Dummy readl to force pci flush */ | 113 | /* Dummy readl to force pci flush */ |
113 | readl(®s->outbound_intr_mask); | 114 | readl(®s->outbound_intr_mask); |
114 | instance->mask_interrupts = 0; | ||
115 | } | 115 | } |
116 | 116 | ||
117 | /** | 117 | /** |
@@ -196,6 +196,7 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, | |||
196 | 196 | ||
197 | cmd->scmd = NULL; | 197 | cmd->scmd = NULL; |
198 | cmd->sync_cmd_idx = (u32)ULONG_MAX; | 198 | cmd->sync_cmd_idx = (u32)ULONG_MAX; |
199 | memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); | ||
199 | list_add(&cmd->list, (&fusion->cmd_pool)->next); | 200 | list_add(&cmd->list, (&fusion->cmd_pool)->next); |
200 | 201 | ||
201 | spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); | 202 | spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); |
@@ -689,6 +690,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
689 | = 1; | 690 | = 1; |
690 | init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb | 691 | init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb |
691 | = 1; | 692 | = 1; |
693 | init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw | ||
694 | = 1; | ||
692 | /* Convert capability to LE32 */ | 695 | /* Convert capability to LE32 */ |
693 | cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); | 696 | cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); |
694 | 697 | ||
@@ -698,12 +701,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
698 | cpu_to_le32(lower_32_bits(ioc_init_handle)); | 701 | cpu_to_le32(lower_32_bits(ioc_init_handle)); |
699 | init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); | 702 | init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); |
700 | 703 | ||
701 | req_desc.Words = 0; | 704 | req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr)); |
705 | req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr)); | ||
702 | req_desc.MFAIo.RequestFlags = | 706 | req_desc.MFAIo.RequestFlags = |
703 | (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << | 707 | (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << |
704 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 708 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
705 | cpu_to_le32s((u32 *)&req_desc.MFAIo); | ||
706 | req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr); | ||
707 | 709 | ||
708 | /* | 710 | /* |
709 | * disable the intr before firing the init frame | 711 | * disable the intr before firing the init frame |
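The IOC INIT descriptor is now built from two explicit little-endian 32-bit halves of the MFI frame address instead of OR-ing a 64-bit word into the descriptor. A portable sketch of the split, equivalent to lower_32_bits()/upper_32_bits() (the function name here is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    static void split_addr(uint64_t addr, uint32_t *low, uint32_t *high)
    {
        *low  = (uint32_t)(addr & 0xffffffffu);  /* goes into u.low  */
        *high = (uint32_t)(addr >> 32);          /* goes into u.high */
    }

    int main(void)
    {
        uint32_t lo, hi;

        split_addr(0x0000001234567840ULL, &lo, &hi);
        printf("low=0x%08x high=0x%08x\n", (unsigned)lo, (unsigned)hi);
        return 0;
    }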
@@ -1017,8 +1019,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) | |||
1017 | * does not exceed max cmds that the FW can support | 1019 | * does not exceed max cmds that the FW can support |
1018 | */ | 1020 | */ |
1019 | instance->max_fw_cmds = instance->max_fw_cmds-1; | 1021 | instance->max_fw_cmds = instance->max_fw_cmds-1; |
1020 | /* Only internal cmds (DCMD) need to have MFI frames */ | 1022 | |
1021 | instance->max_mfi_cmds = MEGASAS_INT_CMDS; | 1023 | /* |
1024 | * Only Driver's internal DCMDs and IOCTL DCMDs need to have MFI frames | ||
1025 | */ | ||
1026 | instance->max_mfi_cmds = | ||
1027 | MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS; | ||
1022 | 1028 | ||
1023 | max_cmd = instance->max_fw_cmds; | 1029 | max_cmd = instance->max_fw_cmds; |
1024 | 1030 | ||
@@ -1285,6 +1291,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, | |||
1285 | 1291 | ||
1286 | sgl_ptr = | 1292 | sgl_ptr = |
1287 | (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; | 1293 | (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; |
1294 | memset(sgl_ptr, 0, MEGASAS_MAX_SZ_CHAIN_FRAME); | ||
1288 | } | 1295 | } |
1289 | } | 1296 | } |
1290 | 1297 | ||
@@ -1658,6 +1665,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
1658 | u32 device_id; | 1665 | u32 device_id; |
1659 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; | 1666 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; |
1660 | u16 pd_index = 0; | 1667 | u16 pd_index = 0; |
1668 | u16 os_timeout_value; | ||
1669 | u16 timeout_limit; | ||
1661 | struct MR_DRV_RAID_MAP_ALL *local_map_ptr; | 1670 | struct MR_DRV_RAID_MAP_ALL *local_map_ptr; |
1662 | struct fusion_context *fusion = instance->ctrl_context; | 1671 | struct fusion_context *fusion = instance->ctrl_context; |
1663 | u8 span, physArm; | 1672 | u8 span, physArm; |
@@ -1674,52 +1683,66 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
1674 | 1683 | ||
1675 | io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); | 1684 | io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); |
1676 | 1685 | ||
1677 | |||
1678 | /* Check if this is a system PD I/O */ | ||
1679 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && | 1686 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && |
1680 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { | 1687 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { |
1681 | io_request->Function = 0; | ||
1682 | if (fusion->fast_path_io) | 1688 | if (fusion->fast_path_io) |
1683 | io_request->DevHandle = | 1689 | io_request->DevHandle = |
1684 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; | 1690 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; |
1685 | io_request->RaidContext.timeoutValue = | ||
1686 | local_map_ptr->raidMap.fpPdIoTimeoutSec; | ||
1687 | io_request->RaidContext.regLockFlags = 0; | ||
1688 | io_request->RaidContext.regLockRowLBA = 0; | ||
1689 | io_request->RaidContext.regLockLength = 0; | ||
1690 | io_request->RaidContext.RAIDFlags = | 1691 | io_request->RaidContext.RAIDFlags = |
1691 | MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << | 1692 | MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD |
1692 | MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; | 1693 | << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; |
1693 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || | 1694 | cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; |
1694 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) | ||
1695 | io_request->IoFlags |= cpu_to_le16( | ||
1696 | MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); | ||
1697 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1698 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << | ||
1699 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1700 | cmd->request_desc->SCSIIO.DevHandle = | ||
1701 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; | ||
1702 | cmd->request_desc->SCSIIO.MSIxIndex = | 1695 | cmd->request_desc->SCSIIO.MSIxIndex = |
1703 | instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0; | 1696 | instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0; |
1704 | /* | 1697 | os_timeout_value = scmd->request->timeout / HZ; |
1705 | * If the command is for the tape device, set the | 1698 | |
1706 | * FP timeout to the os layer timeout value. | 1699 | if (instance->secure_jbod_support && |
1707 | */ | 1700 | (megasas_cmd_type(scmd) == NON_READ_WRITE_SYSPDIO)) { |
1708 | if (scmd->device->type == TYPE_TAPE) { | 1701 | /* system pd firmware path */ |
1709 | if ((scmd->request->timeout / HZ) > 0xFFFF) | 1702 | io_request->Function = |
1710 | io_request->RaidContext.timeoutValue = | 1703 | MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; |
1711 | 0xFFFF; | 1704 | cmd->request_desc->SCSIIO.RequestFlags = |
1712 | else | 1705 | (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << |
1713 | io_request->RaidContext.timeoutValue = | 1706 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
1714 | scmd->request->timeout / HZ; | 1707 | io_request->RaidContext.timeoutValue = |
1708 | cpu_to_le16(os_timeout_value); | ||
1709 | } else { | ||
1710 | /* system pd Fast Path */ | ||
1711 | io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; | ||
1712 | io_request->RaidContext.regLockFlags = 0; | ||
1713 | io_request->RaidContext.regLockRowLBA = 0; | ||
1714 | io_request->RaidContext.regLockLength = 0; | ||
1715 | timeout_limit = (scmd->device->type == TYPE_DISK) ? | ||
1716 | 255 : 0xFFFF; | ||
1717 | io_request->RaidContext.timeoutValue = | ||
1718 | cpu_to_le16((os_timeout_value > timeout_limit) ? | ||
1719 | timeout_limit : os_timeout_value); | ||
1720 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || | ||
1721 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) | ||
1722 | io_request->IoFlags |= | ||
1723 | cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); | ||
1724 | |||
1725 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1726 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << | ||
1727 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1715 | } | 1728 | } |
1716 | } else { | 1729 | } else { |
1717 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) | 1730 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) |
1718 | goto NonFastPath; | 1731 | goto NonFastPath; |
1719 | 1732 | ||
1733 | /* | ||
1734 | * For older firmware, Driver should not access ldTgtIdToLd | ||
1735 | * beyond index 127 and for Extended VD firmware, ldTgtIdToLd | ||
1736 | * should not go beyond 255. | ||
1737 | */ | ||
1738 | |||
1739 | if ((!fusion->fast_path_io) || | ||
1740 | (device_id >= instance->fw_supported_vd_count)) | ||
1741 | goto NonFastPath; | ||
1742 | |||
1720 | ld = MR_TargetIdToLdGet(device_id, local_map_ptr); | 1743 | ld = MR_TargetIdToLdGet(device_id, local_map_ptr); |
1721 | if ((ld >= instance->fw_supported_vd_count) || | 1744 | |
1722 | (!fusion->fast_path_io)) | 1745 | if (ld >= instance->fw_supported_vd_count) |
1723 | goto NonFastPath; | 1746 | goto NonFastPath; |
1724 | 1747 | ||
1725 | raid = MR_LdRaidGet(ld, local_map_ptr); | 1748 | raid = MR_LdRaidGet(ld, local_map_ptr); |
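The system-PD branch above now picks between a firmware path that keeps the OS timeout as-is and a fast path that clamps it by device type (255 s for disks, 0xFFFF otherwise). A sketch of just the timeout selection; the helper and its parameters are illustrative, not the driver's interface:

    #include <stdio.h>
    #include <stdbool.h>

    static unsigned int syspd_timeout(unsigned int os_timeout_sec, bool fast_path,
                                      bool is_disk)
    {
        unsigned int limit;

        if (!fast_path)                  /* firmware path keeps the OS value */
            return os_timeout_sec;
        limit = is_disk ? 255 : 0xFFFF;  /* fast path clamps per device type */
        return (os_timeout_sec > limit) ? limit : os_timeout_sec;
    }

    int main(void)
    {
        printf("disk, 300s, fast path -> %u\n", syspd_timeout(300, true, true));   /* 255 */
        printf("tape, 300s, fast path -> %u\n", syspd_timeout(300, true, false));  /* 300 */
        printf("any,  300s, fw path   -> %u\n", syspd_timeout(300, false, true));  /* 300 */
        return 0;
    }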
@@ -1811,7 +1834,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, | |||
1811 | */ | 1834 | */ |
1812 | io_request->IoFlags = cpu_to_le16(scp->cmd_len); | 1835 | io_request->IoFlags = cpu_to_le16(scp->cmd_len); |
1813 | 1836 | ||
1814 | if (megasas_is_ldio(scp)) | 1837 | if (megasas_cmd_type(scp) == READ_WRITE_LDIO) |
1815 | megasas_build_ldio_fusion(instance, scp, cmd); | 1838 | megasas_build_ldio_fusion(instance, scp, cmd); |
1816 | else | 1839 | else |
1817 | megasas_build_dcdb_fusion(instance, scp, cmd); | 1840 | megasas_build_dcdb_fusion(instance, scp, cmd); |
@@ -2612,7 +2635,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) | |||
2612 | instance->host->host_no); | 2635 | instance->host->host_no); |
2613 | megaraid_sas_kill_hba(instance); | 2636 | megaraid_sas_kill_hba(instance); |
2614 | instance->skip_heartbeat_timer_del = 1; | 2637 | instance->skip_heartbeat_timer_del = 1; |
2615 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
2616 | retval = FAILED; | 2638 | retval = FAILED; |
2617 | goto out; | 2639 | goto out; |
2618 | } | 2640 | } |
@@ -2808,8 +2830,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) | |||
2808 | dev_info(&instance->pdev->dev, | 2830 | dev_info(&instance->pdev->dev, |
2809 | "Failed from %s %d\n", | 2831 | "Failed from %s %d\n", |
2810 | __func__, __LINE__); | 2832 | __func__, __LINE__); |
2811 | instance->adprecovery = | ||
2812 | MEGASAS_HW_CRITICAL_ERROR; | ||
2813 | megaraid_sas_kill_hba(instance); | 2833 | megaraid_sas_kill_hba(instance); |
2814 | retval = FAILED; | 2834 | retval = FAILED; |
2815 | } | 2835 | } |
@@ -2858,7 +2878,6 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) | |||
2858 | "adapter scsi%d.\n", instance->host->host_no); | 2878 | "adapter scsi%d.\n", instance->host->host_no); |
2859 | megaraid_sas_kill_hba(instance); | 2879 | megaraid_sas_kill_hba(instance); |
2860 | instance->skip_heartbeat_timer_del = 1; | 2880 | instance->skip_heartbeat_timer_del = 1; |
2861 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
2862 | retval = FAILED; | 2881 | retval = FAILED; |
2863 | } else { | 2882 | } else { |
2864 | /* For VF: Restart HB timer if we didn't OCR */ | 2883 | /* For VF: Restart HB timer if we didn't OCR */ |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 5ab7daee11be..56e6db2d5874 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h | |||
@@ -306,14 +306,9 @@ struct MPI2_RAID_SCSI_IO_REQUEST { | |||
306 | * MPT RAID MFA IO Descriptor. | 306 | * MPT RAID MFA IO Descriptor. |
307 | */ | 307 | */ |
308 | struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { | 308 | struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { |
309 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
310 | u32 MessageAddress1:24; /* bits 31:8*/ | ||
311 | u32 RequestFlags:8; | ||
312 | #else | ||
313 | u32 RequestFlags:8; | 309 | u32 RequestFlags:8; |
314 | u32 MessageAddress1:24; /* bits 31:8*/ | 310 | u32 MessageAddress1:24; |
315 | #endif | 311 | u32 MessageAddress2; |
316 | u32 MessageAddress2; /* bits 61:32 */ | ||
317 | }; | 312 | }; |
318 | 313 | ||
319 | /* Default Request Descriptor */ | 314 | /* Default Request Descriptor */ |
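With the big-endian bitfield variant dropped, the descriptor's first word is an 8-bit RequestFlags field below a 24-bit MessageAddress1 field. A sketch that packs the same layout with explicit shifts instead of bitfields (the packing helper and sample values are illustrative; only the field widths come from the struct above):

    #include <stdio.h>
    #include <stdint.h>

    /* Word 0: RequestFlags in bits 7:0, MessageAddress1 in bits 31:8.
     * Word 1: MessageAddress2. */
    static uint64_t pack_mfa_descriptor(uint8_t request_flags, uint32_t addr1_24,
                                        uint32_t addr2)
    {
        uint32_t low = (uint32_t)request_flags | ((addr1_24 & 0xffffffu) << 8);

        return ((uint64_t)addr2 << 32) | low;
    }

    int main(void)
    {
        printf("descriptor = 0x%016llx\n",
               (unsigned long long)pack_mfa_descriptor(0x06, 0x123456, 0x89abcdefu));
        return 0;
    }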
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 088eefa67da8..7fc6f23bd9dc 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * scatter/gather formats. | 8 | * scatter/gather formats. |
9 | * Creation Date: June 21, 2006 | 9 | * Creation Date: June 21, 2006 |
10 | * | 10 | * |
11 | * mpi2.h Version: 02.00.32 | 11 | * mpi2.h Version: 02.00.35 |
12 | * | 12 | * |
13 | * Version History | 13 | * Version History |
14 | * --------------- | 14 | * --------------- |
@@ -83,6 +83,9 @@ | |||
83 | * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. | 83 | * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. |
84 | * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. | 84 | * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. |
85 | * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. | 85 | * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. |
86 | * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. | ||
87 | * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT. | ||
88 | * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. | ||
86 | * -------------------------------------------------------------------------- | 89 | * -------------------------------------------------------------------------- |
87 | */ | 90 | */ |
88 | 91 | ||
@@ -108,7 +111,7 @@ | |||
108 | #define MPI2_VERSION_02_00 (0x0200) | 111 | #define MPI2_VERSION_02_00 (0x0200) |
109 | 112 | ||
110 | /* versioning for this MPI header set */ | 113 | /* versioning for this MPI header set */ |
111 | #define MPI2_HEADER_VERSION_UNIT (0x20) | 114 | #define MPI2_HEADER_VERSION_UNIT (0x23) |
112 | #define MPI2_HEADER_VERSION_DEV (0x00) | 115 | #define MPI2_HEADER_VERSION_DEV (0x00) |
113 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) | 116 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) |
114 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) | 117 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) |
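The unit and dev revisions share the one 16-bit header-version word, decoded with the mask and shift shown above. A quick standalone decode; MPI2_HEADER_VERSION_DEV_MASK (0x00FF) is assumed here, it is not part of this hunk:

    #include <stdio.h>

    #define MPI2_HEADER_VERSION_UNIT_MASK  (0xFF00)
    #define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
    #define MPI2_HEADER_VERSION_DEV_MASK   (0x00FF)   /* assumed counterpart */

    int main(void)
    {
        unsigned int hdr  = 0x2300;   /* UNIT 0x23, DEV 0x00 */
        unsigned int unit = (hdr & MPI2_HEADER_VERSION_UNIT_MASK) >>
                            MPI2_HEADER_VERSION_UNIT_SHIFT;
        unsigned int dev  = hdr & MPI2_HEADER_VERSION_DEV_MASK;

        printf("unit=0x%02x dev=0x%02x\n", unit, dev);
        return 0;
    }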
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index 510ef0dc8d7b..ee8d2d695d55 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI Configuration messages and pages | 6 | * Title: MPI Configuration messages and pages |
7 | * Creation Date: November 10, 2006 | 7 | * Creation Date: November 10, 2006 |
8 | * | 8 | * |
9 | * mpi2_cnfg.h Version: 02.00.26 | 9 | * mpi2_cnfg.h Version: 02.00.29 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -157,6 +157,20 @@ | |||
157 | * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. | 157 | * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. |
158 | * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to | 158 | * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to |
159 | * match the specification. | 159 | * match the specification. |
160 | * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for | ||
161 | * MPI2_CONFIG_PAGE_MAN_7. | ||
162 | * Added EnclosureLevel and ConnectorName fields to | ||
163 | * MPI2_CONFIG_PAGE_SAS_DEV_0. | ||
164 | * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for | ||
165 | * MPI2_CONFIG_PAGE_SAS_DEV_0. | ||
166 | * Added EnclosureLevel field to | ||
167 | * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. | ||
168 | * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for | ||
169 | * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. | ||
170 | * 01-08-14 02.00.28 Added more defines for the BiosOptions field of | ||
171 | * MPI2_CONFIG_PAGE_BIOS_1. | ||
172 | * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and | ||
173 | * more defines for the BiosOptions field. | ||
160 | * -------------------------------------------------------------------------- | 174 | * -------------------------------------------------------------------------- |
161 | */ | 175 | */ |
162 | 176 | ||
@@ -706,6 +720,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 | |||
706 | #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) | 720 | #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) |
707 | 721 | ||
708 | /* defines for the Flags field */ | 722 | /* defines for the Flags field */ |
723 | #define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008) | ||
709 | #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) | 724 | #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) |
710 | #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) | 725 | #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) |
711 | 726 | ||
@@ -1224,7 +1239,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 | |||
1224 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ | 1239 | MPI2_CONFIG_PAGE_HEADER Header; /* 0x00 */ |
1225 | U32 BiosOptions; /* 0x04 */ | 1240 | U32 BiosOptions; /* 0x04 */ |
1226 | U32 IOCSettings; /* 0x08 */ | 1241 | U32 IOCSettings; /* 0x08 */ |
1227 | U32 Reserved1; /* 0x0C */ | 1242 | U8 SSUTimeout; /* 0x0C */ |
1243 | U8 Reserved1; /* 0x0D */ | ||
1244 | U16 Reserved2; /* 0x0E */ | ||
1228 | U32 DeviceSettings; /* 0x10 */ | 1245 | U32 DeviceSettings; /* 0x10 */ |
1229 | U16 NumberOfDevices; /* 0x14 */ | 1246 | U16 NumberOfDevices; /* 0x14 */ |
1230 | U16 UEFIVersion; /* 0x16 */ | 1247 | U16 UEFIVersion; /* 0x16 */ |
@@ -1235,9 +1252,24 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 | |||
1235 | } MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1, | 1252 | } MPI2_CONFIG_PAGE_BIOS_1, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_BIOS_1, |
1236 | Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t; | 1253 | Mpi2BiosPage1_t, MPI2_POINTER pMpi2BiosPage1_t; |
1237 | 1254 | ||
1238 | #define MPI2_BIOSPAGE1_PAGEVERSION (0x05) | 1255 | #define MPI2_BIOSPAGE1_PAGEVERSION (0x07) |
1239 | 1256 | ||
1240 | /* values for BIOS Page 1 BiosOptions field */ | 1257 | /* values for BIOS Page 1 BiosOptions field */ |
1258 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) | ||
1259 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) | ||
1260 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) | ||
1261 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) | ||
1262 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) | ||
1263 | #define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) | ||
1264 | |||
1265 | #define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) | ||
1266 | |||
1267 | #define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) | ||
1268 | #define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) | ||
1269 | #define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) | ||
1270 | #define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) | ||
1271 | #define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) | ||
1272 | |||
1241 | #define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) | 1273 | #define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) |
1242 | #define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) | 1274 | #define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) |
1243 | 1275 | ||
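The new PNS values occupy a multi-bit field inside BiosOptions, so a reader is expected to mask first and then compare, rather than test individual bits. A short example using two of the defines above:

    #include <stdio.h>

    #define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK  (0x00003800)
    #define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000)

    int main(void)
    {
        unsigned int bios_options = 0x00001000;   /* example page value */

        if ((bios_options & MPI2_BIOSPAGE1_OPTIONS_PNS_MASK) ==
            MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID)
            printf("PNS field selects LWWID\n");
        return 0;
    }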
@@ -2420,13 +2452,13 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 | |||
2420 | U8 PortGroups; /* 0x2C */ | 2452 | U8 PortGroups; /* 0x2C */ |
2421 | U8 DmaGroup; /* 0x2D */ | 2453 | U8 DmaGroup; /* 0x2D */ |
2422 | U8 ControlGroup; /* 0x2E */ | 2454 | U8 ControlGroup; /* 0x2E */ |
2423 | U8 Reserved1; /* 0x2F */ | 2455 | U8 EnclosureLevel; /* 0x2F */ |
2424 | U32 Reserved2; /* 0x30 */ | 2456 | U8 ConnectorName[4]; /* 0x30 */ |
2425 | U32 Reserved3; /* 0x34 */ | 2457 | U32 Reserved3; /* 0x34 */ |
2426 | } MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, | 2458 | } MPI2_CONFIG_PAGE_SAS_DEV_0, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, |
2427 | Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t; | 2459 | Mpi2SasDevicePage0_t, MPI2_POINTER pMpi2SasDevicePage0_t; |
2428 | 2460 | ||
2429 | #define MPI2_SASDEVICE0_PAGEVERSION (0x08) | 2461 | #define MPI2_SASDEVICE0_PAGEVERSION (0x09) |
2430 | 2462 | ||
2431 | /* values for SAS Device Page 0 AccessStatus field */ | 2463 | /* values for SAS Device Page 0 AccessStatus field */ |
2432 | #define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) | 2464 | #define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) |
@@ -2464,6 +2496,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 | |||
2464 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) | 2496 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) |
2465 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) | 2497 | #define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) |
2466 | #define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) | 2498 | #define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) |
2499 | #define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) | ||
2467 | #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) | 2500 | #define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) |
2468 | 2501 | ||
2469 | 2502 | ||
@@ -2732,7 +2765,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 | |||
2732 | U16 EnclosureHandle; /* 0x16 */ | 2765 | U16 EnclosureHandle; /* 0x16 */ |
2733 | U16 NumSlots; /* 0x18 */ | 2766 | U16 NumSlots; /* 0x18 */ |
2734 | U16 StartSlot; /* 0x1A */ | 2767 | U16 StartSlot; /* 0x1A */ |
2735 | U16 Reserved2; /* 0x1C */ | 2768 | U8 Reserved2; /* 0x1C */ |
2769 | U8 EnclosureLevel; /* 0x1D */ | ||
2736 | U16 SEPDevHandle; /* 0x1E */ | 2770 | U16 SEPDevHandle; /* 0x1E */ |
2737 | U32 Reserved3; /* 0x20 */ | 2771 | U32 Reserved3; /* 0x20 */ |
2738 | U32 Reserved4; /* 0x24 */ | 2772 | U32 Reserved4; /* 0x24 */ |
@@ -2740,9 +2774,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 | |||
2740 | MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, | 2774 | MPI2_POINTER PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, |
2741 | Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t; | 2775 | Mpi2SasEnclosurePage0_t, MPI2_POINTER pMpi2SasEnclosurePage0_t; |
2742 | 2776 | ||
2743 | #define MPI2_SASENCLOSURE0_PAGEVERSION (0x03) | 2777 | #define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) |
2744 | 2778 | ||
2745 | /* values for SAS Enclosure Page 0 Flags field */ | 2779 | /* values for SAS Enclosure Page 0 Flags field */ |
2780 | #define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) | ||
2746 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) | 2781 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) |
2747 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) | 2782 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) |
2748 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) | 2783 | #define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index 2c3b0f28576b..b02de48be204 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages | 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages |
7 | * Creation Date: October 11, 2006 | 7 | * Creation Date: October 11, 2006 |
8 | * | 8 | * |
9 | * mpi2_ioc.h Version: 02.00.23 | 9 | * mpi2_ioc.h Version: 02.00.24 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -126,6 +126,7 @@ | |||
126 | * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. | 126 | * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. |
127 | * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. | 127 | * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. |
128 | * Added Encrypted Hash Extended Image. | 128 | * Added Encrypted Hash Extended Image. |
129 | * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. | ||
129 | * -------------------------------------------------------------------------- | 130 | * -------------------------------------------------------------------------- |
130 | */ | 131 | */ |
131 | 132 | ||
@@ -1589,6 +1590,7 @@ Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t; | |||
1589 | /* values for HashImageType */ | 1590 | /* values for HashImageType */ |
1590 | #define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) | 1591 | #define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) |
1591 | #define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) | 1592 | #define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) |
1593 | #define MPI25_HASH_IMAGE_TYPE_BIOS (0x02) | ||
1592 | 1594 | ||
1593 | /* values for HashAlgorithm */ | 1595 | /* values for HashAlgorithm */ |
1594 | #define MPI25_HASH_ALGORITHM_UNUSED (0x00) | 1596 | #define MPI25_HASH_ALGORITHM_UNUSED (0x00) |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 9be03ed46180..659b8ac83ceb 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI diagnostic tool structures and definitions | 6 | * Title: MPI diagnostic tool structures and definitions |
7 | * Creation Date: March 26, 2007 | 7 | * Creation Date: March 26, 2007 |
8 | * | 8 | * |
9 | * mpi2_tool.h Version: 02.00.11 | 9 | * mpi2_tool.h Version: 02.00.12 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -29,7 +29,8 @@ | |||
29 | * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. | 29 | * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. |
30 | * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that | 30 | * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that |
31 | * it uses MPI Chain SGE as well as MPI Simple SGE. | 31 | * it uses MPI Chain SGE as well as MPI Simple SGE. |
32 | * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. | 32 | * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. |
33 | * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. | ||
33 | * -------------------------------------------------------------------------- | 34 | * -------------------------------------------------------------------------- |
34 | */ | 35 | */ |
35 | 36 | ||
@@ -101,6 +102,7 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST | |||
101 | #define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) | 102 | #define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) |
102 | #define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) | 103 | #define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) |
103 | #define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) | 104 | #define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) |
105 | #define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) | ||
104 | #define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) | 106 | #define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) |
105 | #define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) | 107 | #define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) |
106 | #define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) | 108 | #define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 58e45216d1ec..11248de92b3b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.c | 5 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.c |
6 | * Copyright (C) 2007-2014 LSI Corporation | 6 | * Copyright (C) 2007-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
@@ -639,6 +640,9 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc, | |||
639 | if (!ioc->hide_ir_msg) | 640 | if (!ioc->hide_ir_msg) |
640 | desc = "Log Entry Added"; | 641 | desc = "Log Entry Added"; |
641 | break; | 642 | break; |
643 | case MPI2_EVENT_TEMP_THRESHOLD: | ||
644 | desc = "Temperature Threshold"; | ||
645 | break; | ||
642 | } | 646 | } |
643 | 647 | ||
644 | if (!desc) | 648 | if (!desc) |
@@ -1296,6 +1300,8 @@ _base_free_irq(struct MPT2SAS_ADAPTER *ioc) | |||
1296 | 1300 | ||
1297 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { | 1301 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { |
1298 | list_del(&reply_q->list); | 1302 | list_del(&reply_q->list); |
1303 | irq_set_affinity_hint(reply_q->vector, NULL); | ||
1304 | free_cpumask_var(reply_q->affinity_hint); | ||
1299 | synchronize_irq(reply_q->vector); | 1305 | synchronize_irq(reply_q->vector); |
1300 | free_irq(reply_q->vector, reply_q); | 1306 | free_irq(reply_q->vector, reply_q); |
1301 | kfree(reply_q); | 1307 | kfree(reply_q); |
@@ -1325,6 +1331,11 @@ _base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector) | |||
1325 | reply_q->ioc = ioc; | 1331 | reply_q->ioc = ioc; |
1326 | reply_q->msix_index = index; | 1332 | reply_q->msix_index = index; |
1327 | reply_q->vector = vector; | 1333 | reply_q->vector = vector; |
1334 | |||
1335 | if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) | ||
1336 | return -ENOMEM; | ||
1337 | cpumask_clear(reply_q->affinity_hint); | ||
1338 | |||
1328 | atomic_set(&reply_q->busy, 0); | 1339 | atomic_set(&reply_q->busy, 0); |
1329 | if (ioc->msix_enable) | 1340 | if (ioc->msix_enable) |
1330 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", | 1341 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", |
@@ -1359,6 +1370,7 @@ static void | |||
1359 | _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) | 1370 | _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) |
1360 | { | 1371 | { |
1361 | unsigned int cpu, nr_cpus, nr_msix, index = 0; | 1372 | unsigned int cpu, nr_cpus, nr_msix, index = 0; |
1373 | struct adapter_reply_queue *reply_q; | ||
1362 | 1374 | ||
1363 | if (!_base_is_controller_msix_enabled(ioc)) | 1375 | if (!_base_is_controller_msix_enabled(ioc)) |
1364 | return; | 1376 | return; |
@@ -1373,20 +1385,30 @@ _base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc) | |||
1373 | 1385 | ||
1374 | cpu = cpumask_first(cpu_online_mask); | 1386 | cpu = cpumask_first(cpu_online_mask); |
1375 | 1387 | ||
1376 | do { | 1388 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { |
1389 | |||
1377 | unsigned int i, group = nr_cpus / nr_msix; | 1390 | unsigned int i, group = nr_cpus / nr_msix; |
1378 | 1391 | ||
1392 | if (cpu >= nr_cpus) | ||
1393 | break; | ||
1394 | |||
1379 | if (index < nr_cpus % nr_msix) | 1395 | if (index < nr_cpus % nr_msix) |
1380 | group++; | 1396 | group++; |
1381 | 1397 | ||
1382 | for (i = 0 ; i < group ; i++) { | 1398 | for (i = 0 ; i < group ; i++) { |
1383 | ioc->cpu_msix_table[cpu] = index; | 1399 | ioc->cpu_msix_table[cpu] = index; |
1400 | cpumask_or(reply_q->affinity_hint, | ||
1401 | reply_q->affinity_hint, get_cpu_mask(cpu)); | ||
1384 | cpu = cpumask_next(cpu, cpu_online_mask); | 1402 | cpu = cpumask_next(cpu, cpu_online_mask); |
1385 | } | 1403 | } |
1386 | 1404 | ||
1405 | if (irq_set_affinity_hint(reply_q->vector, | ||
1406 | reply_q->affinity_hint)) | ||
1407 | dinitprintk(ioc, pr_info(MPT2SAS_FMT | ||
1408 | "error setting affinity hint for irq vector %d\n", | ||
1409 | ioc->name, reply_q->vector)); | ||
1387 | index++; | 1410 | index++; |
1388 | 1411 | } | |
1389 | } while (cpu < nr_cpus); | ||
1390 | } | 1412 | } |
1391 | 1413 | ||
1392 | /** | 1414 | /** |
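The reply-queue loop above spreads the online CPUs across the MSI-X vectors, letting the first nr_cpus % nr_msix queues absorb one extra CPU each and recording the mapping as an affinity hint. A userspace sketch of the same distribution, without the kernel cpumask and irq_set_affinity_hint() machinery:

    #include <stdio.h>

    static void assign_cpus(unsigned int nr_cpus, unsigned int nr_msix)
    {
        unsigned int cpu = 0, index;

        for (index = 0; index < nr_msix && cpu < nr_cpus; index++) {
            unsigned int i, group = nr_cpus / nr_msix;

            if (index < nr_cpus % nr_msix)
                group++;        /* early queues take the remainder */
            for (i = 0; i < group && cpu < nr_cpus; i++, cpu++)
                printf("cpu %u -> msix %u\n", cpu, index);
        }
    }

    int main(void)
    {
        assign_cpus(6, 4);      /* queues 0 and 1 get 2 CPUs, 2 and 3 get 1 */
        return 0;
    }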
@@ -2338,6 +2360,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) | |||
2338 | mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); | 2360 | mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); |
2339 | mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); | 2361 | mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); |
2340 | mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 2362 | mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
2363 | mpt2sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); | ||
2341 | _base_display_ioc_capabilities(ioc); | 2364 | _base_display_ioc_capabilities(ioc); |
2342 | 2365 | ||
2343 | /* | 2366 | /* |
@@ -2355,6 +2378,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) | |||
2355 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); | 2378 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); |
2356 | mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 2379 | mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
2357 | 2380 | ||
2381 | if (ioc->iounit_pg8.NumSensors) | ||
2382 | ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; | ||
2358 | } | 2383 | } |
2359 | 2384 | ||
2360 | /** | 2385 | /** |
@@ -2486,9 +2511,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
2486 | 2511 | ||
2487 | /* command line tunables for max sgl entries */ | 2512 | /* command line tunables for max sgl entries */ |
2488 | if (max_sgl_entries != -1) { | 2513 | if (max_sgl_entries != -1) { |
2489 | ioc->shost->sg_tablesize = (max_sgl_entries < | 2514 | ioc->shost->sg_tablesize = min_t(unsigned short, |
2490 | MPT2SAS_SG_DEPTH) ? max_sgl_entries : | 2515 | max_sgl_entries, SCSI_MAX_SG_CHAIN_SEGMENTS); |
2491 | MPT2SAS_SG_DEPTH; | 2516 | if (ioc->shost->sg_tablesize > MPT2SAS_SG_DEPTH) |
2517 | printk(MPT2SAS_WARN_FMT | ||
2518 | "sg_tablesize(%u) is bigger than kernel defined" | ||
2519 | " SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name, | ||
2520 | ioc->shost->sg_tablesize, MPT2SAS_SG_DEPTH); | ||
2492 | } else { | 2521 | } else { |
2493 | ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH; | 2522 | ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH; |
2494 | } | 2523 | } |
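The sg_tablesize handling above now clamps the max_sgl_entries module parameter to the kernel's chained-SG ceiling and warns when it exceeds the driver default, instead of silently truncating to that default. A sketch with assumed values for MPT2SAS_SG_DEPTH and SCSI_MAX_SG_CHAIN_SEGMENTS (neither value appears in this hunk):

    #include <stdio.h>

    #define MPT2SAS_SG_DEPTH            128   /* assumed driver default */
    #define SCSI_MAX_SG_CHAIN_SEGMENTS 2048   /* assumed kernel ceiling */

    static unsigned int pick_sg_tablesize(int max_sgl_entries)
    {
        unsigned int sg;

        if (max_sgl_entries == -1)              /* parameter left unset */
            return MPT2SAS_SG_DEPTH;
        sg = (max_sgl_entries < SCSI_MAX_SG_CHAIN_SEGMENTS) ?
                (unsigned int)max_sgl_entries : SCSI_MAX_SG_CHAIN_SEGMENTS;
        if (sg > MPT2SAS_SG_DEPTH)
            fprintf(stderr, "sg_tablesize(%u) is bigger than the driver default (%u)\n",
                    sg, (unsigned int)MPT2SAS_SG_DEPTH);
        return sg;
    }

    int main(void)
    {
        printf("unset -> %u\n", pick_sg_tablesize(-1));
        printf("4096  -> %u\n", pick_sg_tablesize(4096));
        return 0;
    }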
@@ -3236,7 +3265,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc, | |||
3236 | u16 smid; | 3265 | u16 smid; |
3237 | u32 ioc_state; | 3266 | u32 ioc_state; |
3238 | unsigned long timeleft; | 3267 | unsigned long timeleft; |
3239 | u8 issue_reset; | 3268 | bool issue_reset = false; |
3240 | int rc; | 3269 | int rc; |
3241 | void *request; | 3270 | void *request; |
3242 | u16 wait_state_count; | 3271 | u16 wait_state_count; |
@@ -3300,7 +3329,7 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc, | |||
3300 | _debug_dump_mf(mpi_request, | 3329 | _debug_dump_mf(mpi_request, |
3301 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); | 3330 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); |
3302 | if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) | 3331 | if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) |
3303 | issue_reset = 1; | 3332 | issue_reset = true; |
3304 | goto issue_host_reset; | 3333 | goto issue_host_reset; |
3305 | } | 3334 | } |
3306 | if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) | 3335 | if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) |
@@ -3341,7 +3370,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc, | |||
3341 | u16 smid; | 3370 | u16 smid; |
3342 | u32 ioc_state; | 3371 | u32 ioc_state; |
3343 | unsigned long timeleft; | 3372 | unsigned long timeleft; |
3344 | u8 issue_reset; | 3373 | bool issue_reset = false; |
3345 | int rc; | 3374 | int rc; |
3346 | void *request; | 3375 | void *request; |
3347 | u16 wait_state_count; | 3376 | u16 wait_state_count; |
@@ -3398,7 +3427,7 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc, | |||
3398 | _debug_dump_mf(mpi_request, | 3427 | _debug_dump_mf(mpi_request, |
3399 | sizeof(Mpi2SepRequest_t)/4); | 3428 | sizeof(Mpi2SepRequest_t)/4); |
3400 | if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) | 3429 | if (!(ioc->base_cmds.status & MPT2_CMD_RESET)) |
3401 | issue_reset = 1; | 3430 | issue_reset = true; |
3402 | goto issue_host_reset; | 3431 | goto issue_host_reset; |
3403 | } | 3432 | } |
3404 | if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) | 3433 | if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID) |
@@ -4594,6 +4623,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
4594 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); | 4623 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); |
4595 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); | 4624 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); |
4596 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); | 4625 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); |
4626 | _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); | ||
4597 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); | 4627 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); |
4598 | if (r) | 4628 | if (r) |
4599 | goto out_free_resources; | 4629 | goto out_free_resources; |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 239f169b0673..caff8d10cca4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.h | 5 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.h |
6 | * Copyright (C) 2007-2014 LSI Corporation | 6 | * Copyright (C) 2007-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
@@ -67,10 +68,10 @@ | |||
67 | 68 | ||
68 | /* driver versioning info */ | 69 | /* driver versioning info */ |
69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" | 70 | #define MPT2SAS_DRIVER_NAME "mpt2sas" |
70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 71 | #define MPT2SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" |
71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" | 72 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" |
72 | #define MPT2SAS_DRIVER_VERSION "18.100.00.00" | 73 | #define MPT2SAS_DRIVER_VERSION "20.100.00.00" |
73 | #define MPT2SAS_MAJOR_VERSION 18 | 74 | #define MPT2SAS_MAJOR_VERSION 20 |
74 | #define MPT2SAS_MINOR_VERSION 100 | 75 | #define MPT2SAS_MINOR_VERSION 100 |
75 | #define MPT2SAS_BUILD_VERSION 00 | 76 | #define MPT2SAS_BUILD_VERSION 00 |
76 | #define MPT2SAS_RELEASE_VERSION 00 | 77 | #define MPT2SAS_RELEASE_VERSION 00 |
@@ -586,6 +587,7 @@ struct adapter_reply_queue { | |||
586 | Mpi2ReplyDescriptorsUnion_t *reply_post_free; | 587 | Mpi2ReplyDescriptorsUnion_t *reply_post_free; |
587 | char name[MPT_NAME_LENGTH]; | 588 | char name[MPT_NAME_LENGTH]; |
588 | atomic_t busy; | 589 | atomic_t busy; |
590 | cpumask_var_t affinity_hint; | ||
589 | struct list_head list; | 591 | struct list_head list; |
590 | }; | 592 | }; |
591 | 593 | ||
@@ -725,6 +727,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc); | |||
725 | * @ioc_pg8: static ioc page 8 | 727 | * @ioc_pg8: static ioc page 8 |
726 | * @iounit_pg0: static iounit page 0 | 728 | * @iounit_pg0: static iounit page 0 |
727 | * @iounit_pg1: static iounit page 1 | 729 | * @iounit_pg1: static iounit page 1 |
730 | * @iounit_pg8: static iounit page 8 | ||
728 | * @sas_hba: sas host object | 731 | * @sas_hba: sas host object |
729 | * @sas_expander_list: expander object list | 732 | * @sas_expander_list: expander object list |
730 | * @sas_node_lock: | 733 | * @sas_node_lock: |
@@ -795,6 +798,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc); | |||
795 | * @reply_post_host_index: head index in the pool where FW completes IO | 798 | * @reply_post_host_index: head index in the pool where FW completes IO |
796 | * @delayed_tr_list: target reset link list | 799 | * @delayed_tr_list: target reset link list |
797 | * @delayed_tr_volume_list: volume target reset link list | 800 | * @delayed_tr_volume_list: volume target reset link list |
801 | * @@temp_sensors_count: flag to carry the number of temperature sensors | ||
798 | */ | 802 | */ |
799 | struct MPT2SAS_ADAPTER { | 803 | struct MPT2SAS_ADAPTER { |
800 | struct list_head list; | 804 | struct list_head list; |
@@ -892,6 +896,7 @@ struct MPT2SAS_ADAPTER { | |||
892 | Mpi2IOCPage8_t ioc_pg8; | 896 | Mpi2IOCPage8_t ioc_pg8; |
893 | Mpi2IOUnitPage0_t iounit_pg0; | 897 | Mpi2IOUnitPage0_t iounit_pg0; |
894 | Mpi2IOUnitPage1_t iounit_pg1; | 898 | Mpi2IOUnitPage1_t iounit_pg1; |
899 | Mpi2IOUnitPage8_t iounit_pg8; | ||
895 | 900 | ||
896 | struct _boot_device req_boot_device; | 901 | struct _boot_device req_boot_device; |
897 | struct _boot_device req_alt_boot_device; | 902 | struct _boot_device req_alt_boot_device; |
@@ -992,6 +997,7 @@ struct MPT2SAS_ADAPTER { | |||
992 | 997 | ||
993 | struct list_head delayed_tr_list; | 998 | struct list_head delayed_tr_list; |
994 | struct list_head delayed_tr_volume_list; | 999 | struct list_head delayed_tr_volume_list; |
1000 | u8 temp_sensors_count; | ||
995 | 1001 | ||
996 | /* diag buffer support */ | 1002 | /* diag buffer support */ |
997 | u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; | 1003 | u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; |
@@ -1120,6 +1126,8 @@ int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1120 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 1126 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
1121 | int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1127 | int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
1122 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 1128 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
1129 | int mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc, | ||
1130 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page); | ||
1123 | int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc, | 1131 | int mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc, |
1124 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); | 1132 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); |
1125 | int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1133 | int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index c72a2fff5dbb..c43815b1a485 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.c | 4 | * This code is based on drivers/scsi/mpt2sas/mpt2_base.c |
5 | * Copyright (C) 2007-2014 LSI Corporation | 5 | * Copyright (C) 2007-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -719,6 +720,42 @@ mpt2sas_config_get_iounit_pg3(struct MPT2SAS_ADAPTER *ioc, | |||
719 | } | 720 | } |
720 | 721 | ||
721 | /** | 722 | /** |
723 | * mpt2sas_config_get_iounit_pg8 - obtain iounit page 8 | ||
724 | * @ioc: per adapter object | ||
725 | * @mpi_reply: reply mf payload returned from firmware | ||
726 | * @config_page: contents of the config page | ||
727 | * Context: sleep. | ||
728 | * | ||
729 | * Returns 0 for success, non-zero for failure. | ||
730 | */ | ||
731 | int | ||
732 | mpt2sas_config_get_iounit_pg8(struct MPT2SAS_ADAPTER *ioc, | ||
733 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page) | ||
734 | { | ||
735 | Mpi2ConfigRequest_t mpi_request; | ||
736 | int r; | ||
737 | |||
738 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | ||
739 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | ||
740 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; | ||
741 | mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; | ||
742 | mpi_request.Header.PageNumber = 8; | ||
743 | mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; | ||
744 | mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE); | ||
745 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
746 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); | ||
747 | if (r) | ||
748 | goto out; | ||
749 | |||
750 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; | ||
751 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
752 | MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, | ||
753 | sizeof(*config_page)); | ||
754 | out: | ||
755 | return r; | ||
756 | } | ||
757 | |||
758 | /** | ||
722 | * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8 | 759 | * mpt2sas_config_get_ioc_pg8 - obtain ioc page 8 |
723 | * @ioc: per adapter object | 760 | * @ioc: per adapter object |
724 | * @mpi_reply: reply mf payload returned from firmware | 761 | * @mpi_reply: reply mf payload returned from firmware |
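The new mpt2sas_config_get_iounit_pg8() follows the driver's usual two-step config transaction: a PAGE_HEADER action to fetch and validate the page header, then a READ_CURRENT action that copies the page into the caller's buffer. A minimal caller sketch under the same assumptions as the patch (the helper name and error handling below are illustrative, not part of the driver):

        /* Hypothetical caller: cache IO Unit page 8 so the sensor count is
         * available when TEMP_THRESHOLD events arrive later.
         */
        static void demo_cache_iounit_pg8(struct MPT2SAS_ADAPTER *ioc)
        {
                Mpi2ConfigReply_t mpi_reply;
                Mpi2IOUnitPage8_t iounit_pg8;

                if (mpt2sas_config_get_iounit_pg8(ioc, &mpi_reply, &iounit_pg8)) {
                        printk(MPT2SAS_ERR_FMT "failure reading iounit page 8\n",
                            ioc->name);
                        return;
                }
                if (iounit_pg8.NumSensors)
                        ioc->temp_sensors_count = iounit_pg8.NumSensors;
        }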
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index ca4e563c01dd..4e509604b571 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c | 5 | * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c |
6 | * Copyright (C) 2007-2014 LSI Corporation | 6 | * Copyright (C) 2007-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h index 7f842c88abd2..46b2fc5b74af 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h | 5 | * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h |
6 | * Copyright (C) 2007-2014 LSI Corporation | 6 | * Copyright (C) 2007-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h index cc57ef31d0fe..277120d45648 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_debug.h +++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c | 4 | * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c |
5 | * Copyright (C) 2007-2014 LSI Corporation | 5 | * Copyright (C) 2007-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 6a1c036a6f3f..3f26147bbc64 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c | 4 | * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c |
5 | * Copyright (C) 2007-2014 LSI Corporation | 5 | * Copyright (C) 2007-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -2729,9 +2730,18 @@ _scsih_host_reset(struct scsi_cmnd *scmd) | |||
2729 | ioc->name, scmd); | 2730 | ioc->name, scmd); |
2730 | scsi_print_command(scmd); | 2731 | scsi_print_command(scmd); |
2731 | 2732 | ||
2733 | if (ioc->is_driver_loading) { | ||
2734 | printk(MPT2SAS_INFO_FMT "Blocking the host reset\n", | ||
2735 | ioc->name); | ||
2736 | r = FAILED; | ||
2737 | goto out; | ||
2738 | } | ||
2739 | |||
2732 | retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, | 2740 | retval = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, |
2733 | FORCE_BIG_HAMMER); | 2741 | FORCE_BIG_HAMMER); |
2734 | r = (retval < 0) ? FAILED : SUCCESS; | 2742 | r = (retval < 0) ? FAILED : SUCCESS; |
2743 | |||
2744 | out: | ||
2735 | printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n", | 2745 | printk(MPT2SAS_INFO_FMT "host reset: %s scmd(%p)\n", |
2736 | ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); | 2746 | ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); |
2737 | 2747 | ||
@@ -3647,6 +3657,31 @@ _scsih_check_volume_delete_events(struct MPT2SAS_ADAPTER *ioc, | |||
3647 | } | 3657 | } |
3648 | 3658 | ||
3649 | /** | 3659 | /** |
3660 | * _scsih_temp_threshold_events - display temperature threshold exceeded events | ||
3661 | * @ioc: per adapter object | ||
3662 | * @event_data: the temp threshold event data | ||
3663 | * Context: interrupt time. | ||
3664 | * | ||
3665 | * Return nothing. | ||
3666 | */ | ||
3667 | static void | ||
3668 | _scsih_temp_threshold_events(struct MPT2SAS_ADAPTER *ioc, | ||
3669 | Mpi2EventDataTemperature_t *event_data) | ||
3670 | { | ||
3671 | if (ioc->temp_sensors_count >= event_data->SensorNum) { | ||
3672 | printk(MPT2SAS_ERR_FMT "Temperature Threshold flags %s%s%s%s" | ||
3673 | " exceeded for Sensor: %d !!!\n", ioc->name, | ||
3674 | ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ", | ||
3675 | ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ", | ||
3676 | ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ", | ||
3677 | ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ", | ||
3678 | event_data->SensorNum); | ||
3679 | printk(MPT2SAS_ERR_FMT "Current Temp In Celsius: %d\n", | ||
3680 | ioc->name, event_data->CurrentTemperature); | ||
3681 | } | ||
3682 | } | ||
3683 | |||
3684 | /** | ||
3650 | * _scsih_flush_running_cmds - completing outstanding commands. | 3685 | * _scsih_flush_running_cmds - completing outstanding commands. |
3651 | * @ioc: per adapter object | 3686 | * @ioc: per adapter object |
3652 | * | 3687 | * |
@@ -4509,6 +4544,10 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
4509 | scmd->result = DID_TRANSPORT_DISRUPTED << 16; | 4544 | scmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4510 | goto out; | 4545 | goto out; |
4511 | } | 4546 | } |
4547 | if (log_info == 0x32010081) { | ||
4548 | scmd->result = DID_RESET << 16; | ||
4549 | break; | ||
4550 | } | ||
4512 | scmd->result = DID_SOFT_ERROR << 16; | 4551 | scmd->result = DID_SOFT_ERROR << 16; |
4513 | break; | 4552 | break; |
4514 | case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: | 4553 | case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: |
@@ -7557,6 +7596,12 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, | |||
7557 | case MPI2_EVENT_IR_PHYSICAL_DISK: | 7596 | case MPI2_EVENT_IR_PHYSICAL_DISK: |
7558 | break; | 7597 | break; |
7559 | 7598 | ||
7599 | case MPI2_EVENT_TEMP_THRESHOLD: | ||
7600 | _scsih_temp_threshold_events(ioc, | ||
7601 | (Mpi2EventDataTemperature_t *) | ||
7602 | mpi_reply->EventData); | ||
7603 | break; | ||
7604 | |||
7560 | default: /* ignore the rest */ | 7605 | default: /* ignore the rest */ |
7561 | return; | 7606 | return; |
7562 | } | 7607 | } |
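The _scsih_temp_threshold_events() handler added above treats bits 0-3 of the event's Status word as "threshold n exceeded" flags and prints them through four conditional %s arguments. The same decode as a stand-alone sketch (plain C, hypothetical function name, not taken from the driver):

        #include <stdio.h>
        #include <stdint.h>

        /* Bit n of status set => temperature threshold n was exceeded. */
        static void show_temp_thresholds(uint16_t status, int sensor, int temp_c)
        {
                int i;

                printf("Temperature Threshold flags ");
                for (i = 0; i < 4; i++)
                        if (status & (1 << i))
                                printf("%d ", i);
                printf("exceeded for Sensor: %d, current temp %d C\n",
                       sensor, temp_c);
        }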
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index e689bf20a3ea..ff2500ab9ba4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c | 4 | * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c |
5 | * Copyright (C) 2007-2014 LSI Corporation | 5 | * Copyright (C) 2007-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1560115079c7..14a781b6b88d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
@@ -619,6 +620,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, | |||
619 | case MPI2_EVENT_LOG_ENTRY_ADDED: | 620 | case MPI2_EVENT_LOG_ENTRY_ADDED: |
620 | desc = "Log Entry Added"; | 621 | desc = "Log Entry Added"; |
621 | break; | 622 | break; |
623 | case MPI2_EVENT_TEMP_THRESHOLD: | ||
624 | desc = "Temperature Threshold"; | ||
625 | break; | ||
622 | } | 626 | } |
623 | 627 | ||
624 | if (!desc) | 628 | if (!desc) |
@@ -1580,6 +1584,8 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc) | |||
1580 | 1584 | ||
1581 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { | 1585 | list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { |
1582 | list_del(&reply_q->list); | 1586 | list_del(&reply_q->list); |
1587 | irq_set_affinity_hint(reply_q->vector, NULL); | ||
1588 | free_cpumask_var(reply_q->affinity_hint); | ||
1583 | synchronize_irq(reply_q->vector); | 1589 | synchronize_irq(reply_q->vector); |
1584 | free_irq(reply_q->vector, reply_q); | 1590 | free_irq(reply_q->vector, reply_q); |
1585 | kfree(reply_q); | 1591 | kfree(reply_q); |
@@ -1609,6 +1615,11 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) | |||
1609 | reply_q->ioc = ioc; | 1615 | reply_q->ioc = ioc; |
1610 | reply_q->msix_index = index; | 1616 | reply_q->msix_index = index; |
1611 | reply_q->vector = vector; | 1617 | reply_q->vector = vector; |
1618 | |||
1619 | if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) | ||
1620 | return -ENOMEM; | ||
1621 | cpumask_clear(reply_q->affinity_hint); | ||
1622 | |||
1612 | atomic_set(&reply_q->busy, 0); | 1623 | atomic_set(&reply_q->busy, 0); |
1613 | if (ioc->msix_enable) | 1624 | if (ioc->msix_enable) |
1614 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", | 1625 | snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", |
@@ -1643,6 +1654,7 @@ static void | |||
1643 | _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) | 1654 | _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) |
1644 | { | 1655 | { |
1645 | unsigned int cpu, nr_cpus, nr_msix, index = 0; | 1656 | unsigned int cpu, nr_cpus, nr_msix, index = 0; |
1657 | struct adapter_reply_queue *reply_q; | ||
1646 | 1658 | ||
1647 | if (!_base_is_controller_msix_enabled(ioc)) | 1659 | if (!_base_is_controller_msix_enabled(ioc)) |
1648 | return; | 1660 | return; |
@@ -1657,20 +1669,30 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) | |||
1657 | 1669 | ||
1658 | cpu = cpumask_first(cpu_online_mask); | 1670 | cpu = cpumask_first(cpu_online_mask); |
1659 | 1671 | ||
1660 | do { | 1672 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { |
1673 | |||
1661 | unsigned int i, group = nr_cpus / nr_msix; | 1674 | unsigned int i, group = nr_cpus / nr_msix; |
1662 | 1675 | ||
1676 | if (cpu >= nr_cpus) | ||
1677 | break; | ||
1678 | |||
1663 | if (index < nr_cpus % nr_msix) | 1679 | if (index < nr_cpus % nr_msix) |
1664 | group++; | 1680 | group++; |
1665 | 1681 | ||
1666 | for (i = 0 ; i < group ; i++) { | 1682 | for (i = 0 ; i < group ; i++) { |
1667 | ioc->cpu_msix_table[cpu] = index; | 1683 | ioc->cpu_msix_table[cpu] = index; |
1684 | cpumask_or(reply_q->affinity_hint, | ||
1685 | reply_q->affinity_hint, get_cpu_mask(cpu)); | ||
1668 | cpu = cpumask_next(cpu, cpu_online_mask); | 1686 | cpu = cpumask_next(cpu, cpu_online_mask); |
1669 | } | 1687 | } |
1670 | 1688 | ||
1689 | if (irq_set_affinity_hint(reply_q->vector, | ||
1690 | reply_q->affinity_hint)) | ||
1691 | dinitprintk(ioc, pr_info(MPT3SAS_FMT | ||
1692 | "error setting affinity hint for irq vector %d\n", | ||
1693 | ioc->name, reply_q->vector)); | ||
1671 | index++; | 1694 | index++; |
1672 | 1695 | } | |
1673 | } while (cpu < nr_cpus); | ||
1674 | } | 1696 | } |
1675 | 1697 | ||
1676 | /** | 1698 | /** |
@@ -2500,6 +2522,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) | |||
2500 | mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); | 2522 | mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); |
2501 | mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); | 2523 | mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); |
2502 | mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 2524 | mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
2525 | mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); | ||
2503 | _base_display_ioc_capabilities(ioc); | 2526 | _base_display_ioc_capabilities(ioc); |
2504 | 2527 | ||
2505 | /* | 2528 | /* |
@@ -2516,6 +2539,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) | |||
2516 | MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; | 2539 | MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; |
2517 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); | 2540 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); |
2518 | mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 2541 | mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
2542 | |||
2543 | if (ioc->iounit_pg8.NumSensors) | ||
2544 | ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; | ||
2519 | } | 2545 | } |
2520 | 2546 | ||
2521 | /** | 2547 | /** |
@@ -2659,8 +2685,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) | |||
2659 | 2685 | ||
2660 | if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS) | 2686 | if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS) |
2661 | sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS; | 2687 | sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS; |
2662 | else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) | 2688 | else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) { |
2663 | sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS; | 2689 | sg_tablesize = min_t(unsigned short, sg_tablesize, |
2690 | SCSI_MAX_SG_CHAIN_SEGMENTS); | ||
2691 | pr_warn(MPT3SAS_FMT | ||
2692 | "sg_tablesize(%u) is bigger than kernel" | ||
2693 | " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name, | ||
2694 | sg_tablesize, MPT3SAS_MAX_PHYS_SEGMENTS); | ||
2695 | } | ||
2664 | ioc->shost->sg_tablesize = sg_tablesize; | 2696 | ioc->shost->sg_tablesize = sg_tablesize; |
2665 | 2697 | ||
2666 | ioc->hi_priority_depth = facts->HighPriorityCredit; | 2698 | ioc->hi_priority_depth = facts->HighPriorityCredit; |
@@ -3419,7 +3451,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | |||
3419 | u16 smid; | 3451 | u16 smid; |
3420 | u32 ioc_state; | 3452 | u32 ioc_state; |
3421 | unsigned long timeleft; | 3453 | unsigned long timeleft; |
3422 | u8 issue_reset; | 3454 | bool issue_reset = false; |
3423 | int rc; | 3455 | int rc; |
3424 | void *request; | 3456 | void *request; |
3425 | u16 wait_state_count; | 3457 | u16 wait_state_count; |
@@ -3483,7 +3515,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | |||
3483 | _debug_dump_mf(mpi_request, | 3515 | _debug_dump_mf(mpi_request, |
3484 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); | 3516 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); |
3485 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) | 3517 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) |
3486 | issue_reset = 1; | 3518 | issue_reset = true; |
3487 | goto issue_host_reset; | 3519 | goto issue_host_reset; |
3488 | } | 3520 | } |
3489 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) | 3521 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) |
@@ -3523,7 +3555,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | |||
3523 | u16 smid; | 3555 | u16 smid; |
3524 | u32 ioc_state; | 3556 | u32 ioc_state; |
3525 | unsigned long timeleft; | 3557 | unsigned long timeleft; |
3526 | u8 issue_reset; | 3558 | bool issue_reset = false; |
3527 | int rc; | 3559 | int rc; |
3528 | void *request; | 3560 | void *request; |
3529 | u16 wait_state_count; | 3561 | u16 wait_state_count; |
@@ -3581,7 +3613,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | |||
3581 | _debug_dump_mf(mpi_request, | 3613 | _debug_dump_mf(mpi_request, |
3582 | sizeof(Mpi2SepRequest_t)/4); | 3614 | sizeof(Mpi2SepRequest_t)/4); |
3583 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) | 3615 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) |
3584 | issue_reset = 1; | 3616 | issue_reset = true; |
3585 | goto issue_host_reset; | 3617 | goto issue_host_reset; |
3586 | } | 3618 | } |
3587 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) | 3619 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) |
@@ -4720,6 +4752,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | |||
4720 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); | 4752 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); |
4721 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); | 4753 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); |
4722 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); | 4754 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); |
4755 | _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); | ||
4723 | 4756 | ||
4724 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); | 4757 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); |
4725 | if (r) | 4758 | if (r) |
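The reply-queue changes above allocate a per-queue cpumask, spread the online CPUs across the MSI-X vectors in _base_assign_reply_queues(), publish the result with irq_set_affinity_hint(), and release it again in _base_free_irq(). A condensed sketch of that allocate/fill/hint/free lifecycle, with illustrative struct and function names and error handling trimmed:

        #include <linux/interrupt.h>
        #include <linux/cpumask.h>
        #include <linux/gfp.h>
        #include <linux/errno.h>

        struct demo_reply_q {
                unsigned int vector;            /* MSI-X interrupt number */
                cpumask_var_t affinity_hint;    /* CPUs served by this queue */
        };

        static int demo_setup_hint(struct demo_reply_q *q, unsigned int cpu)
        {
                if (!alloc_cpumask_var(&q->affinity_hint, GFP_KERNEL))
                        return -ENOMEM;
                cpumask_clear(q->affinity_hint);
                cpumask_or(q->affinity_hint, q->affinity_hint, get_cpu_mask(cpu));
                /* The hint is advisory; irqbalance may follow or ignore it. */
                return irq_set_affinity_hint(q->vector, q->affinity_hint);
        }

        static void demo_teardown_hint(struct demo_reply_q *q)
        {
                irq_set_affinity_hint(q->vector, NULL);
                free_cpumask_var(q->affinity_hint);
        }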
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 40926aa9b24d..afa881682bef 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
@@ -68,7 +69,7 @@ | |||
68 | 69 | ||
69 | /* driver versioning info */ | 70 | /* driver versioning info */ |
70 | #define MPT3SAS_DRIVER_NAME "mpt3sas" | 71 | #define MPT3SAS_DRIVER_NAME "mpt3sas" |
71 | #define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 72 | #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" |
72 | #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" | 73 | #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" |
73 | #define MPT3SAS_DRIVER_VERSION "04.100.00.00" | 74 | #define MPT3SAS_DRIVER_VERSION "04.100.00.00" |
74 | #define MPT3SAS_MAJOR_VERSION 4 | 75 | #define MPT3SAS_MAJOR_VERSION 4 |
@@ -506,6 +507,7 @@ struct adapter_reply_queue { | |||
506 | Mpi2ReplyDescriptorsUnion_t *reply_post_free; | 507 | Mpi2ReplyDescriptorsUnion_t *reply_post_free; |
507 | char name[MPT_NAME_LENGTH]; | 508 | char name[MPT_NAME_LENGTH]; |
508 | atomic_t busy; | 509 | atomic_t busy; |
510 | cpumask_var_t affinity_hint; | ||
509 | struct list_head list; | 511 | struct list_head list; |
510 | }; | 512 | }; |
511 | 513 | ||
@@ -659,6 +661,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); | |||
659 | * @ioc_pg8: static ioc page 8 | 661 | * @ioc_pg8: static ioc page 8 |
660 | * @iounit_pg0: static iounit page 0 | 662 | * @iounit_pg0: static iounit page 0 |
661 | * @iounit_pg1: static iounit page 1 | 663 | * @iounit_pg1: static iounit page 1 |
664 | * @iounit_pg8: static iounit page 8 | ||
662 | * @sas_hba: sas host object | 665 | * @sas_hba: sas host object |
663 | * @sas_expander_list: expander object list | 666 | * @sas_expander_list: expander object list |
664 | * @sas_node_lock: | 667 | * @sas_node_lock: |
@@ -728,6 +731,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); | |||
728 | * @reply_post_host_index: head index in the pool where FW completes IO | 731 | * @reply_post_host_index: head index in the pool where FW completes IO |
729 | * @delayed_tr_list: target reset link list | 732 | * @delayed_tr_list: target reset link list |
730 | * @delayed_tr_volume_list: volume target reset link list | 733 | * @delayed_tr_volume_list: volume target reset link list |
734 | * @temp_sensors_count: number of temperature sensors reported by the IOC | ||
731 | */ | 735 | */ |
732 | struct MPT3SAS_ADAPTER { | 736 | struct MPT3SAS_ADAPTER { |
733 | struct list_head list; | 737 | struct list_head list; |
@@ -834,6 +838,7 @@ struct MPT3SAS_ADAPTER { | |||
834 | Mpi2IOCPage8_t ioc_pg8; | 838 | Mpi2IOCPage8_t ioc_pg8; |
835 | Mpi2IOUnitPage0_t iounit_pg0; | 839 | Mpi2IOUnitPage0_t iounit_pg0; |
836 | Mpi2IOUnitPage1_t iounit_pg1; | 840 | Mpi2IOUnitPage1_t iounit_pg1; |
841 | Mpi2IOUnitPage8_t iounit_pg8; | ||
837 | 842 | ||
838 | struct _boot_device req_boot_device; | 843 | struct _boot_device req_boot_device; |
839 | struct _boot_device req_alt_boot_device; | 844 | struct _boot_device req_alt_boot_device; |
@@ -934,6 +939,7 @@ struct MPT3SAS_ADAPTER { | |||
934 | 939 | ||
935 | struct list_head delayed_tr_list; | 940 | struct list_head delayed_tr_list; |
936 | struct list_head delayed_tr_volume_list; | 941 | struct list_head delayed_tr_volume_list; |
942 | u8 temp_sensors_count; | ||
937 | 943 | ||
938 | /* diag buffer support */ | 944 | /* diag buffer support */ |
939 | u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; | 945 | u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; |
@@ -1082,6 +1088,8 @@ int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1082 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 1088 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
1083 | int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1089 | int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
1084 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); | 1090 | *mpi_reply, Mpi2IOUnitPage1_t *config_page); |
1091 | int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | ||
1092 | *mpi_reply, Mpi2IOUnitPage8_t *config_page); | ||
1085 | int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | 1093 | int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, |
1086 | Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, | 1094 | Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, |
1087 | u16 sz); | 1095 | u16 sz); |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 4472c2af9255..e45c4613ef0c 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c | 4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c |
5 | * Copyright (C) 2012-2014 LSI Corporation | 5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -871,6 +872,42 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | |||
871 | } | 872 | } |
872 | 873 | ||
873 | /** | 874 | /** |
875 | * mpt3sas_config_get_iounit_pg8 - obtain iounit page 8 | ||
876 | * @ioc: per adapter object | ||
877 | * @mpi_reply: reply mf payload returned from firmware | ||
878 | * @config_page: contents of the config page | ||
879 | * Context: sleep. | ||
880 | * | ||
881 | * Returns 0 for success, non-zero for failure. | ||
882 | */ | ||
883 | int | ||
884 | mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, | ||
885 | Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page) | ||
886 | { | ||
887 | Mpi2ConfigRequest_t mpi_request; | ||
888 | int r; | ||
889 | |||
890 | memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); | ||
891 | mpi_request.Function = MPI2_FUNCTION_CONFIG; | ||
892 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; | ||
893 | mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; | ||
894 | mpi_request.Header.PageNumber = 8; | ||
895 | mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; | ||
896 | ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); | ||
897 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
898 | MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); | ||
899 | if (r) | ||
900 | goto out; | ||
901 | |||
902 | mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; | ||
903 | r = _config_request(ioc, &mpi_request, mpi_reply, | ||
904 | MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, | ||
905 | sizeof(*config_page)); | ||
906 | out: | ||
907 | return r; | ||
908 | } | ||
909 | |||
910 | /** | ||
874 | * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 | 911 | * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 |
875 | * @ioc: per adapter object | 912 | * @ioc: per adapter object |
876 | * @mpi_reply: reply mf payload returned from firmware | 913 | * @mpi_reply: reply mf payload returned from firmware |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index dca14877d5ab..080c8a76d23d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index 5f3d7fd7c2f8..aee99ce67e54 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h index 4778e7dd98bd..4e8a63fdb304 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_debug.h +++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c | 4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c |
5 | * Copyright (C) 2012-2014 LSI Corporation | 5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 94261ee9e72d..5a97e3286719 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c |
5 | * Copyright (C) 2012-2014 LSI Corporation | 5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
@@ -2392,9 +2393,17 @@ _scsih_host_reset(struct scsi_cmnd *scmd) | |||
2392 | ioc->name, scmd); | 2393 | ioc->name, scmd); |
2393 | scsi_print_command(scmd); | 2394 | scsi_print_command(scmd); |
2394 | 2395 | ||
2396 | if (ioc->is_driver_loading) { | ||
2397 | pr_info(MPT3SAS_FMT "Blocking the host reset\n", | ||
2398 | ioc->name); | ||
2399 | r = FAILED; | ||
2400 | goto out; | ||
2401 | } | ||
2402 | |||
2395 | retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, | 2403 | retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, |
2396 | FORCE_BIG_HAMMER); | 2404 | FORCE_BIG_HAMMER); |
2397 | r = (retval < 0) ? FAILED : SUCCESS; | 2405 | r = (retval < 0) ? FAILED : SUCCESS; |
2406 | out: | ||
2398 | pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n", | 2407 | pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n", |
2399 | ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); | 2408 | ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); |
2400 | 2409 | ||
@@ -3342,6 +3351,31 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, | |||
3342 | } | 3351 | } |
3343 | 3352 | ||
3344 | /** | 3353 | /** |
3354 | * _scsih_temp_threshold_events - display temperature threshold exceeded events | ||
3355 | * @ioc: per adapter object | ||
3356 | * @event_data: the temp threshold event data | ||
3357 | * Context: interrupt time. | ||
3358 | * | ||
3359 | * Return nothing. | ||
3360 | */ | ||
3361 | static void | ||
3362 | _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, | ||
3363 | Mpi2EventDataTemperature_t *event_data) | ||
3364 | { | ||
3365 | if (ioc->temp_sensors_count >= event_data->SensorNum) { | ||
3366 | pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s" | ||
3367 | " exceeded for Sensor: %d !!!\n", ioc->name, | ||
3368 | ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ", | ||
3369 | ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ", | ||
3370 | ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ", | ||
3371 | ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ", | ||
3372 | event_data->SensorNum); | ||
3373 | pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n", | ||
3374 | ioc->name, event_data->CurrentTemperature); | ||
3375 | } | ||
3376 | } | ||
3377 | |||
3378 | /** | ||
3345 | * _scsih_flush_running_cmds - completing outstanding commands. | 3379 | * _scsih_flush_running_cmds - completing outstanding commands. |
3346 | * @ioc: per adapter object | 3380 | * @ioc: per adapter object |
3347 | * | 3381 | * |
@@ -7194,6 +7228,12 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
7194 | case MPI2_EVENT_IR_PHYSICAL_DISK: | 7228 | case MPI2_EVENT_IR_PHYSICAL_DISK: |
7195 | break; | 7229 | break; |
7196 | 7230 | ||
7231 | case MPI2_EVENT_TEMP_THRESHOLD: | ||
7232 | _scsih_temp_threshold_events(ioc, | ||
7233 | (Mpi2EventDataTemperature_t *) | ||
7234 | mpi_reply->EventData); | ||
7235 | break; | ||
7236 | |||
7197 | default: /* ignore the rest */ | 7237 | default: /* ignore the rest */ |
7198 | return 1; | 7238 | return 1; |
7199 | } | 7239 | } |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 3637ae6c0171..efb98afc46e0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c | |||
@@ -3,7 +3,8 @@ | |||
3 | * | 3 | * |
4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c | 4 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c |
5 | * Copyright (C) 2012-2014 LSI Corporation | 5 | * Copyright (C) 2012-2014 LSI Corporation |
6 | * (mailto:DL-MPTFusionLinux@lsi.com) | 6 | * Copyright (C) 2013-2014 Avago Technologies |
7 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c index 8a2dd113f401..b60fd7a3b571 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | |||
@@ -4,7 +4,8 @@ | |||
4 | * | 4 | * |
5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 5 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c |
6 | * Copyright (C) 2012-2014 LSI Corporation | 6 | * Copyright (C) 2012-2014 LSI Corporation |
7 | * (mailto:DL-MPTFusionLinux@lsi.com) | 7 | * Copyright (C) 2013-2014 Avago Technologies |
8 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or | 10 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 11 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h index f681db56c53b..6586a463bea9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h | |||
@@ -5,7 +5,8 @@ | |||
5 | * | 5 | * |
6 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h | 6 | * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h |
7 | * Copyright (C) 2012-2014 LSI Corporation | 7 | * Copyright (C) 2012-2014 LSI Corporation |
8 | * (mailto:DL-MPTFusionLinux@lsi.com) | 8 | * Copyright (C) 2013-2014 Avago Technologies |
9 | * (mailto: MPT-FusionLinux.pdl@avagotech.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
11 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 90abb03c9074..c6077cefbeca 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c | |||
@@ -1441,8 +1441,6 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id) | |||
1441 | return IRQ_RETVAL(handled); | 1441 | return IRQ_RETVAL(handled); |
1442 | } | 1442 | } |
1443 | 1443 | ||
1444 | #undef SPRINTF | ||
1445 | #define SPRINTF(args...) seq_printf(m, ##args) | ||
1446 | 1444 | ||
1447 | static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) | 1445 | static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) |
1448 | { | 1446 | { |
@@ -1458,64 +1456,63 @@ static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
1458 | data = (nsp32_hw_data *)host->hostdata; | 1456 | data = (nsp32_hw_data *)host->hostdata; |
1459 | base = host->io_port; | 1457 | base = host->io_port; |
1460 | 1458 | ||
1461 | SPRINTF("NinjaSCSI-32 status\n\n"); | 1459 | seq_puts(m, "NinjaSCSI-32 status\n\n"); |
1462 | SPRINTF("Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version); | 1460 | seq_printf(m, "Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version); |
1463 | SPRINTF("SCSI host No.: %d\n", hostno); | 1461 | seq_printf(m, "SCSI host No.: %d\n", hostno); |
1464 | SPRINTF("IRQ: %d\n", host->irq); | 1462 | seq_printf(m, "IRQ: %d\n", host->irq); |
1465 | SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); | 1463 | seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); |
1466 | SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); | 1464 | seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); |
1467 | SPRINTF("sg_tablesize: %d\n", host->sg_tablesize); | 1465 | seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize); |
1468 | SPRINTF("Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff); | 1466 | seq_printf(m, "Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff); |
1469 | 1467 | ||
1470 | mode_reg = nsp32_index_read1(base, CHIP_MODE); | 1468 | mode_reg = nsp32_index_read1(base, CHIP_MODE); |
1471 | model = data->pci_devid->driver_data; | 1469 | model = data->pci_devid->driver_data; |
1472 | 1470 | ||
1473 | #ifdef CONFIG_PM | 1471 | #ifdef CONFIG_PM |
1474 | SPRINTF("Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no"); | 1472 | seq_printf(m, "Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no"); |
1475 | #endif | 1473 | #endif |
1476 | SPRINTF("OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]); | 1474 | seq_printf(m, "OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]); |
1477 | 1475 | ||
1478 | spin_lock_irqsave(&(data->Lock), flags); | 1476 | spin_lock_irqsave(&(data->Lock), flags); |
1479 | SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC); | 1477 | seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC); |
1480 | spin_unlock_irqrestore(&(data->Lock), flags); | 1478 | spin_unlock_irqrestore(&(data->Lock), flags); |
1481 | 1479 | ||
1482 | 1480 | ||
1483 | SPRINTF("SDTR status\n"); | 1481 | seq_puts(m, "SDTR status\n"); |
1484 | for (id = 0; id < ARRAY_SIZE(data->target); id++) { | 1482 | for (id = 0; id < ARRAY_SIZE(data->target); id++) { |
1485 | 1483 | ||
1486 | SPRINTF("id %d: ", id); | 1484 | seq_printf(m, "id %d: ", id); |
1487 | 1485 | ||
1488 | if (id == host->this_id) { | 1486 | if (id == host->this_id) { |
1489 | SPRINTF("----- NinjaSCSI-32 host adapter\n"); | 1487 | seq_puts(m, "----- NinjaSCSI-32 host adapter\n"); |
1490 | continue; | 1488 | continue; |
1491 | } | 1489 | } |
1492 | 1490 | ||
1493 | if (data->target[id].sync_flag == SDTR_DONE) { | 1491 | if (data->target[id].sync_flag == SDTR_DONE) { |
1494 | if (data->target[id].period == 0 && | 1492 | if (data->target[id].period == 0 && |
1495 | data->target[id].offset == ASYNC_OFFSET ) { | 1493 | data->target[id].offset == ASYNC_OFFSET ) { |
1496 | SPRINTF("async"); | 1494 | seq_puts(m, "async"); |
1497 | } else { | 1495 | } else { |
1498 | SPRINTF(" sync"); | 1496 | seq_puts(m, " sync"); |
1499 | } | 1497 | } |
1500 | } else { | 1498 | } else { |
1501 | SPRINTF(" none"); | 1499 | seq_puts(m, " none"); |
1502 | } | 1500 | } |
1503 | 1501 | ||
1504 | if (data->target[id].period != 0) { | 1502 | if (data->target[id].period != 0) { |
1505 | 1503 | ||
1506 | speed = 1000000 / (data->target[id].period * 4); | 1504 | speed = 1000000 / (data->target[id].period * 4); |
1507 | 1505 | ||
1508 | SPRINTF(" transfer %d.%dMB/s, offset %d", | 1506 | seq_printf(m, " transfer %d.%dMB/s, offset %d", |
1509 | speed / 1000, | 1507 | speed / 1000, |
1510 | speed % 1000, | 1508 | speed % 1000, |
1511 | data->target[id].offset | 1509 | data->target[id].offset |
1512 | ); | 1510 | ); |
1513 | } | 1511 | } |
1514 | SPRINTF("\n"); | 1512 | seq_putc(m, '\n'); |
1515 | } | 1513 | } |
1516 | return 0; | 1514 | return 0; |
1517 | } | 1515 | } |
1518 | #undef SPRINTF | ||
1519 | 1516 | ||
1520 | 1517 | ||
1521 | 1518 | ||
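This hunk (and the nsp_cs and qla2xxx ones that follow) applies one substitution rule: a constant format string becomes seq_puts(), a single character becomes seq_putc(), and only genuinely formatted output keeps seq_printf(). A minimal show routine illustrating the rule (all names are made up for the example):

        #include <linux/seq_file.h>

        static int demo_show_info(struct seq_file *m, int irq, unsigned long io_base)
        {
                seq_puts(m, "Demo HBA status\n\n");      /* constant string */
                seq_printf(m, "IRQ: %d\n", irq);         /* formatted */
                seq_printf(m, "IO:  0x%lx\n", io_base);  /* formatted */
                seq_putc(m, '\n');                       /* single character */
                return 0;
        }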
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 34aad32829f5..1b6c8833a304 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c | |||
@@ -1364,9 +1364,6 @@ static const char *nsp_info(struct Scsi_Host *shpnt) | |||
1364 | return data->nspinfo; | 1364 | return data->nspinfo; |
1365 | } | 1365 | } |
1366 | 1366 | ||
1367 | #undef SPRINTF | ||
1368 | #define SPRINTF(args...) seq_printf(m, ##args) | ||
1369 | |||
1370 | static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) | 1367 | static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) |
1371 | { | 1368 | { |
1372 | int id; | 1369 | int id; |
@@ -1378,75 +1375,74 @@ static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
1378 | hostno = host->host_no; | 1375 | hostno = host->host_no; |
1379 | data = (nsp_hw_data *)host->hostdata; | 1376 | data = (nsp_hw_data *)host->hostdata; |
1380 | 1377 | ||
1381 | SPRINTF("NinjaSCSI status\n\n"); | 1378 | seq_puts(m, "NinjaSCSI status\n\n" |
1382 | SPRINTF("Driver version: $Revision: 1.23 $\n"); | 1379 | "Driver version: $Revision: 1.23 $\n"); |
1383 | SPRINTF("SCSI host No.: %d\n", hostno); | 1380 | seq_printf(m, "SCSI host No.: %d\n", hostno); |
1384 | SPRINTF("IRQ: %d\n", host->irq); | 1381 | seq_printf(m, "IRQ: %d\n", host->irq); |
1385 | SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); | 1382 | seq_printf(m, "IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); |
1386 | SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); | 1383 | seq_printf(m, "MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); |
1387 | SPRINTF("sg_tablesize: %d\n", host->sg_tablesize); | 1384 | seq_printf(m, "sg_tablesize: %d\n", host->sg_tablesize); |
1388 | 1385 | ||
1389 | SPRINTF("burst transfer mode: "); | 1386 | seq_puts(m, "burst transfer mode: "); |
1390 | switch (nsp_burst_mode) { | 1387 | switch (nsp_burst_mode) { |
1391 | case BURST_IO8: | 1388 | case BURST_IO8: |
1392 | SPRINTF("io8"); | 1389 | seq_puts(m, "io8"); |
1393 | break; | 1390 | break; |
1394 | case BURST_IO32: | 1391 | case BURST_IO32: |
1395 | SPRINTF("io32"); | 1392 | seq_puts(m, "io32"); |
1396 | break; | 1393 | break; |
1397 | case BURST_MEM32: | 1394 | case BURST_MEM32: |
1398 | SPRINTF("mem32"); | 1395 | seq_puts(m, "mem32"); |
1399 | break; | 1396 | break; |
1400 | default: | 1397 | default: |
1401 | SPRINTF("???"); | 1398 | seq_puts(m, "???"); |
1402 | break; | 1399 | break; |
1403 | } | 1400 | } |
1404 | SPRINTF("\n"); | 1401 | seq_putc(m, '\n'); |
1405 | 1402 | ||
1406 | 1403 | ||
1407 | spin_lock_irqsave(&(data->Lock), flags); | 1404 | spin_lock_irqsave(&(data->Lock), flags); |
1408 | SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC); | 1405 | seq_printf(m, "CurrentSC: 0x%p\n\n", data->CurrentSC); |
1409 | spin_unlock_irqrestore(&(data->Lock), flags); | 1406 | spin_unlock_irqrestore(&(data->Lock), flags); |
1410 | 1407 | ||
1411 | SPRINTF("SDTR status\n"); | 1408 | seq_puts(m, "SDTR status\n"); |
1412 | for(id = 0; id < ARRAY_SIZE(data->Sync); id++) { | 1409 | for(id = 0; id < ARRAY_SIZE(data->Sync); id++) { |
1413 | 1410 | ||
1414 | SPRINTF("id %d: ", id); | 1411 | seq_printf(m, "id %d: ", id); |
1415 | 1412 | ||
1416 | if (id == host->this_id) { | 1413 | if (id == host->this_id) { |
1417 | SPRINTF("----- NinjaSCSI-3 host adapter\n"); | 1414 | seq_puts(m, "----- NinjaSCSI-3 host adapter\n"); |
1418 | continue; | 1415 | continue; |
1419 | } | 1416 | } |
1420 | 1417 | ||
1421 | switch(data->Sync[id].SyncNegotiation) { | 1418 | switch(data->Sync[id].SyncNegotiation) { |
1422 | case SYNC_OK: | 1419 | case SYNC_OK: |
1423 | SPRINTF(" sync"); | 1420 | seq_puts(m, " sync"); |
1424 | break; | 1421 | break; |
1425 | case SYNC_NG: | 1422 | case SYNC_NG: |
1426 | SPRINTF("async"); | 1423 | seq_puts(m, "async"); |
1427 | break; | 1424 | break; |
1428 | case SYNC_NOT_YET: | 1425 | case SYNC_NOT_YET: |
1429 | SPRINTF(" none"); | 1426 | seq_puts(m, " none"); |
1430 | break; | 1427 | break; |
1431 | default: | 1428 | default: |
1432 | SPRINTF("?????"); | 1429 | seq_puts(m, "?????"); |
1433 | break; | 1430 | break; |
1434 | } | 1431 | } |
1435 | 1432 | ||
1436 | if (data->Sync[id].SyncPeriod != 0) { | 1433 | if (data->Sync[id].SyncPeriod != 0) { |
1437 | speed = 1000000 / (data->Sync[id].SyncPeriod * 4); | 1434 | speed = 1000000 / (data->Sync[id].SyncPeriod * 4); |
1438 | 1435 | ||
1439 | SPRINTF(" transfer %d.%dMB/s, offset %d", | 1436 | seq_printf(m, " transfer %d.%dMB/s, offset %d", |
1440 | speed / 1000, | 1437 | speed / 1000, |
1441 | speed % 1000, | 1438 | speed % 1000, |
1442 | data->Sync[id].SyncOffset | 1439 | data->Sync[id].SyncOffset |
1443 | ); | 1440 | ); |
1444 | } | 1441 | } |
1445 | SPRINTF("\n"); | 1442 | seq_putc(m, '\n'); |
1446 | } | 1443 | } |
1447 | return 0; | 1444 | return 0; |
1448 | } | 1445 | } |
1449 | #undef SPRINTF | ||
1450 | 1446 | ||
1451 | /*---------------------------------------------------------------*/ | 1447 | /*---------------------------------------------------------------*/ |
1452 | /* error handler */ | 1448 | /* error handler */ |
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 2ca39b8e7166..15cf074ffa3c 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
@@ -23,10 +23,10 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) | |||
23 | 23 | ||
24 | mutex_lock(&ha->fce_mutex); | 24 | mutex_lock(&ha->fce_mutex); |
25 | 25 | ||
26 | seq_printf(s, "FCE Trace Buffer\n"); | 26 | seq_puts(s, "FCE Trace Buffer\n"); |
27 | seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); | 27 | seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); |
28 | seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); | 28 | seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); |
29 | seq_printf(s, "FCE Enable Registers\n"); | 29 | seq_puts(s, "FCE Enable Registers\n"); |
30 | seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", | 30 | seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", |
31 | ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], | 31 | ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], |
32 | ha->fce_mb[5], ha->fce_mb[6]); | 32 | ha->fce_mb[5], ha->fce_mb[6]); |
@@ -38,11 +38,11 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) | |||
38 | seq_printf(s, "\n%llx: ", | 38 | seq_printf(s, "\n%llx: ", |
39 | (unsigned long long)((cnt * 4) + fce_start)); | 39 | (unsigned long long)((cnt * 4) + fce_start)); |
40 | else | 40 | else |
41 | seq_printf(s, " "); | 41 | seq_putc(s, ' '); |
42 | seq_printf(s, "%08x", *fce++); | 42 | seq_printf(s, "%08x", *fce++); |
43 | } | 43 | } |
44 | 44 | ||
45 | seq_printf(s, "\nEnd\n"); | 45 | seq_puts(s, "\nEnd\n"); |
46 | 46 | ||
47 | mutex_unlock(&ha->fce_mutex); | 47 | mutex_unlock(&ha->fce_mutex); |
48 | 48 | ||
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 9b3829931f40..c9c3b579eece 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -531,7 +531,7 @@ void scsi_log_send(struct scsi_cmnd *cmd) | |||
531 | * | 531 | * |
532 | * 3: same as 2 | 532 | * 3: same as 2 |
533 | * | 533 | * |
534 | * 4: same as 3 plus dump extra junk | 534 | * 4: same as 3 |
535 | */ | 535 | */ |
536 | if (unlikely(scsi_logging_level)) { | 536 | if (unlikely(scsi_logging_level)) { |
537 | level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, | 537 | level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, |
@@ -540,13 +540,6 @@ void scsi_log_send(struct scsi_cmnd *cmd) | |||
540 | scmd_printk(KERN_INFO, cmd, | 540 | scmd_printk(KERN_INFO, cmd, |
541 | "Send: scmd 0x%p\n", cmd); | 541 | "Send: scmd 0x%p\n", cmd); |
542 | scsi_print_command(cmd); | 542 | scsi_print_command(cmd); |
543 | if (level > 3) { | ||
544 | printk(KERN_INFO "buffer = 0x%p, bufflen = %d," | ||
545 | " queuecommand 0x%p\n", | ||
546 | scsi_sglist(cmd), scsi_bufflen(cmd), | ||
547 | cmd->device->host->hostt->queuecommand); | ||
548 | |||
549 | } | ||
550 | } | 543 | } |
551 | } | 544 | } |
552 | } | 545 | } |
@@ -572,7 +565,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
572 | SCSI_LOG_MLCOMPLETE_BITS); | 565 | SCSI_LOG_MLCOMPLETE_BITS); |
573 | if (((level > 0) && (cmd->result || disposition != SUCCESS)) || | 566 | if (((level > 0) && (cmd->result || disposition != SUCCESS)) || |
574 | (level > 1)) { | 567 | (level > 1)) { |
575 | scsi_print_result(cmd, "Done: ", disposition); | 568 | scsi_print_result(cmd, "Done", disposition); |
576 | scsi_print_command(cmd); | 569 | scsi_print_command(cmd); |
577 | if (status_byte(cmd->result) & CHECK_CONDITION) | 570 | if (status_byte(cmd->result) & CHECK_CONDITION) |
578 | scsi_print_sense(cmd); | 571 | scsi_print_sense(cmd); |
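scsi_log_send() and scsi_log_completion() derive a per-area level from the packed scsi_logging_level word via SCSI_LOG_LEVEL(shift, bits); the level-4 buffer dump goes away, matching the updated comment. A stand-alone illustration of that bit-field extraction (the shift/width values below are made up; the real ones live in drivers/scsi/scsi_logging.h):

        #include <stdio.h>

        /* Pull one logging area's level out of a packed 32-bit word. */
        static unsigned int log_level(unsigned int word, int shift, int bits)
        {
                return (word >> shift) & ((1u << bits) - 1);
        }

        int main(void)
        {
                unsigned int scsi_logging_level = 0x1c0;   /* example value */

                /* e.g. an area occupying 3 bits starting at bit 6 */
                printf("area level = %u\n", log_level(scsi_logging_level, 6, 3));
                return 0;
        }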
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 4aca1b0378c2..113232135d27 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -80,6 +80,8 @@ static const char *scsi_debug_version_date = "20141022"; | |||
80 | #define INVALID_FIELD_IN_PARAM_LIST 0x26 | 80 | #define INVALID_FIELD_IN_PARAM_LIST 0x26 |
81 | #define UA_RESET_ASC 0x29 | 81 | #define UA_RESET_ASC 0x29 |
82 | #define UA_CHANGED_ASC 0x2a | 82 | #define UA_CHANGED_ASC 0x2a |
83 | #define TARGET_CHANGED_ASC 0x3f | ||
84 | #define LUNS_CHANGED_ASCQ 0x0e | ||
83 | #define INSUFF_RES_ASC 0x55 | 85 | #define INSUFF_RES_ASC 0x55 |
84 | #define INSUFF_RES_ASCQ 0x3 | 86 | #define INSUFF_RES_ASCQ 0x3 |
85 | #define POWER_ON_RESET_ASCQ 0x0 | 87 | #define POWER_ON_RESET_ASCQ 0x0 |
@@ -91,6 +93,8 @@ static const char *scsi_debug_version_date = "20141022"; | |||
91 | #define THRESHOLD_EXCEEDED 0x5d | 93 | #define THRESHOLD_EXCEEDED 0x5d |
92 | #define LOW_POWER_COND_ON 0x5e | 94 | #define LOW_POWER_COND_ON 0x5e |
93 | #define MISCOMPARE_VERIFY_ASC 0x1d | 95 | #define MISCOMPARE_VERIFY_ASC 0x1d |
96 | #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */ | ||
97 | #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16 | ||
94 | 98 | ||
95 | /* Additional Sense Code Qualifier (ASCQ) */ | 99 | /* Additional Sense Code Qualifier (ASCQ) */ |
96 | #define ACK_NAK_TO 0x3 | 100 | #define ACK_NAK_TO 0x3 |
@@ -180,7 +184,10 @@ static const char *scsi_debug_version_date = "20141022"; | |||
180 | #define SDEBUG_UA_BUS_RESET 1 | 184 | #define SDEBUG_UA_BUS_RESET 1 |
181 | #define SDEBUG_UA_MODE_CHANGED 2 | 185 | #define SDEBUG_UA_MODE_CHANGED 2 |
182 | #define SDEBUG_UA_CAPACITY_CHANGED 3 | 186 | #define SDEBUG_UA_CAPACITY_CHANGED 3 |
183 | #define SDEBUG_NUM_UAS 4 | 187 | #define SDEBUG_UA_LUNS_CHANGED 4 |
188 | #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */ | ||
189 | #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6 | ||
190 | #define SDEBUG_NUM_UAS 7 | ||
184 | 191 | ||
185 | /* for check_readiness() */ | 192 | /* for check_readiness() */ |
186 | #define UAS_ONLY 1 /* check for UAs only */ | 193 | #define UAS_ONLY 1 /* check for UAs only */ |
@@ -326,6 +333,7 @@ static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *); | |||
326 | static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *); | 333 | static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *); |
327 | static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); | 334 | static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); |
328 | static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); | 335 | static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); |
336 | static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); | ||
329 | 337 | ||
330 | struct opcode_info_t { | 338 | struct opcode_info_t { |
331 | u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff | 339 | u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff |
@@ -480,8 +488,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { | |||
480 | {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10, | 488 | {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10, |
481 | NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, | 489 | NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, |
482 | 0, 0, 0, 0, 0, 0} }, | 490 | 0, 0, 0, 0, 0, 0} }, |
483 | {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */ | 491 | {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL, |
484 | {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, | 492 | {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, |
493 | 0, 0, 0, 0} }, /* WRITE_BUFFER */ | ||
485 | {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10, | 494 | {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10, |
486 | write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, | 495 | write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, |
487 | 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, | 496 | 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, |
@@ -782,6 +791,22 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) | |||
782 | /* return -ENOTTY; // correct return but upsets fdisk */ | 791 | /* return -ENOTTY; // correct return but upsets fdisk */ |
783 | } | 792 | } |
784 | 793 | ||
794 | static void clear_luns_changed_on_target(struct sdebug_dev_info *devip) | ||
795 | { | ||
796 | struct sdebug_host_info *sdhp; | ||
797 | struct sdebug_dev_info *dp; | ||
798 | |||
799 | spin_lock(&sdebug_host_list_lock); | ||
800 | list_for_each_entry(sdhp, &sdebug_host_list, host_list) { | ||
801 | list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) { | ||
802 | if ((devip->sdbg_host == dp->sdbg_host) && | ||
803 | (devip->target == dp->target)) | ||
804 | clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm); | ||
805 | } | ||
806 | } | ||
807 | spin_unlock(&sdebug_host_list_lock); | ||
808 | } | ||
809 | |||
785 | static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, | 810 | static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, |
786 | struct sdebug_dev_info * devip) | 811 | struct sdebug_dev_info * devip) |
787 | { | 812 | { |
@@ -817,6 +842,36 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only, | |||
817 | if (debug) | 842 | if (debug) |
818 | cp = "capacity data changed"; | 843 | cp = "capacity data changed"; |
819 | break; | 844 | break; |
845 | case SDEBUG_UA_MICROCODE_CHANGED: | ||
846 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
847 | TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ); | ||
848 | if (debug) | ||
849 | cp = "microcode has been changed"; | ||
850 | break; | ||
851 | case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET: | ||
852 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
853 | TARGET_CHANGED_ASC, | ||
854 | MICROCODE_CHANGED_WO_RESET_ASCQ); | ||
855 | if (debug) | ||
856 | cp = "microcode has been changed without reset"; | ||
857 | break; | ||
858 | case SDEBUG_UA_LUNS_CHANGED: | ||
859 | /* | ||
860 | * SPC-3 behavior is to report a UNIT ATTENTION with | ||
861 | * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN | ||
862 | * on the target, until a REPORT LUNS command is | ||
863 | * received. SPC-4 behavior is to report it only once. | ||
864 | * NOTE: scsi_debug_scsi_level does not use the same | ||
865 | * values as struct scsi_device->scsi_level. | ||
866 | */ | ||
867 | if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */ | ||
868 | clear_luns_changed_on_target(devip); | ||
869 | mk_sense_buffer(SCpnt, UNIT_ATTENTION, | ||
870 | TARGET_CHANGED_ASC, | ||
871 | LUNS_CHANGED_ASCQ); | ||
872 | if (debug) | ||
873 | cp = "reported luns data has changed"; | ||
874 | break; | ||
820 | default: | 875 | default: |
821 | pr_warn("%s: unexpected unit attention code=%d\n", | 876 | pr_warn("%s: unexpected unit attention code=%d\n", |
822 | __func__, k); | 877 | __func__, k); |
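Once mk_sense_buffer() has run for SDEBUG_UA_LUNS_CHANGED, the initiator sees ordinary sense data carrying the TARGET_CHANGED_ASC/LUNS_CHANGED_ASCQ pair defined above. As a point of reference only, a sketch of that sense data, assuming the fixed format (response code 0x70) and the standard meaning of 3Fh/0Eh, REPORTED LUNS DATA HAS CHANGED:

#include <stdio.h>

/* Hedged sketch: the unit attention queued for SDEBUG_UA_LUNS_CHANGED,
 * laid out as fixed-format sense data. */
int main(void)
{
	unsigned char sense[18] = {
		0x70,			/* response code: current, fixed format */
		0, 0x06, 0, 0, 0, 0,	/* byte 2 low nibble: UNIT ATTENTION */
		10,			/* additional sense length */
		0, 0, 0, 0,
		0x3f, 0x0e,		/* ASC/ASCQ: reported LUNs data has changed */
	};

	printf("key=%#x asc=%#x ascq=%#x\n",
	       sense[2] & 0x0f, sense[12], sense[13]);
	return 0;
}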
@@ -3033,6 +3088,55 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
3033 | return resp_write_same(scp, lba, num, ei_lba, unmap, ndob); | 3088 | return resp_write_same(scp, lba, num, ei_lba, unmap, ndob); |
3034 | } | 3089 | } |
3035 | 3090 | ||
3091 | /* Note the mode field is in the same position as the (lower) service action | ||
3092 | * field. For the Report supported operation codes command, SPC-4 suggests | ||
3093 | * each mode of this command should be reported separately; for future. */ | ||
3094 | static int | ||
3095 | resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | ||
3096 | { | ||
3097 | u8 *cmd = scp->cmnd; | ||
3098 | struct scsi_device *sdp = scp->device; | ||
3099 | struct sdebug_dev_info *dp; | ||
3100 | u8 mode; | ||
3101 | |||
3102 | mode = cmd[1] & 0x1f; | ||
3103 | switch (mode) { | ||
3104 | case 0x4: /* download microcode (MC) and activate (ACT) */ | ||
3105 | /* set UAs on this device only */ | ||
3106 | set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm); | ||
3107 | set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm); | ||
3108 | break; | ||
3109 | case 0x5: /* download MC, save and ACT */ | ||
3110 | set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm); | ||
3111 | break; | ||
3112 | case 0x6: /* download MC with offsets and ACT */ | ||
3113 | /* set UAs on most devices (LUs) in this target */ | ||
3114 | list_for_each_entry(dp, | ||
3115 | &devip->sdbg_host->dev_info_list, | ||
3116 | dev_list) | ||
3117 | if (dp->target == sdp->id) { | ||
3118 | set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm); | ||
3119 | if (devip != dp) | ||
3120 | set_bit(SDEBUG_UA_MICROCODE_CHANGED, | ||
3121 | dp->uas_bm); | ||
3122 | } | ||
3123 | break; | ||
3124 | case 0x7: /* download MC with offsets, save, and ACT */ | ||
3125 | /* set UA on all devices (LUs) in this target */ | ||
3126 | list_for_each_entry(dp, | ||
3127 | &devip->sdbg_host->dev_info_list, | ||
3128 | dev_list) | ||
3129 | if (dp->target == sdp->id) | ||
3130 | set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, | ||
3131 | dp->uas_bm); | ||
3132 | break; | ||
3133 | default: | ||
3134 | /* do nothing for this command for other mode values */ | ||
3135 | break; | ||
3136 | } | ||
3137 | return 0; | ||
3138 | } | ||
3139 | |||
3036 | static int | 3140 | static int |
3037 | resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | 3141 | resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) |
3038 | { | 3142 | { |
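resp_write_buffer() keys entirely off the mode field in byte 1 of the CDB, mirroring how microcode download is requested in practice. For orientation, a sketch of a WRITE BUFFER CDB for mode 5h (download microcode, save and activate) following the SPC layout; the buffer ID and length values are purely illustrative:

#include <string.h>

/* Hedged sketch: a WRITE BUFFER CDB as resp_write_buffer() would parse it. */
static void build_write_buffer_cdb(unsigned char cdb[10])
{
	memset(cdb, 0, 10);
	cdb[0] = 0x3b;		/* WRITE BUFFER opcode */
	cdb[1] = 0x05;		/* mode 0x5; scsi_debug masks this with 0x1f */
	cdb[2] = 0x00;		/* buffer ID (illustrative) */
	/* bytes 3..5: buffer offset; bytes 6..8: parameter list length, MSB first */
	cdb[7] = 0x10;		/* 0x001000 = 4 KiB payload (illustrative) */
}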
@@ -3229,6 +3333,7 @@ static int resp_report_luns(struct scsi_cmnd * scp, | |||
3229 | unsigned char arr[SDEBUG_RLUN_ARR_SZ]; | 3333 | unsigned char arr[SDEBUG_RLUN_ARR_SZ]; |
3230 | unsigned char * max_addr; | 3334 | unsigned char * max_addr; |
3231 | 3335 | ||
3336 | clear_luns_changed_on_target(devip); | ||
3232 | alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); | 3337 | alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); |
3233 | shortish = (alloc_len < 4); | 3338 | shortish = (alloc_len < 4); |
3234 | if (shortish || (select_report > 2)) { | 3339 | if (shortish || (select_report > 2)) { |
@@ -4369,10 +4474,27 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, | |||
4369 | size_t count) | 4474 | size_t count) |
4370 | { | 4475 | { |
4371 | int n; | 4476 | int n; |
4477 | bool changed; | ||
4372 | 4478 | ||
4373 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { | 4479 | if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { |
4480 | changed = (scsi_debug_max_luns != n); | ||
4374 | scsi_debug_max_luns = n; | 4481 | scsi_debug_max_luns = n; |
4375 | sdebug_max_tgts_luns(); | 4482 | sdebug_max_tgts_luns(); |
4483 | if (changed && (scsi_debug_scsi_level >= 5)) { /* >= SPC-3 */ | ||
4484 | struct sdebug_host_info *sdhp; | ||
4485 | struct sdebug_dev_info *dp; | ||
4486 | |||
4487 | spin_lock(&sdebug_host_list_lock); | ||
4488 | list_for_each_entry(sdhp, &sdebug_host_list, | ||
4489 | host_list) { | ||
4490 | list_for_each_entry(dp, &sdhp->dev_info_list, | ||
4491 | dev_list) { | ||
4492 | set_bit(SDEBUG_UA_LUNS_CHANGED, | ||
4493 | dp->uas_bm); | ||
4494 | } | ||
4495 | } | ||
4496 | spin_unlock(&sdebug_host_list_lock); | ||
4497 | } | ||
4376 | return count; | 4498 | return count; |
4377 | } | 4499 | } |
4378 | return -EINVAL; | 4500 | return -EINVAL; |
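The user-visible effect of this max_luns_store() change: any change to max_luns now raises a REPORTED LUNS DATA HAS CHANGED unit attention on every scsi_debug LU when the emulated SCSI level is SPC-3 or higher. A minimal user-space sketch of triggering it; the sysfs path assumes scsi_debug's usual driver attributes on the pseudo bus:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed location of the scsi_debug driver attribute. */
	int fd = open("/sys/bus/pseudo/drivers/scsi_debug/max_luns", O_WRONLY);

	if (fd < 0) {
		perror("open max_luns");
		return 1;
	}
	/* Changing the value (e.g. to 4) should queue the LUNS_CHANGED UA. */
	if (write(fd, "4", 1) != 1)
		perror("write max_luns");
	close(fd);
	return 0;
}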
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 8afb01604d51..4cdaffca17fc 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -124,41 +124,37 @@ scmd_eh_abort_handler(struct work_struct *work) | |||
124 | if (scsi_host_eh_past_deadline(sdev->host)) { | 124 | if (scsi_host_eh_past_deadline(sdev->host)) { |
125 | SCSI_LOG_ERROR_RECOVERY(3, | 125 | SCSI_LOG_ERROR_RECOVERY(3, |
126 | scmd_printk(KERN_INFO, scmd, | 126 | scmd_printk(KERN_INFO, scmd, |
127 | "scmd %p eh timeout, not aborting\n", | 127 | "eh timeout, not aborting\n")); |
128 | scmd)); | ||
129 | } else { | 128 | } else { |
130 | SCSI_LOG_ERROR_RECOVERY(3, | 129 | SCSI_LOG_ERROR_RECOVERY(3, |
131 | scmd_printk(KERN_INFO, scmd, | 130 | scmd_printk(KERN_INFO, scmd, |
132 | "aborting command %p\n", scmd)); | 131 | "aborting command\n")); |
133 | rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); | 132 | rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); |
134 | if (rtn == SUCCESS) { | 133 | if (rtn == SUCCESS) { |
135 | set_host_byte(scmd, DID_TIME_OUT); | 134 | set_host_byte(scmd, DID_TIME_OUT); |
136 | if (scsi_host_eh_past_deadline(sdev->host)) { | 135 | if (scsi_host_eh_past_deadline(sdev->host)) { |
137 | SCSI_LOG_ERROR_RECOVERY(3, | 136 | SCSI_LOG_ERROR_RECOVERY(3, |
138 | scmd_printk(KERN_INFO, scmd, | 137 | scmd_printk(KERN_INFO, scmd, |
139 | "scmd %p eh timeout, " | 138 | "eh timeout, not retrying " |
140 | "not retrying aborted " | 139 | "aborted command\n")); |
141 | "command\n", scmd)); | ||
142 | } else if (!scsi_noretry_cmd(scmd) && | 140 | } else if (!scsi_noretry_cmd(scmd) && |
143 | (++scmd->retries <= scmd->allowed)) { | 141 | (++scmd->retries <= scmd->allowed)) { |
144 | SCSI_LOG_ERROR_RECOVERY(3, | 142 | SCSI_LOG_ERROR_RECOVERY(3, |
145 | scmd_printk(KERN_WARNING, scmd, | 143 | scmd_printk(KERN_WARNING, scmd, |
146 | "scmd %p retry " | 144 | "retry aborted command\n")); |
147 | "aborted command\n", scmd)); | ||
148 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); | 145 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); |
149 | return; | 146 | return; |
150 | } else { | 147 | } else { |
151 | SCSI_LOG_ERROR_RECOVERY(3, | 148 | SCSI_LOG_ERROR_RECOVERY(3, |
152 | scmd_printk(KERN_WARNING, scmd, | 149 | scmd_printk(KERN_WARNING, scmd, |
153 | "scmd %p finish " | 150 | "finish aborted command\n")); |
154 | "aborted command\n", scmd)); | ||
155 | scsi_finish_command(scmd); | 151 | scsi_finish_command(scmd); |
156 | return; | 152 | return; |
157 | } | 153 | } |
158 | } else { | 154 | } else { |
159 | SCSI_LOG_ERROR_RECOVERY(3, | 155 | SCSI_LOG_ERROR_RECOVERY(3, |
160 | scmd_printk(KERN_INFO, scmd, | 156 | scmd_printk(KERN_INFO, scmd, |
161 | "scmd %p abort %s\n", scmd, | 157 | "cmd abort %s\n", |
162 | (rtn == FAST_IO_FAIL) ? | 158 | (rtn == FAST_IO_FAIL) ? |
163 | "not send" : "failed")); | 159 | "not send" : "failed")); |
164 | } | 160 | } |
@@ -167,8 +163,7 @@ scmd_eh_abort_handler(struct work_struct *work) | |||
167 | if (!scsi_eh_scmd_add(scmd, 0)) { | 163 | if (!scsi_eh_scmd_add(scmd, 0)) { |
168 | SCSI_LOG_ERROR_RECOVERY(3, | 164 | SCSI_LOG_ERROR_RECOVERY(3, |
169 | scmd_printk(KERN_WARNING, scmd, | 165 | scmd_printk(KERN_WARNING, scmd, |
170 | "scmd %p terminate " | 166 | "terminate aborted command\n")); |
171 | "aborted command\n", scmd)); | ||
172 | set_host_byte(scmd, DID_TIME_OUT); | 167 | set_host_byte(scmd, DID_TIME_OUT); |
173 | scsi_finish_command(scmd); | 168 | scsi_finish_command(scmd); |
174 | } | 169 | } |
@@ -194,7 +189,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) | |||
194 | scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED; | 189 | scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED; |
195 | SCSI_LOG_ERROR_RECOVERY(3, | 190 | SCSI_LOG_ERROR_RECOVERY(3, |
196 | scmd_printk(KERN_INFO, scmd, | 191 | scmd_printk(KERN_INFO, scmd, |
197 | "scmd %p previous abort failed\n", scmd)); | 192 | "previous abort failed\n")); |
198 | BUG_ON(delayed_work_pending(&scmd->abort_work)); | 193 | BUG_ON(delayed_work_pending(&scmd->abort_work)); |
199 | return FAILED; | 194 | return FAILED; |
200 | } | 195 | } |
@@ -208,8 +203,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) | |||
208 | spin_unlock_irqrestore(shost->host_lock, flags); | 203 | spin_unlock_irqrestore(shost->host_lock, flags); |
209 | SCSI_LOG_ERROR_RECOVERY(3, | 204 | SCSI_LOG_ERROR_RECOVERY(3, |
210 | scmd_printk(KERN_INFO, scmd, | 205 | scmd_printk(KERN_INFO, scmd, |
211 | "scmd %p not aborting, host in recovery\n", | 206 | "not aborting, host in recovery\n")); |
212 | scmd)); | ||
213 | return FAILED; | 207 | return FAILED; |
214 | } | 208 | } |
215 | 209 | ||
@@ -219,8 +213,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) | |||
219 | 213 | ||
220 | scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; | 214 | scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; |
221 | SCSI_LOG_ERROR_RECOVERY(3, | 215 | SCSI_LOG_ERROR_RECOVERY(3, |
222 | scmd_printk(KERN_INFO, scmd, | 216 | scmd_printk(KERN_INFO, scmd, "abort scheduled\n")); |
223 | "scmd %p abort scheduled\n", scmd)); | ||
224 | queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100); | 217 | queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100); |
225 | return SUCCESS; | 218 | return SUCCESS; |
226 | } | 219 | } |
@@ -737,8 +730,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) | |||
737 | struct completion *eh_action; | 730 | struct completion *eh_action; |
738 | 731 | ||
739 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, | 732 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
740 | "%s scmd: %p result: %x\n", | 733 | "%s result: %x\n", __func__, scmd->result)); |
741 | __func__, scmd, scmd->result)); | ||
742 | 734 | ||
743 | eh_action = scmd->device->host->eh_action; | 735 | eh_action = scmd->device->host->eh_action; |
744 | if (eh_action) | 736 | if (eh_action) |
@@ -868,6 +860,7 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) | |||
868 | 860 | ||
869 | /** | 861 | /** |
870 | * scsi_try_to_abort_cmd - Ask host to abort a SCSI command | 862 | * scsi_try_to_abort_cmd - Ask host to abort a SCSI command |
863 | * @hostt: SCSI driver host template | ||
871 | * @scmd: SCSI cmd used to send a target reset | 864 | * @scmd: SCSI cmd used to send a target reset |
872 | * | 865 | * |
873 | * Return value: | 866 | * Return value: |
@@ -1052,8 +1045,8 @@ retry: | |||
1052 | scsi_log_completion(scmd, rtn); | 1045 | scsi_log_completion(scmd, rtn); |
1053 | 1046 | ||
1054 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, | 1047 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
1055 | "%s: scmd: %p, timeleft: %ld\n", | 1048 | "%s timeleft: %ld\n", |
1056 | __func__, scmd, timeleft)); | 1049 | __func__, timeleft)); |
1057 | 1050 | ||
1058 | /* | 1051 | /* |
1059 | * If there is time left scsi_eh_done got called, and we will examine | 1052 | * If there is time left scsi_eh_done got called, and we will examine |
@@ -1192,8 +1185,7 @@ int scsi_eh_get_sense(struct list_head *work_q, | |||
1192 | continue; | 1185 | continue; |
1193 | 1186 | ||
1194 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, | 1187 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
1195 | "sense requested for %p result %x\n", | 1188 | "sense requested, result %x\n", scmd->result)); |
1196 | scmd, scmd->result)); | ||
1197 | SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd)); | 1189 | SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd)); |
1198 | 1190 | ||
1199 | rtn = scsi_decide_disposition(scmd); | 1191 | rtn = scsi_decide_disposition(scmd); |
@@ -1235,7 +1227,7 @@ retry_tur: | |||
1235 | scmd->device->eh_timeout, 0); | 1227 | scmd->device->eh_timeout, 0); |
1236 | 1228 | ||
1237 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, | 1229 | SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, |
1238 | "%s: scmd %p rtn %x\n", __func__, scmd, rtn)); | 1230 | "%s return: %x\n", __func__, rtn)); |
1239 | 1231 | ||
1240 | switch (rtn) { | 1232 | switch (rtn) { |
1241 | case NEEDS_RETRY: | 1233 | case NEEDS_RETRY: |
@@ -2092,8 +2084,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q) | |||
2092 | (++scmd->retries <= scmd->allowed)) { | 2084 | (++scmd->retries <= scmd->allowed)) { |
2093 | SCSI_LOG_ERROR_RECOVERY(3, | 2085 | SCSI_LOG_ERROR_RECOVERY(3, |
2094 | scmd_printk(KERN_INFO, scmd, | 2086 | scmd_printk(KERN_INFO, scmd, |
2095 | "%s: flush retry cmd: %p\n", | 2087 | "%s: flush retry cmd\n", |
2096 | current->comm, scmd)); | 2088 | current->comm)); |
2097 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); | 2089 | scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); |
2098 | } else { | 2090 | } else { |
2099 | /* | 2091 | /* |
@@ -2105,8 +2097,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q) | |||
2105 | scmd->result |= (DRIVER_TIMEOUT << 24); | 2097 | scmd->result |= (DRIVER_TIMEOUT << 24); |
2106 | SCSI_LOG_ERROR_RECOVERY(3, | 2098 | SCSI_LOG_ERROR_RECOVERY(3, |
2107 | scmd_printk(KERN_INFO, scmd, | 2099 | scmd_printk(KERN_INFO, scmd, |
2108 | "%s: flush finish cmd: %p\n", | 2100 | "%s: flush finish cmd\n", |
2109 | current->comm, scmd)); | 2101 | current->comm)); |
2110 | scsi_finish_command(scmd); | 2102 | scsi_finish_command(scmd); |
2111 | } | 2103 | } |
2112 | } | 2104 | } |
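All of the scsi_error.c hunks follow one pattern: the scmd pointer is dropped from the format strings because scmd_printk(), reworked in scsi_logging.c below, already prefixes every line with the disk name and request tag. A hedged sketch of what a caller now looks like; the retry wording here is illustrative, not a line from this patch:

/* Hedged sketch: per-command logging without printing kernel pointers. */
static void note_retry(struct scsi_cmnd *scmd)
{
	SCSI_LOG_ERROR_RECOVERY(3,
		scmd_printk(KERN_WARNING, scmd,
			    "retrying, attempt %d of %d\n",
			    scmd->retries, scmd->allowed));
}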
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c new file mode 100644 index 000000000000..bd70339c1242 --- /dev/null +++ b/drivers/scsi/scsi_logging.c | |||
@@ -0,0 +1,485 @@ | |||
1 | /* | ||
2 | * scsi_logging.c | ||
3 | * | ||
4 | * Copyright (C) 2014 SUSE Linux Products GmbH | ||
5 | * Copyright (C) 2014 Hannes Reinecke <hare@suse.de> | ||
6 | * | ||
7 | * This file is released under the GPLv2 | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/atomic.h> | ||
12 | |||
13 | #include <scsi/scsi.h> | ||
14 | #include <scsi/scsi_cmnd.h> | ||
15 | #include <scsi/scsi_device.h> | ||
16 | #include <scsi/scsi_eh.h> | ||
17 | #include <scsi/scsi_dbg.h> | ||
18 | |||
19 | #define SCSI_LOG_SPOOLSIZE 4096 | ||
20 | |||
21 | #if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG | ||
22 | #warning SCSI logging bitmask too large | ||
23 | #endif | ||
24 | |||
25 | struct scsi_log_buf { | ||
26 | char buffer[SCSI_LOG_SPOOLSIZE]; | ||
27 | unsigned long map; | ||
28 | }; | ||
29 | |||
30 | static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log); | ||
31 | |||
32 | static char *scsi_log_reserve_buffer(size_t *len) | ||
33 | { | ||
34 | struct scsi_log_buf *buf; | ||
35 | unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE; | ||
36 | unsigned long idx = 0; | ||
37 | |||
38 | preempt_disable(); | ||
39 | buf = this_cpu_ptr(&scsi_format_log); | ||
40 | idx = find_first_zero_bit(&buf->map, map_bits); | ||
41 | if (likely(idx < map_bits)) { | ||
42 | while (test_and_set_bit(idx, &buf->map)) { | ||
43 | idx = find_next_zero_bit(&buf->map, map_bits, idx); | ||
44 | if (idx >= map_bits) | ||
45 | break; | ||
46 | } | ||
47 | } | ||
48 | if (WARN_ON(idx >= map_bits)) { | ||
49 | preempt_enable(); | ||
50 | return NULL; | ||
51 | } | ||
52 | *len = SCSI_LOG_BUFSIZE; | ||
53 | return buf->buffer + idx * SCSI_LOG_BUFSIZE; | ||
54 | } | ||
55 | |||
56 | static void scsi_log_release_buffer(char *bufptr) | ||
57 | { | ||
58 | struct scsi_log_buf *buf; | ||
59 | unsigned long idx; | ||
60 | int ret; | ||
61 | |||
62 | buf = this_cpu_ptr(&scsi_format_log); | ||
63 | if (bufptr >= buf->buffer && | ||
64 | bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) { | ||
65 | idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE; | ||
66 | ret = test_and_clear_bit(idx, &buf->map); | ||
67 | WARN_ON(!ret); | ||
68 | } | ||
69 | preempt_enable(); | ||
70 | } | ||
71 | |||
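These two helpers form a small per-CPU allocator: each CPU owns a SCSI_LOG_SPOOLSIZE spool carved into SCSI_LOG_BUFSIZE-sized slots, with one unsigned long bitmap marking busy slots, which is why the #warning above fires if the slot count would exceed BITS_PER_LONG. A stand-alone user-space sketch of the same reserve/release idea (plain C replaces the kernel bitops, so this version is not safe against concurrent callers; the 128-byte slot size is an assumption mirroring SCSI_LOG_BUFSIZE):

#include <stdio.h>

#define SPOOLSIZE 4096
#define BUFSIZE   128			/* assumed slot size */
#define NSLOTS    (SPOOLSIZE / BUFSIZE)

static char spool[SPOOLSIZE];
static unsigned long map;		/* one bit per slot, like scsi_log_buf.map */

static char *reserve(void)
{
	for (int i = 0; i < NSLOTS; i++)
		if (!(map & (1UL << i))) {
			map |= 1UL << i;
			return spool + i * BUFSIZE;
		}
	return NULL;			/* every slot busy */
}

static void release(char *p)
{
	map &= ~(1UL << ((p - spool) / BUFSIZE));
}

int main(void)
{
	char *buf = reserve();

	if (!buf)
		return 1;
	snprintf(buf, BUFSIZE, "using slot %ld", (long)((buf - spool) / BUFSIZE));
	puts(buf);
	release(buf);
	return 0;
}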
72 | static inline const char *scmd_name(const struct scsi_cmnd *scmd) | ||
73 | { | ||
74 | return scmd->request->rq_disk ? | ||
75 | scmd->request->rq_disk->disk_name : NULL; | ||
76 | } | ||
77 | |||
78 | static size_t sdev_format_header(char *logbuf, size_t logbuf_len, | ||
79 | const char *name, int tag) | ||
80 | { | ||
81 | size_t off = 0; | ||
82 | |||
83 | if (name) | ||
84 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
85 | "[%s] ", name); | ||
86 | |||
87 | if (WARN_ON(off >= logbuf_len)) | ||
88 | return off; | ||
89 | |||
90 | if (tag >= 0) | ||
91 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
92 | "tag#%d ", tag); | ||
93 | return off; | ||
94 | } | ||
95 | |||
96 | void sdev_prefix_printk(const char *level, const struct scsi_device *sdev, | ||
97 | const char *name, const char *fmt, ...) | ||
98 | { | ||
99 | va_list args; | ||
100 | char *logbuf; | ||
101 | size_t off = 0, logbuf_len; | ||
102 | |||
103 | if (!sdev) | ||
104 | return; | ||
105 | |||
106 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
107 | if (!logbuf) | ||
108 | return; | ||
109 | |||
110 | if (name) | ||
111 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
112 | "[%s] ", name); | ||
113 | if (!WARN_ON(off >= logbuf_len)) { | ||
114 | va_start(args, fmt); | ||
115 | off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args); | ||
116 | va_end(args); | ||
117 | } | ||
118 | dev_printk(level, &sdev->sdev_gendev, "%s", logbuf); | ||
119 | scsi_log_release_buffer(logbuf); | ||
120 | } | ||
121 | EXPORT_SYMBOL(sdev_prefix_printk); | ||
122 | |||
123 | void scmd_printk(const char *level, const struct scsi_cmnd *scmd, | ||
124 | const char *fmt, ...) | ||
125 | { | ||
126 | va_list args; | ||
127 | char *logbuf; | ||
128 | size_t off = 0, logbuf_len; | ||
129 | |||
130 | if (!scmd || !scmd->cmnd) | ||
131 | return; | ||
132 | |||
133 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
134 | if (!logbuf) | ||
135 | return; | ||
136 | off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd), | ||
137 | scmd->request->tag); | ||
138 | if (off < logbuf_len) { | ||
139 | va_start(args, fmt); | ||
140 | off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args); | ||
141 | va_end(args); | ||
142 | } | ||
143 | dev_printk(level, &scmd->device->sdev_gendev, "%s", logbuf); | ||
144 | scsi_log_release_buffer(logbuf); | ||
145 | } | ||
146 | EXPORT_SYMBOL(scmd_printk); | ||
147 | |||
148 | static size_t scsi_format_opcode_name(char *buffer, size_t buf_len, | ||
149 | const unsigned char *cdbp) | ||
150 | { | ||
151 | int sa, cdb0; | ||
152 | const char *cdb_name = NULL, *sa_name = NULL; | ||
153 | size_t off; | ||
154 | |||
155 | cdb0 = cdbp[0]; | ||
156 | if (cdb0 == VARIABLE_LENGTH_CMD) { | ||
157 | int len = scsi_varlen_cdb_length(cdbp); | ||
158 | |||
159 | if (len < 10) { | ||
160 | off = scnprintf(buffer, buf_len, | ||
161 | "short variable length command, len=%d", | ||
162 | len); | ||
163 | return off; | ||
164 | } | ||
165 | sa = (cdbp[8] << 8) + cdbp[9]; | ||
166 | } else | ||
167 | sa = cdbp[1] & 0x1f; | ||
168 | |||
169 | if (!scsi_opcode_sa_name(cdb0, sa, &cdb_name, &sa_name)) { | ||
170 | if (cdb_name) | ||
171 | off = scnprintf(buffer, buf_len, "%s", cdb_name); | ||
172 | else { | ||
173 | off = scnprintf(buffer, buf_len, "opcode=0x%x", cdb0); | ||
174 | if (WARN_ON(off >= buf_len)) | ||
175 | return off; | ||
176 | if (cdb0 >= VENDOR_SPECIFIC_CDB) | ||
177 | off += scnprintf(buffer + off, buf_len - off, | ||
178 | " (vendor)"); | ||
179 | else if (cdb0 >= 0x60 && cdb0 < 0x7e) | ||
180 | off += scnprintf(buffer + off, buf_len - off, | ||
181 | " (reserved)"); | ||
182 | } | ||
183 | } else { | ||
184 | if (sa_name) | ||
185 | off = scnprintf(buffer, buf_len, "%s", sa_name); | ||
186 | else if (cdb_name) | ||
187 | off = scnprintf(buffer, buf_len, "%s, sa=0x%x", | ||
188 | cdb_name, sa); | ||
189 | else | ||
190 | off = scnprintf(buffer, buf_len, | ||
191 | "opcode=0x%x, sa=0x%x", cdb0, sa); | ||
192 | } | ||
193 | WARN_ON(off >= buf_len); | ||
194 | return off; | ||
195 | } | ||
196 | |||
197 | size_t __scsi_format_command(char *logbuf, size_t logbuf_len, | ||
198 | const unsigned char *cdb, size_t cdb_len) | ||
199 | { | ||
200 | int len, k; | ||
201 | size_t off; | ||
202 | |||
203 | off = scsi_format_opcode_name(logbuf, logbuf_len, cdb); | ||
204 | if (off >= logbuf_len) | ||
205 | return off; | ||
206 | len = scsi_command_size(cdb); | ||
207 | if (cdb_len < len) | ||
208 | len = cdb_len; | ||
209 | /* print out all bytes in cdb */ | ||
210 | for (k = 0; k < len; ++k) { | ||
211 | if (off > logbuf_len - 3) | ||
212 | break; | ||
213 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
214 | " %02x", cdb[k]); | ||
215 | } | ||
216 | return off; | ||
217 | } | ||
218 | EXPORT_SYMBOL(__scsi_format_command); | ||
219 | |||
220 | void scsi_print_command(struct scsi_cmnd *cmd) | ||
221 | { | ||
222 | int k; | ||
223 | char *logbuf; | ||
224 | size_t off, logbuf_len; | ||
225 | |||
226 | if (!cmd->cmnd) | ||
227 | return; | ||
228 | |||
229 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
230 | if (!logbuf) | ||
231 | return; | ||
232 | |||
233 | off = sdev_format_header(logbuf, logbuf_len, | ||
234 | scmd_name(cmd), cmd->request->tag); | ||
235 | if (off >= logbuf_len) | ||
236 | goto out_printk; | ||
237 | off += scnprintf(logbuf + off, logbuf_len - off, "CDB: "); | ||
238 | if (WARN_ON(off >= logbuf_len)) | ||
239 | goto out_printk; | ||
240 | |||
241 | off += scsi_format_opcode_name(logbuf + off, logbuf_len - off, | ||
242 | cmd->cmnd); | ||
243 | if (off >= logbuf_len) | ||
244 | goto out_printk; | ||
245 | |||
246 | /* print out all bytes in cdb */ | ||
247 | if (cmd->cmd_len > 16) { | ||
248 | /* Print opcode in one line and use separate lines for CDB */ | ||
249 | off += scnprintf(logbuf + off, logbuf_len - off, "\n"); | ||
250 | dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); | ||
251 | scsi_log_release_buffer(logbuf); | ||
252 | for (k = 0; k < cmd->cmd_len; k += 16) { | ||
253 | size_t linelen = min(cmd->cmd_len - k, 16); | ||
254 | |||
255 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
256 | if (!logbuf) | ||
257 | break; | ||
258 | off = sdev_format_header(logbuf, logbuf_len, | ||
259 | scmd_name(cmd), | ||
260 | cmd->request->tag); | ||
261 | if (!WARN_ON(off > logbuf_len - 58)) { | ||
262 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
263 | "CDB[%02x]: ", k); | ||
264 | hex_dump_to_buffer(&cmd->cmnd[k], linelen, | ||
265 | 16, 1, logbuf + off, | ||
266 | logbuf_len - off, false); | ||
267 | } | ||
268 | dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", | ||
269 | logbuf); | ||
270 | scsi_log_release_buffer(logbuf); | ||
271 | } | ||
272 | return; | ||
273 | } | ||
274 | if (!WARN_ON(off > logbuf_len - 49)) { | ||
275 | off += scnprintf(logbuf + off, logbuf_len - off, " "); | ||
276 | hex_dump_to_buffer(cmd->cmnd, cmd->cmd_len, 16, 1, | ||
277 | logbuf + off, logbuf_len - off, | ||
278 | false); | ||
279 | } | ||
280 | out_printk: | ||
281 | dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); | ||
282 | scsi_log_release_buffer(logbuf); | ||
283 | } | ||
284 | EXPORT_SYMBOL(scsi_print_command); | ||
285 | |||
286 | static size_t | ||
287 | scsi_format_extd_sense(char *buffer, size_t buf_len, | ||
288 | unsigned char asc, unsigned char ascq) | ||
289 | { | ||
290 | size_t off = 0; | ||
291 | const char *extd_sense_fmt = NULL; | ||
292 | const char *extd_sense_str = scsi_extd_sense_format(asc, ascq, | ||
293 | &extd_sense_fmt); | ||
294 | |||
295 | if (extd_sense_str) { | ||
296 | off = scnprintf(buffer, buf_len, "Add. Sense: %s", | ||
297 | extd_sense_str); | ||
298 | if (extd_sense_fmt) | ||
299 | off += scnprintf(buffer + off, buf_len - off, | ||
300 | "(%s%x)", extd_sense_fmt, ascq); | ||
301 | } else { | ||
302 | if (asc >= 0x80) | ||
303 | off = scnprintf(buffer, buf_len, "<<vendor>>"); | ||
304 | off += scnprintf(buffer + off, buf_len - off, | ||
305 | "ASC=0x%x ", asc); | ||
306 | if (ascq >= 0x80) | ||
307 | off += scnprintf(buffer + off, buf_len - off, | ||
308 | "<<vendor>>"); | ||
309 | off += scnprintf(buffer + off, buf_len - off, | ||
310 | "ASCQ=0x%x ", ascq); | ||
311 | } | ||
312 | return off; | ||
313 | } | ||
314 | |||
315 | static size_t | ||
316 | scsi_format_sense_hdr(char *buffer, size_t buf_len, | ||
317 | const struct scsi_sense_hdr *sshdr) | ||
318 | { | ||
319 | const char *sense_txt; | ||
320 | size_t off; | ||
321 | |||
322 | off = scnprintf(buffer, buf_len, "Sense Key : "); | ||
323 | sense_txt = scsi_sense_key_string(sshdr->sense_key); | ||
324 | if (sense_txt) | ||
325 | off += scnprintf(buffer + off, buf_len - off, | ||
326 | "%s ", sense_txt); | ||
327 | else | ||
328 | off += scnprintf(buffer + off, buf_len - off, | ||
329 | "0x%x ", sshdr->sense_key); | ||
330 | off += scnprintf(buffer + off, buf_len - off, | ||
331 | scsi_sense_is_deferred(sshdr) ? "[deferred] " : "[current] "); | ||
332 | |||
333 | if (sshdr->response_code >= 0x72) | ||
334 | off += scnprintf(buffer + off, buf_len - off, "[descriptor] "); | ||
335 | return off; | ||
336 | } | ||
337 | |||
338 | static void | ||
339 | scsi_log_dump_sense(const struct scsi_device *sdev, const char *name, int tag, | ||
340 | const unsigned char *sense_buffer, int sense_len) | ||
341 | { | ||
342 | char *logbuf; | ||
343 | size_t logbuf_len; | ||
344 | int i; | ||
345 | |||
346 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
347 | if (!logbuf) | ||
348 | return; | ||
349 | |||
350 | for (i = 0; i < sense_len; i += 16) { | ||
351 | int len = min(sense_len - i, 16); | ||
352 | size_t off; | ||
353 | |||
354 | off = sdev_format_header(logbuf, logbuf_len, | ||
355 | name, tag); | ||
356 | hex_dump_to_buffer(&sense_buffer[i], len, 16, 1, | ||
357 | logbuf + off, logbuf_len - off, | ||
358 | false); | ||
359 | dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); | ||
360 | } | ||
361 | scsi_log_release_buffer(logbuf); | ||
362 | } | ||
363 | |||
364 | static void | ||
365 | scsi_log_print_sense_hdr(const struct scsi_device *sdev, const char *name, | ||
366 | int tag, const struct scsi_sense_hdr *sshdr) | ||
367 | { | ||
368 | char *logbuf; | ||
369 | size_t off, logbuf_len; | ||
370 | |||
371 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
372 | if (!logbuf) | ||
373 | return; | ||
374 | off = sdev_format_header(logbuf, logbuf_len, name, tag); | ||
375 | off += scsi_format_sense_hdr(logbuf + off, logbuf_len - off, sshdr); | ||
376 | dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); | ||
377 | scsi_log_release_buffer(logbuf); | ||
378 | |||
379 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
380 | if (!logbuf) | ||
381 | return; | ||
382 | off = sdev_format_header(logbuf, logbuf_len, name, tag); | ||
383 | off += scsi_format_extd_sense(logbuf + off, logbuf_len - off, | ||
384 | sshdr->asc, sshdr->ascq); | ||
385 | dev_printk(KERN_INFO, &sdev->sdev_gendev, "%s", logbuf); | ||
386 | scsi_log_release_buffer(logbuf); | ||
387 | } | ||
388 | |||
389 | static void | ||
390 | scsi_log_print_sense(const struct scsi_device *sdev, const char *name, int tag, | ||
391 | const unsigned char *sense_buffer, int sense_len) | ||
392 | { | ||
393 | struct scsi_sense_hdr sshdr; | ||
394 | |||
395 | if (scsi_normalize_sense(sense_buffer, sense_len, &sshdr)) | ||
396 | scsi_log_print_sense_hdr(sdev, name, tag, &sshdr); | ||
397 | else | ||
398 | scsi_log_dump_sense(sdev, name, tag, sense_buffer, sense_len); | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Print normalized SCSI sense header with a prefix. | ||
403 | */ | ||
404 | void | ||
405 | scsi_print_sense_hdr(const struct scsi_device *sdev, const char *name, | ||
406 | const struct scsi_sense_hdr *sshdr) | ||
407 | { | ||
408 | scsi_log_print_sense_hdr(sdev, name, -1, sshdr); | ||
409 | } | ||
410 | EXPORT_SYMBOL(scsi_print_sense_hdr); | ||
411 | |||
412 | /* Normalize and print sense buffer with name prefix */ | ||
413 | void __scsi_print_sense(const struct scsi_device *sdev, const char *name, | ||
414 | const unsigned char *sense_buffer, int sense_len) | ||
415 | { | ||
416 | scsi_log_print_sense(sdev, name, -1, sense_buffer, sense_len); | ||
417 | } | ||
418 | EXPORT_SYMBOL(__scsi_print_sense); | ||
419 | |||
420 | /* Normalize and print sense buffer in SCSI command */ | ||
421 | void scsi_print_sense(const struct scsi_cmnd *cmd) | ||
422 | { | ||
423 | scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag, | ||
424 | cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); | ||
425 | } | ||
426 | EXPORT_SYMBOL(scsi_print_sense); | ||
427 | |||
428 | void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg, | ||
429 | int disposition) | ||
430 | { | ||
431 | char *logbuf; | ||
432 | size_t off, logbuf_len; | ||
433 | const char *mlret_string = scsi_mlreturn_string(disposition); | ||
434 | const char *hb_string = scsi_hostbyte_string(cmd->result); | ||
435 | const char *db_string = scsi_driverbyte_string(cmd->result); | ||
436 | |||
437 | logbuf = scsi_log_reserve_buffer(&logbuf_len); | ||
438 | if (!logbuf) | ||
439 | return; | ||
440 | |||
441 | off = sdev_format_header(logbuf, logbuf_len, | ||
442 | scmd_name(cmd), cmd->request->tag); | ||
443 | |||
444 | if (off >= logbuf_len) | ||
445 | goto out_printk; | ||
446 | |||
447 | if (msg) { | ||
448 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
449 | "%s: ", msg); | ||
450 | if (WARN_ON(off >= logbuf_len)) | ||
451 | goto out_printk; | ||
452 | } | ||
453 | if (mlret_string) | ||
454 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
455 | "%s ", mlret_string); | ||
456 | else | ||
457 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
458 | "UNKNOWN(0x%02x) ", disposition); | ||
459 | if (WARN_ON(off >= logbuf_len)) | ||
460 | goto out_printk; | ||
461 | |||
462 | off += scnprintf(logbuf + off, logbuf_len - off, "Result: "); | ||
463 | if (WARN_ON(off >= logbuf_len)) | ||
464 | goto out_printk; | ||
465 | |||
466 | if (hb_string) | ||
467 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
468 | "hostbyte=%s ", hb_string); | ||
469 | else | ||
470 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
471 | "hostbyte=0x%02x ", host_byte(cmd->result)); | ||
472 | if (WARN_ON(off >= logbuf_len)) | ||
473 | goto out_printk; | ||
474 | |||
475 | if (db_string) | ||
476 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
477 | "driverbyte=%s", db_string); | ||
478 | else | ||
479 | off += scnprintf(logbuf + off, logbuf_len - off, | ||
480 | "driverbyte=0x%02x", driver_byte(cmd->result)); | ||
481 | out_printk: | ||
482 | dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); | ||
483 | scsi_log_release_buffer(logbuf); | ||
484 | } | ||
485 | EXPORT_SYMBOL(scsi_print_result); | ||
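Of the helpers introduced here, __scsi_format_command() is the one other code is expected to call directly, since it formats into a caller-supplied buffer rather than printing. A hedged usage sketch; the READ(10) bytes and buffer size are illustrative:

/* Hedged sketch: format a CDB with the new helper. */
static void show_cdb_example(void)
{
	char buf[64];
	unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0x08,
				  0, 0, 0x10, 0 };	/* READ(10), LBA 8, 16 blocks */

	__scsi_format_command(buf, sizeof(buf), cdb, sizeof(cdb));
	pr_info("CDB: %s\n", buf);
}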
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 6fcefa2da503..251598eb3547 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c | |||
@@ -189,36 +189,36 @@ static int proc_print_scsidevice(struct device *dev, void *data) | |||
189 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); | 189 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); |
190 | for (i = 0; i < 8; i++) { | 190 | for (i = 0; i < 8; i++) { |
191 | if (sdev->vendor[i] >= 0x20) | 191 | if (sdev->vendor[i] >= 0x20) |
192 | seq_printf(s, "%c", sdev->vendor[i]); | 192 | seq_putc(s, sdev->vendor[i]); |
193 | else | 193 | else |
194 | seq_printf(s, " "); | 194 | seq_putc(s, ' '); |
195 | } | 195 | } |
196 | 196 | ||
197 | seq_printf(s, " Model: "); | 197 | seq_puts(s, " Model: "); |
198 | for (i = 0; i < 16; i++) { | 198 | for (i = 0; i < 16; i++) { |
199 | if (sdev->model[i] >= 0x20) | 199 | if (sdev->model[i] >= 0x20) |
200 | seq_printf(s, "%c", sdev->model[i]); | 200 | seq_putc(s, sdev->model[i]); |
201 | else | 201 | else |
202 | seq_printf(s, " "); | 202 | seq_putc(s, ' '); |
203 | } | 203 | } |
204 | 204 | ||
205 | seq_printf(s, " Rev: "); | 205 | seq_puts(s, " Rev: "); |
206 | for (i = 0; i < 4; i++) { | 206 | for (i = 0; i < 4; i++) { |
207 | if (sdev->rev[i] >= 0x20) | 207 | if (sdev->rev[i] >= 0x20) |
208 | seq_printf(s, "%c", sdev->rev[i]); | 208 | seq_putc(s, sdev->rev[i]); |
209 | else | 209 | else |
210 | seq_printf(s, " "); | 210 | seq_putc(s, ' '); |
211 | } | 211 | } |
212 | 212 | ||
213 | seq_printf(s, "\n"); | 213 | seq_putc(s, '\n'); |
214 | 214 | ||
215 | seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); | 215 | seq_printf(s, " Type: %s ", scsi_device_type(sdev->type)); |
216 | seq_printf(s, " ANSI SCSI revision: %02x", | 216 | seq_printf(s, " ANSI SCSI revision: %02x", |
217 | sdev->scsi_level - (sdev->scsi_level > 1)); | 217 | sdev->scsi_level - (sdev->scsi_level > 1)); |
218 | if (sdev->scsi_level == 2) | 218 | if (sdev->scsi_level == 2) |
219 | seq_printf(s, " CCS\n"); | 219 | seq_puts(s, " CCS\n"); |
220 | else | 220 | else |
221 | seq_printf(s, "\n"); | 221 | seq_putc(s, '\n'); |
222 | 222 | ||
223 | out: | 223 | out: |
224 | return 0; | 224 | return 0; |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 983aed10ff2f..0deb385ad4d6 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/async.h> | 35 | #include <linux/async.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <asm/unaligned.h> | ||
37 | 38 | ||
38 | #include <scsi/scsi.h> | 39 | #include <scsi/scsi.h> |
39 | #include <scsi/scsi_cmnd.h> | 40 | #include <scsi/scsi_cmnd.h> |
@@ -98,20 +99,6 @@ char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; | |||
98 | module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); | 99 | module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); |
99 | MODULE_PARM_DESC(scan, "sync, async or none"); | 100 | MODULE_PARM_DESC(scan, "sync, async or none"); |
100 | 101 | ||
101 | /* | ||
102 | * max_scsi_report_luns: the maximum number of LUNS that will be | ||
103 | * returned from the REPORT LUNS command. 8 times this value must | ||
104 | * be allocated. In theory this could be up to an 8 byte value, but | ||
105 | * in practice, the maximum number of LUNs suppored by any device | ||
106 | * is about 16k. | ||
107 | */ | ||
108 | static unsigned int max_scsi_report_luns = 511; | ||
109 | |||
110 | module_param_named(max_report_luns, max_scsi_report_luns, uint, S_IRUGO|S_IWUSR); | ||
111 | MODULE_PARM_DESC(max_report_luns, | ||
112 | "REPORT LUNS maximum number of LUNS received (should be" | ||
113 | " between 1 and 16384)"); | ||
114 | |||
115 | static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; | 102 | static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; |
116 | 103 | ||
117 | module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); | 104 | module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); |
@@ -1367,7 +1354,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1367 | unsigned int retries; | 1354 | unsigned int retries; |
1368 | int result; | 1355 | int result; |
1369 | struct scsi_lun *lunp, *lun_data; | 1356 | struct scsi_lun *lunp, *lun_data; |
1370 | u8 *data; | ||
1371 | struct scsi_sense_hdr sshdr; | 1357 | struct scsi_sense_hdr sshdr; |
1372 | struct scsi_device *sdev; | 1358 | struct scsi_device *sdev; |
1373 | struct Scsi_Host *shost = dev_to_shost(&starget->dev); | 1359 | struct Scsi_Host *shost = dev_to_shost(&starget->dev); |
@@ -1407,16 +1393,12 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1407 | 1393 | ||
1408 | /* | 1394 | /* |
1409 | * Allocate enough to hold the header (the same size as one scsi_lun) | 1395 | * Allocate enough to hold the header (the same size as one scsi_lun) |
1410 | * plus the max number of luns we are requesting. | 1396 | * plus the number of luns we are requesting. 511 was the default |
1411 | * | 1397 | * value of the now removed max_report_luns parameter. |
1412 | * Reallocating and trying again (with the exact amount we need) | ||
1413 | * would be nice, but then we need to somehow limit the size | ||
1414 | * allocated based on the available memory and the limits of | ||
1415 | * kmalloc - we don't want a kmalloc() failure of a huge value to | ||
1416 | * prevent us from finding any LUNs on this target. | ||
1417 | */ | 1398 | */ |
1418 | length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun); | 1399 | length = (511 + 1) * sizeof(struct scsi_lun); |
1419 | lun_data = kmalloc(length, GFP_ATOMIC | | 1400 | retry: |
1401 | lun_data = kmalloc(length, GFP_KERNEL | | ||
1420 | (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); | 1402 | (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); |
1421 | if (!lun_data) { | 1403 | if (!lun_data) { |
1422 | printk(ALLOC_FAILURE_MSG, __func__); | 1404 | printk(ALLOC_FAILURE_MSG, __func__); |
@@ -1433,10 +1415,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1433 | /* | 1415 | /* |
1434 | * bytes 6 - 9: length of the command. | 1416 | * bytes 6 - 9: length of the command. |
1435 | */ | 1417 | */ |
1436 | scsi_cmd[6] = (unsigned char) (length >> 24) & 0xff; | 1418 | put_unaligned_be32(length, &scsi_cmd[6]); |
1437 | scsi_cmd[7] = (unsigned char) (length >> 16) & 0xff; | ||
1438 | scsi_cmd[8] = (unsigned char) (length >> 8) & 0xff; | ||
1439 | scsi_cmd[9] = (unsigned char) length & 0xff; | ||
1440 | 1419 | ||
1441 | scsi_cmd[10] = 0; /* reserved */ | 1420 | scsi_cmd[10] = 0; /* reserved */ |
1442 | scsi_cmd[11] = 0; /* control */ | 1421 | scsi_cmd[11] = 0; /* control */ |
@@ -1484,19 +1463,16 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | |||
1484 | /* | 1463 | /* |
1485 | * Get the length from the first four bytes of lun_data. | 1464 | * Get the length from the first four bytes of lun_data. |
1486 | */ | 1465 | */ |
1487 | data = (u8 *) lun_data->scsi_lun; | 1466 | if (get_unaligned_be32(lun_data->scsi_lun) + |
1488 | length = ((data[0] << 24) | (data[1] << 16) | | 1467 | sizeof(struct scsi_lun) > length) { |
1489 | (data[2] << 8) | (data[3] << 0)); | 1468 | length = get_unaligned_be32(lun_data->scsi_lun) + |
1469 | sizeof(struct scsi_lun); | ||
1470 | kfree(lun_data); | ||
1471 | goto retry; | ||
1472 | } | ||
1473 | length = get_unaligned_be32(lun_data->scsi_lun); | ||
1490 | 1474 | ||
1491 | num_luns = (length / sizeof(struct scsi_lun)); | 1475 | num_luns = (length / sizeof(struct scsi_lun)); |
1492 | if (num_luns > max_scsi_report_luns) { | ||
1493 | sdev_printk(KERN_WARNING, sdev, | ||
1494 | "Only %d (max_scsi_report_luns)" | ||
1495 | " of %d luns reported, try increasing" | ||
1496 | " max_scsi_report_luns.\n", | ||
1497 | max_scsi_report_luns, num_luns); | ||
1498 | num_luns = max_scsi_report_luns; | ||
1499 | } | ||
1500 | 1476 | ||
1501 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, | 1477 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, |
1502 | "scsi scan: REPORT LUN scan\n")); | 1478 | "scsi scan: REPORT LUN scan\n")); |
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 82af28b90294..08bb47b53bc3 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c | |||
@@ -143,7 +143,7 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) | |||
143 | cmd = "WRITE_SAME"; | 143 | cmd = "WRITE_SAME"; |
144 | break; | 144 | break; |
145 | default: | 145 | default: |
146 | trace_seq_printf(p, "UNKNOWN"); | 146 | trace_seq_puts(p, "UNKNOWN"); |
147 | goto out; | 147 | goto out; |
148 | } | 148 | } |
149 | 149 | ||
@@ -204,7 +204,7 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) | |||
204 | cmd = "GET_LBA_STATUS"; | 204 | cmd = "GET_LBA_STATUS"; |
205 | break; | 205 | break; |
206 | default: | 206 | default: |
207 | trace_seq_printf(p, "UNKNOWN"); | 207 | trace_seq_puts(p, "UNKNOWN"); |
208 | goto out; | 208 | goto out; |
209 | } | 209 | } |
210 | 210 | ||
@@ -249,7 +249,7 @@ scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) | |||
249 | { | 249 | { |
250 | const char *ret = trace_seq_buffer_ptr(p); | 250 | const char *ret = trace_seq_buffer_ptr(p); |
251 | 251 | ||
252 | trace_seq_printf(p, "-"); | 252 | trace_seq_putc(p, '-'); |
253 | trace_seq_putc(p, 0); | 253 | trace_seq_putc(p, 0); |
254 | 254 | ||
255 | return ret; | 255 | return ret; |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 05ea0d49a3a3..6b78476d04bb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -3320,11 +3320,8 @@ module_exit(exit_sd); | |||
3320 | static void sd_print_sense_hdr(struct scsi_disk *sdkp, | 3320 | static void sd_print_sense_hdr(struct scsi_disk *sdkp, |
3321 | struct scsi_sense_hdr *sshdr) | 3321 | struct scsi_sense_hdr *sshdr) |
3322 | { | 3322 | { |
3323 | scsi_show_sense_hdr(sdkp->device, | 3323 | scsi_print_sense_hdr(sdkp->device, |
3324 | sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); | 3324 | sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr); |
3325 | scsi_show_extd_sense(sdkp->device, | ||
3326 | sdkp->disk ? sdkp->disk->disk_name : NULL, | ||
3327 | sshdr->asc, sshdr->ascq); | ||
3328 | } | 3325 | } |
3329 | 3326 | ||
3330 | static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, | 3327 | static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, |
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index b7e79e7646ad..dcb0d76d7312 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -47,7 +47,6 @@ struct ses_device { | |||
47 | 47 | ||
48 | struct ses_component { | 48 | struct ses_component { |
49 | u64 addr; | 49 | u64 addr; |
50 | unsigned char *desc; | ||
51 | }; | 50 | }; |
52 | 51 | ||
53 | static int ses_probe(struct device *dev) | 52 | static int ses_probe(struct device *dev) |
@@ -68,6 +67,20 @@ static int ses_probe(struct device *dev) | |||
68 | #define SES_TIMEOUT (30 * HZ) | 67 | #define SES_TIMEOUT (30 * HZ) |
69 | #define SES_RETRIES 3 | 68 | #define SES_RETRIES 3 |
70 | 69 | ||
70 | static void init_device_slot_control(unsigned char *dest_desc, | ||
71 | struct enclosure_component *ecomp, | ||
72 | unsigned char *status) | ||
73 | { | ||
74 | memcpy(dest_desc, status, 4); | ||
75 | dest_desc[0] = 0; | ||
76 | /* only clear byte 1 for ENCLOSURE_COMPONENT_DEVICE */ | ||
77 | if (ecomp->type == ENCLOSURE_COMPONENT_DEVICE) | ||
78 | dest_desc[1] = 0; | ||
79 | dest_desc[2] &= 0xde; | ||
80 | dest_desc[3] &= 0x3c; | ||
81 | } | ||
82 | |||
83 | |||
71 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, | 84 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, |
72 | void *buf, int bufflen) | 85 | void *buf, int bufflen) |
73 | { | 86 | { |
@@ -179,14 +192,22 @@ static int ses_set_fault(struct enclosure_device *edev, | |||
179 | struct enclosure_component *ecomp, | 192 | struct enclosure_component *ecomp, |
180 | enum enclosure_component_setting val) | 193 | enum enclosure_component_setting val) |
181 | { | 194 | { |
182 | unsigned char desc[4] = {0 }; | 195 | unsigned char desc[4]; |
196 | unsigned char *desc_ptr; | ||
197 | |||
198 | desc_ptr = ses_get_page2_descriptor(edev, ecomp); | ||
199 | |||
200 | if (!desc_ptr) | ||
201 | return -EIO; | ||
202 | |||
203 | init_device_slot_control(desc, ecomp, desc_ptr); | ||
183 | 204 | ||
184 | switch (val) { | 205 | switch (val) { |
185 | case ENCLOSURE_SETTING_DISABLED: | 206 | case ENCLOSURE_SETTING_DISABLED: |
186 | /* zero is disabled */ | 207 | desc[3] &= 0xdf; |
187 | break; | 208 | break; |
188 | case ENCLOSURE_SETTING_ENABLED: | 209 | case ENCLOSURE_SETTING_ENABLED: |
189 | desc[3] = 0x20; | 210 | desc[3] |= 0x20; |
190 | break; | 211 | break; |
191 | default: | 212 | default: |
192 | /* SES doesn't do the SGPIO blink settings */ | 213 | /* SES doesn't do the SGPIO blink settings */ |
@@ -220,14 +241,22 @@ static int ses_set_locate(struct enclosure_device *edev, | |||
220 | struct enclosure_component *ecomp, | 241 | struct enclosure_component *ecomp, |
221 | enum enclosure_component_setting val) | 242 | enum enclosure_component_setting val) |
222 | { | 243 | { |
223 | unsigned char desc[4] = {0 }; | 244 | unsigned char desc[4]; |
245 | unsigned char *desc_ptr; | ||
246 | |||
247 | desc_ptr = ses_get_page2_descriptor(edev, ecomp); | ||
248 | |||
249 | if (!desc_ptr) | ||
250 | return -EIO; | ||
251 | |||
252 | init_device_slot_control(desc, ecomp, desc_ptr); | ||
224 | 253 | ||
225 | switch (val) { | 254 | switch (val) { |
226 | case ENCLOSURE_SETTING_DISABLED: | 255 | case ENCLOSURE_SETTING_DISABLED: |
227 | /* zero is disabled */ | 256 | desc[2] &= 0xfd; |
228 | break; | 257 | break; |
229 | case ENCLOSURE_SETTING_ENABLED: | 258 | case ENCLOSURE_SETTING_ENABLED: |
230 | desc[2] = 0x02; | 259 | desc[2] |= 0x02; |
231 | break; | 260 | break; |
232 | default: | 261 | default: |
233 | /* SES doesn't do the SGPIO blink settings */ | 262 | /* SES doesn't do the SGPIO blink settings */ |
@@ -240,15 +269,23 @@ static int ses_set_active(struct enclosure_device *edev, | |||
240 | struct enclosure_component *ecomp, | 269 | struct enclosure_component *ecomp, |
241 | enum enclosure_component_setting val) | 270 | enum enclosure_component_setting val) |
242 | { | 271 | { |
243 | unsigned char desc[4] = {0 }; | 272 | unsigned char desc[4]; |
273 | unsigned char *desc_ptr; | ||
274 | |||
275 | desc_ptr = ses_get_page2_descriptor(edev, ecomp); | ||
276 | |||
277 | if (!desc_ptr) | ||
278 | return -EIO; | ||
279 | |||
280 | init_device_slot_control(desc, ecomp, desc_ptr); | ||
244 | 281 | ||
245 | switch (val) { | 282 | switch (val) { |
246 | case ENCLOSURE_SETTING_DISABLED: | 283 | case ENCLOSURE_SETTING_DISABLED: |
247 | /* zero is disabled */ | 284 | desc[2] &= 0x7f; |
248 | ecomp->active = 0; | 285 | ecomp->active = 0; |
249 | break; | 286 | break; |
250 | case ENCLOSURE_SETTING_ENABLED: | 287 | case ENCLOSURE_SETTING_ENABLED: |
251 | desc[2] = 0x80; | 288 | desc[2] |= 0x80; |
252 | ecomp->active = 1; | 289 | ecomp->active = 1; |
253 | break; | 290 | break; |
254 | default: | 291 | default: |
@@ -258,13 +295,63 @@ static int ses_set_active(struct enclosure_device *edev, | |||
258 | return ses_set_page2_descriptor(edev, ecomp, desc); | 295 | return ses_set_page2_descriptor(edev, ecomp, desc); |
259 | } | 296 | } |
260 | 297 | ||
298 | static int ses_show_id(struct enclosure_device *edev, char *buf) | ||
299 | { | ||
300 | struct ses_device *ses_dev = edev->scratch; | ||
301 | unsigned long long id = get_unaligned_be64(ses_dev->page1+8+4); | ||
302 | |||
303 | return sprintf(buf, "%#llx\n", id); | ||
304 | } | ||
305 | |||
306 | static void ses_get_power_status(struct enclosure_device *edev, | ||
307 | struct enclosure_component *ecomp) | ||
308 | { | ||
309 | unsigned char *desc; | ||
310 | |||
311 | desc = ses_get_page2_descriptor(edev, ecomp); | ||
312 | if (desc) | ||
313 | ecomp->power_status = (desc[3] & 0x10) ? 0 : 1; | ||
314 | } | ||
315 | |||
316 | static int ses_set_power_status(struct enclosure_device *edev, | ||
317 | struct enclosure_component *ecomp, | ||
318 | int val) | ||
319 | { | ||
320 | unsigned char desc[4]; | ||
321 | unsigned char *desc_ptr; | ||
322 | |||
323 | desc_ptr = ses_get_page2_descriptor(edev, ecomp); | ||
324 | |||
325 | if (!desc_ptr) | ||
326 | return -EIO; | ||
327 | |||
328 | init_device_slot_control(desc, ecomp, desc_ptr); | ||
329 | |||
330 | switch (val) { | ||
331 | /* power = 1 is device_off = 0 and vice versa */ | ||
332 | case 0: | ||
333 | desc[3] |= 0x10; | ||
334 | break; | ||
335 | case 1: | ||
336 | desc[3] &= 0xef; | ||
337 | break; | ||
338 | default: | ||
339 | return -EINVAL; | ||
340 | } | ||
341 | ecomp->power_status = val; | ||
342 | return ses_set_page2_descriptor(edev, ecomp, desc); | ||
343 | } | ||
344 | |||
261 | static struct enclosure_component_callbacks ses_enclosure_callbacks = { | 345 | static struct enclosure_component_callbacks ses_enclosure_callbacks = { |
262 | .get_fault = ses_get_fault, | 346 | .get_fault = ses_get_fault, |
263 | .set_fault = ses_set_fault, | 347 | .set_fault = ses_set_fault, |
264 | .get_status = ses_get_status, | 348 | .get_status = ses_get_status, |
265 | .get_locate = ses_get_locate, | 349 | .get_locate = ses_get_locate, |
266 | .set_locate = ses_set_locate, | 350 | .set_locate = ses_set_locate, |
351 | .get_power_status = ses_get_power_status, | ||
352 | .set_power_status = ses_set_power_status, | ||
267 | .set_active = ses_set_active, | 353 | .set_active = ses_set_active, |
354 | .show_id = ses_show_id, | ||
268 | }; | 355 | }; |
269 | 356 | ||
270 | struct ses_host_edev { | 357 | struct ses_host_edev { |
@@ -298,19 +385,26 @@ static void ses_process_descriptor(struct enclosure_component *ecomp, | |||
298 | int invalid = desc[0] & 0x80; | 385 | int invalid = desc[0] & 0x80; |
299 | enum scsi_protocol proto = desc[0] & 0x0f; | 386 | enum scsi_protocol proto = desc[0] & 0x0f; |
300 | u64 addr = 0; | 387 | u64 addr = 0; |
388 | int slot = -1; | ||
301 | struct ses_component *scomp = ecomp->scratch; | 389 | struct ses_component *scomp = ecomp->scratch; |
302 | unsigned char *d; | 390 | unsigned char *d; |
303 | 391 | ||
304 | scomp->desc = desc; | ||
305 | |||
306 | if (invalid) | 392 | if (invalid) |
307 | return; | 393 | return; |
308 | 394 | ||
309 | switch (proto) { | 395 | switch (proto) { |
396 | case SCSI_PROTOCOL_FCP: | ||
397 | if (eip) { | ||
398 | d = desc + 4; | ||
399 | slot = d[3]; | ||
400 | } | ||
401 | break; | ||
310 | case SCSI_PROTOCOL_SAS: | 402 | case SCSI_PROTOCOL_SAS: |
311 | if (eip) | 403 | if (eip) { |
404 | d = desc + 4; | ||
405 | slot = d[3]; | ||
312 | d = desc + 8; | 406 | d = desc + 8; |
313 | else | 407 | } else |
314 | d = desc + 4; | 408 | d = desc + 4; |
315 | /* only take the phy0 addr */ | 409 | /* only take the phy0 addr */ |
316 | addr = (u64)d[12] << 56 | | 410 | addr = (u64)d[12] << 56 | |
@@ -326,6 +420,7 @@ static void ses_process_descriptor(struct enclosure_component *ecomp, | |||
326 | /* FIXME: Need to add more protocols than just SAS */ | 420 | /* FIXME: Need to add more protocols than just SAS */ |
327 | break; | 421 | break; |
328 | } | 422 | } |
423 | ecomp->slot = slot; | ||
329 | scomp->addr = addr; | 424 | scomp->addr = addr; |
330 | } | 425 | } |
331 | 426 | ||
@@ -349,7 +444,8 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev, | |||
349 | if (scomp->addr != efd->addr) | 444 | if (scomp->addr != efd->addr) |
350 | continue; | 445 | continue; |
351 | 446 | ||
352 | enclosure_add_device(edev, i, efd->dev); | 447 | if (enclosure_add_device(edev, i, efd->dev) == 0) |
448 | kobject_uevent(&efd->dev->kobj, KOBJ_CHANGE); | ||
353 | return 1; | 449 | return 1; |
354 | } | 450 | } |
355 | return 0; | 451 | return 0; |
@@ -423,16 +519,24 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
423 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { | 519 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) { |
424 | 520 | ||
425 | if (create) | 521 | if (create) |
426 | ecomp = enclosure_component_register(edev, | 522 | ecomp = enclosure_component_alloc( |
427 | components++, | 523 | edev, |
428 | type_ptr[0], | 524 | components++, |
429 | name); | 525 | type_ptr[0], |
526 | name); | ||
430 | else | 527 | else |
431 | ecomp = &edev->component[components++]; | 528 | ecomp = &edev->component[components++]; |
432 | 529 | ||
433 | if (!IS_ERR(ecomp) && addl_desc_ptr) | 530 | if (!IS_ERR(ecomp)) { |
434 | ses_process_descriptor(ecomp, | 531 | ses_get_power_status(edev, ecomp); |
435 | addl_desc_ptr); | 532 | if (addl_desc_ptr) |
533 | ses_process_descriptor( | ||
534 | ecomp, | ||
535 | addl_desc_ptr); | ||
536 | if (create) | ||
537 | enclosure_component_register( | ||
538 | ecomp); | ||
539 | } | ||
436 | } | 540 | } |
437 | if (desc_ptr) | 541 | if (desc_ptr) |
438 | desc_ptr += len; | 542 | desc_ptr += len; |
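The common thread in these ses.c changes is that every control operation becomes a read-modify-write: fetch the current page 2 descriptor, strip the status-only bits with init_device_slot_control(), then flip just the requested control bit (0x20 in byte 3 for fault, 0x02 in byte 2 for locate, 0x80 in byte 2 for active, 0x10 in byte 3 for the new DEVICE OFF/power control) so other settings survive. A condensed sketch of that pattern using the helpers visible in this diff; the generic wrapper itself is illustrative:

/* Hedged sketch of the read-modify-write pattern shared by ses_set_*(). */
static int ses_set_one_bit(struct enclosure_device *edev,
			   struct enclosure_component *ecomp,
			   int byte, unsigned char bit, bool on)
{
	unsigned char desc[4];
	unsigned char *cur = ses_get_page2_descriptor(edev, ecomp);

	if (!cur)
		return -EIO;
	init_device_slot_control(desc, ecomp, cur);	/* keep writable bits only */
	if (on)
		desc[byte] |= bit;
	else
		desc[byte] &= ~bit;
	return ses_set_page2_descriptor(edev, ecomp, desc);
}

/* e.g. power a slot off: ses_set_one_bit(edev, ecomp, 3, 0x10, true); */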
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index b14f64cb9724..a668c88ea150 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -763,7 +763,7 @@ static int | |||
763 | sg_common_write(Sg_fd * sfp, Sg_request * srp, | 763 | sg_common_write(Sg_fd * sfp, Sg_request * srp, |
764 | unsigned char *cmnd, int timeout, int blocking) | 764 | unsigned char *cmnd, int timeout, int blocking) |
765 | { | 765 | { |
766 | int k, data_dir, at_head; | 766 | int k, at_head; |
767 | Sg_device *sdp = sfp->parentdp; | 767 | Sg_device *sdp = sfp->parentdp; |
768 | sg_io_hdr_t *hp = &srp->header; | 768 | sg_io_hdr_t *hp = &srp->header; |
769 | 769 | ||
@@ -793,21 +793,6 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
793 | return -ENODEV; | 793 | return -ENODEV; |
794 | } | 794 | } |
795 | 795 | ||
796 | switch (hp->dxfer_direction) { | ||
797 | case SG_DXFER_TO_FROM_DEV: | ||
798 | case SG_DXFER_FROM_DEV: | ||
799 | data_dir = DMA_FROM_DEVICE; | ||
800 | break; | ||
801 | case SG_DXFER_TO_DEV: | ||
802 | data_dir = DMA_TO_DEVICE; | ||
803 | break; | ||
804 | case SG_DXFER_UNKNOWN: | ||
805 | data_dir = DMA_BIDIRECTIONAL; | ||
806 | break; | ||
807 | default: | ||
808 | data_dir = DMA_NONE; | ||
809 | break; | ||
810 | } | ||
811 | hp->duration = jiffies_to_msecs(jiffies); | 796 | hp->duration = jiffies_to_msecs(jiffies); |
812 | if (hp->interface_id != '\0' && /* v3 (or later) interface */ | 797 | if (hp->interface_id != '\0' && /* v3 (or later) interface */ |
813 | (SG_FLAG_Q_AT_TAIL & hp->flags)) | 798 | (SG_FLAG_Q_AT_TAIL & hp->flags)) |
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index fb929fac22ba..03054c0e7689 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c | |||
@@ -245,9 +245,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
245 | sr_printk(KERN_INFO, cd, | 245 | sr_printk(KERN_INFO, cd, |
246 | "CDROM not ready. Make sure there " | 246 | "CDROM not ready. Make sure there " |
247 | "is a disc in the drive.\n"); | 247 | "is a disc in the drive.\n"); |
248 | #ifdef DEBUG | ||
249 | scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); | ||
250 | #endif | ||
251 | err = -ENOMEDIUM; | 248 | err = -ENOMEDIUM; |
252 | break; | 249 | break; |
253 | case ILLEGAL_REQUEST: | 250 | case ILLEGAL_REQUEST: |
@@ -256,16 +253,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) | |||
256 | sshdr.ascq == 0x00) | 253 | sshdr.ascq == 0x00) |
257 | /* sense: Invalid command operation code */ | 254 | /* sense: Invalid command operation code */ |
258 | err = -EDRIVE_CANT_DO_THIS; | 255 | err = -EDRIVE_CANT_DO_THIS; |
259 | #ifdef DEBUG | ||
260 | __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE); | ||
261 | scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); | ||
262 | #endif | ||
263 | break; | 256 | break; |
264 | default: | 257 | default: |
265 | sr_printk(KERN_ERR, cd, | ||
266 | "CDROM (ioctl) error, command: "); | ||
267 | __scsi_print_command(cgc->cmd, CDROM_PACKET_SIZE); | ||
268 | scsi_print_sense_hdr(cd->device, cd->cdi.name, &sshdr); | ||
269 | err = -EIO; | 258 | err = -EIO; |
270 | } | 259 | } |
271 | } | 260 | } |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 4cff0ddc2c25..efc6e446b6c8 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/device.h> | 33 | #include <linux/device.h> |
34 | #include <linux/hyperv.h> | 34 | #include <linux/hyperv.h> |
35 | #include <linux/mempool.h> | ||
36 | #include <linux/blkdev.h> | 35 | #include <linux/blkdev.h> |
37 | #include <scsi/scsi.h> | 36 | #include <scsi/scsi.h> |
38 | #include <scsi/scsi_cmnd.h> | 37 | #include <scsi/scsi_cmnd.h> |
@@ -309,14 +308,6 @@ enum storvsc_request_type { | |||
309 | * This is the end of Protocol specific defines. | 308 | * This is the end of Protocol specific defines. |
310 | */ | 309 | */ |
311 | 310 | ||
312 | |||
313 | /* | ||
314 | * We setup a mempool to allocate request structures for this driver | ||
315 | * on a per-lun basis. The following define specifies the number of | ||
316 | * elements in the pool. | ||
317 | */ | ||
318 | |||
319 | #define STORVSC_MIN_BUF_NR 64 | ||
320 | static int storvsc_ringbuffer_size = (20 * PAGE_SIZE); | 311 | static int storvsc_ringbuffer_size = (20 * PAGE_SIZE); |
321 | 312 | ||
322 | module_param(storvsc_ringbuffer_size, int, S_IRUGO); | 313 | module_param(storvsc_ringbuffer_size, int, S_IRUGO); |
@@ -346,7 +337,6 @@ static void storvsc_on_channel_callback(void *context); | |||
346 | #define STORVSC_IDE_MAX_CHANNELS 1 | 337 | #define STORVSC_IDE_MAX_CHANNELS 1 |
347 | 338 | ||
348 | struct storvsc_cmd_request { | 339 | struct storvsc_cmd_request { |
349 | struct list_head entry; | ||
350 | struct scsi_cmnd *cmd; | 340 | struct scsi_cmnd *cmd; |
351 | 341 | ||
352 | unsigned int bounce_sgl_count; | 342 | unsigned int bounce_sgl_count; |
@@ -357,7 +347,6 @@ struct storvsc_cmd_request { | |||
357 | /* Synchronize the request/response if needed */ | 347 | /* Synchronize the request/response if needed */ |
358 | struct completion wait_event; | 348 | struct completion wait_event; |
359 | 349 | ||
360 | unsigned char *sense_buffer; | ||
361 | struct hv_multipage_buffer data_buffer; | 350 | struct hv_multipage_buffer data_buffer; |
362 | struct vstor_packet vstor_packet; | 351 | struct vstor_packet vstor_packet; |
363 | }; | 352 | }; |
@@ -389,11 +378,6 @@ struct storvsc_device { | |||
389 | struct storvsc_cmd_request reset_request; | 378 | struct storvsc_cmd_request reset_request; |
390 | }; | 379 | }; |
391 | 380 | ||
392 | struct stor_mem_pools { | ||
393 | struct kmem_cache *request_pool; | ||
394 | mempool_t *request_mempool; | ||
395 | }; | ||
396 | |||
397 | struct hv_host_device { | 381 | struct hv_host_device { |
398 | struct hv_device *dev; | 382 | struct hv_device *dev; |
399 | unsigned int port; | 383 | unsigned int port; |
@@ -426,21 +410,42 @@ done: | |||
426 | kfree(wrk); | 410 | kfree(wrk); |
427 | } | 411 | } |
428 | 412 | ||
429 | static void storvsc_bus_scan(struct work_struct *work) | 413 | static void storvsc_host_scan(struct work_struct *work) |
430 | { | 414 | { |
431 | struct storvsc_scan_work *wrk; | 415 | struct storvsc_scan_work *wrk; |
432 | int id, order_id; | 416 | struct Scsi_Host *host; |
417 | struct scsi_device *sdev; | ||
418 | unsigned long flags; | ||
433 | 419 | ||
434 | wrk = container_of(work, struct storvsc_scan_work, work); | 420 | wrk = container_of(work, struct storvsc_scan_work, work); |
435 | for (id = 0; id < wrk->host->max_id; ++id) { | 421 | host = wrk->host; |
436 | if (wrk->host->reverse_ordering) | 422 | |
437 | order_id = wrk->host->max_id - id - 1; | 423 | /* |
438 | else | 424 | * Before scanning the host, first check to see if any of the |
439 | order_id = id; | 425 | * currently known devices have been hot removed. We issue a |
440 | 426 | * "unit ready" command against all currently known devices. | |
441 | scsi_scan_target(&wrk->host->shost_gendev, 0, | 427 | * This I/O will result in an error for devices that have been |
442 | order_id, SCAN_WILD_CARD, 1); | 428 | * removed. As part of handling the I/O error, we remove the device. |
429 | * | ||
430 | * When a LUN is added or removed, the host sends us a signal to | ||
431 | * scan the host. Thus we are forced to discover the LUNs that | ||
432 | * may have been removed this way. | ||
433 | */ | ||
434 | mutex_lock(&host->scan_mutex); | ||
435 | spin_lock_irqsave(host->host_lock, flags); | ||
436 | list_for_each_entry(sdev, &host->__devices, siblings) { | ||
437 | spin_unlock_irqrestore(host->host_lock, flags); | ||
438 | scsi_test_unit_ready(sdev, 1, 1, NULL); | ||
439 | spin_lock_irqsave(host->host_lock, flags); | ||
440 | continue; | ||
443 | } | 441 | } |
442 | spin_unlock_irqrestore(host->host_lock, flags); | ||
443 | mutex_unlock(&host->scan_mutex); | ||
444 | /* | ||
445 | * Now scan the host to discover LUNs that may have been added. | ||
446 | */ | ||
447 | scsi_scan_host(host); | ||
448 | |||
444 | kfree(wrk); | 449 | kfree(wrk); |
445 | } | 450 | } |
446 | 451 | ||
@@ -1070,10 +1075,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) | |||
1070 | { | 1075 | { |
1071 | struct scsi_cmnd *scmnd = cmd_request->cmd; | 1076 | struct scsi_cmnd *scmnd = cmd_request->cmd; |
1072 | struct hv_host_device *host_dev = shost_priv(scmnd->device->host); | 1077 | struct hv_host_device *host_dev = shost_priv(scmnd->device->host); |
1073 | void (*scsi_done_fn)(struct scsi_cmnd *); | ||
1074 | struct scsi_sense_hdr sense_hdr; | 1078 | struct scsi_sense_hdr sense_hdr; |
1075 | struct vmscsi_request *vm_srb; | 1079 | struct vmscsi_request *vm_srb; |
1076 | struct stor_mem_pools *memp = scmnd->device->hostdata; | ||
1077 | struct Scsi_Host *host; | 1080 | struct Scsi_Host *host; |
1078 | struct storvsc_device *stor_dev; | 1081 | struct storvsc_device *stor_dev; |
1079 | struct hv_device *dev = host_dev->dev; | 1082 | struct hv_device *dev = host_dev->dev; |
@@ -1109,14 +1112,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request) | |||
1109 | cmd_request->data_buffer.len - | 1112 | cmd_request->data_buffer.len - |
1110 | vm_srb->data_transfer_length); | 1113 | vm_srb->data_transfer_length); |
1111 | 1114 | ||
1112 | scsi_done_fn = scmnd->scsi_done; | 1115 | scmnd->scsi_done(scmnd); |
1113 | |||
1114 | scmnd->host_scribble = NULL; | ||
1115 | scmnd->scsi_done = NULL; | ||
1116 | |||
1117 | scsi_done_fn(scmnd); | ||
1118 | |||
1119 | mempool_free(cmd_request, memp->request_mempool); | ||
1120 | } | 1116 | } |
1121 | 1117 | ||
1122 | static void storvsc_on_io_completion(struct hv_device *device, | 1118 | static void storvsc_on_io_completion(struct hv_device *device, |
@@ -1160,7 +1156,7 @@ static void storvsc_on_io_completion(struct hv_device *device, | |||
1160 | SRB_STATUS_AUTOSENSE_VALID) { | 1156 | SRB_STATUS_AUTOSENSE_VALID) { |
1161 | /* autosense data available */ | 1157 | /* autosense data available */ |
1162 | 1158 | ||
1163 | memcpy(request->sense_buffer, | 1159 | memcpy(request->cmd->sense_buffer, |
1164 | vstor_packet->vm_srb.sense_data, | 1160 | vstor_packet->vm_srb.sense_data, |
1165 | vstor_packet->vm_srb.sense_info_length); | 1161 | vstor_packet->vm_srb.sense_info_length); |
1166 | 1162 | ||
@@ -1198,7 +1194,7 @@ static void storvsc_on_receive(struct hv_device *device, | |||
1198 | if (!work) | 1194 | if (!work) |
1199 | return; | 1195 | return; |
1200 | 1196 | ||
1201 | INIT_WORK(&work->work, storvsc_bus_scan); | 1197 | INIT_WORK(&work->work, storvsc_host_scan); |
1202 | work->host = stor_device->host; | 1198 | work->host = stor_device->host; |
1203 | schedule_work(&work->work); | 1199 | schedule_work(&work->work); |
1204 | break; | 1200 | break; |
@@ -1378,55 +1374,6 @@ static int storvsc_do_io(struct hv_device *device, | |||
1378 | return ret; | 1374 | return ret; |
1379 | } | 1375 | } |
1380 | 1376 | ||
1381 | static int storvsc_device_alloc(struct scsi_device *sdevice) | ||
1382 | { | ||
1383 | struct stor_mem_pools *memp; | ||
1384 | int number = STORVSC_MIN_BUF_NR; | ||
1385 | |||
1386 | memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL); | ||
1387 | if (!memp) | ||
1388 | return -ENOMEM; | ||
1389 | |||
1390 | memp->request_pool = | ||
1391 | kmem_cache_create(dev_name(&sdevice->sdev_dev), | ||
1392 | sizeof(struct storvsc_cmd_request), 0, | ||
1393 | SLAB_HWCACHE_ALIGN, NULL); | ||
1394 | |||
1395 | if (!memp->request_pool) | ||
1396 | goto err0; | ||
1397 | |||
1398 | memp->request_mempool = mempool_create(number, mempool_alloc_slab, | ||
1399 | mempool_free_slab, | ||
1400 | memp->request_pool); | ||
1401 | |||
1402 | if (!memp->request_mempool) | ||
1403 | goto err1; | ||
1404 | |||
1405 | sdevice->hostdata = memp; | ||
1406 | |||
1407 | return 0; | ||
1408 | |||
1409 | err1: | ||
1410 | kmem_cache_destroy(memp->request_pool); | ||
1411 | |||
1412 | err0: | ||
1413 | kfree(memp); | ||
1414 | return -ENOMEM; | ||
1415 | } | ||
1416 | |||
1417 | static void storvsc_device_destroy(struct scsi_device *sdevice) | ||
1418 | { | ||
1419 | struct stor_mem_pools *memp = sdevice->hostdata; | ||
1420 | |||
1421 | if (!memp) | ||
1422 | return; | ||
1423 | |||
1424 | mempool_destroy(memp->request_mempool); | ||
1425 | kmem_cache_destroy(memp->request_pool); | ||
1426 | kfree(memp); | ||
1427 | sdevice->hostdata = NULL; | ||
1428 | } | ||
1429 | |||
1430 | static int storvsc_device_configure(struct scsi_device *sdevice) | 1377 | static int storvsc_device_configure(struct scsi_device *sdevice) |
1431 | { | 1378 | { |
1432 | scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS); | 1379 | scsi_change_queue_depth(sdevice, STORVSC_MAX_IO_REQUESTS); |
@@ -1447,6 +1394,19 @@ static int storvsc_device_configure(struct scsi_device *sdevice) | |||
1447 | */ | 1394 | */ |
1448 | sdevice->sdev_bflags |= msft_blist_flags; | 1395 | sdevice->sdev_bflags |= msft_blist_flags; |
1449 | 1396 | ||
1397 | /* | ||
1398 | * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 | ||
1399 | * if the device is a MSFT virtual device. | ||
1400 | */ | ||
1401 | if (!strncmp(sdevice->vendor, "Msft", 4)) { | ||
1402 | switch (vmbus_proto_version) { | ||
1403 | case VERSION_WIN8: | ||
1404 | case VERSION_WIN8_1: | ||
1405 | sdevice->scsi_level = SCSI_SPC_3; | ||
1406 | break; | ||
1407 | } | ||
1408 | } | ||
1409 | |||
1450 | return 0; | 1410 | return 0; |
1451 | } | 1411 | } |
1452 | 1412 | ||
@@ -1561,13 +1521,11 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1561 | int ret; | 1521 | int ret; |
1562 | struct hv_host_device *host_dev = shost_priv(host); | 1522 | struct hv_host_device *host_dev = shost_priv(host); |
1563 | struct hv_device *dev = host_dev->dev; | 1523 | struct hv_device *dev = host_dev->dev; |
1564 | struct storvsc_cmd_request *cmd_request; | 1524 | struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd); |
1565 | unsigned int request_size = 0; | ||
1566 | int i; | 1525 | int i; |
1567 | struct scatterlist *sgl; | 1526 | struct scatterlist *sgl; |
1568 | unsigned int sg_count = 0; | 1527 | unsigned int sg_count = 0; |
1569 | struct vmscsi_request *vm_srb; | 1528 | struct vmscsi_request *vm_srb; |
1570 | struct stor_mem_pools *memp = scmnd->device->hostdata; | ||
1571 | 1529 | ||
1572 | if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) { | 1530 | if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) { |
1573 | /* | 1531 | /* |
@@ -1584,25 +1542,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1584 | } | 1542 | } |
1585 | } | 1543 | } |
1586 | 1544 | ||
1587 | request_size = sizeof(struct storvsc_cmd_request); | ||
1588 | |||
1589 | cmd_request = mempool_alloc(memp->request_mempool, | ||
1590 | GFP_ATOMIC); | ||
1591 | |||
1592 | /* | ||
1593 | * We might be invoked in an interrupt context; hence | ||
1594 | * mempool_alloc() can fail. | ||
1595 | */ | ||
1596 | if (!cmd_request) | ||
1597 | return SCSI_MLQUEUE_DEVICE_BUSY; | ||
1598 | |||
1599 | memset(cmd_request, 0, sizeof(struct storvsc_cmd_request)); | ||
1600 | |||
1601 | /* Setup the cmd request */ | 1545 | /* Setup the cmd request */ |
1602 | cmd_request->cmd = scmnd; | 1546 | cmd_request->cmd = scmnd; |
1603 | 1547 | ||
1604 | scmnd->host_scribble = (unsigned char *)cmd_request; | ||
1605 | |||
1606 | vm_srb = &cmd_request->vstor_packet.vm_srb; | 1548 | vm_srb = &cmd_request->vstor_packet.vm_srb; |
1607 | vm_srb->win8_extension.time_out_value = 60; | 1549 | vm_srb->win8_extension.time_out_value = 60; |
1608 | 1550 | ||
@@ -1637,9 +1579,6 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1637 | 1579 | ||
1638 | memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); | 1580 | memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length); |
1639 | 1581 | ||
1640 | cmd_request->sense_buffer = scmnd->sense_buffer; | ||
1641 | |||
1642 | |||
1643 | cmd_request->data_buffer.len = scsi_bufflen(scmnd); | 1582 | cmd_request->data_buffer.len = scsi_bufflen(scmnd); |
1644 | if (scsi_sg_count(scmnd)) { | 1583 | if (scsi_sg_count(scmnd)) { |
1645 | sgl = (struct scatterlist *)scsi_sglist(scmnd); | 1584 | sgl = (struct scatterlist *)scsi_sglist(scmnd); |
@@ -1651,10 +1590,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1651 | create_bounce_buffer(sgl, scsi_sg_count(scmnd), | 1590 | create_bounce_buffer(sgl, scsi_sg_count(scmnd), |
1652 | scsi_bufflen(scmnd), | 1591 | scsi_bufflen(scmnd), |
1653 | vm_srb->data_in); | 1592 | vm_srb->data_in); |
1654 | if (!cmd_request->bounce_sgl) { | 1593 | if (!cmd_request->bounce_sgl) |
1655 | ret = SCSI_MLQUEUE_HOST_BUSY; | 1594 | return SCSI_MLQUEUE_HOST_BUSY; |
1656 | goto queue_error; | ||
1657 | } | ||
1658 | 1595 | ||
1659 | cmd_request->bounce_sgl_count = | 1596 | cmd_request->bounce_sgl_count = |
1660 | ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >> | 1597 | ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >> |
@@ -1692,27 +1629,21 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) | |||
1692 | destroy_bounce_buffer(cmd_request->bounce_sgl, | 1629 | destroy_bounce_buffer(cmd_request->bounce_sgl, |
1693 | cmd_request->bounce_sgl_count); | 1630 | cmd_request->bounce_sgl_count); |
1694 | 1631 | ||
1695 | ret = SCSI_MLQUEUE_DEVICE_BUSY; | 1632 | return SCSI_MLQUEUE_DEVICE_BUSY; |
1696 | goto queue_error; | ||
1697 | } | 1633 | } |
1698 | 1634 | ||
1699 | return 0; | 1635 | return 0; |
1700 | |||
1701 | queue_error: | ||
1702 | mempool_free(cmd_request, memp->request_mempool); | ||
1703 | scmnd->host_scribble = NULL; | ||
1704 | return ret; | ||
1705 | } | 1636 | } |
1706 | 1637 | ||
1707 | static struct scsi_host_template scsi_driver = { | 1638 | static struct scsi_host_template scsi_driver = { |
1708 | .module = THIS_MODULE, | 1639 | .module = THIS_MODULE, |
1709 | .name = "storvsc_host_t", | 1640 | .name = "storvsc_host_t", |
1641 | .cmd_size = sizeof(struct storvsc_cmd_request), | ||
1710 | .bios_param = storvsc_get_chs, | 1642 | .bios_param = storvsc_get_chs, |
1711 | .queuecommand = storvsc_queuecommand, | 1643 | .queuecommand = storvsc_queuecommand, |
1712 | .eh_host_reset_handler = storvsc_host_reset_handler, | 1644 | .eh_host_reset_handler = storvsc_host_reset_handler, |
1645 | .proc_name = "storvsc_host", | ||
1713 | .eh_timed_out = storvsc_eh_timed_out, | 1646 | .eh_timed_out = storvsc_eh_timed_out, |
1714 | .slave_alloc = storvsc_device_alloc, | ||
1715 | .slave_destroy = storvsc_device_destroy, | ||
1716 | .slave_configure = storvsc_device_configure, | 1647 | .slave_configure = storvsc_device_configure, |
1717 | .cmd_per_lun = 255, | 1648 | .cmd_per_lun = 255, |
1718 | .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, | 1649 | .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, |
@@ -1760,6 +1691,9 @@ static int storvsc_probe(struct hv_device *device, | |||
1760 | bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); | 1691 | bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false); |
1761 | int target = 0; | 1692 | int target = 0; |
1762 | struct storvsc_device *stor_device; | 1693 | struct storvsc_device *stor_device; |
1694 | int max_luns_per_target; | ||
1695 | int max_targets; | ||
1696 | int max_channels; | ||
1763 | 1697 | ||
1764 | /* | 1698 | /* |
1765 | * Based on the windows host we are running on, | 1699 | * Based on the windows host we are running on, |
@@ -1773,12 +1707,18 @@ static int storvsc_probe(struct hv_device *device, | |||
1773 | vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); | 1707 | vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); |
1774 | vmstor_current_major = VMSTOR_WIN7_MAJOR; | 1708 | vmstor_current_major = VMSTOR_WIN7_MAJOR; |
1775 | vmstor_current_minor = VMSTOR_WIN7_MINOR; | 1709 | vmstor_current_minor = VMSTOR_WIN7_MINOR; |
1710 | max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET; | ||
1711 | max_targets = STORVSC_IDE_MAX_TARGETS; | ||
1712 | max_channels = STORVSC_IDE_MAX_CHANNELS; | ||
1776 | break; | 1713 | break; |
1777 | default: | 1714 | default: |
1778 | sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; | 1715 | sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; |
1779 | vmscsi_size_delta = 0; | 1716 | vmscsi_size_delta = 0; |
1780 | vmstor_current_major = VMSTOR_WIN8_MAJOR; | 1717 | vmstor_current_major = VMSTOR_WIN8_MAJOR; |
1781 | vmstor_current_minor = VMSTOR_WIN8_MINOR; | 1718 | vmstor_current_minor = VMSTOR_WIN8_MINOR; |
1719 | max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET; | ||
1720 | max_targets = STORVSC_MAX_TARGETS; | ||
1721 | max_channels = STORVSC_MAX_CHANNELS; | ||
1782 | break; | 1722 | break; |
1783 | } | 1723 | } |
1784 | 1724 | ||
@@ -1826,9 +1766,9 @@ static int storvsc_probe(struct hv_device *device, | |||
1826 | break; | 1766 | break; |
1827 | 1767 | ||
1828 | case SCSI_GUID: | 1768 | case SCSI_GUID: |
1829 | host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; | 1769 | host->max_lun = max_luns_per_target; |
1830 | host->max_id = STORVSC_MAX_TARGETS; | 1770 | host->max_id = max_targets; |
1831 | host->max_channel = STORVSC_MAX_CHANNELS - 1; | 1771 | host->max_channel = max_channels - 1; |
1832 | break; | 1772 | break; |
1833 | 1773 | ||
1834 | default: | 1774 | default: |
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 6e07b2afddeb..8a1f4b355416 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig | |||
@@ -70,3 +70,16 @@ config SCSI_UFSHCD_PLATFORM | |||
70 | If you have a controller with this interface, say Y or M here. | 70 | If you have a controller with this interface, say Y or M here. |
71 | 71 | ||
72 | If unsure, say N. | 72 | If unsure, say N. |
73 | |||
74 | config SCSI_UFS_QCOM | ||
75 | bool "QCOM specific hooks to UFS controller platform driver" | ||
76 | depends on SCSI_UFSHCD_PLATFORM && ARCH_MSM | ||
77 | select PHY_QCOM_UFS | ||
78 | help | ||
79 | This selects the QCOM specific additions to the UFSHCD platform driver. | ||
80 | The UFS host on QCOM needs some vendor specific configuration before | ||
81 | accessing the hardware, which includes PHY configuration and vendor | ||
82 | specific registers. | ||
83 | |||
84 | Select this if you have UFS controller on QCOM chipset. | ||
85 | If unsure, say N. | ||
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 1e5bd48457d6..8303bcce7a23 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | # UFSHCD makefile | 1 | # UFSHCD makefile |
2 | obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o | ||
2 | obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o | 3 | obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o |
3 | obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o | 4 | obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o |
4 | obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o | 5 | obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o |
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c new file mode 100644 index 000000000000..9217af9bf734 --- /dev/null +++ b/drivers/scsi/ufs/ufs-qcom.c | |||
@@ -0,0 +1,1004 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2013-2015, Linux Foundation. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/time.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/phy/phy.h> | ||
19 | |||
20 | #include <linux/phy/phy-qcom-ufs.h> | ||
21 | #include "ufshcd.h" | ||
22 | #include "unipro.h" | ||
23 | #include "ufs-qcom.h" | ||
24 | #include "ufshci.h" | ||
25 | |||
26 | static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS]; | ||
27 | |||
28 | static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result); | ||
29 | static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, | ||
30 | const char *speed_mode); | ||
31 | static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote); | ||
32 | |||
33 | static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) | ||
34 | { | ||
35 | int err = 0; | ||
36 | |||
37 | err = ufshcd_dme_get(hba, | ||
38 | UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes); | ||
39 | if (err) | ||
40 | dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n", | ||
41 | __func__, err); | ||
42 | |||
43 | return err; | ||
44 | } | ||
45 | |||
46 | static int ufs_qcom_host_clk_get(struct device *dev, | ||
47 | const char *name, struct clk **clk_out) | ||
48 | { | ||
49 | struct clk *clk; | ||
50 | int err = 0; | ||
51 | |||
52 | clk = devm_clk_get(dev, name); | ||
53 | if (IS_ERR(clk)) { | ||
54 | err = PTR_ERR(clk); | ||
55 | dev_err(dev, "%s: failed to get %s err %d", | ||
56 | __func__, name, err); | ||
57 | } else { | ||
58 | *clk_out = clk; | ||
59 | } | ||
60 | |||
61 | return err; | ||
62 | } | ||
63 | |||
64 | static int ufs_qcom_host_clk_enable(struct device *dev, | ||
65 | const char *name, struct clk *clk) | ||
66 | { | ||
67 | int err = 0; | ||
68 | |||
69 | err = clk_prepare_enable(clk); | ||
70 | if (err) | ||
71 | dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err); | ||
72 | |||
73 | return err; | ||
74 | } | ||
75 | |||
76 | static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) | ||
77 | { | ||
78 | if (!host->is_lane_clks_enabled) | ||
79 | return; | ||
80 | |||
81 | clk_disable_unprepare(host->tx_l1_sync_clk); | ||
82 | clk_disable_unprepare(host->tx_l0_sync_clk); | ||
83 | clk_disable_unprepare(host->rx_l1_sync_clk); | ||
84 | clk_disable_unprepare(host->rx_l0_sync_clk); | ||
85 | |||
86 | host->is_lane_clks_enabled = false; | ||
87 | } | ||
88 | |||
89 | static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host) | ||
90 | { | ||
91 | int err = 0; | ||
92 | struct device *dev = host->hba->dev; | ||
93 | |||
94 | if (host->is_lane_clks_enabled) | ||
95 | return 0; | ||
96 | |||
97 | err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk", | ||
98 | host->rx_l0_sync_clk); | ||
99 | if (err) | ||
100 | goto out; | ||
101 | |||
102 | err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk", | ||
103 | host->tx_l0_sync_clk); | ||
104 | if (err) | ||
105 | goto disable_rx_l0; | ||
106 | |||
107 | err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk", | ||
108 | host->rx_l1_sync_clk); | ||
109 | if (err) | ||
110 | goto disable_tx_l0; | ||
111 | |||
112 | err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk", | ||
113 | host->tx_l1_sync_clk); | ||
114 | if (err) | ||
115 | goto disable_rx_l1; | ||
116 | |||
117 | host->is_lane_clks_enabled = true; | ||
118 | goto out; | ||
119 | |||
120 | disable_rx_l1: | ||
121 | clk_disable_unprepare(host->rx_l1_sync_clk); | ||
122 | disable_tx_l0: | ||
123 | clk_disable_unprepare(host->tx_l0_sync_clk); | ||
124 | disable_rx_l0: | ||
125 | clk_disable_unprepare(host->rx_l0_sync_clk); | ||
126 | out: | ||
127 | return err; | ||
128 | } | ||
129 | |||
130 | static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host) | ||
131 | { | ||
132 | int err = 0; | ||
133 | struct device *dev = host->hba->dev; | ||
134 | |||
135 | err = ufs_qcom_host_clk_get(dev, | ||
136 | "rx_lane0_sync_clk", &host->rx_l0_sync_clk); | ||
137 | if (err) | ||
138 | goto out; | ||
139 | |||
140 | err = ufs_qcom_host_clk_get(dev, | ||
141 | "tx_lane0_sync_clk", &host->tx_l0_sync_clk); | ||
142 | if (err) | ||
143 | goto out; | ||
144 | |||
145 | err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk", | ||
146 | &host->rx_l1_sync_clk); | ||
147 | if (err) | ||
148 | goto out; | ||
149 | |||
150 | err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk", | ||
151 | &host->tx_l1_sync_clk); | ||
152 | out: | ||
153 | return err; | ||
154 | } | ||
155 | |||
156 | static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba) | ||
157 | { | ||
158 | struct ufs_qcom_host *host = hba->priv; | ||
159 | struct phy *phy = host->generic_phy; | ||
160 | u32 tx_lanes; | ||
161 | int err = 0; | ||
162 | |||
163 | err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes); | ||
164 | if (err) | ||
165 | goto out; | ||
166 | |||
167 | err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes); | ||
168 | if (err) | ||
169 | dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n", | ||
170 | __func__); | ||
171 | |||
172 | out: | ||
173 | return err; | ||
174 | } | ||
175 | |||
176 | static int ufs_qcom_check_hibern8(struct ufs_hba *hba) | ||
177 | { | ||
178 | int err; | ||
179 | u32 tx_fsm_val = 0; | ||
180 | unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); | ||
181 | |||
182 | do { | ||
183 | err = ufshcd_dme_get(hba, | ||
184 | UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val); | ||
185 | if (err || tx_fsm_val == TX_FSM_HIBERN8) | ||
186 | break; | ||
187 | |||
188 | /* sleep for max. 200us */ | ||
189 | usleep_range(100, 200); | ||
190 | } while (time_before(jiffies, timeout)); | ||
191 | |||
192 | /* | ||
193 | * we might have scheduled out for long during polling so | ||
194 | * check the state again. | ||
195 | */ | ||
196 | if (time_after(jiffies, timeout)) | ||
197 | err = ufshcd_dme_get(hba, | ||
198 | UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val); | ||
199 | |||
200 | if (err) { | ||
201 | dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", | ||
202 | __func__, err); | ||
203 | } else if (tx_fsm_val != TX_FSM_HIBERN8) { | ||
204 | err = tx_fsm_val; | ||
205 | dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n", | ||
206 | __func__, err); | ||
207 | } | ||
208 | |||
209 | return err; | ||
210 | } | ||
211 | |||
212 | static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) | ||
213 | { | ||
214 | struct ufs_qcom_host *host = hba->priv; | ||
215 | struct phy *phy = host->generic_phy; | ||
216 | int ret = 0; | ||
217 | u8 major; | ||
218 | u16 minor, step; | ||
219 | bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) | ||
220 | ? true : false; | ||
221 | |||
222 | /* Assert PHY reset and apply PHY calibration values */ | ||
223 | ufs_qcom_assert_reset(hba); | ||
224 | /* provide 1ms delay to let the reset pulse propagate */ | ||
225 | usleep_range(1000, 1100); | ||
226 | |||
227 | ufs_qcom_get_controller_revision(hba, &major, &minor, &step); | ||
228 | ufs_qcom_phy_save_controller_version(phy, major, minor, step); | ||
229 | ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B); | ||
230 | if (ret) { | ||
231 | dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n", | ||
232 | __func__, ret); | ||
233 | goto out; | ||
234 | } | ||
235 | |||
236 | /* De-assert PHY reset and start serdes */ | ||
237 | ufs_qcom_deassert_reset(hba); | ||
238 | |||
239 | /* | ||
240 | * after reset deassertion, phy will need all ref clocks, | ||
241 | * voltage, current to settle down before starting serdes. | ||
242 | */ | ||
243 | usleep_range(1000, 1100); | ||
244 | ret = ufs_qcom_phy_start_serdes(phy); | ||
245 | if (ret) { | ||
246 | dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n", | ||
247 | __func__, ret); | ||
248 | goto out; | ||
249 | } | ||
250 | |||
251 | ret = ufs_qcom_phy_is_pcs_ready(phy); | ||
252 | if (ret) | ||
253 | dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n", | ||
254 | __func__, ret); | ||
255 | |||
256 | out: | ||
257 | return ret; | ||
258 | } | ||
259 | |||
260 | /* | ||
261 | * The UTP controller has a number of internal clock gating cells (CGCs). | ||
262 | * Internal hardware sub-modules within the UTP controller control the CGCs. | ||
263 | * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved | ||
264 | * in a specific operation. UTP controller CGCs are disabled by default, and | ||
265 | * this function enables them (after every UFS link startup) to reduce power | ||
266 | * leakage. | ||
267 | */ | ||
268 | static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) | ||
269 | { | ||
270 | ufshcd_writel(hba, | ||
271 | ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL, | ||
272 | REG_UFS_CFG2); | ||
273 | |||
274 | /* Ensure that HW clock gating is enabled before next operations */ | ||
275 | mb(); | ||
276 | } | ||
277 | |||
278 | static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status) | ||
279 | { | ||
280 | struct ufs_qcom_host *host = hba->priv; | ||
281 | int err = 0; | ||
282 | |||
283 | switch (status) { | ||
284 | case PRE_CHANGE: | ||
285 | ufs_qcom_power_up_sequence(hba); | ||
286 | /* | ||
287 | * The PHY PLL output is the source of tx/rx lane symbol | ||
288 | * clocks, hence, enable the lane clocks only after PHY | ||
289 | * is initialized. | ||
290 | */ | ||
291 | err = ufs_qcom_enable_lane_clks(host); | ||
292 | break; | ||
293 | case POST_CHANGE: | ||
294 | /* check if UFS PHY moved from DISABLED to HIBERN8 */ | ||
295 | err = ufs_qcom_check_hibern8(hba); | ||
296 | ufs_qcom_enable_hw_clk_gating(hba); | ||
297 | |||
298 | break; | ||
299 | default: | ||
300 | dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); | ||
301 | err = -EINVAL; | ||
302 | break; | ||
303 | } | ||
304 | return err; | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * Returns the configured core_clk rate (non-zero) on success and 0 | ||
309 | * in case of a failure. | ||
310 | */ | ||
311 | static unsigned long | ||
312 | ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate) | ||
313 | { | ||
314 | struct ufs_clk_info *clki; | ||
315 | u32 core_clk_period_in_ns; | ||
316 | u32 tx_clk_cycles_per_us = 0; | ||
317 | unsigned long core_clk_rate = 0; | ||
318 | u32 core_clk_cycles_per_us = 0; | ||
319 | |||
320 | static u32 pwm_fr_table[][2] = { | ||
321 | {UFS_PWM_G1, 0x1}, | ||
322 | {UFS_PWM_G2, 0x1}, | ||
323 | {UFS_PWM_G3, 0x1}, | ||
324 | {UFS_PWM_G4, 0x1}, | ||
325 | }; | ||
326 | |||
327 | static u32 hs_fr_table_rA[][2] = { | ||
328 | {UFS_HS_G1, 0x1F}, | ||
329 | {UFS_HS_G2, 0x3e}, | ||
330 | }; | ||
331 | |||
332 | static u32 hs_fr_table_rB[][2] = { | ||
333 | {UFS_HS_G1, 0x24}, | ||
334 | {UFS_HS_G2, 0x49}, | ||
335 | }; | ||
336 | |||
337 | if (gear == 0) { | ||
338 | dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); | ||
339 | goto out_error; | ||
340 | } | ||
341 | |||
342 | list_for_each_entry(clki, &hba->clk_list_head, list) { | ||
343 | if (!strcmp(clki->name, "core_clk")) | ||
344 | core_clk_rate = clk_get_rate(clki->clk); | ||
345 | } | ||
346 | |||
347 | /* If frequency is smaller than 1MHz, set to 1MHz */ | ||
348 | if (core_clk_rate < DEFAULT_CLK_RATE_HZ) | ||
349 | core_clk_rate = DEFAULT_CLK_RATE_HZ; | ||
350 | |||
351 | core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; | ||
352 | ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); | ||
353 | |||
354 | core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate; | ||
355 | core_clk_period_in_ns <<= OFFSET_CLK_NS_REG; | ||
356 | core_clk_period_in_ns &= MASK_CLK_NS_REG; | ||
357 | |||
358 | switch (hs) { | ||
359 | case FASTAUTO_MODE: | ||
360 | case FAST_MODE: | ||
361 | if (rate == PA_HS_MODE_A) { | ||
362 | if (gear > ARRAY_SIZE(hs_fr_table_rA)) { | ||
363 | dev_err(hba->dev, | ||
364 | "%s: index %d exceeds table size %zu\n", | ||
365 | __func__, gear, | ||
366 | ARRAY_SIZE(hs_fr_table_rA)); | ||
367 | goto out_error; | ||
368 | } | ||
369 | tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1]; | ||
370 | } else if (rate == PA_HS_MODE_B) { | ||
371 | if (gear > ARRAY_SIZE(hs_fr_table_rB)) { | ||
372 | dev_err(hba->dev, | ||
373 | "%s: index %d exceeds table size %zu\n", | ||
374 | __func__, gear, | ||
375 | ARRAY_SIZE(hs_fr_table_rB)); | ||
376 | goto out_error; | ||
377 | } | ||
378 | tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1]; | ||
379 | } else { | ||
380 | dev_err(hba->dev, "%s: invalid rate = %d\n", | ||
381 | __func__, rate); | ||
382 | goto out_error; | ||
383 | } | ||
384 | break; | ||
385 | case SLOWAUTO_MODE: | ||
386 | case SLOW_MODE: | ||
387 | if (gear > ARRAY_SIZE(pwm_fr_table)) { | ||
388 | dev_err(hba->dev, | ||
389 | "%s: index %d exceeds table size %zu\n", | ||
390 | __func__, gear, | ||
391 | ARRAY_SIZE(pwm_fr_table)); | ||
392 | goto out_error; | ||
393 | } | ||
394 | tx_clk_cycles_per_us = pwm_fr_table[gear-1][1]; | ||
395 | break; | ||
396 | case UNCHANGED: | ||
397 | default: | ||
398 | dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs); | ||
399 | goto out_error; | ||
400 | } | ||
401 | |||
402 | /* the 2 fields of this register shall be written at once */ | ||
403 | ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us, | ||
404 | REG_UFS_TX_SYMBOL_CLK_NS_US); | ||
405 | goto out; | ||
406 | |||
407 | out_error: | ||
408 | core_clk_rate = 0; | ||
409 | out: | ||
410 | return core_clk_rate; | ||
411 | } | ||
412 | |||
413 | static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status) | ||
414 | { | ||
415 | unsigned long core_clk_rate = 0; | ||
416 | u32 core_clk_cycles_per_100ms; | ||
417 | |||
418 | switch (status) { | ||
419 | case PRE_CHANGE: | ||
420 | core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1, | ||
421 | SLOWAUTO_MODE, 0); | ||
422 | if (!core_clk_rate) { | ||
423 | dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", | ||
424 | __func__); | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | core_clk_cycles_per_100ms = | ||
428 | (core_clk_rate / MSEC_PER_SEC) * 100; | ||
429 | ufshcd_writel(hba, core_clk_cycles_per_100ms, | ||
430 | REG_UFS_PA_LINK_STARTUP_TIMER); | ||
431 | break; | ||
432 | case POST_CHANGE: | ||
433 | ufs_qcom_link_startup_post_change(hba); | ||
434 | break; | ||
435 | default: | ||
436 | break; | ||
437 | } | ||
438 | |||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) | ||
443 | { | ||
444 | struct ufs_qcom_host *host = hba->priv; | ||
445 | struct phy *phy = host->generic_phy; | ||
446 | int ret = 0; | ||
447 | |||
448 | if (ufs_qcom_is_link_off(hba)) { | ||
449 | /* | ||
450 | * Disable the tx/rx lane symbol clocks before PHY is | ||
451 | * powered down as the PLL source should be disabled | ||
452 | * after downstream clocks are disabled. | ||
453 | */ | ||
454 | ufs_qcom_disable_lane_clks(host); | ||
455 | phy_power_off(phy); | ||
456 | |||
457 | /* Assert PHY soft reset */ | ||
458 | ufs_qcom_assert_reset(hba); | ||
459 | goto out; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * If UniPro link is not active, PHY ref_clk, main PHY analog power | ||
464 | * rail and low noise analog power rail for PLL can be switched off. | ||
465 | */ | ||
466 | if (!ufs_qcom_is_link_active(hba)) | ||
467 | phy_power_off(phy); | ||
468 | |||
469 | out: | ||
470 | return ret; | ||
471 | } | ||
472 | |||
473 | static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) | ||
474 | { | ||
475 | struct ufs_qcom_host *host = hba->priv; | ||
476 | struct phy *phy = host->generic_phy; | ||
477 | int err; | ||
478 | |||
479 | err = phy_power_on(phy); | ||
480 | if (err) { | ||
481 | dev_err(hba->dev, "%s: failed enabling regs, err = %d\n", | ||
482 | __func__, err); | ||
483 | goto out; | ||
484 | } | ||
485 | |||
486 | hba->is_sys_suspended = false; | ||
487 | |||
488 | out: | ||
489 | return err; | ||
490 | } | ||
491 | |||
492 | struct ufs_qcom_dev_params { | ||
493 | u32 pwm_rx_gear; /* pwm rx gear to work in */ | ||
494 | u32 pwm_tx_gear; /* pwm tx gear to work in */ | ||
495 | u32 hs_rx_gear; /* hs rx gear to work in */ | ||
496 | u32 hs_tx_gear; /* hs tx gear to work in */ | ||
497 | u32 rx_lanes; /* number of rx lanes */ | ||
498 | u32 tx_lanes; /* number of tx lanes */ | ||
499 | u32 rx_pwr_pwm; /* rx pwm working pwr */ | ||
500 | u32 tx_pwr_pwm; /* tx pwm working pwr */ | ||
501 | u32 rx_pwr_hs; /* rx hs working pwr */ | ||
502 | u32 tx_pwr_hs; /* tx hs working pwr */ | ||
503 | u32 hs_rate; /* rate A/B to work in HS */ | ||
504 | u32 desired_working_mode; | ||
505 | }; | ||
506 | |||
507 | static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param, | ||
508 | struct ufs_pa_layer_attr *dev_max, | ||
509 | struct ufs_pa_layer_attr *agreed_pwr) | ||
510 | { | ||
511 | int min_qcom_gear; | ||
512 | int min_dev_gear; | ||
513 | bool is_dev_sup_hs = false; | ||
514 | bool is_qcom_max_hs = false; | ||
515 | |||
516 | if (dev_max->pwr_rx == FAST_MODE) | ||
517 | is_dev_sup_hs = true; | ||
518 | |||
519 | if (qcom_param->desired_working_mode == FAST) { | ||
520 | is_qcom_max_hs = true; | ||
521 | min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear, | ||
522 | qcom_param->hs_tx_gear); | ||
523 | } else { | ||
524 | min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear, | ||
525 | qcom_param->pwm_tx_gear); | ||
526 | } | ||
527 | |||
528 | /* | ||
529 | * device doesn't support HS but qcom_param->desired_working_mode is | ||
530 | * HS, thus device and qcom_param don't agree | ||
531 | */ | ||
532 | if (!is_dev_sup_hs && is_qcom_max_hs) { | ||
533 | pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n", | ||
534 | __func__); | ||
535 | return -ENOTSUPP; | ||
536 | } else if (is_dev_sup_hs && is_qcom_max_hs) { | ||
537 | /* | ||
538 | * since device supports HS, it supports FAST_MODE. | ||
539 | * since qcom_param->desired_working_mode is also HS | ||
540 | * then final decision (FAST/FASTAUTO) is done according | ||
541 | * to qcom_params as it is the restricting factor | ||
542 | */ | ||
543 | agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = | ||
544 | qcom_param->rx_pwr_hs; | ||
545 | } else { | ||
546 | /* | ||
547 | * here qcom_param->desired_working_mode is PWM. | ||
548 | * it doesn't matter whether device supports HS or PWM, | ||
549 | * in both cases qcom_param->desired_working_mode will | ||
550 | * determine the mode | ||
551 | */ | ||
552 | agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = | ||
553 | qcom_param->rx_pwr_pwm; | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * we would like tx to work in the minimum number of lanes | ||
558 | * between device capability and vendor preferences. | ||
559 | * the same decision will be made for rx | ||
560 | */ | ||
561 | agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx, | ||
562 | qcom_param->tx_lanes); | ||
563 | agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx, | ||
564 | qcom_param->rx_lanes); | ||
565 | |||
566 | /* device maximum gear is the minimum between device rx and tx gears */ | ||
567 | min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx); | ||
568 | |||
569 | /* | ||
570 | * If the device capabilities and the vendor pre-defined preferences are | ||
571 | * both HS or both PWM, then set the minimum gear to be the chosen | ||
572 | * working gear. | ||
573 | * If one is PWM and one is HS, then the PWM side gets to decide | ||
574 | * the gear, as it is the one that also decided previously what | ||
575 | * pwr the device will be configured to. | ||
576 | */ | ||
577 | if ((is_dev_sup_hs && is_qcom_max_hs) || | ||
578 | (!is_dev_sup_hs && !is_qcom_max_hs)) | ||
579 | agreed_pwr->gear_rx = agreed_pwr->gear_tx = | ||
580 | min_t(u32, min_dev_gear, min_qcom_gear); | ||
581 | else if (!is_dev_sup_hs) | ||
582 | agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear; | ||
583 | else | ||
584 | agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear; | ||
585 | |||
586 | agreed_pwr->hs_rate = qcom_param->hs_rate; | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host) | ||
591 | { | ||
592 | int vote; | ||
593 | int err = 0; | ||
594 | char mode[BUS_VECTOR_NAME_LEN]; | ||
595 | |||
596 | ufs_qcom_get_speed_mode(&host->dev_req_params, mode); | ||
597 | |||
598 | vote = ufs_qcom_get_bus_vote(host, mode); | ||
599 | if (vote >= 0) | ||
600 | err = ufs_qcom_set_bus_vote(host, vote); | ||
601 | else | ||
602 | err = vote; | ||
603 | |||
604 | if (err) | ||
605 | dev_err(host->hba->dev, "%s: failed %d\n", __func__, err); | ||
606 | else | ||
607 | host->bus_vote.saved_vote = vote; | ||
608 | return err; | ||
609 | } | ||
610 | |||
611 | static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, | ||
612 | bool status, | ||
613 | struct ufs_pa_layer_attr *dev_max_params, | ||
614 | struct ufs_pa_layer_attr *dev_req_params) | ||
615 | { | ||
616 | u32 val; | ||
617 | struct ufs_qcom_host *host = hba->priv; | ||
618 | struct phy *phy = host->generic_phy; | ||
619 | struct ufs_qcom_dev_params ufs_qcom_cap; | ||
620 | int ret = 0; | ||
621 | int res = 0; | ||
622 | |||
623 | if (!dev_req_params) { | ||
624 | pr_err("%s: incoming dev_req_params is NULL\n", __func__); | ||
625 | ret = -EINVAL; | ||
626 | goto out; | ||
627 | } | ||
628 | |||
629 | switch (status) { | ||
630 | case PRE_CHANGE: | ||
631 | ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX; | ||
632 | ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX; | ||
633 | ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX; | ||
634 | ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX; | ||
635 | ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX; | ||
636 | ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX; | ||
637 | ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM; | ||
638 | ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM; | ||
639 | ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS; | ||
640 | ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS; | ||
641 | ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE; | ||
642 | ufs_qcom_cap.desired_working_mode = | ||
643 | UFS_QCOM_LIMIT_DESIRED_MODE; | ||
644 | |||
645 | ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap, | ||
646 | dev_max_params, | ||
647 | dev_req_params); | ||
648 | if (ret) { | ||
649 | pr_err("%s: failed to determine capabilities\n", | ||
650 | __func__); | ||
651 | goto out; | ||
652 | } | ||
653 | |||
654 | break; | ||
655 | case POST_CHANGE: | ||
656 | if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, | ||
657 | dev_req_params->pwr_rx, | ||
658 | dev_req_params->hs_rate)) { | ||
659 | dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", | ||
660 | __func__); | ||
661 | /* | ||
662 | * we return error code at the end of the routine, | ||
663 | * but continue to configure UFS_PHY_TX_LANE_ENABLE | ||
664 | * and bus voting as usual | ||
665 | */ | ||
666 | ret = -EINVAL; | ||
667 | } | ||
668 | |||
669 | val = ~(MAX_U32 << dev_req_params->lane_tx); | ||
670 | res = ufs_qcom_phy_set_tx_lane_enable(phy, val); | ||
671 | if (res) { | ||
672 | dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n", | ||
673 | __func__, res); | ||
674 | ret = res; | ||
675 | } | ||
676 | |||
677 | /* cache the power mode parameters to use internally */ | ||
678 | memcpy(&host->dev_req_params, | ||
679 | dev_req_params, sizeof(*dev_req_params)); | ||
680 | ufs_qcom_update_bus_bw_vote(host); | ||
681 | break; | ||
682 | default: | ||
683 | ret = -EINVAL; | ||
684 | break; | ||
685 | } | ||
686 | out: | ||
687 | return ret; | ||
688 | } | ||
689 | |||
690 | /** | ||
691 | * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks | ||
692 | * @hba: host controller instance | ||
693 | * | ||
694 | * The QCOM UFS host controller might have some non-standard behaviours | ||
695 | * (quirks) compared to what the UFSHCI specification mandates. Advertise | ||
696 | * all such quirks to the standard UFS host controller driver so that it | ||
697 | * takes them into account. | ||
698 | */ | ||
699 | static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) | ||
700 | { | ||
701 | u8 major; | ||
702 | u16 minor, step; | ||
703 | |||
704 | ufs_qcom_get_controller_revision(hba, &major, &minor, &step); | ||
705 | |||
706 | /* | ||
707 | * TBD | ||
708 | * here we should be advertising controller quirks according to | ||
709 | * controller version. | ||
710 | */ | ||
711 | } | ||
712 | |||
713 | static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host, | ||
714 | const char *speed_mode) | ||
715 | { | ||
716 | struct device *dev = host->hba->dev; | ||
717 | struct device_node *np = dev->of_node; | ||
718 | int err; | ||
719 | const char *key = "qcom,bus-vector-names"; | ||
720 | |||
721 | if (!speed_mode) { | ||
722 | err = -EINVAL; | ||
723 | goto out; | ||
724 | } | ||
725 | |||
726 | if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN")) | ||
727 | err = of_property_match_string(np, key, "MAX"); | ||
728 | else | ||
729 | err = of_property_match_string(np, key, speed_mode); | ||
730 | |||
731 | out: | ||
732 | if (err < 0) | ||
733 | dev_err(dev, "%s: Invalid %s mode %d\n", | ||
734 | __func__, speed_mode, err); | ||
735 | return err; | ||
736 | } | ||
737 | |||
738 | static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote) | ||
739 | { | ||
740 | int err = 0; | ||
741 | |||
742 | if (vote != host->bus_vote.curr_vote) | ||
743 | host->bus_vote.curr_vote = vote; | ||
744 | |||
745 | return err; | ||
746 | } | ||
747 | |||
748 | static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result) | ||
749 | { | ||
750 | int gear = max_t(u32, p->gear_rx, p->gear_tx); | ||
751 | int lanes = max_t(u32, p->lane_rx, p->lane_tx); | ||
752 | int pwr; | ||
753 | |||
754 | /* default to PWM Gear 1, Lane 1 if power mode is not initialized */ | ||
755 | if (!gear) | ||
756 | gear = 1; | ||
757 | |||
758 | if (!lanes) | ||
759 | lanes = 1; | ||
760 | |||
761 | if (!p->pwr_rx && !p->pwr_tx) { | ||
762 | pwr = SLOWAUTO_MODE; | ||
763 | snprintf(result, BUS_VECTOR_NAME_LEN, "MIN"); | ||
764 | } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE || | ||
765 | p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) { | ||
766 | pwr = FAST_MODE; | ||
767 | snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS", | ||
768 | p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes); | ||
769 | } else { | ||
770 | pwr = SLOW_MODE; | ||
771 | snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d", | ||
772 | "PWM", gear, lanes); | ||
773 | } | ||
774 | } | ||
775 | |||
776 | static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on) | ||
777 | { | ||
778 | struct ufs_qcom_host *host = hba->priv; | ||
779 | int err = 0; | ||
780 | int vote = 0; | ||
781 | |||
782 | /* | ||
783 | * In case ufs_qcom_init() is not yet done, simply ignore. | ||
784 | * ufs_qcom_setup_clocks() will be called again from | ||
785 | * ufs_qcom_init() once init is done. | ||
786 | */ | ||
787 | if (!host) | ||
788 | return 0; | ||
789 | |||
790 | if (on) { | ||
791 | err = ufs_qcom_phy_enable_iface_clk(host->generic_phy); | ||
792 | if (err) | ||
793 | goto out; | ||
794 | |||
795 | err = ufs_qcom_phy_enable_ref_clk(host->generic_phy); | ||
796 | if (err) { | ||
797 | dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n", | ||
798 | __func__, err); | ||
799 | ufs_qcom_phy_disable_iface_clk(host->generic_phy); | ||
800 | goto out; | ||
801 | } | ||
802 | /* enable the device ref clock */ | ||
803 | ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy); | ||
804 | vote = host->bus_vote.saved_vote; | ||
805 | if (vote == host->bus_vote.min_bw_vote) | ||
806 | ufs_qcom_update_bus_bw_vote(host); | ||
807 | } else { | ||
808 | /* M-PHY RMMI interface clocks can be turned off */ | ||
809 | ufs_qcom_phy_disable_iface_clk(host->generic_phy); | ||
810 | if (!ufs_qcom_is_link_active(hba)) { | ||
811 | /* turn off UFS local PHY ref_clk */ | ||
812 | ufs_qcom_phy_disable_ref_clk(host->generic_phy); | ||
813 | /* disable device ref_clk */ | ||
814 | ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy); | ||
815 | } | ||
816 | vote = host->bus_vote.min_bw_vote; | ||
817 | } | ||
818 | |||
819 | err = ufs_qcom_set_bus_vote(host, vote); | ||
820 | if (err) | ||
821 | dev_err(hba->dev, "%s: set bus vote failed %d\n", | ||
822 | __func__, err); | ||
823 | |||
824 | out: | ||
825 | return err; | ||
826 | } | ||
827 | |||
828 | static ssize_t | ||
829 | show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, | ||
830 | char *buf) | ||
831 | { | ||
832 | struct ufs_hba *hba = dev_get_drvdata(dev); | ||
833 | struct ufs_qcom_host *host = hba->priv; | ||
834 | |||
835 | return snprintf(buf, PAGE_SIZE, "%u\n", | ||
836 | host->bus_vote.is_max_bw_needed); | ||
837 | } | ||
838 | |||
839 | static ssize_t | ||
840 | store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr, | ||
841 | const char *buf, size_t count) | ||
842 | { | ||
843 | struct ufs_hba *hba = dev_get_drvdata(dev); | ||
844 | struct ufs_qcom_host *host = hba->priv; | ||
845 | uint32_t value; | ||
846 | |||
847 | if (!kstrtou32(buf, 0, &value)) { | ||
848 | host->bus_vote.is_max_bw_needed = !!value; | ||
849 | ufs_qcom_update_bus_bw_vote(host); | ||
850 | } | ||
851 | |||
852 | return count; | ||
853 | } | ||
854 | |||
855 | static int ufs_qcom_bus_register(struct ufs_qcom_host *host) | ||
856 | { | ||
857 | int err; | ||
858 | struct device *dev = host->hba->dev; | ||
859 | struct device_node *np = dev->of_node; | ||
860 | |||
861 | err = of_property_count_strings(np, "qcom,bus-vector-names"); | ||
862 | if (err < 0) { | ||
863 | dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n", | ||
864 | __func__, err); | ||
865 | goto out; | ||
866 | } | ||
867 | |||
868 | /* cache the vote index for minimum and maximum bandwidth */ | ||
869 | host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN"); | ||
870 | host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX"); | ||
871 | |||
872 | host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw; | ||
873 | host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw; | ||
874 | sysfs_attr_init(&host->bus_vote.max_bus_bw.attr); | ||
875 | host->bus_vote.max_bus_bw.attr.name = "max_bus_bw"; | ||
876 | host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR; | ||
877 | err = device_create_file(dev, &host->bus_vote.max_bus_bw); | ||
878 | out: | ||
879 | return err; | ||
880 | } | ||
881 | |||
882 | #define ANDROID_BOOT_DEV_MAX 30 | ||
883 | static char android_boot_dev[ANDROID_BOOT_DEV_MAX]; | ||
884 | static int get_android_boot_dev(char *str) | ||
885 | { | ||
886 | strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX); | ||
887 | return 1; | ||
888 | } | ||
889 | __setup("androidboot.bootdevice=", get_android_boot_dev); | ||
890 | |||
891 | /** | ||
892 | * ufs_qcom_init - bind phy with controller | ||
893 | * @hba: host controller instance | ||
894 | * | ||
895 | * Binds PHY with controller and powers up PHY enabling clocks | ||
896 | * and regulators. | ||
897 | * | ||
898 | * Returns -EPROBE_DEFER if binding fails, returns negative error | ||
899 | * on phy power up failure and returns zero on success. | ||
900 | */ | ||
901 | static int ufs_qcom_init(struct ufs_hba *hba) | ||
902 | { | ||
903 | int err; | ||
904 | struct device *dev = hba->dev; | ||
905 | struct ufs_qcom_host *host; | ||
906 | |||
907 | if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev))) | ||
908 | return -ENODEV; | ||
909 | |||
910 | host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); | ||
911 | if (!host) { | ||
912 | err = -ENOMEM; | ||
913 | dev_err(dev, "%s: no memory for qcom ufs host\n", __func__); | ||
914 | goto out; | ||
915 | } | ||
916 | |||
917 | host->hba = hba; | ||
918 | hba->priv = (void *)host; | ||
919 | |||
920 | host->generic_phy = devm_phy_get(dev, "ufsphy"); | ||
921 | |||
922 | if (IS_ERR(host->generic_phy)) { | ||
923 | err = PTR_ERR(host->generic_phy); | ||
924 | dev_err(dev, "%s: PHY get failed %d\n", __func__, err); | ||
925 | goto out; | ||
926 | } | ||
927 | |||
928 | err = ufs_qcom_bus_register(host); | ||
929 | if (err) | ||
930 | goto out_host_free; | ||
931 | |||
932 | phy_init(host->generic_phy); | ||
933 | err = phy_power_on(host->generic_phy); | ||
934 | if (err) | ||
935 | goto out_unregister_bus; | ||
936 | |||
937 | err = ufs_qcom_init_lane_clks(host); | ||
938 | if (err) | ||
939 | goto out_disable_phy; | ||
940 | |||
941 | ufs_qcom_advertise_quirks(hba); | ||
942 | |||
943 | hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING; | ||
944 | hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; | ||
945 | |||
946 | ufs_qcom_setup_clocks(hba, true); | ||
947 | |||
948 | if (hba->dev->id < MAX_UFS_QCOM_HOSTS) | ||
949 | ufs_qcom_hosts[hba->dev->id] = host; | ||
950 | |||
951 | goto out; | ||
952 | |||
953 | out_disable_phy: | ||
954 | phy_power_off(host->generic_phy); | ||
955 | out_unregister_bus: | ||
956 | phy_exit(host->generic_phy); | ||
957 | out_host_free: | ||
958 | devm_kfree(dev, host); | ||
959 | hba->priv = NULL; | ||
960 | out: | ||
961 | return err; | ||
962 | } | ||
963 | |||
964 | static void ufs_qcom_exit(struct ufs_hba *hba) | ||
965 | { | ||
966 | struct ufs_qcom_host *host = hba->priv; | ||
967 | |||
968 | ufs_qcom_disable_lane_clks(host); | ||
969 | phy_power_off(host->generic_phy); | ||
970 | } | ||
971 | |||
972 | static | ||
973 | void ufs_qcom_clk_scale_notify(struct ufs_hba *hba) | ||
974 | { | ||
975 | struct ufs_qcom_host *host = hba->priv; | ||
976 | struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params; | ||
977 | |||
978 | if (!dev_req_params) | ||
979 | return; | ||
980 | |||
981 | ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, | ||
982 | dev_req_params->pwr_rx, | ||
983 | dev_req_params->hs_rate); | ||
984 | } | ||
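/*
 * Descriptive note (hedged): on every clock-scale notification this simply
 * replays ufs_qcom_cfg_timers() with the RX gear, RX power mode and HS rate
 * cached in host->dev_req_params, presumably so the vendor timer registers
 * keep tracking the currently negotiated link parameters.
 */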
985 | |||
986 | /** | ||
987 | * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations | ||
988 | * | ||
989 | * The variant operations configure the necessary controller and PHY | ||
990 | * handshake during initialization. | ||
991 | */ | ||
992 | static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { | ||
993 | .name = "qcom", | ||
994 | .init = ufs_qcom_init, | ||
995 | .exit = ufs_qcom_exit, | ||
996 | .clk_scale_notify = ufs_qcom_clk_scale_notify, | ||
997 | .setup_clocks = ufs_qcom_setup_clocks, | ||
998 | .hce_enable_notify = ufs_qcom_hce_enable_notify, | ||
999 | .link_startup_notify = ufs_qcom_link_startup_notify, | ||
1000 | .pwr_change_notify = ufs_qcom_pwr_change_notify, | ||
1001 | .suspend = ufs_qcom_suspend, | ||
1002 | .resume = ufs_qcom_resume, | ||
1003 | }; | ||
1004 | EXPORT_SYMBOL(ufs_hba_qcom_vops); | ||
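As a hedged sketch of how the exported table might be consumed (the helper name
and the "qcom,ufshc" compatible string are assumptions for illustration, not
taken from this series, and the snippet assumes the ufshcd/vops declarations
are already visible), the platform glue could select the variant ops from the
device tree node:

	/* illustrative only; not part of the patch above */
	#include <linux/of.h>
	#include <linux/device.h>

	static const struct ufs_hba_variant_ops *
	ufs_pltfrm_pick_vops(struct device *dev)
	{
		/* assumed compatible string; real binding may differ */
		if (of_device_is_compatible(dev->of_node, "qcom,ufshc"))
			return &ufs_hba_qcom_vops;
		return NULL;	/* plain UFSHCI, no vendor hooks */
	}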
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h new file mode 100644 index 000000000000..9a6febd007df --- /dev/null +++ b/drivers/scsi/ufs/ufs-qcom.h | |||
@@ -0,0 +1,170 @@ | |||
1 | /* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #ifndef UFS_QCOM_H_ | ||
15 | #define UFS_QCOM_H_ | ||
16 | |||
17 | #define MAX_UFS_QCOM_HOSTS 1 | ||
18 | #define MAX_U32 (~(u32)0) | ||
19 | #define MPHY_TX_FSM_STATE 0x41 | ||
20 | #define TX_FSM_HIBERN8 0x1 | ||
21 | #define HBRN8_POLL_TOUT_MS 100 | ||
22 | #define DEFAULT_CLK_RATE_HZ 1000000 | ||
23 | #define BUS_VECTOR_NAME_LEN 32 | ||
24 | |||
25 | #define UFS_HW_VER_MAJOR_SHFT (28) | ||
26 | #define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT) | ||
27 | #define UFS_HW_VER_MINOR_SHFT (16) | ||
28 | #define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT) | ||
29 | #define UFS_HW_VER_STEP_SHFT (0) | ||
30 | #define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT) | ||
31 | |||
32 | /* vendor specific pre-defined parameters */ | ||
33 | #define SLOW 1 | ||
34 | #define FAST 2 | ||
35 | |||
36 | #define UFS_QCOM_LIMIT_NUM_LANES_RX 2 | ||
37 | #define UFS_QCOM_LIMIT_NUM_LANES_TX 2 | ||
38 | #define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G2 | ||
39 | #define UFS_QCOM_LIMIT_HSGEAR_TX UFS_HS_G2 | ||
40 | #define UFS_QCOM_LIMIT_PWMGEAR_RX UFS_PWM_G4 | ||
41 | #define UFS_QCOM_LIMIT_PWMGEAR_TX UFS_PWM_G4 | ||
42 | #define UFS_QCOM_LIMIT_RX_PWR_PWM SLOW_MODE | ||
43 | #define UFS_QCOM_LIMIT_TX_PWR_PWM SLOW_MODE | ||
44 | #define UFS_QCOM_LIMIT_RX_PWR_HS FAST_MODE | ||
45 | #define UFS_QCOM_LIMIT_TX_PWR_HS FAST_MODE | ||
46 | #define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B | ||
47 | #define UFS_QCOM_LIMIT_DESIRED_MODE FAST | ||
48 | |||
49 | /* QCOM UFS host controller vendor specific registers */ | ||
50 | enum { | ||
51 | REG_UFS_SYS1CLK_1US = 0xC0, | ||
52 | REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4, | ||
53 | REG_UFS_LOCAL_PORT_ID_REG = 0xC8, | ||
54 | REG_UFS_PA_ERR_CODE = 0xCC, | ||
55 | REG_UFS_RETRY_TIMER_REG = 0xD0, | ||
56 | REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8, | ||
57 | REG_UFS_CFG1 = 0xDC, | ||
58 | REG_UFS_CFG2 = 0xE0, | ||
59 | REG_UFS_HW_VERSION = 0xE4, | ||
60 | |||
61 | UFS_DBG_RD_REG_UAWM = 0x100, | ||
62 | UFS_DBG_RD_REG_UARM = 0x200, | ||
63 | UFS_DBG_RD_REG_TXUC = 0x300, | ||
64 | UFS_DBG_RD_REG_RXUC = 0x400, | ||
65 | UFS_DBG_RD_REG_DFC = 0x500, | ||
66 | UFS_DBG_RD_REG_TRLUT = 0x600, | ||
67 | UFS_DBG_RD_REG_TMRLUT = 0x700, | ||
68 | UFS_UFS_DBG_RD_REG_OCSC = 0x800, | ||
69 | |||
70 | UFS_UFS_DBG_RD_DESC_RAM = 0x1500, | ||
71 | UFS_UFS_DBG_RD_PRDT_RAM = 0x1700, | ||
72 | UFS_UFS_DBG_RD_RESP_RAM = 0x1800, | ||
73 | UFS_UFS_DBG_RD_EDTL_RAM = 0x1900, | ||
74 | }; | ||
75 | |||
76 | /* bit definitions for REG_UFS_CFG2 register */ | ||
77 | #define UAWM_HW_CGC_EN (1 << 0) | ||
78 | #define UARM_HW_CGC_EN (1 << 1) | ||
79 | #define TXUC_HW_CGC_EN (1 << 2) | ||
80 | #define RXUC_HW_CGC_EN (1 << 3) | ||
81 | #define DFC_HW_CGC_EN (1 << 4) | ||
82 | #define TRLUT_HW_CGC_EN (1 << 5) | ||
83 | #define TMRLUT_HW_CGC_EN (1 << 6) | ||
84 | #define OCSC_HW_CGC_EN (1 << 7) | ||
85 | |||
86 | #define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\ | ||
87 | TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\ | ||
88 | DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\ | ||
89 | TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN) | ||
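/*
 * Note: the eight *_HW_CGC_EN bits above occupy bit positions 0-7, so
 * REG_UFS_CFG2_CGC_EN_ALL evaluates to 0xFF; the names suggest that writing
 * it to REG_UFS_CFG2 enables hardware clock gating for all of the listed
 * sub-units at once.
 */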
90 | |||
91 | /* bit offset */ | ||
92 | enum { | ||
93 | OFFSET_UFS_PHY_SOFT_RESET = 1, | ||
94 | OFFSET_CLK_NS_REG = 10, | ||
95 | }; | ||
96 | |||
97 | /* bit masks */ | ||
98 | enum { | ||
99 | MASK_UFS_PHY_SOFT_RESET = 0x2, | ||
100 | MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF, | ||
101 | MASK_CLK_NS_REG = 0xFFFC00, | ||
102 | }; | ||
103 | |||
104 | enum ufs_qcom_phy_init_type { | ||
105 | UFS_PHY_INIT_FULL, | ||
106 | UFS_PHY_INIT_CFG_RESTORE, | ||
107 | }; | ||
108 | |||
109 | static inline void | ||
110 | ufs_qcom_get_controller_revision(struct ufs_hba *hba, | ||
111 | u8 *major, u16 *minor, u16 *step) | ||
112 | { | ||
113 | u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION); | ||
114 | |||
115 | *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT; | ||
116 | *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT; | ||
117 | *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT; | ||
118 | } | ||
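/*
 * Worked example (the register value is illustrative): for ver = 0x10020003,
 *   major = (0x10020003 & 0xF0000000) >> 28 = 0x1
 *   minor = (0x10020003 & 0x0FFF0000) >> 16 = 0x002
 *   step  = (0x10020003 & 0x0000FFFF)       = 0x0003
 * i.e. controller revision 1.2, step 3.
 */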
119 | |||
120 | static inline void ufs_qcom_assert_reset(struct ufs_hba *hba) | ||
121 | { | ||
122 | ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, | ||
123 | 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1); | ||
124 | |||
125 | /* | ||
126 | * Make sure assertion of ufs phy reset is written to | ||
127 | * register before returning | ||
128 | */ | ||
129 | mb(); | ||
130 | } | ||
131 | |||
132 | static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba) | ||
133 | { | ||
134 | ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET, | ||
135 | 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1); | ||
136 | |||
137 | /* | ||
138 | * Make sure de-assertion of ufs phy reset is written to | ||
139 | * register before returning | ||
140 | */ | ||
141 | mb(); | ||
142 | } | ||
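/*
 * Hedged note: MASK_UFS_PHY_SOFT_RESET is 0x2 and OFFSET_UFS_PHY_SOFT_RESET
 * is 1, so, assuming ufshcd_rmwl() performs a masked read-modify-write of
 * REG_UFS_CFG1, ufs_qcom_assert_reset() sets bit 1 while
 * ufs_qcom_deassert_reset() clears it; the mb() in each helper orders that
 * register write ahead of whatever the caller does next.
 */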
143 | |||
144 | struct ufs_qcom_bus_vote { | ||
145 | uint32_t client_handle; | ||
146 | uint32_t curr_vote; | ||
147 | int min_bw_vote; | ||
148 | int max_bw_vote; | ||
149 | int saved_vote; | ||
150 | bool is_max_bw_needed; | ||
151 | struct device_attribute max_bus_bw; | ||
152 | }; | ||
153 | |||
154 | struct ufs_qcom_host { | ||
155 | struct phy *generic_phy; | ||
156 | struct ufs_hba *hba; | ||
157 | struct ufs_qcom_bus_vote bus_vote; | ||
158 | struct ufs_pa_layer_attr dev_req_params; | ||
159 | struct clk *rx_l0_sync_clk; | ||
160 | struct clk *tx_l0_sync_clk; | ||
161 | struct clk *rx_l1_sync_clk; | ||
162 | struct clk *tx_l1_sync_clk; | ||
163 | bool is_lane_clks_enabled; | ||
164 | }; | ||
165 | |||
166 | #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba) | ||
167 | #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba) | ||
168 | #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba) | ||
169 | |||
170 | #endif /* UFS_QCOM_H_ */ | ||
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 2e4614b9dddf..5d60a868830d 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -4714,10 +4714,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, | |||
4714 | sdev_printk(KERN_WARNING, sdp, | 4714 | sdev_printk(KERN_WARNING, sdp, |
4715 | "START_STOP failed for power mode: %d, result %x\n", | 4715 | "START_STOP failed for power mode: %d, result %x\n", |
4716 | pwr_mode, ret); | 4716 | pwr_mode, ret); |
4717 | if (driver_byte(ret) & DRIVER_SENSE) { | 4717 | if (driver_byte(ret) & DRIVER_SENSE) |
4718 | scsi_show_sense_hdr(sdp, NULL, &sshdr); | 4718 | scsi_print_sense_hdr(sdp, NULL, &sshdr); |
4719 | scsi_show_extd_sense(sdp, NULL, sshdr.asc, sshdr.ascq); | ||
4720 | } | ||
4721 | } | 4719 | } |
4722 | 4720 | ||
4723 | if (!ret) | 4721 | if (!ret) |
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index c0506de4f3b6..9e09da412b92 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c | |||
@@ -2143,22 +2143,22 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2143 | seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d" | 2143 | seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d" |
2144 | " dma_mode=%02x fast=%d", | 2144 | " dma_mode=%02x fast=%d", |
2145 | hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast); | 2145 | hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast); |
2146 | seq_printf(m, "\nsync_xfer[] = "); | 2146 | seq_puts(m, "\nsync_xfer[] = "); |
2147 | for (x = 0; x < 7; x++) | 2147 | for (x = 0; x < 7; x++) |
2148 | seq_printf(m, "\t%02x", hd->sync_xfer[x]); | 2148 | seq_printf(m, "\t%02x", hd->sync_xfer[x]); |
2149 | seq_printf(m, "\nsync_stat[] = "); | 2149 | seq_puts(m, "\nsync_stat[] = "); |
2150 | for (x = 0; x < 7; x++) | 2150 | for (x = 0; x < 7; x++) |
2151 | seq_printf(m, "\t%02x", hd->sync_stat[x]); | 2151 | seq_printf(m, "\t%02x", hd->sync_stat[x]); |
2152 | } | 2152 | } |
2153 | #ifdef PROC_STATISTICS | 2153 | #ifdef PROC_STATISTICS |
2154 | if (hd->proc & PR_STATISTICS) { | 2154 | if (hd->proc & PR_STATISTICS) { |
2155 | seq_printf(m, "\ncommands issued: "); | 2155 | seq_puts(m, "\ncommands issued: "); |
2156 | for (x = 0; x < 7; x++) | 2156 | for (x = 0; x < 7; x++) |
2157 | seq_printf(m, "\t%ld", hd->cmd_cnt[x]); | 2157 | seq_printf(m, "\t%ld", hd->cmd_cnt[x]); |
2158 | seq_printf(m, "\ndisconnects allowed:"); | 2158 | seq_puts(m, "\ndisconnects allowed:"); |
2159 | for (x = 0; x < 7; x++) | 2159 | for (x = 0; x < 7; x++) |
2160 | seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); | 2160 | seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); |
2161 | seq_printf(m, "\ndisconnects done: "); | 2161 | seq_puts(m, "\ndisconnects done: "); |
2162 | for (x = 0; x < 7; x++) | 2162 | for (x = 0; x < 7; x++) |
2163 | seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); | 2163 | seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); |
2164 | seq_printf(m, | 2164 | seq_printf(m, |
@@ -2167,7 +2167,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2167 | } | 2167 | } |
2168 | #endif | 2168 | #endif |
2169 | if (hd->proc & PR_CONNECTED) { | 2169 | if (hd->proc & PR_CONNECTED) { |
2170 | seq_printf(m, "\nconnected: "); | 2170 | seq_puts(m, "\nconnected: "); |
2171 | if (hd->connected) { | 2171 | if (hd->connected) { |
2172 | cmd = (struct scsi_cmnd *) hd->connected; | 2172 | cmd = (struct scsi_cmnd *) hd->connected; |
2173 | seq_printf(m, " %d:%llu(%02x)", | 2173 | seq_printf(m, " %d:%llu(%02x)", |
@@ -2175,7 +2175,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2175 | } | 2175 | } |
2176 | } | 2176 | } |
2177 | if (hd->proc & PR_INPUTQ) { | 2177 | if (hd->proc & PR_INPUTQ) { |
2178 | seq_printf(m, "\ninput_Q: "); | 2178 | seq_puts(m, "\ninput_Q: "); |
2179 | cmd = (struct scsi_cmnd *) hd->input_Q; | 2179 | cmd = (struct scsi_cmnd *) hd->input_Q; |
2180 | while (cmd) { | 2180 | while (cmd) { |
2181 | seq_printf(m, " %d:%llu(%02x)", | 2181 | seq_printf(m, " %d:%llu(%02x)", |
@@ -2184,7 +2184,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2184 | } | 2184 | } |
2185 | } | 2185 | } |
2186 | if (hd->proc & PR_DISCQ) { | 2186 | if (hd->proc & PR_DISCQ) { |
2187 | seq_printf(m, "\ndisconnected_Q:"); | 2187 | seq_puts(m, "\ndisconnected_Q:"); |
2188 | cmd = (struct scsi_cmnd *) hd->disconnected_Q; | 2188 | cmd = (struct scsi_cmnd *) hd->disconnected_Q; |
2189 | while (cmd) { | 2189 | while (cmd) { |
2190 | seq_printf(m, " %d:%llu(%02x)", | 2190 | seq_printf(m, " %d:%llu(%02x)", |
@@ -2192,7 +2192,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance) | |||
2192 | cmd = (struct scsi_cmnd *) cmd->host_scribble; | 2192 | cmd = (struct scsi_cmnd *) cmd->host_scribble; |
2193 | } | 2193 | } |
2194 | } | 2194 | } |
2195 | seq_printf(m, "\n"); | 2195 | seq_putc(m, '\n'); |
2196 | spin_unlock_irq(&hd->lock); | 2196 | spin_unlock_irq(&hd->lock); |
2197 | #endif /* PROC_INTERFACE */ | 2197 | #endif /* PROC_INTERFACE */ |
2198 | return 0; | 2198 | return 0; |
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index f94d73611ab4..0c0f17b9a3eb 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c | |||
@@ -1295,9 +1295,6 @@ static void wd7000_revision(Adapter * host) | |||
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | 1297 | ||
1298 | #undef SPRINTF | ||
1299 | #define SPRINTF(args...) { seq_printf(m, ## args); } | ||
1300 | |||
1301 | static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length) | 1298 | static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length) |
1302 | { | 1299 | { |
1303 | dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length); | 1300 | dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length); |
@@ -1320,43 +1317,43 @@ static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host) | |||
1320 | #endif | 1317 | #endif |
1321 | 1318 | ||
1322 | spin_lock_irqsave(host->host_lock, flags); | 1319 | spin_lock_irqsave(host->host_lock, flags); |
1323 | SPRINTF("Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2); | 1320 | seq_printf(m, "Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2); |
1324 | SPRINTF(" IO base: 0x%x\n", adapter->iobase); | 1321 | seq_printf(m, " IO base: 0x%x\n", adapter->iobase); |
1325 | SPRINTF(" IRQ: %d\n", adapter->irq); | 1322 | seq_printf(m, " IRQ: %d\n", adapter->irq); |
1326 | SPRINTF(" DMA channel: %d\n", adapter->dma); | 1323 | seq_printf(m, " DMA channel: %d\n", adapter->dma); |
1327 | SPRINTF(" Interrupts: %d\n", adapter->int_counter); | 1324 | seq_printf(m, " Interrupts: %d\n", adapter->int_counter); |
1328 | SPRINTF(" BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125); | 1325 | seq_printf(m, " BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125); |
1329 | SPRINTF(" BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125); | 1326 | seq_printf(m, " BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125); |
1330 | 1327 | ||
1331 | #ifdef WD7000_DEBUG | 1328 | #ifdef WD7000_DEBUG |
1332 | ogmbs = adapter->mb.ogmb; | 1329 | ogmbs = adapter->mb.ogmb; |
1333 | icmbs = adapter->mb.icmb; | 1330 | icmbs = adapter->mb.icmb; |
1334 | 1331 | ||
1335 | SPRINTF("\nControl port value: 0x%x\n", adapter->control); | 1332 | seq_printf(m, "\nControl port value: 0x%x\n", adapter->control); |
1336 | SPRINTF("Incoming mailbox:\n"); | 1333 | seq_puts(m, "Incoming mailbox:\n"); |
1337 | SPRINTF(" size: %d\n", ICMB_CNT); | 1334 | seq_printf(m, " size: %d\n", ICMB_CNT); |
1338 | SPRINTF(" queued messages: "); | 1335 | seq_puts(m, " queued messages: "); |
1339 | 1336 | ||
1340 | for (i = count = 0; i < ICMB_CNT; i++) | 1337 | for (i = count = 0; i < ICMB_CNT; i++) |
1341 | if (icmbs[i].status) { | 1338 | if (icmbs[i].status) { |
1342 | count++; | 1339 | count++; |
1343 | SPRINTF("0x%x ", i); | 1340 | seq_printf(m, "0x%x ", i); |
1344 | } | 1341 | } |
1345 | 1342 | ||
1346 | SPRINTF(count ? "\n" : "none\n"); | 1343 | seq_puts(m, count ? "\n" : "none\n"); |
1347 | 1344 | ||
1348 | SPRINTF("Outgoing mailbox:\n"); | 1345 | seq_puts(m, "Outgoing mailbox:\n"); |
1349 | SPRINTF(" size: %d\n", OGMB_CNT); | 1346 | seq_printf(m, " size: %d\n", OGMB_CNT); |
1350 | SPRINTF(" next message: 0x%x\n", adapter->next_ogmb); | 1347 | seq_printf(m, " next message: 0x%x\n", adapter->next_ogmb); |
1351 | SPRINTF(" queued messages: "); | 1348 | seq_puts(m, " queued messages: "); |
1352 | 1349 | ||
1353 | for (i = count = 0; i < OGMB_CNT; i++) | 1350 | for (i = count = 0; i < OGMB_CNT; i++) |
1354 | if (ogmbs[i].status) { | 1351 | if (ogmbs[i].status) { |
1355 | count++; | 1352 | count++; |
1356 | SPRINTF("0x%x ", i); | 1353 | seq_printf(m, "0x%x ", i); |
1357 | } | 1354 | } |
1358 | 1355 | ||
1359 | SPRINTF(count ? "\n" : "none\n"); | 1356 | seq_puts(m, count ? "\n" : "none\n"); |
1360 | #endif | 1357 | #endif |
1361 | 1358 | ||
1362 | spin_unlock_irqrestore(host->host_lock, flags); | 1359 | spin_unlock_irqrestore(host->host_lock, flags); |
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index ecd540a7a562..61653a03a8f5 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
@@ -47,6 +47,7 @@ | |||
47 | 47 | ||
48 | #include <generated/utsrelease.h> | 48 | #include <generated/utsrelease.h> |
49 | 49 | ||
50 | #include <scsi/scsi.h> | ||
50 | #include <scsi/scsi_dbg.h> | 51 | #include <scsi/scsi_dbg.h> |
51 | #include <scsi/scsi_eh.h> | 52 | #include <scsi/scsi_eh.h> |
52 | #include <scsi/scsi_tcq.h> | 53 | #include <scsi/scsi_tcq.h> |