diff options
Diffstat (limited to 'drivers')
182 files changed, 2602 insertions, 2306 deletions
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 5277a0ee5704..b1def411c0b8 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c | |||
| @@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev) | |||
| 512 | dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); | 512 | dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); |
| 513 | if (gsi >= 0) { | 513 | if (gsi >= 0) { |
| 514 | acpi_unregister_gsi(gsi); | 514 | acpi_unregister_gsi(gsi); |
| 515 | dev->irq = 0; | ||
| 516 | dev->irq_managed = 0; | 515 | dev->irq_managed = 0; |
| 517 | } | 516 | } |
| 518 | } | 517 | } |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index a3a13605a9c4..5f601553b9b0 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
| @@ -835,6 +835,7 @@ config PATA_AT32 | |||
| 835 | config PATA_AT91 | 835 | config PATA_AT91 |
| 836 | tristate "PATA support for AT91SAM9260" | 836 | tristate "PATA support for AT91SAM9260" |
| 837 | depends on ARM && SOC_AT91SAM9 | 837 | depends on ARM && SOC_AT91SAM9 |
| 838 | depends on !ARCH_MULTIPLATFORM | ||
| 838 | help | 839 | help |
| 839 | This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. | 840 | This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. |
| 840 | 841 | ||
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 49f1e6890587..33bb06e006c9 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
| @@ -325,7 +325,6 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
| 325 | { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ | 325 | { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ |
| 326 | { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ | 326 | { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ |
| 327 | { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ | 327 | { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ |
| 328 | { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */ | ||
| 329 | { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ | 328 | { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ |
| 330 | { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ | 329 | { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ |
| 331 | { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ | 330 | { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ |
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index feeb8f1e2fe8..cbcd20810355 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c | |||
| @@ -125,10 +125,11 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) | |||
| 125 | * xgene_ahci_qc_issue - Issue commands to the device | 125 | * xgene_ahci_qc_issue - Issue commands to the device |
| 126 | * @qc: Command to issue | 126 | * @qc: Command to issue |
| 127 | * | 127 | * |
| 128 | * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot | 128 | * Due to Hardware errata for IDENTIFY DEVICE command and PACKET |
| 129 | * clear the BSY bit after receiving the PIO setup FIS. This results in the dma | 129 | * command of ATAPI protocol set, the controller cannot clear the BSY bit |
| 130 | * state machine goes into the CMFatalErrorUpdate state and locks up. By | 130 | * after receiving the PIO setup FIS. This results in the DMA state machine |
| 131 | * restarting the dma engine, it removes the controller out of lock up state. | 131 | * going into the CMFatalErrorUpdate state and locks up. By restarting the |
| 132 | * DMA engine, it removes the controller out of lock up state. | ||
| 132 | */ | 133 | */ |
| 133 | static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) | 134 | static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) |
| 134 | { | 135 | { |
| @@ -137,7 +138,8 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) | |||
| 137 | struct xgene_ahci_context *ctx = hpriv->plat_data; | 138 | struct xgene_ahci_context *ctx = hpriv->plat_data; |
| 138 | int rc = 0; | 139 | int rc = 0; |
| 139 | 140 | ||
| 140 | if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA)) | 141 | if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) || |
| 142 | (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET))) | ||
| 141 | xgene_ahci_restart_engine(ap); | 143 | xgene_ahci_restart_engine(ap); |
| 142 | 144 | ||
| 143 | rc = ahci_qc_issue(qc); | 145 | rc = ahci_qc_issue(qc); |
| @@ -188,7 +190,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev, | |||
| 188 | * | 190 | * |
| 189 | * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP | 191 | * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP |
| 190 | */ | 192 | */ |
| 191 | id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8); | 193 | id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8)); |
| 192 | 194 | ||
| 193 | return 0; | 195 | return 0; |
| 194 | } | 196 | } |
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 97683e45ab04..61a9c07e0dff 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
| @@ -2003,7 +2003,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) | |||
| 2003 | 2003 | ||
| 2004 | devslp = readl(port_mmio + PORT_DEVSLP); | 2004 | devslp = readl(port_mmio + PORT_DEVSLP); |
| 2005 | if (!(devslp & PORT_DEVSLP_DSP)) { | 2005 | if (!(devslp & PORT_DEVSLP_DSP)) { |
| 2006 | dev_err(ap->host->dev, "port does not support device sleep\n"); | 2006 | dev_info(ap->host->dev, "port does not support device sleep\n"); |
| 2007 | return; | 2007 | return; |
| 2008 | } | 2008 | } |
| 2009 | 2009 | ||
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 5c84fb5c3372..d1a05f9bb91f 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -4233,10 +4233,33 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4233 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, | 4233 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, |
| 4234 | 4234 | ||
| 4235 | /* devices that don't properly handle queued TRIM commands */ | 4235 | /* devices that don't properly handle queued TRIM commands */ |
| 4236 | { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4236 | { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
| 4237 | { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4237 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
| 4238 | { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4238 | { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, |
| 4239 | { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, | 4239 | |
| 4240 | /* | ||
| 4241 | * As defined, the DRAT (Deterministic Read After Trim) and RZAT | ||
| 4242 | * (Return Zero After Trim) flags in the ATA Command Set are | ||
| 4243 | * unreliable in the sense that they only define what happens if | ||
| 4244 | * the device successfully executed the DSM TRIM command. TRIM | ||
| 4245 | * is only advisory, however, and the device is free to silently | ||
| 4246 | * ignore all or parts of the request. | ||
| 4247 | * | ||
| 4248 | * Whitelist drives that are known to reliably return zeroes | ||
| 4249 | * after TRIM. | ||
| 4250 | */ | ||
| 4251 | |||
| 4252 | /* | ||
| 4253 | * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude | ||
| 4254 | * that model before whitelisting all other intel SSDs. | ||
| 4255 | */ | ||
| 4256 | { "INTEL*SSDSC2MH*", NULL, 0, }, | ||
| 4257 | |||
| 4258 | { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4259 | { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4260 | { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4261 | { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4262 | { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
| 4240 | 4263 | ||
| 4241 | /* | 4264 | /* |
| 4242 | * Some WD SATA-I drives spin up and down erratically when the link | 4265 | * Some WD SATA-I drives spin up and down erratically when the link |
| @@ -4748,7 +4771,10 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) | |||
| 4748 | return NULL; | 4771 | return NULL; |
| 4749 | 4772 | ||
| 4750 | for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { | 4773 | for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { |
| 4751 | tag = tag < max_queue ? tag : 0; | 4774 | if (ap->flags & ATA_FLAG_LOWTAG) |
| 4775 | tag = i; | ||
| 4776 | else | ||
| 4777 | tag = tag < max_queue ? tag : 0; | ||
| 4752 | 4778 | ||
| 4753 | /* the last tag is reserved for internal command. */ | 4779 | /* the last tag is reserved for internal command. */ |
| 4754 | if (tag == ATA_TAG_INTERNAL) | 4780 | if (tag == ATA_TAG_INTERNAL) |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 3dbec8954c86..8d00c2638bed 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
| @@ -2389,6 +2389,7 @@ const char *ata_get_cmd_descript(u8 command) | |||
| 2389 | 2389 | ||
| 2390 | return NULL; | 2390 | return NULL; |
| 2391 | } | 2391 | } |
| 2392 | EXPORT_SYMBOL_GPL(ata_get_cmd_descript); | ||
| 2392 | 2393 | ||
| 2393 | /** | 2394 | /** |
| 2394 | * ata_eh_link_report - report error handling to user | 2395 | * ata_eh_link_report - report error handling to user |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e364e86e84d7..6abd17a85b13 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
| @@ -2532,13 +2532,15 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) | |||
| 2532 | rbuf[15] = lowest_aligned; | 2532 | rbuf[15] = lowest_aligned; |
| 2533 | 2533 | ||
| 2534 | if (ata_id_has_trim(args->id)) { | 2534 | if (ata_id_has_trim(args->id)) { |
| 2535 | rbuf[14] |= 0x80; /* TPE */ | 2535 | rbuf[14] |= 0x80; /* LBPME */ |
| 2536 | 2536 | ||
| 2537 | if (ata_id_has_zero_after_trim(args->id)) | 2537 | if (ata_id_has_zero_after_trim(args->id) && |
| 2538 | rbuf[14] |= 0x40; /* TPRZ */ | 2538 | dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { |
| 2539 | ata_dev_info(dev, "Enabling discard_zeroes_data\n"); | ||
| 2540 | rbuf[14] |= 0x40; /* LBPRZ */ | ||
| 2541 | } | ||
| 2539 | } | 2542 | } |
| 2540 | } | 2543 | } |
| 2541 | |||
| 2542 | return 0; | 2544 | return 0; |
| 2543 | } | 2545 | } |
| 2544 | 2546 | ||
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index db90aa35cb71..2e86e3b85266 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
| @@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap) | |||
| 1333 | DPRINTK("ENTER\n"); | 1333 | DPRINTK("ENTER\n"); |
| 1334 | 1334 | ||
| 1335 | cancel_delayed_work_sync(&ap->sff_pio_task); | 1335 | cancel_delayed_work_sync(&ap->sff_pio_task); |
| 1336 | |||
| 1337 | /* | ||
| 1338 | * We wanna reset the HSM state to IDLE. If we do so without | ||
| 1339 | * grabbing the port lock, critical sections protected by it which | ||
| 1340 | * expect the HSM state to stay stable may get surprised. For | ||
| 1341 | * example, we may set IDLE in between the time | ||
| 1342 | * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls | ||
| 1343 | * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG(). | ||
| 1344 | */ | ||
| 1345 | spin_lock_irq(ap->lock); | ||
| 1336 | ap->hsm_task_state = HSM_ST_IDLE; | 1346 | ap->hsm_task_state = HSM_ST_IDLE; |
| 1347 | spin_unlock_irq(ap->lock); | ||
| 1348 | |||
| 1337 | ap->sff_pio_task_link = NULL; | 1349 | ap->sff_pio_task_link = NULL; |
| 1338 | 1350 | ||
| 1339 | if (ata_msg_ctl(ap)) | 1351 | if (ata_msg_ctl(ap)) |
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index c7ddef89e7b0..8e8248179d20 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c | |||
| @@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) | |||
| 797 | if (err) { | 797 | if (err) { |
| 798 | dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" | 798 | dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" |
| 799 | " %d\n", __func__, err); | 799 | " %d\n", __func__, err); |
| 800 | goto error_out; | 800 | return err; |
| 801 | } | 801 | } |
| 802 | 802 | ||
| 803 | /* Enabe DMA */ | 803 | /* Enabe DMA */ |
| @@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) | |||
| 808 | sata_dma_regs); | 808 | sata_dma_regs); |
| 809 | 809 | ||
| 810 | return 0; | 810 | return 0; |
| 811 | |||
| 812 | error_out: | ||
| 813 | dma_dwc_exit(hsdev); | ||
| 814 | |||
| 815 | return err; | ||
| 816 | } | 811 | } |
| 817 | 812 | ||
| 818 | static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) | 813 | static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) |
| @@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
| 1662 | char *ver = (char *)&versionr; | 1657 | char *ver = (char *)&versionr; |
| 1663 | u8 *base = NULL; | 1658 | u8 *base = NULL; |
| 1664 | int err = 0; | 1659 | int err = 0; |
| 1665 | int irq, rc; | 1660 | int irq; |
| 1666 | struct ata_host *host; | 1661 | struct ata_host *host; |
| 1667 | struct ata_port_info pi = sata_dwc_port_info[0]; | 1662 | struct ata_port_info pi = sata_dwc_port_info[0]; |
| 1668 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 1663 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
| @@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
| 1725 | if (irq == NO_IRQ) { | 1720 | if (irq == NO_IRQ) { |
| 1726 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); | 1721 | dev_err(&ofdev->dev, "no SATA DMA irq\n"); |
| 1727 | err = -ENODEV; | 1722 | err = -ENODEV; |
| 1728 | goto error_out; | 1723 | goto error_iomap; |
| 1729 | } | 1724 | } |
| 1730 | 1725 | ||
| 1731 | /* Get physical SATA DMA register base address */ | 1726 | /* Get physical SATA DMA register base address */ |
| @@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
| 1734 | dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" | 1729 | dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" |
| 1735 | " address\n"); | 1730 | " address\n"); |
| 1736 | err = -ENODEV; | 1731 | err = -ENODEV; |
| 1737 | goto error_out; | 1732 | goto error_iomap; |
| 1738 | } | 1733 | } |
| 1739 | 1734 | ||
| 1740 | /* Save dev for later use in dev_xxx() routines */ | 1735 | /* Save dev for later use in dev_xxx() routines */ |
| 1741 | host_pvt.dwc_dev = &ofdev->dev; | 1736 | host_pvt.dwc_dev = &ofdev->dev; |
| 1742 | 1737 | ||
| 1743 | /* Initialize AHB DMAC */ | 1738 | /* Initialize AHB DMAC */ |
| 1744 | dma_dwc_init(hsdev, irq); | 1739 | err = dma_dwc_init(hsdev, irq); |
| 1740 | if (err) | ||
| 1741 | goto error_dma_iomap; | ||
| 1745 | 1742 | ||
| 1746 | /* Enable SATA Interrupts */ | 1743 | /* Enable SATA Interrupts */ |
| 1747 | sata_dwc_enable_interrupts(hsdev); | 1744 | sata_dwc_enable_interrupts(hsdev); |
| @@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
| 1759 | * device discovery process, invoking our port_start() handler & | 1756 | * device discovery process, invoking our port_start() handler & |
| 1760 | * error_handler() to execute a dummy Softreset EH session | 1757 | * error_handler() to execute a dummy Softreset EH session |
| 1761 | */ | 1758 | */ |
| 1762 | rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); | 1759 | err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); |
| 1763 | 1760 | if (err) | |
| 1764 | if (rc != 0) | ||
| 1765 | dev_err(&ofdev->dev, "failed to activate host"); | 1761 | dev_err(&ofdev->dev, "failed to activate host"); |
| 1766 | 1762 | ||
| 1767 | dev_set_drvdata(&ofdev->dev, host); | 1763 | dev_set_drvdata(&ofdev->dev, host); |
| @@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) | |||
| 1770 | error_out: | 1766 | error_out: |
| 1771 | /* Free SATA DMA resources */ | 1767 | /* Free SATA DMA resources */ |
| 1772 | dma_dwc_exit(hsdev); | 1768 | dma_dwc_exit(hsdev); |
| 1773 | 1769 | error_dma_iomap: | |
| 1770 | iounmap((void __iomem *)host_pvt.sata_dma_regs); | ||
| 1774 | error_iomap: | 1771 | error_iomap: |
| 1775 | iounmap(base); | 1772 | iounmap(base); |
| 1776 | error_kmalloc: | 1773 | error_kmalloc: |
| @@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev) | |||
| 1791 | /* Free SATA DMA resources */ | 1788 | /* Free SATA DMA resources */ |
| 1792 | dma_dwc_exit(hsdev); | 1789 | dma_dwc_exit(hsdev); |
| 1793 | 1790 | ||
| 1791 | iounmap((void __iomem *)host_pvt.sata_dma_regs); | ||
| 1794 | iounmap(hsdev->reg_base); | 1792 | iounmap(hsdev->reg_base); |
| 1795 | kfree(hsdev); | 1793 | kfree(hsdev); |
| 1796 | kfree(host); | 1794 | kfree(host); |
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index d81b20ddb527..ea655949023f 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
| @@ -246,7 +246,7 @@ enum { | |||
| 246 | /* host flags */ | 246 | /* host flags */ |
| 247 | SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | | 247 | SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | |
| 248 | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | | 248 | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | |
| 249 | ATA_FLAG_AN | ATA_FLAG_PMP, | 249 | ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG, |
| 250 | SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ | 250 | SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ |
| 251 | 251 | ||
| 252 | IRQ_STAT_4PORTS = 0xf, | 252 | IRQ_STAT_4PORTS = 0xf, |
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index cb529e9a82dd..d826bf3e62c8 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
| @@ -106,7 +106,7 @@ struct nvme_queue { | |||
| 106 | dma_addr_t cq_dma_addr; | 106 | dma_addr_t cq_dma_addr; |
| 107 | u32 __iomem *q_db; | 107 | u32 __iomem *q_db; |
| 108 | u16 q_depth; | 108 | u16 q_depth; |
| 109 | u16 cq_vector; | 109 | s16 cq_vector; |
| 110 | u16 sq_head; | 110 | u16 sq_head; |
| 111 | u16 sq_tail; | 111 | u16 sq_tail; |
| 112 | u16 cq_head; | 112 | u16 cq_head; |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3ec85dfce124..8a86b62466f7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
| @@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev) | |||
| 2098 | * If an image has a non-zero parent overlap, get a reference to its | 2098 | * If an image has a non-zero parent overlap, get a reference to its |
| 2099 | * parent. | 2099 | * parent. |
| 2100 | * | 2100 | * |
| 2101 | * We must get the reference before checking for the overlap to | ||
| 2102 | * coordinate properly with zeroing the parent overlap in | ||
| 2103 | * rbd_dev_v2_parent_info() when an image gets flattened. We | ||
| 2104 | * drop it again if there is no overlap. | ||
| 2105 | * | ||
| 2106 | * Returns true if the rbd device has a parent with a non-zero | 2101 | * Returns true if the rbd device has a parent with a non-zero |
| 2107 | * overlap and a reference for it was successfully taken, or | 2102 | * overlap and a reference for it was successfully taken, or |
| 2108 | * false otherwise. | 2103 | * false otherwise. |
| 2109 | */ | 2104 | */ |
| 2110 | static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) | 2105 | static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) |
| 2111 | { | 2106 | { |
| 2112 | int counter; | 2107 | int counter = 0; |
| 2113 | 2108 | ||
| 2114 | if (!rbd_dev->parent_spec) | 2109 | if (!rbd_dev->parent_spec) |
| 2115 | return false; | 2110 | return false; |
| 2116 | 2111 | ||
| 2117 | counter = atomic_inc_return_safe(&rbd_dev->parent_ref); | 2112 | down_read(&rbd_dev->header_rwsem); |
| 2118 | if (counter > 0 && rbd_dev->parent_overlap) | 2113 | if (rbd_dev->parent_overlap) |
| 2119 | return true; | 2114 | counter = atomic_inc_return_safe(&rbd_dev->parent_ref); |
| 2120 | 2115 | up_read(&rbd_dev->header_rwsem); | |
| 2121 | /* Image was flattened, but parent is not yet torn down */ | ||
| 2122 | 2116 | ||
| 2123 | if (counter < 0) | 2117 | if (counter < 0) |
| 2124 | rbd_warn(rbd_dev, "parent reference overflow"); | 2118 | rbd_warn(rbd_dev, "parent reference overflow"); |
| 2125 | 2119 | ||
| 2126 | return false; | 2120 | return counter > 0; |
| 2127 | } | 2121 | } |
| 2128 | 2122 | ||
| 2129 | /* | 2123 | /* |
| @@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 4239 | */ | 4233 | */ |
| 4240 | if (rbd_dev->parent_overlap) { | 4234 | if (rbd_dev->parent_overlap) { |
| 4241 | rbd_dev->parent_overlap = 0; | 4235 | rbd_dev->parent_overlap = 0; |
| 4242 | smp_mb(); | ||
| 4243 | rbd_dev_parent_put(rbd_dev); | 4236 | rbd_dev_parent_put(rbd_dev); |
| 4244 | pr_info("%s: clone image has been flattened\n", | 4237 | pr_info("%s: clone image has been flattened\n", |
| 4245 | rbd_dev->disk->disk_name); | 4238 | rbd_dev->disk->disk_name); |
| @@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) | |||
| 4285 | * treat it specially. | 4278 | * treat it specially. |
| 4286 | */ | 4279 | */ |
| 4287 | rbd_dev->parent_overlap = overlap; | 4280 | rbd_dev->parent_overlap = overlap; |
| 4288 | smp_mb(); | ||
| 4289 | if (!overlap) { | 4281 | if (!overlap) { |
| 4290 | 4282 | ||
| 4291 | /* A null parent_spec indicates it's the initial probe */ | 4283 | /* A null parent_spec indicates it's the initial probe */ |
| @@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) | |||
| 5114 | { | 5106 | { |
| 5115 | struct rbd_image_header *header; | 5107 | struct rbd_image_header *header; |
| 5116 | 5108 | ||
| 5117 | /* Drop parent reference unless it's already been done (or none) */ | 5109 | rbd_dev_parent_put(rbd_dev); |
| 5118 | |||
| 5119 | if (rbd_dev->parent_overlap) | ||
| 5120 | rbd_dev_parent_put(rbd_dev); | ||
| 5121 | 5110 | ||
| 5122 | /* Free dynamic fields from the header, then zero it out */ | 5111 | /* Free dynamic fields from the header, then zero it out */ |
| 5123 | 5112 | ||
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index eb7682dc123b..81bf297f1034 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c | |||
| @@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus, | |||
| 210 | } | 210 | } |
| 211 | 211 | ||
| 212 | /* Checks whether the given window number is available */ | 212 | /* Checks whether the given window number is available */ |
| 213 | |||
| 214 | /* On Armada XP, 375 and 38x the MBus window 13 has the remap | ||
| 215 | * capability, like windows 0 to 7. However, the mvebu-mbus driver | ||
| 216 | * isn't currently taking into account this special case, which means | ||
| 217 | * that when window 13 is actually used, the remap registers are left | ||
| 218 | * to 0, making the device using this MBus window unavailable. The | ||
| 219 | * quick fix for stable is to not use window 13. A follow up patch | ||
| 220 | * will correctly handle this window. | ||
| 221 | */ | ||
| 213 | static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus, | 222 | static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus, |
| 214 | const int win) | 223 | const int win) |
| 215 | { | 224 | { |
| 216 | void __iomem *addr = mbus->mbuswins_base + | 225 | void __iomem *addr = mbus->mbuswins_base + |
| 217 | mbus->soc->win_cfg_offset(win); | 226 | mbus->soc->win_cfg_offset(win); |
| 218 | u32 ctrl = readl(addr + WIN_CTRL_OFF); | 227 | u32 ctrl = readl(addr + WIN_CTRL_OFF); |
| 228 | |||
| 229 | if (win == 13) | ||
| 230 | return false; | ||
| 231 | |||
| 219 | return !(ctrl & WIN_CTRL_ENABLE); | 232 | return !(ctrl & WIN_CTRL_ENABLE); |
| 220 | } | 233 | } |
| 221 | 234 | ||
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 0595dc6c453e..f1e33d08dd83 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c | |||
| @@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base) | |||
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | static void | 70 | static void |
| 71 | kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) | 71 | kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) |
| 72 | { | 72 | { |
| 73 | void __iomem *base = IOMEM(timer_base); | ||
| 74 | int loop_limit = 4; | 73 | int loop_limit = 4; |
| 75 | 74 | ||
| 76 | /* | 75 | /* |
| @@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw) | |||
| 86 | */ | 85 | */ |
| 87 | 86 | ||
| 88 | while (--loop_limit) { | 87 | while (--loop_limit) { |
| 89 | *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET); | 88 | *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET); |
| 90 | *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET); | 89 | *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET); |
| 91 | if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET)) | 90 | if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET)) |
| 92 | break; | 91 | break; |
| 93 | } | 92 | } |
| 94 | if (!loop_limit) { | 93 | if (!loop_limit) { |
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 9403061a2acc..83564c9cfdbe 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c | |||
| @@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset) | |||
| 97 | writel_relaxed(value, reg_base + offset); | 97 | writel_relaxed(value, reg_base + offset); |
| 98 | 98 | ||
| 99 | if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { | 99 | if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { |
| 100 | stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; | 100 | stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; |
| 101 | switch (offset & EXYNOS4_MCT_L_MASK) { | 101 | switch (offset & ~EXYNOS4_MCT_L_MASK) { |
| 102 | case MCT_L_TCON_OFFSET: | 102 | case MCT_L_TCON_OFFSET: |
| 103 | mask = 1 << 3; /* L_TCON write status */ | 103 | mask = 1 << 3; /* L_TCON write status */ |
| 104 | break; | 104 | break; |
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 0f665b8f2461..f150ca82bfaf 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c | |||
| @@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch, | |||
| 428 | ced->features = CLOCK_EVT_FEAT_PERIODIC; | 428 | ced->features = CLOCK_EVT_FEAT_PERIODIC; |
| 429 | ced->features |= CLOCK_EVT_FEAT_ONESHOT; | 429 | ced->features |= CLOCK_EVT_FEAT_ONESHOT; |
| 430 | ced->rating = 200; | 430 | ced->rating = 200; |
| 431 | ced->cpumask = cpumask_of(0); | 431 | ced->cpumask = cpu_possible_mask; |
| 432 | ced->set_next_event = sh_tmu_clock_event_next; | 432 | ced->set_next_event = sh_tmu_clock_event_next; |
| 433 | ced->set_mode = sh_tmu_clock_event_mode; | 433 | ced->set_mode = sh_tmu_clock_event_mode; |
| 434 | ced->suspend = sh_tmu_clock_event_suspend; | 434 | ced->suspend = sh_tmu_clock_event_suspend; |
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index 55d4803d71b0..3d9e08f7e823 100644 --- a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c | |||
| @@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) | |||
| 272 | for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) { | 272 | for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) { |
| 273 | if (pending & BIT(gpio)) { | 273 | if (pending & BIT(gpio)) { |
| 274 | virq = irq_find_mapping(cg->chip.irqdomain, gpio); | 274 | virq = irq_find_mapping(cg->chip.irqdomain, gpio); |
| 275 | generic_handle_irq(virq); | 275 | handle_nested_irq(virq); |
| 276 | } | 276 | } |
| 277 | } | 277 | } |
| 278 | 278 | ||
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 604dbe60bdee..08261f2b3a82 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) | |||
| 45 | return false; | 45 | return false; |
| 46 | 46 | ||
| 47 | ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); | 47 | ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); |
| 48 | if (ret < 0) | 48 | if (ret < 0) { |
| 49 | return false; | 49 | /* We've found the gpio chip, but the translation failed. |
| 50 | * Return true to stop looking and return the translation | ||
| 51 | * error via out_gpio | ||
| 52 | */ | ||
| 53 | gg_data->out_gpio = ERR_PTR(ret); | ||
| 54 | return true; | ||
| 55 | } | ||
| 50 | 56 | ||
| 51 | gg_data->out_gpio = gpiochip_get_desc(gc, ret); | 57 | gg_data->out_gpio = gpiochip_get_desc(gc, ret); |
| 52 | return true; | 58 | return true; |
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 2ac1800b58bb..f62aa115d79a 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c | |||
| @@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev, | |||
| 128 | return status; | 128 | return status; |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static const DEVICE_ATTR(value, 0644, | 131 | static DEVICE_ATTR(value, 0644, |
| 132 | gpio_value_show, gpio_value_store); | 132 | gpio_value_show, gpio_value_store); |
| 133 | 133 | ||
| 134 | static irqreturn_t gpio_sysfs_irq(int irq, void *priv) | 134 | static irqreturn_t gpio_sysfs_irq(int irq, void *priv) |
| @@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev, | |||
| 353 | return status ? : size; | 353 | return status ? : size; |
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | static const DEVICE_ATTR(active_low, 0644, | 356 | static DEVICE_ATTR(active_low, 0644, |
| 357 | gpio_active_low_show, gpio_active_low_store); | 357 | gpio_active_low_show, gpio_active_low_store); |
| 358 | 358 | ||
| 359 | static const struct attribute *gpio_attrs[] = { | 359 | static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr, |
| 360 | int n) | ||
| 361 | { | ||
| 362 | struct device *dev = container_of(kobj, struct device, kobj); | ||
| 363 | struct gpio_desc *desc = dev_get_drvdata(dev); | ||
| 364 | umode_t mode = attr->mode; | ||
| 365 | bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags); | ||
| 366 | |||
| 367 | if (attr == &dev_attr_direction.attr) { | ||
| 368 | if (!show_direction) | ||
| 369 | mode = 0; | ||
| 370 | } else if (attr == &dev_attr_edge.attr) { | ||
| 371 | if (gpiod_to_irq(desc) < 0) | ||
| 372 | mode = 0; | ||
| 373 | if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags)) | ||
| 374 | mode = 0; | ||
| 375 | } | ||
| 376 | |||
| 377 | return mode; | ||
| 378 | } | ||
| 379 | |||
| 380 | static struct attribute *gpio_attrs[] = { | ||
| 381 | &dev_attr_direction.attr, | ||
| 382 | &dev_attr_edge.attr, | ||
| 360 | &dev_attr_value.attr, | 383 | &dev_attr_value.attr, |
| 361 | &dev_attr_active_low.attr, | 384 | &dev_attr_active_low.attr, |
| 362 | NULL, | 385 | NULL, |
| 363 | }; | 386 | }; |
| 364 | 387 | ||
| 365 | static const struct attribute_group gpio_attr_group = { | 388 | static const struct attribute_group gpio_group = { |
| 366 | .attrs = (struct attribute **) gpio_attrs, | 389 | .attrs = gpio_attrs, |
| 390 | .is_visible = gpio_is_visible, | ||
| 391 | }; | ||
| 392 | |||
| 393 | static const struct attribute_group *gpio_groups[] = { | ||
| 394 | &gpio_group, | ||
| 395 | NULL | ||
| 367 | }; | 396 | }; |
| 368 | 397 | ||
| 369 | /* | 398 | /* |
| @@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev, | |||
| 400 | } | 429 | } |
| 401 | static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); | 430 | static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); |
| 402 | 431 | ||
| 403 | static const struct attribute *gpiochip_attrs[] = { | 432 | static struct attribute *gpiochip_attrs[] = { |
| 404 | &dev_attr_base.attr, | 433 | &dev_attr_base.attr, |
| 405 | &dev_attr_label.attr, | 434 | &dev_attr_label.attr, |
| 406 | &dev_attr_ngpio.attr, | 435 | &dev_attr_ngpio.attr, |
| 407 | NULL, | 436 | NULL, |
| 408 | }; | 437 | }; |
| 409 | 438 | ATTRIBUTE_GROUPS(gpiochip); | |
| 410 | static const struct attribute_group gpiochip_attr_group = { | ||
| 411 | .attrs = (struct attribute **) gpiochip_attrs, | ||
| 412 | }; | ||
| 413 | 439 | ||
| 414 | /* | 440 | /* |
| 415 | * /sys/class/gpio/export ... write-only | 441 | * /sys/class/gpio/export ... write-only |
| @@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) | |||
| 556 | goto fail_unlock; | 582 | goto fail_unlock; |
| 557 | } | 583 | } |
| 558 | 584 | ||
| 559 | if (!desc->chip->direction_input || !desc->chip->direction_output) | 585 | if (desc->chip->direction_input && desc->chip->direction_output && |
| 560 | direction_may_change = false; | 586 | direction_may_change) { |
| 587 | set_bit(FLAG_SYSFS_DIR, &desc->flags); | ||
| 588 | } | ||
| 589 | |||
| 561 | spin_unlock_irqrestore(&gpio_lock, flags); | 590 | spin_unlock_irqrestore(&gpio_lock, flags); |
| 562 | 591 | ||
| 563 | offset = gpio_chip_hwgpio(desc); | 592 | offset = gpio_chip_hwgpio(desc); |
| 564 | if (desc->chip->names && desc->chip->names[offset]) | 593 | if (desc->chip->names && desc->chip->names[offset]) |
| 565 | ioname = desc->chip->names[offset]; | 594 | ioname = desc->chip->names[offset]; |
| 566 | 595 | ||
| 567 | dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), | 596 | dev = device_create_with_groups(&gpio_class, desc->chip->dev, |
| 568 | desc, ioname ? ioname : "gpio%u", | 597 | MKDEV(0, 0), desc, gpio_groups, |
| 569 | desc_to_gpio(desc)); | 598 | ioname ? ioname : "gpio%u", |
| 599 | desc_to_gpio(desc)); | ||
| 570 | if (IS_ERR(dev)) { | 600 | if (IS_ERR(dev)) { |
| 571 | status = PTR_ERR(dev); | 601 | status = PTR_ERR(dev); |
| 572 | goto fail_unlock; | 602 | goto fail_unlock; |
| 573 | } | 603 | } |
| 574 | 604 | ||
| 575 | status = sysfs_create_group(&dev->kobj, &gpio_attr_group); | ||
| 576 | if (status) | ||
| 577 | goto fail_unregister_device; | ||
| 578 | |||
| 579 | if (direction_may_change) { | ||
| 580 | status = device_create_file(dev, &dev_attr_direction); | ||
| 581 | if (status) | ||
| 582 | goto fail_unregister_device; | ||
| 583 | } | ||
| 584 | |||
| 585 | if (gpiod_to_irq(desc) >= 0 && (direction_may_change || | ||
| 586 | !test_bit(FLAG_IS_OUT, &desc->flags))) { | ||
| 587 | status = device_create_file(dev, &dev_attr_edge); | ||
| 588 | if (status) | ||
| 589 | goto fail_unregister_device; | ||
| 590 | } | ||
| 591 | |||
| 592 | set_bit(FLAG_EXPORT, &desc->flags); | 605 | set_bit(FLAG_EXPORT, &desc->flags); |
| 593 | mutex_unlock(&sysfs_lock); | 606 | mutex_unlock(&sysfs_lock); |
| 594 | return 0; | 607 | return 0; |
| 595 | 608 | ||
| 596 | fail_unregister_device: | ||
| 597 | device_unregister(dev); | ||
| 598 | fail_unlock: | 609 | fail_unlock: |
| 599 | mutex_unlock(&sysfs_lock); | 610 | mutex_unlock(&sysfs_lock); |
| 600 | gpiod_dbg(desc, "%s: status %d\n", __func__, status); | 611 | gpiod_dbg(desc, "%s: status %d\n", __func__, status); |
| @@ -718,6 +729,7 @@ void gpiod_unexport(struct gpio_desc *desc) | |||
| 718 | dev = class_find_device(&gpio_class, NULL, desc, match_export); | 729 | dev = class_find_device(&gpio_class, NULL, desc, match_export); |
| 719 | if (dev) { | 730 | if (dev) { |
| 720 | gpio_setup_irq(desc, dev, 0); | 731 | gpio_setup_irq(desc, dev, 0); |
| 732 | clear_bit(FLAG_SYSFS_DIR, &desc->flags); | ||
| 721 | clear_bit(FLAG_EXPORT, &desc->flags); | 733 | clear_bit(FLAG_EXPORT, &desc->flags); |
| 722 | } else | 734 | } else |
| 723 | status = -ENODEV; | 735 | status = -ENODEV; |
| @@ -750,13 +762,13 @@ int gpiochip_export(struct gpio_chip *chip) | |||
| 750 | 762 | ||
| 751 | /* use chip->base for the ID; it's already known to be unique */ | 763 | /* use chip->base for the ID; it's already known to be unique */ |
| 752 | mutex_lock(&sysfs_lock); | 764 | mutex_lock(&sysfs_lock); |
| 753 | dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, | 765 | dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0), |
| 754 | "gpiochip%d", chip->base); | 766 | chip, gpiochip_groups, |
| 755 | if (!IS_ERR(dev)) { | 767 | "gpiochip%d", chip->base); |
| 756 | status = sysfs_create_group(&dev->kobj, | 768 | if (IS_ERR(dev)) |
| 757 | &gpiochip_attr_group); | ||
| 758 | } else | ||
| 759 | status = PTR_ERR(dev); | 769 | status = PTR_ERR(dev); |
| 770 | else | ||
| 771 | status = 0; | ||
| 760 | chip->exported = (status == 0); | 772 | chip->exported = (status == 0); |
| 761 | mutex_unlock(&sysfs_lock); | 773 | mutex_unlock(&sysfs_lock); |
| 762 | 774 | ||
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 487afe6f22fc..568aa2b6bdb0 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip) | |||
| 248 | base = gpiochip_find_base(chip->ngpio); | 248 | base = gpiochip_find_base(chip->ngpio); |
| 249 | if (base < 0) { | 249 | if (base < 0) { |
| 250 | status = base; | 250 | status = base; |
| 251 | goto unlock; | 251 | spin_unlock_irqrestore(&gpio_lock, flags); |
| 252 | goto err_free_descs; | ||
| 252 | } | 253 | } |
| 253 | chip->base = base; | 254 | chip->base = base; |
| 254 | } | 255 | } |
| 255 | 256 | ||
| 256 | status = gpiochip_add_to_list(chip); | 257 | status = gpiochip_add_to_list(chip); |
| 258 | if (status) { | ||
| 259 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
| 260 | goto err_free_descs; | ||
| 261 | } | ||
| 257 | 262 | ||
| 258 | if (status == 0) { | 263 | for (id = 0; id < chip->ngpio; id++) { |
| 259 | for (id = 0; id < chip->ngpio; id++) { | 264 | struct gpio_desc *desc = &descs[id]; |
| 260 | struct gpio_desc *desc = &descs[id]; | 265 | |
| 261 | desc->chip = chip; | 266 | desc->chip = chip; |
| 262 | 267 | ||
| 263 | /* REVISIT: most hardware initializes GPIOs as | 268 | /* REVISIT: most hardware initializes GPIOs as inputs (often |
| 264 | * inputs (often with pullups enabled) so power | 269 | * with pullups enabled) so power usage is minimized. Linux |
| 265 | * usage is minimized. Linux code should set the | 270 | * code should set the gpio direction first thing; but until |
| 266 | * gpio direction first thing; but until it does, | 271 | * it does, and in case chip->get_direction is not set, we may |
| 267 | * and in case chip->get_direction is not set, | 272 | * expose the wrong direction in sysfs. |
| 268 | * we may expose the wrong direction in sysfs. | 273 | */ |
| 269 | */ | 274 | desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0; |
| 270 | desc->flags = !chip->direction_input | ||
| 271 | ? (1 << FLAG_IS_OUT) | ||
| 272 | : 0; | ||
| 273 | } | ||
| 274 | } | 275 | } |
| 275 | 276 | ||
| 276 | chip->desc = descs; | 277 | chip->desc = descs; |
| @@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip) | |||
| 284 | of_gpiochip_add(chip); | 285 | of_gpiochip_add(chip); |
| 285 | acpi_gpiochip_add(chip); | 286 | acpi_gpiochip_add(chip); |
| 286 | 287 | ||
| 287 | if (status) | ||
| 288 | goto fail; | ||
| 289 | |||
| 290 | status = gpiochip_export(chip); | 288 | status = gpiochip_export(chip); |
| 291 | if (status) | 289 | if (status) |
| 292 | goto fail; | 290 | goto err_remove_chip; |
| 293 | 291 | ||
| 294 | pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__, | 292 | pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__, |
| 295 | chip->base, chip->base + chip->ngpio - 1, | 293 | chip->base, chip->base + chip->ngpio - 1, |
| @@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip) | |||
| 297 | 295 | ||
| 298 | return 0; | 296 | return 0; |
| 299 | 297 | ||
| 300 | unlock: | 298 | err_remove_chip: |
| 299 | acpi_gpiochip_remove(chip); | ||
| 300 | of_gpiochip_remove(chip); | ||
| 301 | spin_lock_irqsave(&gpio_lock, flags); | ||
| 302 | list_del(&chip->list); | ||
| 301 | spin_unlock_irqrestore(&gpio_lock, flags); | 303 | spin_unlock_irqrestore(&gpio_lock, flags); |
| 302 | fail: | ||
| 303 | kfree(descs); | ||
| 304 | chip->desc = NULL; | 304 | chip->desc = NULL; |
| 305 | err_free_descs: | ||
| 306 | kfree(descs); | ||
| 305 | 307 | ||
| 306 | /* failures here can mean systems won't boot... */ | 308 | /* failures here can mean systems won't boot... */ |
| 307 | pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__, | 309 | pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__, |
| @@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip) | |||
| 325 | unsigned long flags; | 327 | unsigned long flags; |
| 326 | unsigned id; | 328 | unsigned id; |
| 327 | 329 | ||
| 328 | acpi_gpiochip_remove(chip); | 330 | gpiochip_unexport(chip); |
| 329 | |||
| 330 | spin_lock_irqsave(&gpio_lock, flags); | ||
| 331 | 331 | ||
| 332 | gpiochip_irqchip_remove(chip); | 332 | gpiochip_irqchip_remove(chip); |
| 333 | |||
| 334 | acpi_gpiochip_remove(chip); | ||
| 333 | gpiochip_remove_pin_ranges(chip); | 335 | gpiochip_remove_pin_ranges(chip); |
| 334 | of_gpiochip_remove(chip); | 336 | of_gpiochip_remove(chip); |
| 335 | 337 | ||
| 338 | spin_lock_irqsave(&gpio_lock, flags); | ||
| 336 | for (id = 0; id < chip->ngpio; id++) { | 339 | for (id = 0; id < chip->ngpio; id++) { |
| 337 | if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) | 340 | if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) |
| 338 | dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); | 341 | dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); |
| @@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip) | |||
| 342 | 345 | ||
| 343 | list_del(&chip->list); | 346 | list_del(&chip->list); |
| 344 | spin_unlock_irqrestore(&gpio_lock, flags); | 347 | spin_unlock_irqrestore(&gpio_lock, flags); |
| 345 | gpiochip_unexport(chip); | ||
| 346 | 348 | ||
| 347 | kfree(chip->desc); | 349 | kfree(chip->desc); |
| 348 | chip->desc = NULL; | 350 | chip->desc = NULL; |
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index e3a52113a541..550a5eafbd38 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h | |||
| @@ -77,6 +77,7 @@ struct gpio_desc { | |||
| 77 | #define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ | 77 | #define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ |
| 78 | #define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ | 78 | #define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ |
| 79 | #define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ | 79 | #define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ |
| 80 | #define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */ | ||
| 80 | 81 | ||
| 81 | #define ID_SHIFT 16 /* add new flags before this one */ | 82 | #define ID_SHIFT 16 /* add new flags before this one */ |
| 82 | 83 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index be6246de5091..307a309110e6 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile | |||
| @@ -8,7 +8,6 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ | |||
| 8 | kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \ | 8 | kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \ |
| 9 | kfd_process.o kfd_queue.o kfd_mqd_manager.o \ | 9 | kfd_process.o kfd_queue.o kfd_mqd_manager.o \ |
| 10 | kfd_kernel_queue.o kfd_packet_manager.o \ | 10 | kfd_kernel_queue.o kfd_packet_manager.o \ |
| 11 | kfd_process_queue_manager.o kfd_device_queue_manager.o \ | 11 | kfd_process_queue_manager.o kfd_device_queue_manager.o |
| 12 | kfd_interrupt.o | ||
| 13 | 12 | ||
| 14 | obj-$(CONFIG_HSA_AMD) += amdkfd.o | 13 | obj-$(CONFIG_HSA_AMD) += amdkfd.o |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 43884ebd4303..25bc47f3c1cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | #include "kfd_priv.h" | 27 | #include "kfd_priv.h" |
| 28 | #include "kfd_device_queue_manager.h" | 28 | #include "kfd_device_queue_manager.h" |
| 29 | #include "kfd_pm4_headers.h" | ||
| 29 | 30 | ||
| 30 | #define MQD_SIZE_ALIGNED 768 | 31 | #define MQD_SIZE_ALIGNED 768 |
| 31 | 32 | ||
| @@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, | |||
| 169 | kfd->shared_resources = *gpu_resources; | 170 | kfd->shared_resources = *gpu_resources; |
| 170 | 171 | ||
| 171 | /* calculate max size of mqds needed for queues */ | 172 | /* calculate max size of mqds needed for queues */ |
| 172 | size = max_num_of_processes * | 173 | size = max_num_of_queues_per_device * |
| 173 | max_num_of_queues_per_process * | 174 | kfd->device_info->mqd_size_aligned; |
| 174 | kfd->device_info->mqd_size_aligned; | ||
| 175 | 175 | ||
| 176 | /* add another 512KB for all other allocations on gart */ | 176 | /* add another 512KB for all other allocations on gart */ |
| 177 | size += 512 * 1024; | 177 | size += 512 * 1024; |
| @@ -192,13 +192,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, | |||
| 192 | goto kfd_topology_add_device_error; | 192 | goto kfd_topology_add_device_error; |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | if (kfd_interrupt_init(kfd)) { | ||
| 196 | dev_err(kfd_device, | ||
| 197 | "Error initializing interrupts for device (%x:%x)\n", | ||
| 198 | kfd->pdev->vendor, kfd->pdev->device); | ||
| 199 | goto kfd_interrupt_error; | ||
| 200 | } | ||
| 201 | |||
| 202 | if (!device_iommu_pasid_init(kfd)) { | 195 | if (!device_iommu_pasid_init(kfd)) { |
| 203 | dev_err(kfd_device, | 196 | dev_err(kfd_device, |
| 204 | "Error initializing iommuv2 for device (%x:%x)\n", | 197 | "Error initializing iommuv2 for device (%x:%x)\n", |
| @@ -237,8 +230,6 @@ dqm_start_error: | |||
| 237 | device_queue_manager_error: | 230 | device_queue_manager_error: |
| 238 | amd_iommu_free_device(kfd->pdev); | 231 | amd_iommu_free_device(kfd->pdev); |
| 239 | device_iommu_pasid_error: | 232 | device_iommu_pasid_error: |
| 240 | kfd_interrupt_exit(kfd); | ||
| 241 | kfd_interrupt_error: | ||
| 242 | kfd_topology_remove_device(kfd); | 233 | kfd_topology_remove_device(kfd); |
| 243 | kfd_topology_add_device_error: | 234 | kfd_topology_add_device_error: |
| 244 | kfd2kgd->fini_sa_manager(kfd->kgd); | 235 | kfd2kgd->fini_sa_manager(kfd->kgd); |
| @@ -254,7 +245,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) | |||
| 254 | if (kfd->init_complete) { | 245 | if (kfd->init_complete) { |
| 255 | device_queue_manager_uninit(kfd->dqm); | 246 | device_queue_manager_uninit(kfd->dqm); |
| 256 | amd_iommu_free_device(kfd->pdev); | 247 | amd_iommu_free_device(kfd->pdev); |
| 257 | kfd_interrupt_exit(kfd); | ||
| 258 | kfd_topology_remove_device(kfd); | 248 | kfd_topology_remove_device(kfd); |
| 259 | } | 249 | } |
| 260 | 250 | ||
| @@ -296,13 +286,5 @@ int kgd2kfd_resume(struct kfd_dev *kfd) | |||
| 296 | /* This is called directly from KGD at ISR. */ | 286 | /* This is called directly from KGD at ISR. */ |
| 297 | void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) | 287 | void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) |
| 298 | { | 288 | { |
| 299 | if (kfd->init_complete) { | 289 | /* Process interrupts / schedule work as necessary */ |
| 300 | spin_lock(&kfd->interrupt_lock); | ||
| 301 | |||
| 302 | if (kfd->interrupts_active | ||
| 303 | && enqueue_ih_ring_entry(kfd, ih_ring_entry)) | ||
| 304 | schedule_work(&kfd->interrupt_work); | ||
| 305 | |||
| 306 | spin_unlock(&kfd->interrupt_lock); | ||
| 307 | } | ||
| 308 | } | 290 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 9c8961d22360..0d8694f015c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
| @@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, | |||
| 183 | 183 | ||
| 184 | mutex_lock(&dqm->lock); | 184 | mutex_lock(&dqm->lock); |
| 185 | 185 | ||
| 186 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
| 187 | pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", | ||
| 188 | dqm->total_queue_count); | ||
| 189 | mutex_unlock(&dqm->lock); | ||
| 190 | return -EPERM; | ||
| 191 | } | ||
| 192 | |||
| 186 | if (list_empty(&qpd->queues_list)) { | 193 | if (list_empty(&qpd->queues_list)) { |
| 187 | retval = allocate_vmid(dqm, qpd, q); | 194 | retval = allocate_vmid(dqm, qpd, q); |
| 188 | if (retval != 0) { | 195 | if (retval != 0) { |
| @@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, | |||
| 207 | list_add(&q->list, &qpd->queues_list); | 214 | list_add(&q->list, &qpd->queues_list); |
| 208 | dqm->queue_count++; | 215 | dqm->queue_count++; |
| 209 | 216 | ||
| 217 | /* | ||
| 218 | * Unconditionally increment this counter, regardless of the queue's | ||
| 219 | * type or whether the queue is active. | ||
| 220 | */ | ||
| 221 | dqm->total_queue_count++; | ||
| 222 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 223 | dqm->total_queue_count); | ||
| 224 | |||
| 210 | mutex_unlock(&dqm->lock); | 225 | mutex_unlock(&dqm->lock); |
| 211 | return 0; | 226 | return 0; |
| 212 | } | 227 | } |
| @@ -280,7 +295,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, | |||
| 280 | q->queue); | 295 | q->queue); |
| 281 | 296 | ||
| 282 | retval = mqd->load_mqd(mqd, q->mqd, q->pipe, | 297 | retval = mqd->load_mqd(mqd, q->mqd, q->pipe, |
| 283 | q->queue, q->properties.write_ptr); | 298 | q->queue, (uint32_t __user *) q->properties.write_ptr); |
| 284 | if (retval != 0) { | 299 | if (retval != 0) { |
| 285 | deallocate_hqd(dqm, q); | 300 | deallocate_hqd(dqm, q); |
| 286 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); | 301 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); |
| @@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, | |||
| 326 | if (list_empty(&qpd->queues_list)) | 341 | if (list_empty(&qpd->queues_list)) |
| 327 | deallocate_vmid(dqm, qpd, q); | 342 | deallocate_vmid(dqm, qpd, q); |
| 328 | dqm->queue_count--; | 343 | dqm->queue_count--; |
| 344 | |||
| 345 | /* | ||
| 346 | * Unconditionally decrement this counter, regardless of the queue's | ||
| 347 | * type | ||
| 348 | */ | ||
| 349 | dqm->total_queue_count--; | ||
| 350 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 351 | dqm->total_queue_count); | ||
| 352 | |||
| 329 | out: | 353 | out: |
| 330 | mutex_unlock(&dqm->lock); | 354 | mutex_unlock(&dqm->lock); |
| 331 | return retval; | 355 | return retval; |
| @@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm, | |||
| 541 | 565 | ||
| 542 | for (i = 0; i < pipes_num; i++) { | 566 | for (i = 0; i < pipes_num; i++) { |
| 543 | inx = i + first_pipe; | 567 | inx = i + first_pipe; |
| 568 | /* | ||
| 569 | * HPD buffer on GTT is allocated by amdkfd, no need to waste | ||
| 570 | * space in GTT for pipelines we don't initialize | ||
| 571 | */ | ||
| 544 | pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; | 572 | pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; |
| 545 | pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); | 573 | pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); |
| 546 | /* = log2(bytes/4)-1 */ | 574 | /* = log2(bytes/4)-1 */ |
| 547 | kfd2kgd->init_pipeline(dqm->dev->kgd, i, | 575 | kfd2kgd->init_pipeline(dqm->dev->kgd, inx, |
| 548 | CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); | 576 | CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); |
| 549 | } | 577 | } |
| 550 | 578 | ||
| @@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm) | |||
| 560 | 588 | ||
| 561 | pr_debug("kfd: In %s\n", __func__); | 589 | pr_debug("kfd: In %s\n", __func__); |
| 562 | 590 | ||
| 563 | retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); | 591 | retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); |
| 564 | if (retval != 0) | 592 | if (retval != 0) |
| 565 | return retval; | 593 | return retval; |
| 566 | 594 | ||
| @@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
| 752 | pr_debug("kfd: In func %s\n", __func__); | 780 | pr_debug("kfd: In func %s\n", __func__); |
| 753 | 781 | ||
| 754 | mutex_lock(&dqm->lock); | 782 | mutex_lock(&dqm->lock); |
| 783 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
| 784 | pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", | ||
| 785 | dqm->total_queue_count); | ||
| 786 | mutex_unlock(&dqm->lock); | ||
| 787 | return -EPERM; | ||
| 788 | } | ||
| 789 | |||
| 790 | /* | ||
| 791 | * Unconditionally increment this counter, regardless of the queue's | ||
| 792 | * type or whether the queue is active. | ||
| 793 | */ | ||
| 794 | dqm->total_queue_count++; | ||
| 795 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 796 | dqm->total_queue_count); | ||
| 797 | |||
| 755 | list_add(&kq->list, &qpd->priv_queue_list); | 798 | list_add(&kq->list, &qpd->priv_queue_list); |
| 756 | dqm->queue_count++; | 799 | dqm->queue_count++; |
| 757 | qpd->is_debug = true; | 800 | qpd->is_debug = true; |
| @@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
| 775 | dqm->queue_count--; | 818 | dqm->queue_count--; |
| 776 | qpd->is_debug = false; | 819 | qpd->is_debug = false; |
| 777 | execute_queues_cpsch(dqm, false); | 820 | execute_queues_cpsch(dqm, false); |
| 821 | /* | ||
| 822 | * Unconditionally decrement this counter, regardless of the queue's | ||
| 823 | * type. | ||
| 824 | */ | ||
| 825 | dqm->total_queue_count++; | ||
| 826 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 827 | dqm->total_queue_count); | ||
| 778 | mutex_unlock(&dqm->lock); | 828 | mutex_unlock(&dqm->lock); |
| 779 | } | 829 | } |
| 780 | 830 | ||
| @@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
| 793 | 843 | ||
| 794 | mutex_lock(&dqm->lock); | 844 | mutex_lock(&dqm->lock); |
| 795 | 845 | ||
| 846 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
| 847 | pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", | ||
| 848 | dqm->total_queue_count); | ||
| 849 | retval = -EPERM; | ||
| 850 | goto out; | ||
| 851 | } | ||
| 852 | |||
| 796 | mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); | 853 | mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); |
| 797 | if (mqd == NULL) { | 854 | if (mqd == NULL) { |
| 798 | mutex_unlock(&dqm->lock); | 855 | mutex_unlock(&dqm->lock); |
| @@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
| 810 | retval = execute_queues_cpsch(dqm, false); | 867 | retval = execute_queues_cpsch(dqm, false); |
| 811 | } | 868 | } |
| 812 | 869 | ||
| 870 | /* | ||
| 871 | * Unconditionally increment this counter, regardless of the queue's | ||
| 872 | * type or whether the queue is active. | ||
| 873 | */ | ||
| 874 | dqm->total_queue_count++; | ||
| 875 | |||
| 876 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 877 | dqm->total_queue_count); | ||
| 878 | |||
| 813 | out: | 879 | out: |
| 814 | mutex_unlock(&dqm->lock); | 880 | mutex_unlock(&dqm->lock); |
| 815 | return retval; | 881 | return retval; |
| @@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, | |||
| 930 | 996 | ||
| 931 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); | 997 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); |
| 932 | 998 | ||
| 999 | /* | ||
| 1000 | * Unconditionally decrement this counter, regardless of the queue's | ||
| 1001 | * type | ||
| 1002 | */ | ||
| 1003 | dqm->total_queue_count--; | ||
| 1004 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 1005 | dqm->total_queue_count); | ||
| 1006 | |||
| 933 | mutex_unlock(&dqm->lock); | 1007 | mutex_unlock(&dqm->lock); |
| 934 | 1008 | ||
| 935 | return 0; | 1009 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index c3f189e8ae35..52035bf0c1cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | |||
| @@ -130,6 +130,7 @@ struct device_queue_manager { | |||
| 130 | struct list_head queues; | 130 | struct list_head queues; |
| 131 | unsigned int processes_count; | 131 | unsigned int processes_count; |
| 132 | unsigned int queue_count; | 132 | unsigned int queue_count; |
| 133 | unsigned int total_queue_count; | ||
| 133 | unsigned int next_pipe_to_allocate; | 134 | unsigned int next_pipe_to_allocate; |
| 134 | unsigned int *allocated_queues; | 135 | unsigned int *allocated_queues; |
| 135 | unsigned int vmid_bitmap; | 136 | unsigned int vmid_bitmap; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c deleted file mode 100644 index 5b999095a1f7..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ /dev/null | |||
| @@ -1,176 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | /* | ||
| 24 | * KFD Interrupts. | ||
| 25 | * | ||
| 26 | * AMD GPUs deliver interrupts by pushing an interrupt description onto the | ||
| 27 | * interrupt ring and then sending an interrupt. KGD receives the interrupt | ||
| 28 | * in ISR and sends us a pointer to each new entry on the interrupt ring. | ||
| 29 | * | ||
| 30 | * We generally can't process interrupt-signaled events from ISR, so we call | ||
| 31 | * out to each interrupt client module (currently only the scheduler) to ask if | ||
| 32 | * each interrupt is interesting. If they return true, then it requires further | ||
| 33 | * processing so we copy it to an internal interrupt ring and call each | ||
| 34 | * interrupt client again from a work-queue. | ||
| 35 | * | ||
| 36 | * There's no acknowledgment for the interrupts we use. The hardware simply | ||
| 37 | * queues a new interrupt each time without waiting. | ||
| 38 | * | ||
| 39 | * The fixed-size internal queue means that it's possible for us to lose | ||
| 40 | * interrupts because we have no back-pressure to the hardware. | ||
| 41 | */ | ||
| 42 | |||
| 43 | #include <linux/slab.h> | ||
| 44 | #include <linux/device.h> | ||
| 45 | #include "kfd_priv.h" | ||
| 46 | |||
| 47 | #define KFD_INTERRUPT_RING_SIZE 256 | ||
| 48 | |||
| 49 | static void interrupt_wq(struct work_struct *); | ||
| 50 | |||
| 51 | int kfd_interrupt_init(struct kfd_dev *kfd) | ||
| 52 | { | ||
| 53 | void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE, | ||
| 54 | kfd->device_info->ih_ring_entry_size, | ||
| 55 | GFP_KERNEL); | ||
| 56 | if (!interrupt_ring) | ||
| 57 | return -ENOMEM; | ||
| 58 | |||
| 59 | kfd->interrupt_ring = interrupt_ring; | ||
| 60 | kfd->interrupt_ring_size = | ||
| 61 | KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size; | ||
| 62 | atomic_set(&kfd->interrupt_ring_wptr, 0); | ||
| 63 | atomic_set(&kfd->interrupt_ring_rptr, 0); | ||
| 64 | |||
| 65 | spin_lock_init(&kfd->interrupt_lock); | ||
| 66 | |||
| 67 | INIT_WORK(&kfd->interrupt_work, interrupt_wq); | ||
| 68 | |||
| 69 | kfd->interrupts_active = true; | ||
| 70 | |||
| 71 | /* | ||
| 72 | * After this function returns, the interrupt will be enabled. This | ||
| 73 | * barrier ensures that the interrupt running on a different processor | ||
| 74 | * sees all the above writes. | ||
| 75 | */ | ||
| 76 | smp_wmb(); | ||
| 77 | |||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 81 | void kfd_interrupt_exit(struct kfd_dev *kfd) | ||
| 82 | { | ||
| 83 | /* | ||
| 84 | * Stop the interrupt handler from writing to the ring and scheduling | ||
| 85 | * workqueue items. The spinlock ensures that any interrupt running | ||
| 86 | * after we have unlocked sees interrupts_active = false. | ||
| 87 | */ | ||
| 88 | unsigned long flags; | ||
| 89 | |||
| 90 | spin_lock_irqsave(&kfd->interrupt_lock, flags); | ||
| 91 | kfd->interrupts_active = false; | ||
| 92 | spin_unlock_irqrestore(&kfd->interrupt_lock, flags); | ||
| 93 | |||
| 94 | /* | ||
| 95 | * Flush_scheduled_work ensures that there are no outstanding | ||
| 96 | * work-queue items that will access interrupt_ring. New work items | ||
| 97 | * can't be created because we stopped interrupt handling above. | ||
| 98 | */ | ||
| 99 | flush_scheduled_work(); | ||
| 100 | |||
| 101 | kfree(kfd->interrupt_ring); | ||
| 102 | } | ||
| 103 | |||
| 104 | /* | ||
| 105 | * This assumes that it can't be called concurrently with itself | ||
| 106 | * but only with dequeue_ih_ring_entry. | ||
| 107 | */ | ||
| 108 | bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) | ||
| 109 | { | ||
| 110 | unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr); | ||
| 111 | unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr); | ||
| 112 | |||
| 113 | if ((rptr - wptr) % kfd->interrupt_ring_size == | ||
| 114 | kfd->device_info->ih_ring_entry_size) { | ||
| 115 | /* This is very bad, the system is likely to hang. */ | ||
| 116 | dev_err_ratelimited(kfd_chardev(), | ||
| 117 | "Interrupt ring overflow, dropping interrupt.\n"); | ||
| 118 | return false; | ||
| 119 | } | ||
| 120 | |||
| 121 | memcpy(kfd->interrupt_ring + wptr, ih_ring_entry, | ||
| 122 | kfd->device_info->ih_ring_entry_size); | ||
| 123 | |||
| 124 | wptr = (wptr + kfd->device_info->ih_ring_entry_size) % | ||
| 125 | kfd->interrupt_ring_size; | ||
| 126 | smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */ | ||
| 127 | atomic_set(&kfd->interrupt_ring_wptr, wptr); | ||
| 128 | |||
| 129 | return true; | ||
| 130 | } | ||
| 131 | |||
/*
 * Copy the next pending entry out of the software interrupt ring
 * (single consumer); returns false when the ring is empty.
 *
 * This assumes that it can't be called concurrently with itself
 * but only with enqueue_ih_ring_entry.
 */
static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
{
	/*
	 * Assume that wait queues have an implicit barrier, i.e. anything that
	 * happened in the ISR before it queued work is visible.
	 */

	unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
	unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);

	/* rptr == wptr means the ring is empty (see enqueue side). */
	if (rptr == wptr)
		return false;

	memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
			kfd->device_info->ih_ring_entry_size);

	/* Advance rptr by one entry, wrapping at the ring size. */
	rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
			kfd->interrupt_ring_size;

	/*
	 * Ensure the rptr write update is not visible until
	 * memcpy has finished reading.
	 */
	smp_mb();
	atomic_set(&kfd->interrupt_ring_rptr, rptr);

	return true;
}
| 164 | |||
| 165 | static void interrupt_wq(struct work_struct *work) | ||
| 166 | { | ||
| 167 | struct kfd_dev *dev = container_of(work, struct kfd_dev, | ||
| 168 | interrupt_work); | ||
| 169 | |||
| 170 | uint32_t ih_ring_entry[DIV_ROUND_UP( | ||
| 171 | dev->device_info->ih_ring_entry_size, | ||
| 172 | sizeof(uint32_t))]; | ||
| 173 | |||
| 174 | while (dequeue_ih_ring_entry(dev, ih_ring_entry)) | ||
| 175 | ; | ||
| 176 | } | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 95d5af138e6e..a8be6df85347 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c | |||
| @@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444); | |||
| 50 | MODULE_PARM_DESC(sched_policy, | 50 | MODULE_PARM_DESC(sched_policy, |
| 51 | "Kernel cmdline parameter that defines the amdkfd scheduling policy"); | 51 | "Kernel cmdline parameter that defines the amdkfd scheduling policy"); |
| 52 | 52 | ||
| 53 | int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; | 53 | int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; |
| 54 | module_param(max_num_of_processes, int, 0444); | 54 | module_param(max_num_of_queues_per_device, int, 0444); |
| 55 | MODULE_PARM_DESC(max_num_of_processes, | 55 | MODULE_PARM_DESC(max_num_of_queues_per_device, |
| 56 | "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); | 56 | "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); |
| 57 | |||
| 58 | int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT; | ||
| 59 | module_param(max_num_of_queues_per_process, int, 0444); | ||
| 60 | MODULE_PARM_DESC(max_num_of_queues_per_process, | ||
| 61 | "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process"); | ||
| 62 | 57 | ||
| 63 | bool kgd2kfd_init(unsigned interface_version, | 58 | bool kgd2kfd_init(unsigned interface_version, |
| 64 | const struct kfd2kgd_calls *f2g, | 59 | const struct kfd2kgd_calls *f2g, |
| @@ -100,16 +95,10 @@ static int __init kfd_module_init(void) | |||
| 100 | } | 95 | } |
| 101 | 96 | ||
| 102 | /* Verify module parameters */ | 97 | /* Verify module parameters */ |
| 103 | if ((max_num_of_processes < 0) || | 98 | if ((max_num_of_queues_per_device < 0) || |
| 104 | (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { | 99 | (max_num_of_queues_per_device > |
| 105 | pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); | 100 | KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { |
| 106 | return -1; | 101 | pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); |
| 107 | } | ||
| 108 | |||
| 109 | if ((max_num_of_queues_per_process < 0) || | ||
| 110 | (max_num_of_queues_per_process > | ||
| 111 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) { | ||
| 112 | pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n"); | ||
| 113 | return -1; | 102 | return -1; |
| 114 | } | 103 | } |
| 115 | 104 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 4c25ef504f79..6cfe7f1f18cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | |||
| @@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex); | |||
| 30 | 30 | ||
| 31 | int kfd_pasid_init(void) | 31 | int kfd_pasid_init(void) |
| 32 | { | 32 | { |
| 33 | pasid_limit = max_num_of_processes; | 33 | pasid_limit = KFD_MAX_NUM_OF_PROCESSES; |
| 34 | 34 | ||
| 35 | pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); | 35 | pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); |
| 36 | if (!pasid_bitmap) | 36 | if (!pasid_bitmap) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index a5edb29507e3..96dc10e8904a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | |||
| @@ -52,20 +52,19 @@ | |||
| 52 | #define kfd_alloc_struct(ptr_to_struct) \ | 52 | #define kfd_alloc_struct(ptr_to_struct) \ |
| 53 | ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) | 53 | ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) |
| 54 | 54 | ||
| 55 | /* Kernel module parameter to specify maximum number of supported processes */ | ||
| 56 | extern int max_num_of_processes; | ||
| 57 | |||
| 58 | #define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32 | ||
| 59 | #define KFD_MAX_NUM_OF_PROCESSES 512 | 55 | #define KFD_MAX_NUM_OF_PROCESSES 512 |
| 56 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 | ||
| 60 | 57 | ||
| 61 | /* | 58 | /* |
| 62 | * Kernel module parameter to specify maximum number of supported queues | 59 | * Kernel module parameter to specify maximum number of supported queues per |
| 63 | * per process | 60 | * device |
| 64 | */ | 61 | */ |
| 65 | extern int max_num_of_queues_per_process; | 62 | extern int max_num_of_queues_per_device; |
| 66 | 63 | ||
| 67 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 | 64 | #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 |
| 68 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 | 65 | #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ |
| 66 | (KFD_MAX_NUM_OF_PROCESSES * \ | ||
| 67 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) | ||
| 69 | 68 | ||
| 70 | #define KFD_KERNEL_QUEUE_SIZE 2048 | 69 | #define KFD_KERNEL_QUEUE_SIZE 2048 |
| 71 | 70 | ||
| @@ -135,22 +134,10 @@ struct kfd_dev { | |||
| 135 | 134 | ||
| 136 | struct kgd2kfd_shared_resources shared_resources; | 135 | struct kgd2kfd_shared_resources shared_resources; |
| 137 | 136 | ||
| 138 | void *interrupt_ring; | ||
| 139 | size_t interrupt_ring_size; | ||
| 140 | atomic_t interrupt_ring_rptr; | ||
| 141 | atomic_t interrupt_ring_wptr; | ||
| 142 | struct work_struct interrupt_work; | ||
| 143 | spinlock_t interrupt_lock; | ||
| 144 | |||
| 145 | /* QCM Device instance */ | 137 | /* QCM Device instance */ |
| 146 | struct device_queue_manager *dqm; | 138 | struct device_queue_manager *dqm; |
| 147 | 139 | ||
| 148 | bool init_complete; | 140 | bool init_complete; |
| 149 | /* | ||
| 150 | * Interrupts of interest to KFD are copied | ||
| 151 | * from the HW ring into a SW ring. | ||
| 152 | */ | ||
| 153 | bool interrupts_active; | ||
| 154 | }; | 141 | }; |
| 155 | 142 | ||
| 156 | /* KGD2KFD callbacks */ | 143 | /* KGD2KFD callbacks */ |
| @@ -531,10 +518,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); | |||
| 531 | struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx); | 518 | struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx); |
| 532 | 519 | ||
| 533 | /* Interrupts */ | 520 | /* Interrupts */ |
| 534 | int kfd_interrupt_init(struct kfd_dev *dev); | ||
| 535 | void kfd_interrupt_exit(struct kfd_dev *dev); | ||
| 536 | void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); | 521 | void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); |
| 537 | bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry); | ||
| 538 | 522 | ||
| 539 | /* Power Management */ | 523 | /* Power Management */ |
| 540 | void kgd2kfd_suspend(struct kfd_dev *kfd); | 524 | void kgd2kfd_suspend(struct kfd_dev *kfd); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 47526780d736..f37cf5efe642 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | |||
| @@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, | |||
| 54 | pr_debug("kfd: in %s\n", __func__); | 54 | pr_debug("kfd: in %s\n", __func__); |
| 55 | 55 | ||
| 56 | found = find_first_zero_bit(pqm->queue_slot_bitmap, | 56 | found = find_first_zero_bit(pqm->queue_slot_bitmap, |
| 57 | max_num_of_queues_per_process); | 57 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); |
| 58 | 58 | ||
| 59 | pr_debug("kfd: the new slot id %lu\n", found); | 59 | pr_debug("kfd: the new slot id %lu\n", found); |
| 60 | 60 | ||
| 61 | if (found >= max_num_of_queues_per_process) { | 61 | if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { |
| 62 | pr_info("amdkfd: Can not open more queues for process with pasid %d\n", | 62 | pr_info("amdkfd: Can not open more queues for process with pasid %d\n", |
| 63 | pqm->process->pasid); | 63 | pqm->process->pasid); |
| 64 | return -ENOMEM; | 64 | return -ENOMEM; |
| @@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) | |||
| 76 | 76 | ||
| 77 | INIT_LIST_HEAD(&pqm->queues); | 77 | INIT_LIST_HEAD(&pqm->queues); |
| 78 | pqm->queue_slot_bitmap = | 78 | pqm->queue_slot_bitmap = |
| 79 | kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, | 79 | kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, |
| 80 | BITS_PER_BYTE), GFP_KERNEL); | 80 | BITS_PER_BYTE), GFP_KERNEL); |
| 81 | if (pqm->queue_slot_bitmap == NULL) | 81 | if (pqm->queue_slot_bitmap == NULL) |
| 82 | return -ENOMEM; | 82 | return -ENOMEM; |
| @@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
| 203 | pqn->kq = NULL; | 203 | pqn->kq = NULL; |
| 204 | retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, | 204 | retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, |
| 205 | &q->properties.vmid); | 205 | &q->properties.vmid); |
| 206 | pr_debug("DQM returned %d for create_queue\n", retval); | ||
| 206 | print_queue(q); | 207 | print_queue(q); |
| 207 | break; | 208 | break; |
| 208 | case KFD_QUEUE_TYPE_DIQ: | 209 | case KFD_QUEUE_TYPE_DIQ: |
| @@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
| 222 | } | 223 | } |
| 223 | 224 | ||
| 224 | if (retval != 0) { | 225 | if (retval != 0) { |
| 225 | pr_err("kfd: error dqm create queue\n"); | 226 | pr_debug("Error dqm create queue\n"); |
| 226 | goto err_create_queue; | 227 | goto err_create_queue; |
| 227 | } | 228 | } |
| 228 | 229 | ||
| @@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
| 241 | err_create_queue: | 242 | err_create_queue: |
| 242 | kfree(pqn); | 243 | kfree(pqn); |
| 243 | err_allocate_pqn: | 244 | err_allocate_pqn: |
| 245 | /* check if queues list is empty unregister process from device */ | ||
| 244 | clear_bit(*qid, pqm->queue_slot_bitmap); | 246 | clear_bit(*qid, pqm->queue_slot_bitmap); |
| 247 | if (list_empty(&pqm->queues)) | ||
| 248 | dev->dqm->unregister_process(dev->dqm, &pdd->qpd); | ||
| 245 | return retval; | 249 | return retval; |
| 246 | } | 250 | } |
| 247 | 251 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 52ce26d6b4fb..dc386ebe5193 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_ | |||
| 145 | } | 145 | } |
| 146 | EXPORT_SYMBOL(drm_fb_helper_add_one_connector); | 146 | EXPORT_SYMBOL(drm_fb_helper_add_one_connector); |
| 147 | 147 | ||
| 148 | static void remove_from_modeset(struct drm_mode_set *set, | ||
| 149 | struct drm_connector *connector) | ||
| 150 | { | ||
| 151 | int i, j; | ||
| 152 | |||
| 153 | for (i = 0; i < set->num_connectors; i++) { | ||
| 154 | if (set->connectors[i] == connector) | ||
| 155 | break; | ||
| 156 | } | ||
| 157 | |||
| 158 | if (i == set->num_connectors) | ||
| 159 | return; | ||
| 160 | |||
| 161 | for (j = i + 1; j < set->num_connectors; j++) { | ||
| 162 | set->connectors[j - 1] = set->connectors[j]; | ||
| 163 | } | ||
| 164 | set->num_connectors--; | ||
| 165 | |||
| 166 | /* because i915 is pissy about this.. | ||
| 167 | * TODO maybe need to makes sure we set it back to !=NULL somewhere? | ||
| 168 | */ | ||
| 169 | if (set->num_connectors == 0) | ||
| 170 | set->fb = NULL; | ||
| 171 | } | ||
| 172 | |||
| 148 | int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, | 173 | int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, |
| 149 | struct drm_connector *connector) | 174 | struct drm_connector *connector) |
| 150 | { | 175 | { |
| @@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, | |||
| 167 | } | 192 | } |
| 168 | fb_helper->connector_count--; | 193 | fb_helper->connector_count--; |
| 169 | kfree(fb_helper_connector); | 194 | kfree(fb_helper_connector); |
| 195 | |||
| 196 | /* also cleanup dangling references to the connector: */ | ||
| 197 | for (i = 0; i < fb_helper->crtc_count; i++) | ||
| 198 | remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector); | ||
| 199 | |||
| 170 | return 0; | 200 | return 0; |
| 171 | } | 201 | } |
| 172 | EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); | 202 | EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); |
| @@ -741,7 +771,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
| 741 | int i, j, rc = 0; | 771 | int i, j, rc = 0; |
| 742 | int start; | 772 | int start; |
| 743 | 773 | ||
| 744 | drm_modeset_lock_all(dev); | 774 | if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { |
| 775 | return -EBUSY; | ||
| 776 | } | ||
| 745 | if (!drm_fb_helper_is_bound(fb_helper)) { | 777 | if (!drm_fb_helper_is_bound(fb_helper)) { |
| 746 | drm_modeset_unlock_all(dev); | 778 | drm_modeset_unlock_all(dev); |
| 747 | return -EBUSY; | 779 | return -EBUSY; |
| @@ -915,7 +947,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, | |||
| 915 | int ret = 0; | 947 | int ret = 0; |
| 916 | int i; | 948 | int i; |
| 917 | 949 | ||
| 918 | drm_modeset_lock_all(dev); | 950 | if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { |
| 951 | return -EBUSY; | ||
| 952 | } | ||
| 919 | if (!drm_fb_helper_is_bound(fb_helper)) { | 953 | if (!drm_fb_helper_is_bound(fb_helper)) { |
| 920 | drm_modeset_unlock_all(dev); | 954 | drm_modeset_unlock_all(dev); |
| 921 | return -EBUSY; | 955 | return -EBUSY; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 121470a83d1a..1bcbe07cecfc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
| @@ -645,18 +645,6 @@ static int exynos_drm_init(void) | |||
| 645 | if (!is_exynos) | 645 | if (!is_exynos) |
| 646 | return -ENODEV; | 646 | return -ENODEV; |
| 647 | 647 | ||
| 648 | /* | ||
| 649 | * Register device object only in case of Exynos SoC. | ||
| 650 | * | ||
| 651 | * Below codes resolves temporarily infinite loop issue incurred | ||
| 652 | * by Exynos drm driver when using multi-platform kernel. | ||
| 653 | * So these codes will be replaced with more generic way later. | ||
| 654 | */ | ||
| 655 | if (!of_machine_is_compatible("samsung,exynos3") && | ||
| 656 | !of_machine_is_compatible("samsung,exynos4") && | ||
| 657 | !of_machine_is_compatible("samsung,exynos5")) | ||
| 658 | return -ENODEV; | ||
| 659 | |||
| 660 | exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, | 648 | exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, |
| 661 | NULL, 0); | 649 | NULL, 0); |
| 662 | if (IS_ERR(exynos_drm_pdev)) | 650 | if (IS_ERR(exynos_drm_pdev)) |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 5765a161abdd..98051e8e855a 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
| @@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata) | |||
| 1669 | 1669 | ||
| 1670 | static void hdmiphy_conf_reset(struct hdmi_context *hdata) | 1670 | static void hdmiphy_conf_reset(struct hdmi_context *hdata) |
| 1671 | { | 1671 | { |
| 1672 | u8 buffer[2]; | ||
| 1673 | u32 reg; | 1672 | u32 reg; |
| 1674 | 1673 | ||
| 1675 | clk_disable_unprepare(hdata->res.sclk_hdmi); | 1674 | clk_disable_unprepare(hdata->res.sclk_hdmi); |
| @@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata) | |||
| 1677 | clk_prepare_enable(hdata->res.sclk_hdmi); | 1676 | clk_prepare_enable(hdata->res.sclk_hdmi); |
| 1678 | 1677 | ||
| 1679 | /* operation mode */ | 1678 | /* operation mode */ |
| 1680 | buffer[0] = 0x1f; | 1679 | hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, |
| 1681 | buffer[1] = 0x00; | 1680 | HDMI_PHY_ENABLE_MODE_SET); |
| 1682 | |||
| 1683 | if (hdata->hdmiphy_port) | ||
| 1684 | i2c_master_send(hdata->hdmiphy_port, buffer, 2); | ||
| 1685 | 1681 | ||
| 1686 | if (hdata->type == HDMI_TYPE13) | 1682 | if (hdata->type == HDMI_TYPE13) |
| 1687 | reg = HDMI_V13_PHY_RSTOUT; | 1683 | reg = HDMI_V13_PHY_RSTOUT; |
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 820b76234ef4..064ed6597def 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
| @@ -1026,6 +1026,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos) | |||
| 1026 | static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr) | 1026 | static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr) |
| 1027 | { | 1027 | { |
| 1028 | struct mixer_context *mixer_ctx = mgr_to_mixer(mgr); | 1028 | struct mixer_context *mixer_ctx = mgr_to_mixer(mgr); |
| 1029 | int err; | ||
| 1029 | 1030 | ||
| 1030 | mutex_lock(&mixer_ctx->mixer_mutex); | 1031 | mutex_lock(&mixer_ctx->mixer_mutex); |
| 1031 | if (!mixer_ctx->powered) { | 1032 | if (!mixer_ctx->powered) { |
| @@ -1034,7 +1035,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr) | |||
| 1034 | } | 1035 | } |
| 1035 | mutex_unlock(&mixer_ctx->mixer_mutex); | 1036 | mutex_unlock(&mixer_ctx->mixer_mutex); |
| 1036 | 1037 | ||
| 1037 | drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe); | 1038 | err = drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe); |
| 1039 | if (err < 0) { | ||
| 1040 | DRM_DEBUG_KMS("failed to acquire vblank counter\n"); | ||
| 1041 | return; | ||
| 1042 | } | ||
| 1038 | 1043 | ||
| 1039 | atomic_set(&mixer_ctx->wait_vsync_event, 1); | 1044 | atomic_set(&mixer_ctx->wait_vsync_event, 1); |
| 1040 | 1045 | ||
| @@ -1262,8 +1267,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data) | |||
| 1262 | return ret; | 1267 | return ret; |
| 1263 | } | 1268 | } |
| 1264 | 1269 | ||
| 1265 | pm_runtime_enable(dev); | ||
| 1266 | |||
| 1267 | return 0; | 1270 | return 0; |
| 1268 | } | 1271 | } |
| 1269 | 1272 | ||
| @@ -1272,8 +1275,6 @@ static void mixer_unbind(struct device *dev, struct device *master, void *data) | |||
| 1272 | struct mixer_context *ctx = dev_get_drvdata(dev); | 1275 | struct mixer_context *ctx = dev_get_drvdata(dev); |
| 1273 | 1276 | ||
| 1274 | mixer_mgr_remove(&ctx->manager); | 1277 | mixer_mgr_remove(&ctx->manager); |
| 1275 | |||
| 1276 | pm_runtime_disable(dev); | ||
| 1277 | } | 1278 | } |
| 1278 | 1279 | ||
| 1279 | static const struct component_ops mixer_component_ops = { | 1280 | static const struct component_ops mixer_component_ops = { |
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index d4762799351d..a9041d1a8ff0 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c | |||
| @@ -32,6 +32,8 @@ | |||
| 32 | struct tda998x_priv { | 32 | struct tda998x_priv { |
| 33 | struct i2c_client *cec; | 33 | struct i2c_client *cec; |
| 34 | struct i2c_client *hdmi; | 34 | struct i2c_client *hdmi; |
| 35 | struct mutex mutex; | ||
| 36 | struct delayed_work dwork; | ||
| 35 | uint16_t rev; | 37 | uint16_t rev; |
| 36 | uint8_t current_page; | 38 | uint8_t current_page; |
| 37 | int dpms; | 39 | int dpms; |
| @@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) | |||
| 402 | uint8_t addr = REG2ADDR(reg); | 404 | uint8_t addr = REG2ADDR(reg); |
| 403 | int ret; | 405 | int ret; |
| 404 | 406 | ||
| 407 | mutex_lock(&priv->mutex); | ||
| 405 | ret = set_page(priv, reg); | 408 | ret = set_page(priv, reg); |
| 406 | if (ret < 0) | 409 | if (ret < 0) |
| 407 | return ret; | 410 | goto out; |
| 408 | 411 | ||
| 409 | ret = i2c_master_send(client, &addr, sizeof(addr)); | 412 | ret = i2c_master_send(client, &addr, sizeof(addr)); |
| 410 | if (ret < 0) | 413 | if (ret < 0) |
| @@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt) | |||
| 414 | if (ret < 0) | 417 | if (ret < 0) |
| 415 | goto fail; | 418 | goto fail; |
| 416 | 419 | ||
| 417 | return ret; | 420 | goto out; |
| 418 | 421 | ||
| 419 | fail: | 422 | fail: |
| 420 | dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); | 423 | dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); |
| 424 | out: | ||
| 425 | mutex_unlock(&priv->mutex); | ||
| 421 | return ret; | 426 | return ret; |
| 422 | } | 427 | } |
| 423 | 428 | ||
| @@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt) | |||
| 431 | buf[0] = REG2ADDR(reg); | 436 | buf[0] = REG2ADDR(reg); |
| 432 | memcpy(&buf[1], p, cnt); | 437 | memcpy(&buf[1], p, cnt); |
| 433 | 438 | ||
| 439 | mutex_lock(&priv->mutex); | ||
| 434 | ret = set_page(priv, reg); | 440 | ret = set_page(priv, reg); |
| 435 | if (ret < 0) | 441 | if (ret < 0) |
| 436 | return; | 442 | goto out; |
| 437 | 443 | ||
| 438 | ret = i2c_master_send(client, buf, cnt + 1); | 444 | ret = i2c_master_send(client, buf, cnt + 1); |
| 439 | if (ret < 0) | 445 | if (ret < 0) |
| 440 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 446 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
| 447 | out: | ||
| 448 | mutex_unlock(&priv->mutex); | ||
| 441 | } | 449 | } |
| 442 | 450 | ||
| 443 | static int | 451 | static int |
| @@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val) | |||
| 459 | uint8_t buf[] = {REG2ADDR(reg), val}; | 467 | uint8_t buf[] = {REG2ADDR(reg), val}; |
| 460 | int ret; | 468 | int ret; |
| 461 | 469 | ||
| 470 | mutex_lock(&priv->mutex); | ||
| 462 | ret = set_page(priv, reg); | 471 | ret = set_page(priv, reg); |
| 463 | if (ret < 0) | 472 | if (ret < 0) |
| 464 | return; | 473 | goto out; |
| 465 | 474 | ||
| 466 | ret = i2c_master_send(client, buf, sizeof(buf)); | 475 | ret = i2c_master_send(client, buf, sizeof(buf)); |
| 467 | if (ret < 0) | 476 | if (ret < 0) |
| 468 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 477 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
| 478 | out: | ||
| 479 | mutex_unlock(&priv->mutex); | ||
| 469 | } | 480 | } |
| 470 | 481 | ||
| 471 | static void | 482 | static void |
| @@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val) | |||
| 475 | uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; | 486 | uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; |
| 476 | int ret; | 487 | int ret; |
| 477 | 488 | ||
| 489 | mutex_lock(&priv->mutex); | ||
| 478 | ret = set_page(priv, reg); | 490 | ret = set_page(priv, reg); |
| 479 | if (ret < 0) | 491 | if (ret < 0) |
| 480 | return; | 492 | goto out; |
| 481 | 493 | ||
| 482 | ret = i2c_master_send(client, buf, sizeof(buf)); | 494 | ret = i2c_master_send(client, buf, sizeof(buf)); |
| 483 | if (ret < 0) | 495 | if (ret < 0) |
| 484 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); | 496 | dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); |
| 497 | out: | ||
| 498 | mutex_unlock(&priv->mutex); | ||
| 485 | } | 499 | } |
| 486 | 500 | ||
| 487 | static void | 501 | static void |
| @@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv) | |||
| 536 | reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); | 550 | reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); |
| 537 | } | 551 | } |
| 538 | 552 | ||
| 553 | /* handle HDMI connect/disconnect */ | ||
| 554 | static void tda998x_hpd(struct work_struct *work) | ||
| 555 | { | ||
| 556 | struct delayed_work *dwork = to_delayed_work(work); | ||
| 557 | struct tda998x_priv *priv = | ||
| 558 | container_of(dwork, struct tda998x_priv, dwork); | ||
| 559 | |||
| 560 | if (priv->encoder && priv->encoder->dev) | ||
| 561 | drm_kms_helper_hotplug_event(priv->encoder->dev); | ||
| 562 | } | ||
| 563 | |||
| 539 | /* | 564 | /* |
| 540 | * only 2 interrupts may occur: screen plug/unplug and EDID read | 565 | * only 2 interrupts may occur: screen plug/unplug and EDID read |
| 541 | */ | 566 | */ |
| @@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data) | |||
| 559 | priv->wq_edid_wait = 0; | 584 | priv->wq_edid_wait = 0; |
| 560 | wake_up(&priv->wq_edid); | 585 | wake_up(&priv->wq_edid); |
| 561 | } else if (cec != 0) { /* HPD change */ | 586 | } else if (cec != 0) { /* HPD change */ |
| 562 | if (priv->encoder && priv->encoder->dev) | 587 | schedule_delayed_work(&priv->dwork, HZ/10); |
| 563 | drm_helper_hpd_irq_event(priv->encoder->dev); | ||
| 564 | } | 588 | } |
| 565 | return IRQ_HANDLED; | 589 | return IRQ_HANDLED; |
| 566 | } | 590 | } |
| @@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv) | |||
| 1170 | /* disable all IRQs and free the IRQ handler */ | 1194 | /* disable all IRQs and free the IRQ handler */ |
| 1171 | cec_write(priv, REG_CEC_RXSHPDINTENA, 0); | 1195 | cec_write(priv, REG_CEC_RXSHPDINTENA, 0); |
| 1172 | reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); | 1196 | reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); |
| 1173 | if (priv->hdmi->irq) | 1197 | if (priv->hdmi->irq) { |
| 1174 | free_irq(priv->hdmi->irq, priv); | 1198 | free_irq(priv->hdmi->irq, priv); |
| 1199 | cancel_delayed_work_sync(&priv->dwork); | ||
| 1200 | } | ||
| 1175 | 1201 | ||
| 1176 | i2c_unregister_device(priv->cec); | 1202 | i2c_unregister_device(priv->cec); |
| 1177 | } | 1203 | } |
| @@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
| 1255 | struct device_node *np = client->dev.of_node; | 1281 | struct device_node *np = client->dev.of_node; |
| 1256 | u32 video; | 1282 | u32 video; |
| 1257 | int rev_lo, rev_hi, ret; | 1283 | int rev_lo, rev_hi, ret; |
| 1284 | unsigned short cec_addr; | ||
| 1258 | 1285 | ||
| 1259 | priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); | 1286 | priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); |
| 1260 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); | 1287 | priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); |
| @@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
| 1262 | 1289 | ||
| 1263 | priv->current_page = 0xff; | 1290 | priv->current_page = 0xff; |
| 1264 | priv->hdmi = client; | 1291 | priv->hdmi = client; |
| 1265 | priv->cec = i2c_new_dummy(client->adapter, 0x34); | 1292 | /* CEC I2C address bound to TDA998x I2C addr by configuration pins */ |
| 1293 | cec_addr = 0x34 + (client->addr & 0x03); | ||
| 1294 | priv->cec = i2c_new_dummy(client->adapter, cec_addr); | ||
| 1266 | if (!priv->cec) | 1295 | if (!priv->cec) |
| 1267 | return -ENODEV; | 1296 | return -ENODEV; |
| 1268 | 1297 | ||
| 1269 | priv->dpms = DRM_MODE_DPMS_OFF; | 1298 | priv->dpms = DRM_MODE_DPMS_OFF; |
| 1270 | 1299 | ||
| 1300 | mutex_init(&priv->mutex); /* protect the page access */ | ||
| 1301 | |||
| 1271 | /* wake up the device: */ | 1302 | /* wake up the device: */ |
| 1272 | cec_write(priv, REG_CEC_ENAMODS, | 1303 | cec_write(priv, REG_CEC_ENAMODS, |
| 1273 | CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); | 1304 | CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); |
| @@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv) | |||
| 1323 | if (client->irq) { | 1354 | if (client->irq) { |
| 1324 | int irqf_trigger; | 1355 | int irqf_trigger; |
| 1325 | 1356 | ||
| 1326 | /* init read EDID waitqueue */ | 1357 | /* init read EDID waitqueue and HDP work */ |
| 1327 | init_waitqueue_head(&priv->wq_edid); | 1358 | init_waitqueue_head(&priv->wq_edid); |
| 1359 | INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd); | ||
| 1328 | 1360 | ||
| 1329 | /* clear pending interrupts */ | 1361 | /* clear pending interrupts */ |
| 1330 | reg_read(priv, REG_INT_FLAGS_0); | 1362 | reg_read(priv, REG_INT_FLAGS_0); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 574057cd1d09..7643300828c3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev) | |||
| 462 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | 462 | } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
| 463 | dev_priv->pch_type = PCH_LPT; | 463 | dev_priv->pch_type = PCH_LPT; |
| 464 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); | 464 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); |
| 465 | WARN_ON(!IS_HASWELL(dev)); | 465 | WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); |
| 466 | WARN_ON(IS_HSW_ULT(dev)); | 466 | WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); |
| 467 | } else if (IS_BROADWELL(dev)) { | ||
| 468 | dev_priv->pch_type = PCH_LPT; | ||
| 469 | dev_priv->pch_id = | ||
| 470 | INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; | ||
| 471 | DRM_DEBUG_KMS("This is Broadwell, assuming " | ||
| 472 | "LynxPoint LP PCH\n"); | ||
| 473 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 467 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { |
| 474 | dev_priv->pch_type = PCH_LPT; | 468 | dev_priv->pch_type = PCH_LPT; |
| 475 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); | 469 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); |
| 476 | WARN_ON(!IS_HASWELL(dev)); | 470 | WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); |
| 477 | WARN_ON(!IS_HSW_ULT(dev)); | 471 | WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); |
| 478 | } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { | 472 | } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { |
| 479 | dev_priv->pch_type = PCH_SPT; | 473 | dev_priv->pch_type = PCH_SPT; |
| 480 | DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); | 474 | DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e9f891c432f8..9d7a7155bf02 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table { | |||
| 2159 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ | 2159 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
| 2160 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) | 2160 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) |
| 2161 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ | 2161 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
| 2162 | ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ | 2162 | ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
| 2163 | (INTEL_DEVID(dev) & 0xf) == 0x6 || \ | ||
| 2164 | (INTEL_DEVID(dev) & 0xf) == 0xe)) | 2163 | (INTEL_DEVID(dev) & 0xf) == 0xe)) |
| 2165 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ | 2164 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ |
| 2166 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) | 2165 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c11603b4cf1d..5f614828d365 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, | |||
| 3148 | u32 size = i915_gem_obj_ggtt_size(obj); | 3148 | u32 size = i915_gem_obj_ggtt_size(obj); |
| 3149 | uint64_t val; | 3149 | uint64_t val; |
| 3150 | 3150 | ||
| 3151 | /* Adjust fence size to match tiled area */ | ||
| 3152 | if (obj->tiling_mode != I915_TILING_NONE) { | ||
| 3153 | uint32_t row_size = obj->stride * | ||
| 3154 | (obj->tiling_mode == I915_TILING_Y ? 32 : 8); | ||
| 3155 | size = (size / row_size) * row_size; | ||
| 3156 | } | ||
| 3157 | |||
| 3151 | val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & | 3158 | val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & |
| 3152 | 0xfffff000) << 32; | 3159 | 0xfffff000) << 32; |
| 3153 | val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; | 3160 | val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; |
| @@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev) | |||
| 4884 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 4891 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
| 4885 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); | 4892 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); |
| 4886 | 4893 | ||
| 4887 | /* | 4894 | ret = i915_ppgtt_init_hw(dev); |
| 4888 | * XXX: Contexts should only be initialized once. Doing a switch to the | ||
| 4889 | * default context switch however is something we'd like to do after | ||
| 4890 | * reset or thaw (the latter may not actually be necessary for HW, but | ||
| 4891 | * goes with our code better). Context switching requires rings (for | ||
| 4892 | * the do_switch), but before enabling PPGTT. So don't move this. | ||
| 4893 | */ | ||
| 4894 | ret = i915_gem_context_enable(dev_priv); | ||
| 4895 | if (ret && ret != -EIO) { | 4895 | if (ret && ret != -EIO) { |
| 4896 | DRM_ERROR("Context enable failed %d\n", ret); | 4896 | DRM_ERROR("PPGTT enable failed %d\n", ret); |
| 4897 | i915_gem_cleanup_ringbuffer(dev); | 4897 | i915_gem_cleanup_ringbuffer(dev); |
| 4898 | |||
| 4899 | return ret; | ||
| 4900 | } | 4898 | } |
| 4901 | 4899 | ||
| 4902 | ret = i915_ppgtt_init_hw(dev); | 4900 | ret = i915_gem_context_enable(dev_priv); |
| 4903 | if (ret && ret != -EIO) { | 4901 | if (ret && ret != -EIO) { |
| 4904 | DRM_ERROR("PPGTT enable failed %d\n", ret); | 4902 | DRM_ERROR("Context enable failed %d\n", ret); |
| 4905 | i915_gem_cleanup_ringbuffer(dev); | 4903 | i915_gem_cleanup_ringbuffer(dev); |
| 4904 | |||
| 4905 | return ret; | ||
| 4906 | } | 4906 | } |
| 4907 | 4907 | ||
| 4908 | return ret; | 4908 | return ret; |
| @@ -5155,7 +5155,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) | |||
| 5155 | if (!mutex_is_locked(mutex)) | 5155 | if (!mutex_is_locked(mutex)) |
| 5156 | return false; | 5156 | return false; |
| 5157 | 5157 | ||
| 5158 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) | 5158 | #if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) |
| 5159 | return mutex->owner == task; | 5159 | return mutex->owner == task; |
| 5160 | #else | 5160 | #else |
| 5161 | /* Since UP may be pre-empted, we cannot assume that we own the lock */ | 5161 | /* Since UP may be pre-empted, we cannot assume that we own the lock */ |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d0d3dfbe6d2a..b051a238baf9 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -292,6 +292,23 @@ void gen6_enable_rps_interrupts(struct drm_device *dev) | |||
| 292 | spin_unlock_irq(&dev_priv->irq_lock); | 292 | spin_unlock_irq(&dev_priv->irq_lock); |
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) | ||
| 296 | { | ||
| 297 | /* | ||
| 298 | * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer | ||
| 299 | * if GEN6_PM_UP_EI_EXPIRED is masked. | ||
| 300 | * | ||
| 301 | * TODO: verify if this can be reproduced on VLV,CHV. | ||
| 302 | */ | ||
| 303 | if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) | ||
| 304 | mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; | ||
| 305 | |||
| 306 | if (INTEL_INFO(dev_priv)->gen >= 8) | ||
| 307 | mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; | ||
| 308 | |||
| 309 | return mask; | ||
| 310 | } | ||
| 311 | |||
| 295 | void gen6_disable_rps_interrupts(struct drm_device *dev) | 312 | void gen6_disable_rps_interrupts(struct drm_device *dev) |
| 296 | { | 313 | { |
| 297 | struct drm_i915_private *dev_priv = dev->dev_private; | 314 | struct drm_i915_private *dev_priv = dev->dev_private; |
| @@ -304,8 +321,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) | |||
| 304 | 321 | ||
| 305 | spin_lock_irq(&dev_priv->irq_lock); | 322 | spin_lock_irq(&dev_priv->irq_lock); |
| 306 | 323 | ||
| 307 | I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ? | 324 | I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); |
| 308 | ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0); | ||
| 309 | 325 | ||
| 310 | __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); | 326 | __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); |
| 311 | I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & | 327 | I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e2af1383b179..e7a16f119a29 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -9815,7 +9815,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
| 9815 | if (obj->tiling_mode != work->old_fb_obj->tiling_mode) | 9815 | if (obj->tiling_mode != work->old_fb_obj->tiling_mode) |
| 9816 | /* vlv: DISPLAY_FLIP fails to change tiling */ | 9816 | /* vlv: DISPLAY_FLIP fails to change tiling */ |
| 9817 | ring = NULL; | 9817 | ring = NULL; |
| 9818 | } else if (IS_IVYBRIDGE(dev)) { | 9818 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
| 9819 | ring = &dev_priv->ring[BCS]; | 9819 | ring = &dev_priv->ring[BCS]; |
| 9820 | } else if (INTEL_INFO(dev)->gen >= 7) { | 9820 | } else if (INTEL_INFO(dev)->gen >= 7) { |
| 9821 | ring = obj->ring; | 9821 | ring = obj->ring; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 25fdbb16d4e0..3b40a17b8852 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -794,6 +794,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); | |||
| 794 | void gen6_reset_rps_interrupts(struct drm_device *dev); | 794 | void gen6_reset_rps_interrupts(struct drm_device *dev); |
| 795 | void gen6_enable_rps_interrupts(struct drm_device *dev); | 795 | void gen6_enable_rps_interrupts(struct drm_device *dev); |
| 796 | void gen6_disable_rps_interrupts(struct drm_device *dev); | 796 | void gen6_disable_rps_interrupts(struct drm_device *dev); |
| 797 | u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); | ||
| 797 | void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); | 798 | void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); |
| 798 | void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); | 799 | void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); |
| 799 | static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) | 800 | static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 4d63839bd9b4..dfb783a8f2c3 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
| @@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) | |||
| 962 | 962 | ||
| 963 | WARN_ON(panel->backlight.max == 0); | 963 | WARN_ON(panel->backlight.max == 0); |
| 964 | 964 | ||
| 965 | if (panel->backlight.level == 0) { | 965 | if (panel->backlight.level <= panel->backlight.min) { |
| 966 | panel->backlight.level = panel->backlight.max; | 966 | panel->backlight.level = panel->backlight.max; |
| 967 | if (panel->backlight.device) | 967 | if (panel->backlight.device) |
| 968 | panel->backlight.device->props.brightness = | 968 | panel->backlight.device->props.brightness = |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 964b28e3c630..bf814a64582a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -4363,16 +4363,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) | |||
| 4363 | mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED); | 4363 | mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED); |
| 4364 | mask &= dev_priv->pm_rps_events; | 4364 | mask &= dev_priv->pm_rps_events; |
| 4365 | 4365 | ||
| 4366 | /* IVB and SNB hard hangs on looping batchbuffer | 4366 | return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); |
| 4367 | * if GEN6_PM_UP_EI_EXPIRED is masked. | ||
| 4368 | */ | ||
| 4369 | if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev)) | ||
| 4370 | mask |= GEN6_PM_RP_UP_EI_EXPIRED; | ||
| 4371 | |||
| 4372 | if (IS_GEN8(dev_priv->dev)) | ||
| 4373 | mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP; | ||
| 4374 | |||
| 4375 | return ~mask; | ||
| 4376 | } | 4367 | } |
| 4377 | 4368 | ||
| 4378 | /* gen6_set_rps is called to update the frequency request, but should also be | 4369 | /* gen6_set_rps is called to update the frequency request, but should also be |
| @@ -4441,7 +4432,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) | |||
| 4441 | return; | 4432 | return; |
| 4442 | 4433 | ||
| 4443 | /* Mask turbo interrupt so that they will not come in between */ | 4434 | /* Mask turbo interrupt so that they will not come in between */ |
| 4444 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | 4435 | I915_WRITE(GEN6_PMINTRMSK, |
| 4436 | gen6_sanitize_rps_pm_mask(dev_priv, ~0)); | ||
| 4445 | 4437 | ||
| 4446 | vlv_force_gfx_clock(dev_priv, true); | 4438 | vlv_force_gfx_clock(dev_priv, true); |
| 4447 | 4439 | ||
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dcde3798b45..64fdae558d36 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
| @@ -6033,6 +6033,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, | |||
| 6033 | radeon_ring_write(ring, 0); | 6033 | radeon_ring_write(ring, 0); |
| 6034 | radeon_ring_write(ring, 1 << vm_id); | 6034 | radeon_ring_write(ring, 1 << vm_id); |
| 6035 | 6035 | ||
| 6036 | /* wait for the invalidate to complete */ | ||
| 6037 | radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | ||
| 6038 | radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ | ||
| 6039 | WAIT_REG_MEM_FUNCTION(0) | /* always */ | ||
| 6040 | WAIT_REG_MEM_ENGINE(0))); /* me */ | ||
| 6041 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | ||
| 6042 | radeon_ring_write(ring, 0); | ||
| 6043 | radeon_ring_write(ring, 0); /* ref */ | ||
| 6044 | radeon_ring_write(ring, 0); /* mask */ | ||
| 6045 | radeon_ring_write(ring, 0x20); /* poll interval */ | ||
| 6046 | |||
| 6036 | /* compute doesn't have PFP */ | 6047 | /* compute doesn't have PFP */ |
| 6037 | if (usepfp) { | 6048 | if (usepfp) { |
| 6038 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ | 6049 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ |
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index dde5c7e29eb2..42cd0cffe210 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c | |||
| @@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev, | |||
| 816 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 816 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
| 817 | if (flags & R600_PTE_SYSTEM) { | 817 | if (flags & R600_PTE_SYSTEM) { |
| 818 | value = radeon_vm_map_gart(rdev, addr); | 818 | value = radeon_vm_map_gart(rdev, addr); |
| 819 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
| 820 | } else if (flags & R600_PTE_VALID) { | 819 | } else if (flags & R600_PTE_VALID) { |
| 821 | value = addr; | 820 | value = addr; |
| 822 | } else { | 821 | } else { |
| @@ -903,6 +902,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib) | |||
| 903 | void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, | 902 | void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
| 904 | unsigned vm_id, uint64_t pd_addr) | 903 | unsigned vm_id, uint64_t pd_addr) |
| 905 | { | 904 | { |
| 905 | u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | | ||
| 906 | SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ | ||
| 907 | |||
| 906 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); | 908 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
| 907 | if (vm_id < 8) { | 909 | if (vm_id < 8) { |
| 908 | radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); | 910 | radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); |
| @@ -943,5 +945,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, | |||
| 943 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); | 945 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
| 944 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | 946 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
| 945 | radeon_ring_write(ring, 1 << vm_id); | 947 | radeon_ring_write(ring, 1 << vm_id); |
| 948 | |||
| 949 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); | ||
| 950 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | ||
| 951 | radeon_ring_write(ring, 0); | ||
| 952 | radeon_ring_write(ring, 0); /* reference */ | ||
| 953 | radeon_ring_write(ring, 0); /* mask */ | ||
| 954 | radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ | ||
| 946 | } | 955 | } |
| 947 | 956 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 360de9f1f491..aea48c89b241 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
| @@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, | |||
| 2516 | radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); | 2516 | radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); |
| 2517 | radeon_ring_write(ring, 1 << vm_id); | 2517 | radeon_ring_write(ring, 1 << vm_id); |
| 2518 | 2518 | ||
| 2519 | /* wait for the invalidate to complete */ | ||
| 2520 | radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | ||
| 2521 | radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ | ||
| 2522 | WAIT_REG_MEM_ENGINE(0))); /* me */ | ||
| 2523 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | ||
| 2524 | radeon_ring_write(ring, 0); | ||
| 2525 | radeon_ring_write(ring, 0); /* ref */ | ||
| 2526 | radeon_ring_write(ring, 0); /* mask */ | ||
| 2527 | radeon_ring_write(ring, 0x20); /* poll interval */ | ||
| 2528 | |||
| 2519 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ | 2529 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ |
| 2520 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | 2530 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
| 2521 | radeon_ring_write(ring, 0x0); | 2531 | radeon_ring_write(ring, 0x0); |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 50f88611ff60..ce787a9f12c0 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c | |||
| @@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev, | |||
| 372 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 372 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
| 373 | if (flags & R600_PTE_SYSTEM) { | 373 | if (flags & R600_PTE_SYSTEM) { |
| 374 | value = radeon_vm_map_gart(rdev, addr); | 374 | value = radeon_vm_map_gart(rdev, addr); |
| 375 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
| 376 | } else if (flags & R600_PTE_VALID) { | 375 | } else if (flags & R600_PTE_VALID) { |
| 377 | value = addr; | 376 | value = addr; |
| 378 | } else { | 377 | } else { |
| @@ -463,5 +462,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, | |||
| 463 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); | 462 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
| 464 | radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); | 463 | radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); |
| 465 | radeon_ring_write(ring, 1 << vm_id); | 464 | radeon_ring_write(ring, 1 << vm_id); |
| 465 | |||
| 466 | /* wait for invalidate to complete */ | ||
| 467 | radeon_ring_write(ring, DMA_SRBM_READ_PACKET); | ||
| 468 | radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2)); | ||
| 469 | radeon_ring_write(ring, 0); /* mask */ | ||
| 470 | radeon_ring_write(ring, 0); /* value */ | ||
| 466 | } | 471 | } |
| 467 | 472 | ||
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 2e12e4d69253..ad7125486894 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h | |||
| @@ -1133,6 +1133,23 @@ | |||
| 1133 | #define PACKET3_MEM_SEMAPHORE 0x39 | 1133 | #define PACKET3_MEM_SEMAPHORE 0x39 |
| 1134 | #define PACKET3_MPEG_INDEX 0x3A | 1134 | #define PACKET3_MPEG_INDEX 0x3A |
| 1135 | #define PACKET3_WAIT_REG_MEM 0x3C | 1135 | #define PACKET3_WAIT_REG_MEM 0x3C |
| 1136 | #define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) | ||
| 1137 | /* 0 - always | ||
| 1138 | * 1 - < | ||
| 1139 | * 2 - <= | ||
| 1140 | * 3 - == | ||
| 1141 | * 4 - != | ||
| 1142 | * 5 - >= | ||
| 1143 | * 6 - > | ||
| 1144 | */ | ||
| 1145 | #define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) | ||
| 1146 | /* 0 - reg | ||
| 1147 | * 1 - mem | ||
| 1148 | */ | ||
| 1149 | #define WAIT_REG_MEM_ENGINE(x) ((x) << 8) | ||
| 1150 | /* 0 - me | ||
| 1151 | * 1 - pfp | ||
| 1152 | */ | ||
| 1136 | #define PACKET3_MEM_WRITE 0x3D | 1153 | #define PACKET3_MEM_WRITE 0x3D |
| 1137 | #define PACKET3_PFP_SYNC_ME 0x42 | 1154 | #define PACKET3_PFP_SYNC_ME 0x42 |
| 1138 | #define PACKET3_SURFACE_SYNC 0x43 | 1155 | #define PACKET3_SURFACE_SYNC 0x43 |
| @@ -1272,6 +1289,13 @@ | |||
| 1272 | (1 << 21) | \ | 1289 | (1 << 21) | \ |
| 1273 | (((n) & 0xFFFFF) << 0)) | 1290 | (((n) & 0xFFFFF) << 0)) |
| 1274 | 1291 | ||
| 1292 | #define DMA_SRBM_POLL_PACKET ((9 << 28) | \ | ||
| 1293 | (1 << 27) | \ | ||
| 1294 | (1 << 26)) | ||
| 1295 | |||
| 1296 | #define DMA_SRBM_READ_PACKET ((9 << 28) | \ | ||
| 1297 | (1 << 27)) | ||
| 1298 | |||
| 1275 | /* async DMA Packet types */ | 1299 | /* async DMA Packet types */ |
| 1276 | #define DMA_PACKET_WRITE 0x2 | 1300 | #define DMA_PACKET_WRITE 0x2 |
| 1277 | #define DMA_PACKET_COPY 0x3 | 1301 | #define DMA_PACKET_COPY 0x3 |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 74f06d540591..279801ca5110 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
| 644 | return r; | 644 | return r; |
| 645 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; | 645 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
| 646 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; | 646 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
| 647 | rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; | ||
| 647 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; | 648 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
| 648 | return radeon_gart_table_ram_alloc(rdev); | 649 | return radeon_gart_table_ram_alloc(rdev); |
| 649 | } | 650 | } |
| @@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev) | |||
| 681 | WREG32(RADEON_AIC_HI_ADDR, 0); | 682 | WREG32(RADEON_AIC_HI_ADDR, 0); |
| 682 | } | 683 | } |
| 683 | 684 | ||
| 685 | uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags) | ||
| 686 | { | ||
| 687 | return addr; | ||
| 688 | } | ||
| 689 | |||
| 684 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, | 690 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, |
| 685 | uint64_t addr, uint32_t flags) | 691 | uint64_t entry) |
| 686 | { | 692 | { |
| 687 | u32 *gtt = rdev->gart.ptr; | 693 | u32 *gtt = rdev->gart.ptr; |
| 688 | gtt[i] = cpu_to_le32(lower_32_bits(addr)); | 694 | gtt[i] = cpu_to_le32(lower_32_bits(entry)); |
| 689 | } | 695 | } |
| 690 | 696 | ||
| 691 | void r100_pci_gart_fini(struct radeon_device *rdev) | 697 | void r100_pci_gart_fini(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 064ad5569cca..08d68f3e13e9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
| 73 | #define R300_PTE_WRITEABLE (1 << 2) | 73 | #define R300_PTE_WRITEABLE (1 << 2) |
| 74 | #define R300_PTE_READABLE (1 << 3) | 74 | #define R300_PTE_READABLE (1 << 3) |
| 75 | 75 | ||
| 76 | void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | 76 | uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) |
| 77 | uint64_t addr, uint32_t flags) | ||
| 78 | { | 77 | { |
| 79 | void __iomem *ptr = rdev->gart.ptr; | ||
| 80 | |||
| 81 | addr = (lower_32_bits(addr) >> 8) | | 78 | addr = (lower_32_bits(addr) >> 8) | |
| 82 | ((upper_32_bits(addr) & 0xff) << 24); | 79 | ((upper_32_bits(addr) & 0xff) << 24); |
| 83 | if (flags & RADEON_GART_PAGE_READ) | 80 | if (flags & RADEON_GART_PAGE_READ) |
| @@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
| 86 | addr |= R300_PTE_WRITEABLE; | 83 | addr |= R300_PTE_WRITEABLE; |
| 87 | if (!(flags & RADEON_GART_PAGE_SNOOP)) | 84 | if (!(flags & RADEON_GART_PAGE_SNOOP)) |
| 88 | addr |= R300_PTE_UNSNOOPED; | 85 | addr |= R300_PTE_UNSNOOPED; |
| 86 | return addr; | ||
| 87 | } | ||
| 88 | |||
| 89 | void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
| 90 | uint64_t entry) | ||
| 91 | { | ||
| 92 | void __iomem *ptr = rdev->gart.ptr; | ||
| 93 | |||
| 89 | /* on x86 we want this to be CPU endian, on powerpc | 94 | /* on x86 we want this to be CPU endian, on powerpc |
| 90 | * on powerpc without HW swappers, it'll get swapped on way | 95 | * on powerpc without HW swappers, it'll get swapped on way |
| 91 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ | 96 | * into VRAM - so no need for cpu_to_le32 on VRAM tables */ |
| 92 | writel(addr, ((void __iomem *)ptr) + (i * 4)); | 97 | writel(entry, ((void __iomem *)ptr) + (i * 4)); |
| 93 | } | 98 | } |
| 94 | 99 | ||
| 95 | int rv370_pcie_gart_init(struct radeon_device *rdev) | 100 | int rv370_pcie_gart_init(struct radeon_device *rdev) |
| @@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) | |||
| 109 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); | 114 | DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); |
| 110 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; | 115 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
| 111 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; | 116 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
| 117 | rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; | ||
| 112 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; | 118 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
| 113 | return radeon_gart_table_vram_alloc(rdev); | 119 | return radeon_gart_table_vram_alloc(rdev); |
| 114 | } | 120 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 54529b837afa..3f2a8d3febca 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev); | |||
| 242 | * Dummy page | 242 | * Dummy page |
| 243 | */ | 243 | */ |
| 244 | struct radeon_dummy_page { | 244 | struct radeon_dummy_page { |
| 245 | uint64_t entry; | ||
| 245 | struct page *page; | 246 | struct page *page; |
| 246 | dma_addr_t addr; | 247 | dma_addr_t addr; |
| 247 | }; | 248 | }; |
| @@ -645,7 +646,7 @@ struct radeon_gart { | |||
| 645 | unsigned num_cpu_pages; | 646 | unsigned num_cpu_pages; |
| 646 | unsigned table_size; | 647 | unsigned table_size; |
| 647 | struct page **pages; | 648 | struct page **pages; |
| 648 | dma_addr_t *pages_addr; | 649 | uint64_t *pages_entry; |
| 649 | bool ready; | 650 | bool ready; |
| 650 | }; | 651 | }; |
| 651 | 652 | ||
| @@ -1847,8 +1848,9 @@ struct radeon_asic { | |||
| 1847 | /* gart */ | 1848 | /* gart */ |
| 1848 | struct { | 1849 | struct { |
| 1849 | void (*tlb_flush)(struct radeon_device *rdev); | 1850 | void (*tlb_flush)(struct radeon_device *rdev); |
| 1851 | uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); | ||
| 1850 | void (*set_page)(struct radeon_device *rdev, unsigned i, | 1852 | void (*set_page)(struct radeon_device *rdev, unsigned i, |
| 1851 | uint64_t addr, uint32_t flags); | 1853 | uint64_t entry); |
| 1852 | } gart; | 1854 | } gart; |
| 1853 | struct { | 1855 | struct { |
| 1854 | int (*init)(struct radeon_device *rdev); | 1856 | int (*init)(struct radeon_device *rdev); |
| @@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) | |||
| 2852 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) | 2854 | #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
| 2853 | #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) | 2855 | #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) |
| 2854 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) | 2856 | #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) |
| 2855 | #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) | 2857 | #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) |
| 2858 | #define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) | ||
| 2856 | #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) | 2859 | #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) |
| 2857 | #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) | 2860 | #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) |
| 2858 | #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) | 2861 | #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 850de57069be..ed0e10eee2dc 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev) | |||
| 159 | DRM_INFO("Forcing AGP to PCIE mode\n"); | 159 | DRM_INFO("Forcing AGP to PCIE mode\n"); |
| 160 | rdev->flags |= RADEON_IS_PCIE; | 160 | rdev->flags |= RADEON_IS_PCIE; |
| 161 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; | 161 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
| 162 | rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; | ||
| 162 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; | 163 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
| 163 | } else { | 164 | } else { |
| 164 | DRM_INFO("Forcing AGP to PCI mode\n"); | 165 | DRM_INFO("Forcing AGP to PCI mode\n"); |
| 165 | rdev->flags |= RADEON_IS_PCI; | 166 | rdev->flags |= RADEON_IS_PCI; |
| 166 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; | 167 | rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
| 168 | rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; | ||
| 167 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; | 169 | rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
| 168 | } | 170 | } |
| 169 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | 171 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
| @@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = { | |||
| 199 | .mc_wait_for_idle = &r100_mc_wait_for_idle, | 201 | .mc_wait_for_idle = &r100_mc_wait_for_idle, |
| 200 | .gart = { | 202 | .gart = { |
| 201 | .tlb_flush = &r100_pci_gart_tlb_flush, | 203 | .tlb_flush = &r100_pci_gart_tlb_flush, |
| 204 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
| 202 | .set_page = &r100_pci_gart_set_page, | 205 | .set_page = &r100_pci_gart_set_page, |
| 203 | }, | 206 | }, |
| 204 | .ring = { | 207 | .ring = { |
| @@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = { | |||
| 265 | .mc_wait_for_idle = &r100_mc_wait_for_idle, | 268 | .mc_wait_for_idle = &r100_mc_wait_for_idle, |
| 266 | .gart = { | 269 | .gart = { |
| 267 | .tlb_flush = &r100_pci_gart_tlb_flush, | 270 | .tlb_flush = &r100_pci_gart_tlb_flush, |
| 271 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
| 268 | .set_page = &r100_pci_gart_set_page, | 272 | .set_page = &r100_pci_gart_set_page, |
| 269 | }, | 273 | }, |
| 270 | .ring = { | 274 | .ring = { |
| @@ -333,6 +337,20 @@ static struct radeon_asic_ring r300_gfx_ring = { | |||
| 333 | .set_wptr = &r100_gfx_set_wptr, | 337 | .set_wptr = &r100_gfx_set_wptr, |
| 334 | }; | 338 | }; |
| 335 | 339 | ||
| 340 | static struct radeon_asic_ring rv515_gfx_ring = { | ||
| 341 | .ib_execute = &r100_ring_ib_execute, | ||
| 342 | .emit_fence = &r300_fence_ring_emit, | ||
| 343 | .emit_semaphore = &r100_semaphore_ring_emit, | ||
| 344 | .cs_parse = &r300_cs_parse, | ||
| 345 | .ring_start = &rv515_ring_start, | ||
| 346 | .ring_test = &r100_ring_test, | ||
| 347 | .ib_test = &r100_ib_test, | ||
| 348 | .is_lockup = &r100_gpu_is_lockup, | ||
| 349 | .get_rptr = &r100_gfx_get_rptr, | ||
| 350 | .get_wptr = &r100_gfx_get_wptr, | ||
| 351 | .set_wptr = &r100_gfx_set_wptr, | ||
| 352 | }; | ||
| 353 | |||
| 336 | static struct radeon_asic r300_asic = { | 354 | static struct radeon_asic r300_asic = { |
| 337 | .init = &r300_init, | 355 | .init = &r300_init, |
| 338 | .fini = &r300_fini, | 356 | .fini = &r300_fini, |
| @@ -345,6 +363,7 @@ static struct radeon_asic r300_asic = { | |||
| 345 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 363 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
| 346 | .gart = { | 364 | .gart = { |
| 347 | .tlb_flush = &r100_pci_gart_tlb_flush, | 365 | .tlb_flush = &r100_pci_gart_tlb_flush, |
| 366 | .get_page_entry = &r100_pci_gart_get_page_entry, | ||
| 348 | .set_page = &r100_pci_gart_set_page, | 367 | .set_page = &r100_pci_gart_set_page, |
| 349 | }, | 368 | }, |
| 350 | .ring = { | 369 | .ring = { |
| @@ -411,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = { | |||
| 411 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 430 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
| 412 | .gart = { | 431 | .gart = { |
| 413 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 432 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 433 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
| 414 | .set_page = &rv370_pcie_gart_set_page, | 434 | .set_page = &rv370_pcie_gart_set_page, |
| 415 | }, | 435 | }, |
| 416 | .ring = { | 436 | .ring = { |
| @@ -477,6 +497,7 @@ static struct radeon_asic r420_asic = { | |||
| 477 | .mc_wait_for_idle = &r300_mc_wait_for_idle, | 497 | .mc_wait_for_idle = &r300_mc_wait_for_idle, |
| 478 | .gart = { | 498 | .gart = { |
| 479 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 499 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 500 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
| 480 | .set_page = &rv370_pcie_gart_set_page, | 501 | .set_page = &rv370_pcie_gart_set_page, |
| 481 | }, | 502 | }, |
| 482 | .ring = { | 503 | .ring = { |
| @@ -543,6 +564,7 @@ static struct radeon_asic rs400_asic = { | |||
| 543 | .mc_wait_for_idle = &rs400_mc_wait_for_idle, | 564 | .mc_wait_for_idle = &rs400_mc_wait_for_idle, |
| 544 | .gart = { | 565 | .gart = { |
| 545 | .tlb_flush = &rs400_gart_tlb_flush, | 566 | .tlb_flush = &rs400_gart_tlb_flush, |
| 567 | .get_page_entry = &rs400_gart_get_page_entry, | ||
| 546 | .set_page = &rs400_gart_set_page, | 568 | .set_page = &rs400_gart_set_page, |
| 547 | }, | 569 | }, |
| 548 | .ring = { | 570 | .ring = { |
| @@ -609,6 +631,7 @@ static struct radeon_asic rs600_asic = { | |||
| 609 | .mc_wait_for_idle = &rs600_mc_wait_for_idle, | 631 | .mc_wait_for_idle = &rs600_mc_wait_for_idle, |
| 610 | .gart = { | 632 | .gart = { |
| 611 | .tlb_flush = &rs600_gart_tlb_flush, | 633 | .tlb_flush = &rs600_gart_tlb_flush, |
| 634 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 612 | .set_page = &rs600_gart_set_page, | 635 | .set_page = &rs600_gart_set_page, |
| 613 | }, | 636 | }, |
| 614 | .ring = { | 637 | .ring = { |
| @@ -677,6 +700,7 @@ static struct radeon_asic rs690_asic = { | |||
| 677 | .mc_wait_for_idle = &rs690_mc_wait_for_idle, | 700 | .mc_wait_for_idle = &rs690_mc_wait_for_idle, |
| 678 | .gart = { | 701 | .gart = { |
| 679 | .tlb_flush = &rs400_gart_tlb_flush, | 702 | .tlb_flush = &rs400_gart_tlb_flush, |
| 703 | .get_page_entry = &rs400_gart_get_page_entry, | ||
| 680 | .set_page = &rs400_gart_set_page, | 704 | .set_page = &rs400_gart_set_page, |
| 681 | }, | 705 | }, |
| 682 | .ring = { | 706 | .ring = { |
| @@ -745,10 +769,11 @@ static struct radeon_asic rv515_asic = { | |||
| 745 | .mc_wait_for_idle = &rv515_mc_wait_for_idle, | 769 | .mc_wait_for_idle = &rv515_mc_wait_for_idle, |
| 746 | .gart = { | 770 | .gart = { |
| 747 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 771 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 772 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
| 748 | .set_page = &rv370_pcie_gart_set_page, | 773 | .set_page = &rv370_pcie_gart_set_page, |
| 749 | }, | 774 | }, |
| 750 | .ring = { | 775 | .ring = { |
| 751 | [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring | 776 | [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring |
| 752 | }, | 777 | }, |
| 753 | .irq = { | 778 | .irq = { |
| 754 | .set = &rs600_irq_set, | 779 | .set = &rs600_irq_set, |
| @@ -811,10 +836,11 @@ static struct radeon_asic r520_asic = { | |||
| 811 | .mc_wait_for_idle = &r520_mc_wait_for_idle, | 836 | .mc_wait_for_idle = &r520_mc_wait_for_idle, |
| 812 | .gart = { | 837 | .gart = { |
| 813 | .tlb_flush = &rv370_pcie_gart_tlb_flush, | 838 | .tlb_flush = &rv370_pcie_gart_tlb_flush, |
| 839 | .get_page_entry = &rv370_pcie_gart_get_page_entry, | ||
| 814 | .set_page = &rv370_pcie_gart_set_page, | 840 | .set_page = &rv370_pcie_gart_set_page, |
| 815 | }, | 841 | }, |
| 816 | .ring = { | 842 | .ring = { |
| 817 | [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring | 843 | [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring |
| 818 | }, | 844 | }, |
| 819 | .irq = { | 845 | .irq = { |
| 820 | .set = &rs600_irq_set, | 846 | .set = &rs600_irq_set, |
| @@ -905,6 +931,7 @@ static struct radeon_asic r600_asic = { | |||
| 905 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 931 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 906 | .gart = { | 932 | .gart = { |
| 907 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 933 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
| 934 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 908 | .set_page = &rs600_gart_set_page, | 935 | .set_page = &rs600_gart_set_page, |
| 909 | }, | 936 | }, |
| 910 | .ring = { | 937 | .ring = { |
| @@ -990,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = { | |||
| 990 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1017 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 991 | .gart = { | 1018 | .gart = { |
| 992 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1019 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
| 1020 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 993 | .set_page = &rs600_gart_set_page, | 1021 | .set_page = &rs600_gart_set_page, |
| 994 | }, | 1022 | }, |
| 995 | .ring = { | 1023 | .ring = { |
| @@ -1081,6 +1109,7 @@ static struct radeon_asic rs780_asic = { | |||
| 1081 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1109 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 1082 | .gart = { | 1110 | .gart = { |
| 1083 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1111 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
| 1112 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 1084 | .set_page = &rs600_gart_set_page, | 1113 | .set_page = &rs600_gart_set_page, |
| 1085 | }, | 1114 | }, |
| 1086 | .ring = { | 1115 | .ring = { |
| @@ -1185,6 +1214,7 @@ static struct radeon_asic rv770_asic = { | |||
| 1185 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1214 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 1186 | .gart = { | 1215 | .gart = { |
| 1187 | .tlb_flush = &r600_pcie_gart_tlb_flush, | 1216 | .tlb_flush = &r600_pcie_gart_tlb_flush, |
| 1217 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 1188 | .set_page = &rs600_gart_set_page, | 1218 | .set_page = &rs600_gart_set_page, |
| 1189 | }, | 1219 | }, |
| 1190 | .ring = { | 1220 | .ring = { |
| @@ -1303,6 +1333,7 @@ static struct radeon_asic evergreen_asic = { | |||
| 1303 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1333 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 1304 | .gart = { | 1334 | .gart = { |
| 1305 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1335 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
| 1336 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 1306 | .set_page = &rs600_gart_set_page, | 1337 | .set_page = &rs600_gart_set_page, |
| 1307 | }, | 1338 | }, |
| 1308 | .ring = { | 1339 | .ring = { |
| @@ -1395,6 +1426,7 @@ static struct radeon_asic sumo_asic = { | |||
| 1395 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1426 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 1396 | .gart = { | 1427 | .gart = { |
| 1397 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1428 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
| 1429 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 1398 | .set_page = &rs600_gart_set_page, | 1430 | .set_page = &rs600_gart_set_page, |
| 1399 | }, | 1431 | }, |
| 1400 | .ring = { | 1432 | .ring = { |
| @@ -1486,6 +1518,7 @@ static struct radeon_asic btc_asic = { | |||
| 1486 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1518 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 1487 | .gart = { | 1519 | .gart = { |
| 1488 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, | 1520 | .tlb_flush = &evergreen_pcie_gart_tlb_flush, |
| 1521 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 1489 | .set_page = &rs600_gart_set_page, | 1522 | .set_page = &rs600_gart_set_page, |
| 1490 | }, | 1523 | }, |
| 1491 | .ring = { | 1524 | .ring = { |
| @@ -1621,6 +1654,7 @@ static struct radeon_asic cayman_asic = { | |||
| 1621 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1654 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 1622 | .gart = { | 1655 | .gart = { |
| 1623 | .tlb_flush = &cayman_pcie_gart_tlb_flush, | 1656 | .tlb_flush = &cayman_pcie_gart_tlb_flush, |
| 1657 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 1624 | .set_page = &rs600_gart_set_page, | 1658 | .set_page = &rs600_gart_set_page, |
| 1625 | }, | 1659 | }, |
| 1626 | .vm = { | 1660 | .vm = { |
| @@ -1724,6 +1758,7 @@ static struct radeon_asic trinity_asic = { | |||
| 1724 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, | 1758 | .get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
| 1725 | .gart = { | 1759 | .gart = { |
| 1726 | .tlb_flush = &cayman_pcie_gart_tlb_flush, | 1760 | .tlb_flush = &cayman_pcie_gart_tlb_flush, |
| 1761 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 1727 | .set_page = &rs600_gart_set_page, | 1762 | .set_page = &rs600_gart_set_page, |
| 1728 | }, | 1763 | }, |
| 1729 | .vm = { | 1764 | .vm = { |
| @@ -1857,6 +1892,7 @@ static struct radeon_asic si_asic = { | |||
| 1857 | .get_gpu_clock_counter = &si_get_gpu_clock_counter, | 1892 | .get_gpu_clock_counter = &si_get_gpu_clock_counter, |
| 1858 | .gart = { | 1893 | .gart = { |
| 1859 | .tlb_flush = &si_pcie_gart_tlb_flush, | 1894 | .tlb_flush = &si_pcie_gart_tlb_flush, |
| 1895 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 1860 | .set_page = &rs600_gart_set_page, | 1896 | .set_page = &rs600_gart_set_page, |
| 1861 | }, | 1897 | }, |
| 1862 | .vm = { | 1898 | .vm = { |
| @@ -2018,6 +2054,7 @@ static struct radeon_asic ci_asic = { | |||
| 2018 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, | 2054 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, |
| 2019 | .gart = { | 2055 | .gart = { |
| 2020 | .tlb_flush = &cik_pcie_gart_tlb_flush, | 2056 | .tlb_flush = &cik_pcie_gart_tlb_flush, |
| 2057 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 2021 | .set_page = &rs600_gart_set_page, | 2058 | .set_page = &rs600_gart_set_page, |
| 2022 | }, | 2059 | }, |
| 2023 | .vm = { | 2060 | .vm = { |
| @@ -2125,6 +2162,7 @@ static struct radeon_asic kv_asic = { | |||
| 2125 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, | 2162 | .get_gpu_clock_counter = &cik_get_gpu_clock_counter, |
| 2126 | .gart = { | 2163 | .gart = { |
| 2127 | .tlb_flush = &cik_pcie_gart_tlb_flush, | 2164 | .tlb_flush = &cik_pcie_gart_tlb_flush, |
| 2165 | .get_page_entry = &rs600_gart_get_page_entry, | ||
| 2128 | .set_page = &rs600_gart_set_page, | 2166 | .set_page = &rs600_gart_set_page, |
| 2129 | }, | 2167 | }, |
| 2130 | .vm = { | 2168 | .vm = { |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 2a45d548d5ec..8d787d115653 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); | |||
| 67 | int r100_asic_reset(struct radeon_device *rdev); | 67 | int r100_asic_reset(struct radeon_device *rdev); |
| 68 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | 68 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 69 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | 69 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
| 70 | uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
| 70 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, | 71 | void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, |
| 71 | uint64_t addr, uint32_t flags); | 72 | uint64_t entry); |
| 72 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); | 73 | void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); |
| 73 | int r100_irq_set(struct radeon_device *rdev); | 74 | int r100_irq_set(struct radeon_device *rdev); |
| 74 | int r100_irq_process(struct radeon_device *rdev); | 75 | int r100_irq_process(struct radeon_device *rdev); |
| @@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev, | |||
| 172 | struct radeon_fence *fence); | 173 | struct radeon_fence *fence); |
| 173 | extern int r300_cs_parse(struct radeon_cs_parser *p); | 174 | extern int r300_cs_parse(struct radeon_cs_parser *p); |
| 174 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); | 175 | extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
| 176 | extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
| 175 | extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, | 177 | extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, |
| 176 | uint64_t addr, uint32_t flags); | 178 | uint64_t entry); |
| 177 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); | 179 | extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
| 178 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); | 180 | extern int rv370_get_pcie_lanes(struct radeon_device *rdev); |
| 179 | extern void r300_set_reg_safe(struct radeon_device *rdev); | 181 | extern void r300_set_reg_safe(struct radeon_device *rdev); |
| @@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev); | |||
| 208 | extern int rs400_suspend(struct radeon_device *rdev); | 210 | extern int rs400_suspend(struct radeon_device *rdev); |
| 209 | extern int rs400_resume(struct radeon_device *rdev); | 211 | extern int rs400_resume(struct radeon_device *rdev); |
| 210 | void rs400_gart_tlb_flush(struct radeon_device *rdev); | 212 | void rs400_gart_tlb_flush(struct radeon_device *rdev); |
| 213 | uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
| 211 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | 214 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, |
| 212 | uint64_t addr, uint32_t flags); | 215 | uint64_t entry); |
| 213 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 216 | uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
| 214 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 217 | void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 215 | int rs400_gart_init(struct radeon_device *rdev); | 218 | int rs400_gart_init(struct radeon_device *rdev); |
| @@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev); | |||
| 232 | void rs600_irq_disable(struct radeon_device *rdev); | 235 | void rs600_irq_disable(struct radeon_device *rdev); |
| 233 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); | 236 | u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
| 234 | void rs600_gart_tlb_flush(struct radeon_device *rdev); | 237 | void rs600_gart_tlb_flush(struct radeon_device *rdev); |
| 238 | uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); | ||
| 235 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | 239 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, |
| 236 | uint64_t addr, uint32_t flags); | 240 | uint64_t entry); |
| 237 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 241 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
| 238 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 242 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
| 239 | void rs600_bandwidth_update(struct radeon_device *rdev); | 243 | void rs600_bandwidth_update(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0ec65168f331..bd7519fdd3f4 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev) | |||
| 774 | rdev->dummy_page.page = NULL; | 774 | rdev->dummy_page.page = NULL; |
| 775 | return -ENOMEM; | 775 | return -ENOMEM; |
| 776 | } | 776 | } |
| 777 | rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr, | ||
| 778 | RADEON_GART_PAGE_DUMMY); | ||
| 777 | return 0; | 779 | return 0; |
| 778 | } | 780 | } |
| 779 | 781 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 84146d5901aa..5450fa95a47e 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) | |||
| 165 | radeon_bo_unpin(rdev->gart.robj); | 165 | radeon_bo_unpin(rdev->gart.robj); |
| 166 | radeon_bo_unreserve(rdev->gart.robj); | 166 | radeon_bo_unreserve(rdev->gart.robj); |
| 167 | rdev->gart.table_addr = gpu_addr; | 167 | rdev->gart.table_addr = gpu_addr; |
| 168 | |||
| 169 | if (!r) { | ||
| 170 | int i; | ||
| 171 | |||
| 172 | /* We might have dropped some GART table updates while it wasn't | ||
| 173 | * mapped, restore all entries | ||
| 174 | */ | ||
| 175 | for (i = 0; i < rdev->gart.num_gpu_pages; i++) | ||
| 176 | radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); | ||
| 177 | mb(); | ||
| 178 | radeon_gart_tlb_flush(rdev); | ||
| 179 | } | ||
| 180 | |||
| 168 | return r; | 181 | return r; |
| 169 | } | 182 | } |
| 170 | 183 | ||
| @@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
| 228 | unsigned t; | 241 | unsigned t; |
| 229 | unsigned p; | 242 | unsigned p; |
| 230 | int i, j; | 243 | int i, j; |
| 231 | u64 page_base; | ||
| 232 | 244 | ||
| 233 | if (!rdev->gart.ready) { | 245 | if (!rdev->gart.ready) { |
| 234 | WARN(1, "trying to unbind memory from uninitialized GART !\n"); | 246 | WARN(1, "trying to unbind memory from uninitialized GART !\n"); |
| @@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
| 239 | for (i = 0; i < pages; i++, p++) { | 251 | for (i = 0; i < pages; i++, p++) { |
| 240 | if (rdev->gart.pages[p]) { | 252 | if (rdev->gart.pages[p]) { |
| 241 | rdev->gart.pages[p] = NULL; | 253 | rdev->gart.pages[p] = NULL; |
| 242 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; | ||
| 243 | page_base = rdev->gart.pages_addr[p]; | ||
| 244 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 254 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
| 255 | rdev->gart.pages_entry[t] = rdev->dummy_page.entry; | ||
| 245 | if (rdev->gart.ptr) { | 256 | if (rdev->gart.ptr) { |
| 246 | radeon_gart_set_page(rdev, t, page_base, | 257 | radeon_gart_set_page(rdev, t, |
| 247 | RADEON_GART_PAGE_DUMMY); | 258 | rdev->dummy_page.entry); |
| 248 | } | 259 | } |
| 249 | page_base += RADEON_GPU_PAGE_SIZE; | ||
| 250 | } | 260 | } |
| 251 | } | 261 | } |
| 252 | } | 262 | } |
| @@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
| 274 | { | 284 | { |
| 275 | unsigned t; | 285 | unsigned t; |
| 276 | unsigned p; | 286 | unsigned p; |
| 277 | uint64_t page_base; | 287 | uint64_t page_base, page_entry; |
| 278 | int i, j; | 288 | int i, j; |
| 279 | 289 | ||
| 280 | if (!rdev->gart.ready) { | 290 | if (!rdev->gart.ready) { |
| @@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
| 285 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); | 295 | p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
| 286 | 296 | ||
| 287 | for (i = 0; i < pages; i++, p++) { | 297 | for (i = 0; i < pages; i++, p++) { |
| 288 | rdev->gart.pages_addr[p] = dma_addr[i]; | ||
| 289 | rdev->gart.pages[p] = pagelist[i]; | 298 | rdev->gart.pages[p] = pagelist[i]; |
| 290 | if (rdev->gart.ptr) { | 299 | page_base = dma_addr[i]; |
| 291 | page_base = rdev->gart.pages_addr[p]; | 300 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
| 292 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 301 | page_entry = radeon_gart_get_page_entry(page_base, flags); |
| 293 | radeon_gart_set_page(rdev, t, page_base, flags); | 302 | rdev->gart.pages_entry[t] = page_entry; |
| 294 | page_base += RADEON_GPU_PAGE_SIZE; | 303 | if (rdev->gart.ptr) { |
| 304 | radeon_gart_set_page(rdev, t, page_entry); | ||
| 295 | } | 305 | } |
| 306 | page_base += RADEON_GPU_PAGE_SIZE; | ||
| 296 | } | 307 | } |
| 297 | } | 308 | } |
| 298 | mb(); | 309 | mb(); |
| @@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
| 334 | radeon_gart_fini(rdev); | 345 | radeon_gart_fini(rdev); |
| 335 | return -ENOMEM; | 346 | return -ENOMEM; |
| 336 | } | 347 | } |
| 337 | rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * | 348 | rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) * |
| 338 | rdev->gart.num_cpu_pages); | 349 | rdev->gart.num_gpu_pages); |
| 339 | if (rdev->gart.pages_addr == NULL) { | 350 | if (rdev->gart.pages_entry == NULL) { |
| 340 | radeon_gart_fini(rdev); | 351 | radeon_gart_fini(rdev); |
| 341 | return -ENOMEM; | 352 | return -ENOMEM; |
| 342 | } | 353 | } |
| 343 | /* set GART entry to point to the dummy page by default */ | 354 | /* set GART entry to point to the dummy page by default */ |
| 344 | for (i = 0; i < rdev->gart.num_cpu_pages; i++) { | 355 | for (i = 0; i < rdev->gart.num_gpu_pages; i++) |
| 345 | rdev->gart.pages_addr[i] = rdev->dummy_page.addr; | 356 | rdev->gart.pages_entry[i] = rdev->dummy_page.entry; |
| 346 | } | ||
| 347 | return 0; | 357 | return 0; |
| 348 | } | 358 | } |
| 349 | 359 | ||
| @@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev) | |||
| 356 | */ | 366 | */ |
| 357 | void radeon_gart_fini(struct radeon_device *rdev) | 367 | void radeon_gart_fini(struct radeon_device *rdev) |
| 358 | { | 368 | { |
| 359 | if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { | 369 | if (rdev->gart.ready) { |
| 360 | /* unbind pages */ | 370 | /* unbind pages */ |
| 361 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); | 371 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); |
| 362 | } | 372 | } |
| 363 | rdev->gart.ready = false; | 373 | rdev->gart.ready = false; |
| 364 | vfree(rdev->gart.pages); | 374 | vfree(rdev->gart.pages); |
| 365 | vfree(rdev->gart.pages_addr); | 375 | vfree(rdev->gart.pages_entry); |
| 366 | rdev->gart.pages = NULL; | 376 | rdev->gart.pages = NULL; |
| 367 | rdev->gart.pages_addr = NULL; | 377 | rdev->gart.pages_entry = NULL; |
| 368 | 378 | ||
| 369 | radeon_dummy_page_fini(rdev); | 379 | radeon_dummy_page_fini(rdev); |
| 370 | } | 380 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index a46f73737994..d0b4f7d1140d 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -576,7 +576,7 @@ error_unreserve: | |||
| 576 | error_free: | 576 | error_free: |
| 577 | drm_free_large(vm_bos); | 577 | drm_free_large(vm_bos); |
| 578 | 578 | ||
| 579 | if (r) | 579 | if (r && r != -ERESTARTSYS) |
| 580 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); | 580 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); |
| 581 | } | 581 | } |
| 582 | 582 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 8bf87f1203cc..bef9a0953284 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c | |||
| @@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd) | |||
| 436 | static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, | 436 | static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, |
| 437 | uint32_t hpd_size, uint64_t hpd_gpu_addr) | 437 | uint32_t hpd_size, uint64_t hpd_gpu_addr) |
| 438 | { | 438 | { |
| 439 | uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; | 439 | uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1; |
| 440 | uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); | 440 | uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); |
| 441 | 441 | ||
| 442 | lock_srbm(kgd, mec, pipe, 0, 0); | 442 | lock_srbm(kgd, mec, pipe, 0, 0); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 32522cc940a1..f7da8fe96a66 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -1287,8 +1287,39 @@ dpm_failed: | |||
| 1287 | return ret; | 1287 | return ret; |
| 1288 | } | 1288 | } |
| 1289 | 1289 | ||
| 1290 | struct radeon_dpm_quirk { | ||
| 1291 | u32 chip_vendor; | ||
| 1292 | u32 chip_device; | ||
| 1293 | u32 subsys_vendor; | ||
| 1294 | u32 subsys_device; | ||
| 1295 | }; | ||
| 1296 | |||
| 1297 | /* cards with dpm stability problems */ | ||
| 1298 | static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { | ||
| 1299 | /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ | ||
| 1300 | { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, | ||
| 1301 | /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ | ||
| 1302 | { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, | ||
| 1303 | { 0, 0, 0, 0 }, | ||
| 1304 | }; | ||
| 1305 | |||
| 1290 | int radeon_pm_init(struct radeon_device *rdev) | 1306 | int radeon_pm_init(struct radeon_device *rdev) |
| 1291 | { | 1307 | { |
| 1308 | struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; | ||
| 1309 | bool disable_dpm = false; | ||
| 1310 | |||
| 1311 | /* Apply dpm quirks */ | ||
| 1312 | while (p && p->chip_device != 0) { | ||
| 1313 | if (rdev->pdev->vendor == p->chip_vendor && | ||
| 1314 | rdev->pdev->device == p->chip_device && | ||
| 1315 | rdev->pdev->subsystem_vendor == p->subsys_vendor && | ||
| 1316 | rdev->pdev->subsystem_device == p->subsys_device) { | ||
| 1317 | disable_dpm = true; | ||
| 1318 | break; | ||
| 1319 | } | ||
| 1320 | ++p; | ||
| 1321 | } | ||
| 1322 | |||
| 1292 | /* enable dpm on rv6xx+ */ | 1323 | /* enable dpm on rv6xx+ */ |
| 1293 | switch (rdev->family) { | 1324 | switch (rdev->family) { |
| 1294 | case CHIP_RV610: | 1325 | case CHIP_RV610: |
| @@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 1344 | (!(rdev->flags & RADEON_IS_IGP)) && | 1375 | (!(rdev->flags & RADEON_IS_IGP)) && |
| 1345 | (!rdev->smc_fw)) | 1376 | (!rdev->smc_fw)) |
| 1346 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1377 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
| 1378 | else if (disable_dpm && (radeon_dpm == -1)) | ||
| 1379 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
| 1347 | else if (radeon_dpm == 0) | 1380 | else if (radeon_dpm == 0) |
| 1348 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1381 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
| 1349 | else | 1382 | else |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index cde48c42b30a..06d2246d07f1 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
| @@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) | |||
| 587 | uint64_t result; | 587 | uint64_t result; |
| 588 | 588 | ||
| 589 | /* page table offset */ | 589 | /* page table offset */ |
| 590 | result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; | 590 | result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; |
| 591 | 591 | result &= ~RADEON_GPU_PAGE_MASK; | |
| 592 | /* in case cpu page size != gpu page size*/ | ||
| 593 | result |= addr & (~PAGE_MASK); | ||
| 594 | 592 | ||
| 595 | return result; | 593 | return result; |
| 596 | } | 594 | } |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c5799f16aa4b..34e3235f41d2 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev) | |||
| 212 | #define RS400_PTE_WRITEABLE (1 << 2) | 212 | #define RS400_PTE_WRITEABLE (1 << 2) |
| 213 | #define RS400_PTE_READABLE (1 << 3) | 213 | #define RS400_PTE_READABLE (1 << 3) |
| 214 | 214 | ||
| 215 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | 215 | uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) |
| 216 | uint64_t addr, uint32_t flags) | ||
| 217 | { | 216 | { |
| 218 | uint32_t entry; | 217 | uint32_t entry; |
| 219 | u32 *gtt = rdev->gart.ptr; | ||
| 220 | 218 | ||
| 221 | entry = (lower_32_bits(addr) & PAGE_MASK) | | 219 | entry = (lower_32_bits(addr) & PAGE_MASK) | |
| 222 | ((upper_32_bits(addr) & 0xff) << 4); | 220 | ((upper_32_bits(addr) & 0xff) << 4); |
| @@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
| 226 | entry |= RS400_PTE_WRITEABLE; | 224 | entry |= RS400_PTE_WRITEABLE; |
| 227 | if (!(flags & RADEON_GART_PAGE_SNOOP)) | 225 | if (!(flags & RADEON_GART_PAGE_SNOOP)) |
| 228 | entry |= RS400_PTE_UNSNOOPED; | 226 | entry |= RS400_PTE_UNSNOOPED; |
| 229 | entry = cpu_to_le32(entry); | 227 | return entry; |
| 230 | gtt[i] = entry; | 228 | } |
| 229 | |||
| 230 | void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
| 231 | uint64_t entry) | ||
| 232 | { | ||
| 233 | u32 *gtt = rdev->gart.ptr; | ||
| 234 | gtt[i] = cpu_to_le32(lower_32_bits(entry)); | ||
| 231 | } | 235 | } |
| 232 | 236 | ||
| 233 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) | 237 | int rs400_mc_wait_for_idle(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9acb1c3c005b..74bce91aecc1 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev) | |||
| 625 | radeon_gart_table_vram_free(rdev); | 625 | radeon_gart_table_vram_free(rdev); |
| 626 | } | 626 | } |
| 627 | 627 | ||
| 628 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | 628 | uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) |
| 629 | uint64_t addr, uint32_t flags) | ||
| 630 | { | 629 | { |
| 631 | void __iomem *ptr = (void *)rdev->gart.ptr; | ||
| 632 | |||
| 633 | addr = addr & 0xFFFFFFFFFFFFF000ULL; | 630 | addr = addr & 0xFFFFFFFFFFFFF000ULL; |
| 634 | addr |= R600_PTE_SYSTEM; | 631 | addr |= R600_PTE_SYSTEM; |
| 635 | if (flags & RADEON_GART_PAGE_VALID) | 632 | if (flags & RADEON_GART_PAGE_VALID) |
| @@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | |||
| 640 | addr |= R600_PTE_WRITEABLE; | 637 | addr |= R600_PTE_WRITEABLE; |
| 641 | if (flags & RADEON_GART_PAGE_SNOOP) | 638 | if (flags & RADEON_GART_PAGE_SNOOP) |
| 642 | addr |= R600_PTE_SNOOPED; | 639 | addr |= R600_PTE_SNOOPED; |
| 643 | writeq(addr, ptr + (i * 8)); | 640 | return addr; |
| 641 | } | ||
| 642 | |||
| 643 | void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, | ||
| 644 | uint64_t entry) | ||
| 645 | { | ||
| 646 | void __iomem *ptr = (void *)rdev->gart.ptr; | ||
| 647 | writeq(entry, ptr + (i * 8)); | ||
| 644 | } | 648 | } |
| 645 | 649 | ||
| 646 | int rs600_irq_set(struct radeon_device *rdev) | 650 | int rs600_irq_set(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 60df444bd075..5d89b874a1a2 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, | |||
| 5057 | radeon_ring_write(ring, 0); | 5057 | radeon_ring_write(ring, 0); |
| 5058 | radeon_ring_write(ring, 1 << vm_id); | 5058 | radeon_ring_write(ring, 1 << vm_id); |
| 5059 | 5059 | ||
| 5060 | /* wait for the invalidate to complete */ | ||
| 5061 | radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | ||
| 5062 | radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ | ||
| 5063 | WAIT_REG_MEM_ENGINE(0))); /* me */ | ||
| 5064 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); | ||
| 5065 | radeon_ring_write(ring, 0); | ||
| 5066 | radeon_ring_write(ring, 0); /* ref */ | ||
| 5067 | radeon_ring_write(ring, 0); /* mask */ | ||
| 5068 | radeon_ring_write(ring, 0x20); /* poll interval */ | ||
| 5069 | |||
| 5060 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ | 5070 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ |
| 5061 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | 5071 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
| 5062 | radeon_ring_write(ring, 0x0); | 5072 | radeon_ring_write(ring, 0x0); |
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index f5cc777e1c5f..83207929fc62 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c | |||
| @@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev, | |||
| 123 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | 123 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
| 124 | if (flags & R600_PTE_SYSTEM) { | 124 | if (flags & R600_PTE_SYSTEM) { |
| 125 | value = radeon_vm_map_gart(rdev, addr); | 125 | value = radeon_vm_map_gart(rdev, addr); |
| 126 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
| 127 | } else if (flags & R600_PTE_VALID) { | 126 | } else if (flags & R600_PTE_VALID) { |
| 128 | value = addr; | 127 | value = addr; |
| 129 | } else { | 128 | } else { |
| @@ -206,6 +205,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, | |||
| 206 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); | 205 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
| 207 | radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); | 206 | radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); |
| 208 | radeon_ring_write(ring, 1 << vm_id); | 207 | radeon_ring_write(ring, 1 << vm_id); |
| 208 | |||
| 209 | /* wait for invalidate to complete */ | ||
| 210 | radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); | ||
| 211 | radeon_ring_write(ring, VM_INVALIDATE_REQUEST); | ||
| 212 | radeon_ring_write(ring, 0xff << 16); /* retry */ | ||
| 213 | radeon_ring_write(ring, 1 << vm_id); /* mask */ | ||
| 214 | radeon_ring_write(ring, 0); /* value */ | ||
| 215 | radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ | ||
| 209 | } | 216 | } |
| 210 | 217 | ||
| 211 | /** | 218 | /** |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 32e354b8b0ab..eff8a6444956 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev) | |||
| 2908 | return ret; | 2908 | return ret; |
| 2909 | } | 2909 | } |
| 2910 | 2910 | ||
| 2911 | struct si_dpm_quirk { | ||
| 2912 | u32 chip_vendor; | ||
| 2913 | u32 chip_device; | ||
| 2914 | u32 subsys_vendor; | ||
| 2915 | u32 subsys_device; | ||
| 2916 | u32 max_sclk; | ||
| 2917 | u32 max_mclk; | ||
| 2918 | }; | ||
| 2919 | |||
| 2920 | /* cards with dpm stability problems */ | ||
| 2921 | static struct si_dpm_quirk si_dpm_quirk_list[] = { | ||
| 2922 | /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ | ||
| 2923 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, | ||
| 2924 | { 0, 0, 0, 0 }, | ||
| 2925 | }; | ||
| 2926 | |||
| 2911 | static void si_apply_state_adjust_rules(struct radeon_device *rdev, | 2927 | static void si_apply_state_adjust_rules(struct radeon_device *rdev, |
| 2912 | struct radeon_ps *rps) | 2928 | struct radeon_ps *rps) |
| 2913 | { | 2929 | { |
| @@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2918 | u32 mclk, sclk; | 2934 | u32 mclk, sclk; |
| 2919 | u16 vddc, vddci; | 2935 | u16 vddc, vddci; |
| 2920 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; | 2936 | u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; |
| 2937 | u32 max_sclk = 0, max_mclk = 0; | ||
| 2921 | int i; | 2938 | int i; |
| 2939 | struct si_dpm_quirk *p = si_dpm_quirk_list; | ||
| 2940 | |||
| 2941 | /* Apply dpm quirks */ | ||
| 2942 | while (p && p->chip_device != 0) { | ||
| 2943 | if (rdev->pdev->vendor == p->chip_vendor && | ||
| 2944 | rdev->pdev->device == p->chip_device && | ||
| 2945 | rdev->pdev->subsystem_vendor == p->subsys_vendor && | ||
| 2946 | rdev->pdev->subsystem_device == p->subsys_device) { | ||
| 2947 | max_sclk = p->max_sclk; | ||
| 2948 | max_mclk = p->max_mclk; | ||
| 2949 | break; | ||
| 2950 | } | ||
| 2951 | ++p; | ||
| 2952 | } | ||
| 2922 | 2953 | ||
| 2923 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 2954 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
| 2924 | ni_dpm_vblank_too_short(rdev)) | 2955 | ni_dpm_vblank_too_short(rdev)) |
| @@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2972 | if (ps->performance_levels[i].mclk > max_mclk_vddc) | 3003 | if (ps->performance_levels[i].mclk > max_mclk_vddc) |
| 2973 | ps->performance_levels[i].mclk = max_mclk_vddc; | 3004 | ps->performance_levels[i].mclk = max_mclk_vddc; |
| 2974 | } | 3005 | } |
| 3006 | if (max_mclk) { | ||
| 3007 | if (ps->performance_levels[i].mclk > max_mclk) | ||
| 3008 | ps->performance_levels[i].mclk = max_mclk; | ||
| 3009 | } | ||
| 3010 | if (max_sclk) { | ||
| 3011 | if (ps->performance_levels[i].sclk > max_sclk) | ||
| 3012 | ps->performance_levels[i].sclk = max_sclk; | ||
| 3013 | } | ||
| 2975 | } | 3014 | } |
| 2976 | 3015 | ||
| 2977 | /* XXX validate the min clocks required for display */ | 3016 | /* XXX validate the min clocks required for display */ |
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 4069be89e585..84999242c747 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h | |||
| @@ -1632,6 +1632,23 @@ | |||
| 1632 | #define PACKET3_MPEG_INDEX 0x3A | 1632 | #define PACKET3_MPEG_INDEX 0x3A |
| 1633 | #define PACKET3_COPY_DW 0x3B | 1633 | #define PACKET3_COPY_DW 0x3B |
| 1634 | #define PACKET3_WAIT_REG_MEM 0x3C | 1634 | #define PACKET3_WAIT_REG_MEM 0x3C |
| 1635 | #define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) | ||
| 1636 | /* 0 - always | ||
| 1637 | * 1 - < | ||
| 1638 | * 2 - <= | ||
| 1639 | * 3 - == | ||
| 1640 | * 4 - != | ||
| 1641 | * 5 - >= | ||
| 1642 | * 6 - > | ||
| 1643 | */ | ||
| 1644 | #define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) | ||
| 1645 | /* 0 - reg | ||
| 1646 | * 1 - mem | ||
| 1647 | */ | ||
| 1648 | #define WAIT_REG_MEM_ENGINE(x) ((x) << 8) | ||
| 1649 | /* 0 - me | ||
| 1650 | * 1 - pfp | ||
| 1651 | */ | ||
| 1635 | #define PACKET3_MEM_WRITE 0x3D | 1652 | #define PACKET3_MEM_WRITE 0x3D |
| 1636 | #define PACKET3_COPY_DATA 0x40 | 1653 | #define PACKET3_COPY_DATA 0x40 |
| 1637 | #define PACKET3_CP_DMA 0x41 | 1654 | #define PACKET3_CP_DMA 0x41 |
| @@ -1835,6 +1852,7 @@ | |||
| 1835 | #define DMA_PACKET_TRAP 0x7 | 1852 | #define DMA_PACKET_TRAP 0x7 |
| 1836 | #define DMA_PACKET_SRBM_WRITE 0x9 | 1853 | #define DMA_PACKET_SRBM_WRITE 0x9 |
| 1837 | #define DMA_PACKET_CONSTANT_FILL 0xd | 1854 | #define DMA_PACKET_CONSTANT_FILL 0xd |
| 1855 | #define DMA_PACKET_POLL_REG_MEM 0xe | ||
| 1838 | #define DMA_PACKET_NOP 0xf | 1856 | #define DMA_PACKET_NOP 0xf |
| 1839 | 1857 | ||
| 1840 | #define VCE_STATUS 0x20004 | 1858 | #define VCE_STATUS 0x20004 |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7b5d22110f25..6c6b655defcf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv, | |||
| 406 | if (unlikely(ret != 0)) | 406 | if (unlikely(ret != 0)) |
| 407 | --dev_priv->num_3d_resources; | 407 | --dev_priv->num_3d_resources; |
| 408 | } else if (unhide_svga) { | 408 | } else if (unhide_svga) { |
| 409 | mutex_lock(&dev_priv->hw_mutex); | ||
| 410 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 409 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
| 411 | vmw_read(dev_priv, SVGA_REG_ENABLE) & | 410 | vmw_read(dev_priv, SVGA_REG_ENABLE) & |
| 412 | ~SVGA_REG_ENABLE_HIDE); | 411 | ~SVGA_REG_ENABLE_HIDE); |
| 413 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 414 | } | 412 | } |
| 415 | 413 | ||
| 416 | mutex_unlock(&dev_priv->release_mutex); | 414 | mutex_unlock(&dev_priv->release_mutex); |
| @@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv, | |||
| 433 | mutex_lock(&dev_priv->release_mutex); | 431 | mutex_lock(&dev_priv->release_mutex); |
| 434 | if (unlikely(--dev_priv->num_3d_resources == 0)) | 432 | if (unlikely(--dev_priv->num_3d_resources == 0)) |
| 435 | vmw_release_device(dev_priv); | 433 | vmw_release_device(dev_priv); |
| 436 | else if (hide_svga) { | 434 | else if (hide_svga) |
| 437 | mutex_lock(&dev_priv->hw_mutex); | ||
| 438 | vmw_write(dev_priv, SVGA_REG_ENABLE, | 435 | vmw_write(dev_priv, SVGA_REG_ENABLE, |
| 439 | vmw_read(dev_priv, SVGA_REG_ENABLE) | | 436 | vmw_read(dev_priv, SVGA_REG_ENABLE) | |
| 440 | SVGA_REG_ENABLE_HIDE); | 437 | SVGA_REG_ENABLE_HIDE); |
| 441 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 442 | } | ||
| 443 | 438 | ||
| 444 | n3d = (int32_t) dev_priv->num_3d_resources; | 439 | n3d = (int32_t) dev_priv->num_3d_resources; |
| 445 | mutex_unlock(&dev_priv->release_mutex); | 440 | mutex_unlock(&dev_priv->release_mutex); |
| @@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 600 | dev_priv->dev = dev; | 595 | dev_priv->dev = dev; |
| 601 | dev_priv->vmw_chipset = chipset; | 596 | dev_priv->vmw_chipset = chipset; |
| 602 | dev_priv->last_read_seqno = (uint32_t) -100; | 597 | dev_priv->last_read_seqno = (uint32_t) -100; |
| 603 | mutex_init(&dev_priv->hw_mutex); | ||
| 604 | mutex_init(&dev_priv->cmdbuf_mutex); | 598 | mutex_init(&dev_priv->cmdbuf_mutex); |
| 605 | mutex_init(&dev_priv->release_mutex); | 599 | mutex_init(&dev_priv->release_mutex); |
| 606 | mutex_init(&dev_priv->binding_mutex); | 600 | mutex_init(&dev_priv->binding_mutex); |
| 607 | rwlock_init(&dev_priv->resource_lock); | 601 | rwlock_init(&dev_priv->resource_lock); |
| 608 | ttm_lock_init(&dev_priv->reservation_sem); | 602 | ttm_lock_init(&dev_priv->reservation_sem); |
| 603 | spin_lock_init(&dev_priv->hw_lock); | ||
| 604 | spin_lock_init(&dev_priv->waiter_lock); | ||
| 605 | spin_lock_init(&dev_priv->cap_lock); | ||
| 609 | 606 | ||
| 610 | for (i = vmw_res_context; i < vmw_res_max; ++i) { | 607 | for (i = vmw_res_context; i < vmw_res_max; ++i) { |
| 611 | idr_init(&dev_priv->res_idr[i]); | 608 | idr_init(&dev_priv->res_idr[i]); |
| @@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 626 | 623 | ||
| 627 | dev_priv->enable_fb = enable_fbdev; | 624 | dev_priv->enable_fb = enable_fbdev; |
| 628 | 625 | ||
| 629 | mutex_lock(&dev_priv->hw_mutex); | ||
| 630 | |||
| 631 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 626 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
| 632 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); | 627 | svga_id = vmw_read(dev_priv, SVGA_REG_ID); |
| 633 | if (svga_id != SVGA_ID_2) { | 628 | if (svga_id != SVGA_ID_2) { |
| 634 | ret = -ENOSYS; | 629 | ret = -ENOSYS; |
| 635 | DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); | 630 | DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); |
| 636 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 637 | goto out_err0; | 631 | goto out_err0; |
| 638 | } | 632 | } |
| 639 | 633 | ||
| @@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 683 | dev_priv->prim_bb_mem = dev_priv->vram_size; | 677 | dev_priv->prim_bb_mem = dev_priv->vram_size; |
| 684 | 678 | ||
| 685 | ret = vmw_dma_masks(dev_priv); | 679 | ret = vmw_dma_masks(dev_priv); |
| 686 | if (unlikely(ret != 0)) { | 680 | if (unlikely(ret != 0)) |
| 687 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 688 | goto out_err0; | 681 | goto out_err0; |
| 689 | } | ||
| 690 | 682 | ||
| 691 | /* | 683 | /* |
| 692 | * Limit back buffer size to VRAM size. Remove this once | 684 | * Limit back buffer size to VRAM size. Remove this once |
| @@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 695 | if (dev_priv->prim_bb_mem > dev_priv->vram_size) | 687 | if (dev_priv->prim_bb_mem > dev_priv->vram_size) |
| 696 | dev_priv->prim_bb_mem = dev_priv->vram_size; | 688 | dev_priv->prim_bb_mem = dev_priv->vram_size; |
| 697 | 689 | ||
| 698 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 699 | |||
| 700 | vmw_print_capabilities(dev_priv->capabilities); | 690 | vmw_print_capabilities(dev_priv->capabilities); |
| 701 | 691 | ||
| 702 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { | 692 | if (dev_priv->capabilities & SVGA_CAP_GMR2) { |
| @@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev, | |||
| 1160 | if (unlikely(ret != 0)) | 1150 | if (unlikely(ret != 0)) |
| 1161 | return ret; | 1151 | return ret; |
| 1162 | vmw_kms_save_vga(dev_priv); | 1152 | vmw_kms_save_vga(dev_priv); |
| 1163 | mutex_lock(&dev_priv->hw_mutex); | ||
| 1164 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); | 1153 | vmw_write(dev_priv, SVGA_REG_TRACES, 0); |
| 1165 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 1166 | } | 1154 | } |
| 1167 | 1155 | ||
| 1168 | if (active) { | 1156 | if (active) { |
| @@ -1196,9 +1184,7 @@ out_no_active_lock: | |||
| 1196 | if (!dev_priv->enable_fb) { | 1184 | if (!dev_priv->enable_fb) { |
| 1197 | vmw_kms_restore_vga(dev_priv); | 1185 | vmw_kms_restore_vga(dev_priv); |
| 1198 | vmw_3d_resource_dec(dev_priv, true); | 1186 | vmw_3d_resource_dec(dev_priv, true); |
| 1199 | mutex_lock(&dev_priv->hw_mutex); | ||
| 1200 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | 1187 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
| 1201 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 1202 | } | 1188 | } |
| 1203 | return ret; | 1189 | return ret; |
| 1204 | } | 1190 | } |
| @@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
| 1233 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); | 1219 | DRM_ERROR("Unable to clean VRAM on master drop.\n"); |
| 1234 | vmw_kms_restore_vga(dev_priv); | 1220 | vmw_kms_restore_vga(dev_priv); |
| 1235 | vmw_3d_resource_dec(dev_priv, true); | 1221 | vmw_3d_resource_dec(dev_priv, true); |
| 1236 | mutex_lock(&dev_priv->hw_mutex); | ||
| 1237 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); | 1222 | vmw_write(dev_priv, SVGA_REG_TRACES, 1); |
| 1238 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 1239 | } | 1223 | } |
| 1240 | 1224 | ||
| 1241 | dev_priv->active_master = &dev_priv->fbdev_master; | 1225 | dev_priv->active_master = &dev_priv->fbdev_master; |
| @@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev) | |||
| 1367 | struct drm_device *dev = pci_get_drvdata(pdev); | 1351 | struct drm_device *dev = pci_get_drvdata(pdev); |
| 1368 | struct vmw_private *dev_priv = vmw_priv(dev); | 1352 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 1369 | 1353 | ||
| 1370 | mutex_lock(&dev_priv->hw_mutex); | ||
| 1371 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); | 1354 | vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); |
| 1372 | (void) vmw_read(dev_priv, SVGA_REG_ID); | 1355 | (void) vmw_read(dev_priv, SVGA_REG_ID); |
| 1373 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 1374 | 1356 | ||
| 1375 | /** | 1357 | /** |
| 1376 | * Reclaim 3d reference held by fbdev and potentially | 1358 | * Reclaim 3d reference held by fbdev and potentially |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ee799b43d5d..d26a6daa9719 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -399,7 +399,8 @@ struct vmw_private { | |||
| 399 | uint32_t memory_size; | 399 | uint32_t memory_size; |
| 400 | bool has_gmr; | 400 | bool has_gmr; |
| 401 | bool has_mob; | 401 | bool has_mob; |
| 402 | struct mutex hw_mutex; | 402 | spinlock_t hw_lock; |
| 403 | spinlock_t cap_lock; | ||
| 403 | 404 | ||
| 404 | /* | 405 | /* |
| 405 | * VGA registers. | 406 | * VGA registers. |
| @@ -449,8 +450,9 @@ struct vmw_private { | |||
| 449 | atomic_t marker_seq; | 450 | atomic_t marker_seq; |
| 450 | wait_queue_head_t fence_queue; | 451 | wait_queue_head_t fence_queue; |
| 451 | wait_queue_head_t fifo_queue; | 452 | wait_queue_head_t fifo_queue; |
| 452 | int fence_queue_waiters; /* Protected by hw_mutex */ | 453 | spinlock_t waiter_lock; |
| 453 | int goal_queue_waiters; /* Protected by hw_mutex */ | 454 | int fence_queue_waiters; /* Protected by waiter_lock */ |
| 455 | int goal_queue_waiters; /* Protected by waiter_lock */ | ||
| 454 | atomic_t fifo_queue_waiters; | 456 | atomic_t fifo_queue_waiters; |
| 455 | uint32_t last_read_seqno; | 457 | uint32_t last_read_seqno; |
| 456 | spinlock_t irq_lock; | 458 | spinlock_t irq_lock; |
| @@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master) | |||
| 553 | return (struct vmw_master *) master->driver_priv; | 555 | return (struct vmw_master *) master->driver_priv; |
| 554 | } | 556 | } |
| 555 | 557 | ||
| 558 | /* | ||
| 559 | * The locking here is fine-grained, so that it is performed once | ||
| 560 | * for every read- and write operation. This is of course costly, but we | ||
| 561 | * don't perform much register access in the timing critical paths anyway. | ||
| 562 | * Instead we have the extra benefit of being sure that we don't forget | ||
| 563 | * the hw lock around register accesses. | ||
| 564 | */ | ||
| 556 | static inline void vmw_write(struct vmw_private *dev_priv, | 565 | static inline void vmw_write(struct vmw_private *dev_priv, |
| 557 | unsigned int offset, uint32_t value) | 566 | unsigned int offset, uint32_t value) |
| 558 | { | 567 | { |
| 568 | unsigned long irq_flags; | ||
| 569 | |||
| 570 | spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); | ||
| 559 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); | 571 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); |
| 560 | outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); | 572 | outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); |
| 573 | spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); | ||
| 561 | } | 574 | } |
| 562 | 575 | ||
| 563 | static inline uint32_t vmw_read(struct vmw_private *dev_priv, | 576 | static inline uint32_t vmw_read(struct vmw_private *dev_priv, |
| 564 | unsigned int offset) | 577 | unsigned int offset) |
| 565 | { | 578 | { |
| 566 | uint32_t val; | 579 | unsigned long irq_flags; |
| 580 | u32 val; | ||
| 567 | 581 | ||
| 582 | spin_lock_irqsave(&dev_priv->hw_lock, irq_flags); | ||
| 568 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); | 583 | outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); |
| 569 | val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); | 584 | val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); |
| 585 | spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags); | ||
| 586 | |||
| 570 | return val; | 587 | return val; |
| 571 | } | 588 | } |
| 572 | 589 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index b7594cb758af..945f1e0dad92 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
| @@ -35,7 +35,7 @@ struct vmw_fence_manager { | |||
| 35 | struct vmw_private *dev_priv; | 35 | struct vmw_private *dev_priv; |
| 36 | spinlock_t lock; | 36 | spinlock_t lock; |
| 37 | struct list_head fence_list; | 37 | struct list_head fence_list; |
| 38 | struct work_struct work, ping_work; | 38 | struct work_struct work; |
| 39 | u32 user_fence_size; | 39 | u32 user_fence_size; |
| 40 | u32 fence_size; | 40 | u32 fence_size; |
| 41 | u32 event_fence_action_size; | 41 | u32 event_fence_action_size; |
| @@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f) | |||
| 134 | return "svga"; | 134 | return "svga"; |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static void vmw_fence_ping_func(struct work_struct *work) | ||
| 138 | { | ||
| 139 | struct vmw_fence_manager *fman = | ||
| 140 | container_of(work, struct vmw_fence_manager, ping_work); | ||
| 141 | |||
| 142 | vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC); | ||
| 143 | } | ||
| 144 | |||
| 145 | static bool vmw_fence_enable_signaling(struct fence *f) | 137 | static bool vmw_fence_enable_signaling(struct fence *f) |
| 146 | { | 138 | { |
| 147 | struct vmw_fence_obj *fence = | 139 | struct vmw_fence_obj *fence = |
| @@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f) | |||
| 155 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) | 147 | if (seqno - fence->base.seqno < VMW_FENCE_WRAP) |
| 156 | return false; | 148 | return false; |
| 157 | 149 | ||
| 158 | if (mutex_trylock(&dev_priv->hw_mutex)) { | 150 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); |
| 159 | vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC); | ||
| 160 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 161 | } else | ||
| 162 | schedule_work(&fman->ping_work); | ||
| 163 | 151 | ||
| 164 | return true; | 152 | return true; |
| 165 | } | 153 | } |
| @@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) | |||
| 305 | INIT_LIST_HEAD(&fman->fence_list); | 293 | INIT_LIST_HEAD(&fman->fence_list); |
| 306 | INIT_LIST_HEAD(&fman->cleanup_list); | 294 | INIT_LIST_HEAD(&fman->cleanup_list); |
| 307 | INIT_WORK(&fman->work, &vmw_fence_work_func); | 295 | INIT_WORK(&fman->work, &vmw_fence_work_func); |
| 308 | INIT_WORK(&fman->ping_work, &vmw_fence_ping_func); | ||
| 309 | fman->fifo_down = true; | 296 | fman->fifo_down = true; |
| 310 | fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); | 297 | fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)); |
| 311 | fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); | 298 | fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); |
| @@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman) | |||
| 323 | bool lists_empty; | 310 | bool lists_empty; |
| 324 | 311 | ||
| 325 | (void) cancel_work_sync(&fman->work); | 312 | (void) cancel_work_sync(&fman->work); |
| 326 | (void) cancel_work_sync(&fman->ping_work); | ||
| 327 | 313 | ||
| 328 | spin_lock_irqsave(&fman->lock, irq_flags); | 314 | spin_lock_irqsave(&fman->lock, irq_flags); |
| 329 | lists_empty = list_empty(&fman->fence_list) && | 315 | lists_empty = list_empty(&fman->fence_list) && |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 09e10aefcd8e..39f2b03888e7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) | |||
| 44 | if (!dev_priv->has_mob) | 44 | if (!dev_priv->has_mob) |
| 45 | return false; | 45 | return false; |
| 46 | 46 | ||
| 47 | mutex_lock(&dev_priv->hw_mutex); | 47 | spin_lock(&dev_priv->cap_lock); |
| 48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); | 48 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); |
| 49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 49 | result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
| 50 | mutex_unlock(&dev_priv->hw_mutex); | 50 | spin_unlock(&dev_priv->cap_lock); |
| 51 | 51 | ||
| 52 | return (result != 0); | 52 | return (result != 0); |
| 53 | } | 53 | } |
| @@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 120 | DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); | 120 | DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); |
| 121 | DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); | 121 | DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); |
| 122 | 122 | ||
| 123 | mutex_lock(&dev_priv->hw_mutex); | ||
| 124 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | 123 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); |
| 125 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | 124 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); |
| 126 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); | 125 | dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); |
| @@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 143 | mb(); | 142 | mb(); |
| 144 | 143 | ||
| 145 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); | 144 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); |
| 146 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 147 | 145 | ||
| 148 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); | 146 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); |
| 149 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); | 147 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); |
| @@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 160 | return vmw_fifo_send_fence(dev_priv, &dummy); | 158 | return vmw_fifo_send_fence(dev_priv, &dummy); |
| 161 | } | 159 | } |
| 162 | 160 | ||
| 163 | void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason) | 161 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) |
| 164 | { | 162 | { |
| 165 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 163 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| 164 | static DEFINE_SPINLOCK(ping_lock); | ||
| 165 | unsigned long irq_flags; | ||
| 166 | 166 | ||
| 167 | /* | ||
| 168 | * The ping_lock is needed because we don't have an atomic | ||
| 169 | * test-and-set of the SVGA_FIFO_BUSY register. | ||
| 170 | */ | ||
| 171 | spin_lock_irqsave(&ping_lock, irq_flags); | ||
| 167 | if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { | 172 | if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { |
| 168 | iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); | 173 | iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); |
| 169 | vmw_write(dev_priv, SVGA_REG_SYNC, reason); | 174 | vmw_write(dev_priv, SVGA_REG_SYNC, reason); |
| 170 | } | 175 | } |
| 171 | } | 176 | spin_unlock_irqrestore(&ping_lock, irq_flags); |
| 172 | |||
| 173 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) | ||
| 174 | { | ||
| 175 | mutex_lock(&dev_priv->hw_mutex); | ||
| 176 | |||
| 177 | vmw_fifo_ping_host_locked(dev_priv, reason); | ||
| 178 | |||
| 179 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 180 | } | 177 | } |
| 181 | 178 | ||
| 182 | void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | 179 | void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) |
| 183 | { | 180 | { |
| 184 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | 181 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; |
| 185 | 182 | ||
| 186 | mutex_lock(&dev_priv->hw_mutex); | ||
| 187 | |||
| 188 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | 183 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); |
| 189 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) | 184 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) |
| 190 | ; | 185 | ; |
| @@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
| 198 | vmw_write(dev_priv, SVGA_REG_TRACES, | 193 | vmw_write(dev_priv, SVGA_REG_TRACES, |
| 199 | dev_priv->traces_state); | 194 | dev_priv->traces_state); |
| 200 | 195 | ||
| 201 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 202 | vmw_marker_queue_takedown(&fifo->marker_queue); | 196 | vmw_marker_queue_takedown(&fifo->marker_queue); |
| 203 | 197 | ||
| 204 | if (likely(fifo->static_buffer != NULL)) { | 198 | if (likely(fifo->static_buffer != NULL)) { |
| @@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
| 271 | return vmw_fifo_wait_noirq(dev_priv, bytes, | 265 | return vmw_fifo_wait_noirq(dev_priv, bytes, |
| 272 | interruptible, timeout); | 266 | interruptible, timeout); |
| 273 | 267 | ||
| 274 | mutex_lock(&dev_priv->hw_mutex); | 268 | spin_lock(&dev_priv->waiter_lock); |
| 275 | if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { | 269 | if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { |
| 276 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | 270 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); |
| 277 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, | 271 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, |
| @@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
| 280 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 274 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
| 281 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 275 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
| 282 | } | 276 | } |
| 283 | mutex_unlock(&dev_priv->hw_mutex); | 277 | spin_unlock(&dev_priv->waiter_lock); |
| 284 | 278 | ||
| 285 | if (interruptible) | 279 | if (interruptible) |
| 286 | ret = wait_event_interruptible_timeout | 280 | ret = wait_event_interruptible_timeout |
| @@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
| 296 | else if (likely(ret > 0)) | 290 | else if (likely(ret > 0)) |
| 297 | ret = 0; | 291 | ret = 0; |
| 298 | 292 | ||
| 299 | mutex_lock(&dev_priv->hw_mutex); | 293 | spin_lock(&dev_priv->waiter_lock); |
| 300 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { | 294 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { |
| 301 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | 295 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); |
| 302 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; | 296 | dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS; |
| 303 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 297 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
| 304 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 298 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
| 305 | } | 299 | } |
| 306 | mutex_unlock(&dev_priv->hw_mutex); | 300 | spin_unlock(&dev_priv->waiter_lock); |
| 307 | 301 | ||
| 308 | return ret; | 302 | return ret; |
| 309 | } | 303 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 37881ecf5d7a..69c8ce23123c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
| @@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, | |||
| 135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); | 135 | (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); |
| 136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; | 136 | compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; |
| 137 | 137 | ||
| 138 | mutex_lock(&dev_priv->hw_mutex); | 138 | spin_lock(&dev_priv->cap_lock); |
| 139 | for (i = 0; i < max_size; ++i) { | 139 | for (i = 0; i < max_size; ++i) { |
| 140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | 140 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); |
| 141 | compat_cap->pairs[i][0] = i; | 141 | compat_cap->pairs[i][0] = i; |
| 142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 142 | compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
| 143 | } | 143 | } |
| 144 | mutex_unlock(&dev_priv->hw_mutex); | 144 | spin_unlock(&dev_priv->cap_lock); |
| 145 | 145 | ||
| 146 | return 0; | 146 | return 0; |
| 147 | } | 147 | } |
| @@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, | |||
| 191 | if (num > SVGA3D_DEVCAP_MAX) | 191 | if (num > SVGA3D_DEVCAP_MAX) |
| 192 | num = SVGA3D_DEVCAP_MAX; | 192 | num = SVGA3D_DEVCAP_MAX; |
| 193 | 193 | ||
| 194 | mutex_lock(&dev_priv->hw_mutex); | 194 | spin_lock(&dev_priv->cap_lock); |
| 195 | for (i = 0; i < num; ++i) { | 195 | for (i = 0; i < num; ++i) { |
| 196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); | 196 | vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); |
| 197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); | 197 | *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); |
| 198 | } | 198 | } |
| 199 | mutex_unlock(&dev_priv->hw_mutex); | 199 | spin_unlock(&dev_priv->cap_lock); |
| 200 | } else if (gb_objects) { | 200 | } else if (gb_objects) { |
| 201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); | 201 | ret = vmw_fill_compat_cap(dev_priv, bounce, size); |
| 202 | if (unlikely(ret != 0)) | 202 | if (unlikely(ret != 0)) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 0c423766c441..9fe9827ee499 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
| @@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg) | |||
| 62 | 62 | ||
| 63 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) | 63 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) |
| 64 | { | 64 | { |
| 65 | uint32_t busy; | ||
| 66 | 65 | ||
| 67 | mutex_lock(&dev_priv->hw_mutex); | 66 | return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); |
| 68 | busy = vmw_read(dev_priv, SVGA_REG_BUSY); | ||
| 69 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 70 | |||
| 71 | return (busy == 0); | ||
| 72 | } | 67 | } |
| 73 | 68 | ||
| 74 | void vmw_update_seqno(struct vmw_private *dev_priv, | 69 | void vmw_update_seqno(struct vmw_private *dev_priv, |
| @@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
| 184 | 179 | ||
| 185 | void vmw_seqno_waiter_add(struct vmw_private *dev_priv) | 180 | void vmw_seqno_waiter_add(struct vmw_private *dev_priv) |
| 186 | { | 181 | { |
| 187 | mutex_lock(&dev_priv->hw_mutex); | 182 | spin_lock(&dev_priv->waiter_lock); |
| 188 | if (dev_priv->fence_queue_waiters++ == 0) { | 183 | if (dev_priv->fence_queue_waiters++ == 0) { |
| 189 | unsigned long irq_flags; | 184 | unsigned long irq_flags; |
| 190 | 185 | ||
| @@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv) | |||
| 195 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 190 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
| 196 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 191 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
| 197 | } | 192 | } |
| 198 | mutex_unlock(&dev_priv->hw_mutex); | 193 | spin_unlock(&dev_priv->waiter_lock); |
| 199 | } | 194 | } |
| 200 | 195 | ||
| 201 | void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) | 196 | void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) |
| 202 | { | 197 | { |
| 203 | mutex_lock(&dev_priv->hw_mutex); | 198 | spin_lock(&dev_priv->waiter_lock); |
| 204 | if (--dev_priv->fence_queue_waiters == 0) { | 199 | if (--dev_priv->fence_queue_waiters == 0) { |
| 205 | unsigned long irq_flags; | 200 | unsigned long irq_flags; |
| 206 | 201 | ||
| @@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) | |||
| 209 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 204 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
| 210 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 205 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
| 211 | } | 206 | } |
| 212 | mutex_unlock(&dev_priv->hw_mutex); | 207 | spin_unlock(&dev_priv->waiter_lock); |
| 213 | } | 208 | } |
| 214 | 209 | ||
| 215 | 210 | ||
| 216 | void vmw_goal_waiter_add(struct vmw_private *dev_priv) | 211 | void vmw_goal_waiter_add(struct vmw_private *dev_priv) |
| 217 | { | 212 | { |
| 218 | mutex_lock(&dev_priv->hw_mutex); | 213 | spin_lock(&dev_priv->waiter_lock); |
| 219 | if (dev_priv->goal_queue_waiters++ == 0) { | 214 | if (dev_priv->goal_queue_waiters++ == 0) { |
| 220 | unsigned long irq_flags; | 215 | unsigned long irq_flags; |
| 221 | 216 | ||
| @@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv) | |||
| 226 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 221 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
| 227 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 222 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
| 228 | } | 223 | } |
| 229 | mutex_unlock(&dev_priv->hw_mutex); | 224 | spin_unlock(&dev_priv->waiter_lock); |
| 230 | } | 225 | } |
| 231 | 226 | ||
| 232 | void vmw_goal_waiter_remove(struct vmw_private *dev_priv) | 227 | void vmw_goal_waiter_remove(struct vmw_private *dev_priv) |
| 233 | { | 228 | { |
| 234 | mutex_lock(&dev_priv->hw_mutex); | 229 | spin_lock(&dev_priv->waiter_lock); |
| 235 | if (--dev_priv->goal_queue_waiters == 0) { | 230 | if (--dev_priv->goal_queue_waiters == 0) { |
| 236 | unsigned long irq_flags; | 231 | unsigned long irq_flags; |
| 237 | 232 | ||
| @@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv) | |||
| 240 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); | 235 | vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); |
| 241 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | 236 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); |
| 242 | } | 237 | } |
| 243 | mutex_unlock(&dev_priv->hw_mutex); | 238 | spin_unlock(&dev_priv->waiter_lock); |
| 244 | } | 239 | } |
| 245 | 240 | ||
| 246 | int vmw_wait_seqno(struct vmw_private *dev_priv, | 241 | int vmw_wait_seqno(struct vmw_private *dev_priv, |
| @@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev) | |||
| 315 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | 310 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) |
| 316 | return; | 311 | return; |
| 317 | 312 | ||
| 318 | mutex_lock(&dev_priv->hw_mutex); | ||
| 319 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); | 313 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); |
| 320 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 321 | 314 | ||
| 322 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 315 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
| 323 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | 316 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3725b521d931..8725b79e7847 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) | |||
| 1828 | struct vmw_private *dev_priv = vmw_priv(dev); | 1828 | struct vmw_private *dev_priv = vmw_priv(dev); |
| 1829 | struct vmw_display_unit *du = vmw_connector_to_du(connector); | 1829 | struct vmw_display_unit *du = vmw_connector_to_du(connector); |
| 1830 | 1830 | ||
| 1831 | mutex_lock(&dev_priv->hw_mutex); | ||
| 1832 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); | 1831 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); |
| 1833 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 1834 | 1832 | ||
| 1835 | return ((vmw_connector_to_du(connector)->unit < num_displays && | 1833 | return ((vmw_connector_to_du(connector)->unit < num_displays && |
| 1836 | du->pref_active) ? | 1834 | du->pref_active) ? |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6529c09c46f0..a7de26d1ac80 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
| @@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON | |||
| 574 | for those channels specified in the map. This map can be provided | 574 | for those channels specified in the map. This map can be provided |
| 575 | either via platform data or the device tree bindings. | 575 | either via platform data or the device tree bindings. |
| 576 | 576 | ||
| 577 | config SENSORS_I5500 | ||
| 578 | tristate "Intel 5500/5520/X58 temperature sensor" | ||
| 579 | depends on X86 && PCI | ||
| 580 | help | ||
| 581 | If you say yes here you get support for the temperature | ||
| 582 | sensor inside the Intel 5500, 5520 and X58 chipsets. | ||
| 583 | |||
| 584 | This driver can also be built as a module. If so, the module | ||
| 585 | will be called i5500_temp. | ||
| 586 | |||
| 577 | config SENSORS_CORETEMP | 587 | config SENSORS_CORETEMP |
| 578 | tristate "Intel Core/Core2/Atom temperature sensor" | 588 | tristate "Intel Core/Core2/Atom temperature sensor" |
| 579 | depends on X86 | 589 | depends on X86 |
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 67280643bcf0..6c941472e707 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile | |||
| @@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o | |||
| 68 | obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o | 68 | obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o |
| 69 | obj-$(CONFIG_SENSORS_HTU21) += htu21.o | 69 | obj-$(CONFIG_SENSORS_HTU21) += htu21.o |
| 70 | obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o | 70 | obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o |
| 71 | obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o | ||
| 71 | obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o | 72 | obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o |
| 72 | obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o | 73 | obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o |
| 73 | obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o | 74 | obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o |
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c new file mode 100644 index 000000000000..3e3ccbf18b4e --- /dev/null +++ b/drivers/hwmon/i5500_temp.c | |||
| @@ -0,0 +1,149 @@ | |||
| 1 | /* | ||
| 2 | * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/init.h> | ||
| 19 | #include <linux/slab.h> | ||
| 20 | #include <linux/jiffies.h> | ||
| 21 | #include <linux/device.h> | ||
| 22 | #include <linux/pci.h> | ||
| 23 | #include <linux/hwmon.h> | ||
| 24 | #include <linux/hwmon-sysfs.h> | ||
| 25 | #include <linux/err.h> | ||
| 26 | #include <linux/mutex.h> | ||
| 27 | |||
| 28 | /* Register definitions from datasheet */ | ||
| 29 | #define REG_TSTHRCATA 0xE2 | ||
| 30 | #define REG_TSCTRL 0xE8 | ||
| 31 | #define REG_TSTHRRPEX 0xEB | ||
| 32 | #define REG_TSTHRLO 0xEC | ||
| 33 | #define REG_TSTHRHI 0xEE | ||
| 34 | #define REG_CTHINT 0xF0 | ||
| 35 | #define REG_TSFSC 0xF3 | ||
| 36 | #define REG_CTSTS 0xF4 | ||
| 37 | #define REG_TSTHRRQPI 0xF5 | ||
| 38 | #define REG_CTCTRL 0xF7 | ||
| 39 | #define REG_TSTIMER 0xF8 | ||
| 40 | |||
| 41 | /* | ||
| 42 | * Sysfs stuff | ||
| 43 | */ | ||
| 44 | |||
| 45 | /* Sensor resolution : 0.5 degree C */ | ||
| 46 | static ssize_t show_temp(struct device *dev, | ||
| 47 | struct device_attribute *devattr, char *buf) | ||
| 48 | { | ||
| 49 | struct pci_dev *pdev = to_pci_dev(dev->parent); | ||
| 50 | long temp; | ||
| 51 | u16 tsthrhi; | ||
| 52 | s8 tsfsc; | ||
| 53 | |||
| 54 | pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi); | ||
| 55 | pci_read_config_byte(pdev, REG_TSFSC, &tsfsc); | ||
| 56 | temp = ((long)tsthrhi - tsfsc) * 500; | ||
| 57 | |||
| 58 | return sprintf(buf, "%ld\n", temp); | ||
| 59 | } | ||
| 60 | |||
| 61 | static ssize_t show_thresh(struct device *dev, | ||
| 62 | struct device_attribute *devattr, char *buf) | ||
| 63 | { | ||
| 64 | struct pci_dev *pdev = to_pci_dev(dev->parent); | ||
| 65 | int reg = to_sensor_dev_attr(devattr)->index; | ||
| 66 | long temp; | ||
| 67 | u16 tsthr; | ||
| 68 | |||
| 69 | pci_read_config_word(pdev, reg, &tsthr); | ||
| 70 | temp = tsthr * 500; | ||
| 71 | |||
| 72 | return sprintf(buf, "%ld\n", temp); | ||
| 73 | } | ||
| 74 | |||
| 75 | static ssize_t show_alarm(struct device *dev, | ||
| 76 | struct device_attribute *devattr, char *buf) | ||
| 77 | { | ||
| 78 | struct pci_dev *pdev = to_pci_dev(dev->parent); | ||
| 79 | int nr = to_sensor_dev_attr(devattr)->index; | ||
| 80 | u8 ctsts; | ||
| 81 | |||
| 82 | pci_read_config_byte(pdev, REG_CTSTS, &ctsts); | ||
| 83 | return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr)); | ||
| 84 | } | ||
| 85 | |||
| 86 | static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL); | ||
| 87 | static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2); | ||
| 88 | static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC); | ||
| 89 | static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE); | ||
| 90 | static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0); | ||
| 91 | static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1); | ||
| 92 | |||
| 93 | static struct attribute *i5500_temp_attrs[] = { | ||
| 94 | &dev_attr_temp1_input.attr, | ||
| 95 | &sensor_dev_attr_temp1_crit.dev_attr.attr, | ||
| 96 | &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, | ||
| 97 | &sensor_dev_attr_temp1_max.dev_attr.attr, | ||
| 98 | &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, | ||
| 99 | &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, | ||
| 100 | NULL | ||
| 101 | }; | ||
| 102 | |||
| 103 | ATTRIBUTE_GROUPS(i5500_temp); | ||
| 104 | |||
| 105 | static const struct pci_device_id i5500_temp_ids[] = { | ||
| 106 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) }, | ||
| 107 | { 0 }, | ||
| 108 | }; | ||
| 109 | |||
| 110 | MODULE_DEVICE_TABLE(pci, i5500_temp_ids); | ||
| 111 | |||
| 112 | static int i5500_temp_probe(struct pci_dev *pdev, | ||
| 113 | const struct pci_device_id *id) | ||
| 114 | { | ||
| 115 | int err; | ||
| 116 | struct device *hwmon_dev; | ||
| 117 | u32 tstimer; | ||
| 118 | s8 tsfsc; | ||
| 119 | |||
| 120 | err = pci_enable_device(pdev); | ||
| 121 | if (err) { | ||
| 122 | dev_err(&pdev->dev, "Failed to enable device\n"); | ||
| 123 | return err; | ||
| 124 | } | ||
| 125 | |||
| 126 | pci_read_config_byte(pdev, REG_TSFSC, &tsfsc); | ||
| 127 | pci_read_config_dword(pdev, REG_TSTIMER, &tstimer); | ||
| 128 | if (tsfsc == 0x7F && tstimer == 0x07D30D40) { | ||
| 129 | dev_notice(&pdev->dev, "Sensor seems to be disabled\n"); | ||
| 130 | return -ENODEV; | ||
| 131 | } | ||
| 132 | |||
| 133 | hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, | ||
| 134 | "intel5500", NULL, | ||
| 135 | i5500_temp_groups); | ||
| 136 | return PTR_ERR_OR_ZERO(hwmon_dev); | ||
| 137 | } | ||
| 138 | |||
| 139 | static struct pci_driver i5500_temp_driver = { | ||
| 140 | .name = "i5500_temp", | ||
| 141 | .id_table = i5500_temp_ids, | ||
| 142 | .probe = i5500_temp_probe, | ||
| 143 | }; | ||
| 144 | |||
| 145 | module_pci_driver(i5500_temp_driver); | ||
| 146 | |||
| 147 | MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); | ||
| 148 | MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver"); | ||
| 149 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 57ecc5b204f3..9117b7a2d5f8 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -1114,7 +1114,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_ | |||
| 1114 | struct mlx4_dev *dev = to_mdev(qp->device)->dev; | 1114 | struct mlx4_dev *dev = to_mdev(qp->device)->dev; |
| 1115 | int err = 0; | 1115 | int err = 0; |
| 1116 | 1116 | ||
| 1117 | if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) | 1117 | if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || |
| 1118 | dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) | ||
| 1118 | return 0; /* do nothing */ | 1119 | return 0; /* do nothing */ |
| 1119 | 1120 | ||
| 1120 | ib_flow = flow_attr + 1; | 1121 | ib_flow = flow_attr + 1; |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index f2b978026407..77ecf6d32237 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
| @@ -1520,6 +1520,8 @@ static int elantech_set_properties(struct elantech_data *etd) | |||
| 1520 | case 7: | 1520 | case 7: |
| 1521 | case 8: | 1521 | case 8: |
| 1522 | case 9: | 1522 | case 9: |
| 1523 | case 10: | ||
| 1524 | case 13: | ||
| 1523 | etd->hw_version = 4; | 1525 | etd->hw_version = 4; |
| 1524 | break; | 1526 | break; |
| 1525 | default: | 1527 | default: |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index c66d1b53843e..764857b4e268 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
| @@ -415,6 +415,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | |||
| 415 | }, | 415 | }, |
| 416 | }, | 416 | }, |
| 417 | { | 417 | { |
| 418 | /* Acer Aspire 7738 */ | ||
| 419 | .matches = { | ||
| 420 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
| 421 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"), | ||
| 422 | }, | ||
| 423 | }, | ||
| 424 | { | ||
| 418 | /* Gericom Bellagio */ | 425 | /* Gericom Bellagio */ |
| 419 | .matches = { | 426 | .matches = { |
| 420 | DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), | 427 | DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), |
| @@ -745,6 +752,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { | |||
| 745 | { } | 752 | { } |
| 746 | }; | 753 | }; |
| 747 | 754 | ||
| 755 | /* | ||
| 756 | * Some laptops need keyboard reset before probing for the trackpad to get | ||
| 757 | * it detected, initialised & finally work. | ||
| 758 | */ | ||
| 759 | static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { | ||
| 760 | { | ||
| 761 | /* Gigabyte P35 v2 - Elantech touchpad */ | ||
| 762 | .matches = { | ||
| 763 | DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), | ||
| 764 | DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"), | ||
| 765 | }, | ||
| 766 | }, | ||
| 767 | { | ||
| 768 | /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */ | ||
| 769 | .matches = { | ||
| 770 | DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), | ||
| 771 | DMI_MATCH(DMI_PRODUCT_NAME, "X3"), | ||
| 772 | }, | ||
| 773 | }, | ||
| 774 | { | ||
| 775 | /* Gigabyte P34 - Elantech touchpad */ | ||
| 776 | .matches = { | ||
| 777 | DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), | ||
| 778 | DMI_MATCH(DMI_PRODUCT_NAME, "P34"), | ||
| 779 | }, | ||
| 780 | }, | ||
| 781 | { } | ||
| 782 | }; | ||
| 783 | |||
| 748 | #endif /* CONFIG_X86 */ | 784 | #endif /* CONFIG_X86 */ |
| 749 | 785 | ||
| 750 | #ifdef CONFIG_PNP | 786 | #ifdef CONFIG_PNP |
| @@ -1040,6 +1076,9 @@ static int __init i8042_platform_init(void) | |||
| 1040 | if (dmi_check_system(i8042_dmi_dritek_table)) | 1076 | if (dmi_check_system(i8042_dmi_dritek_table)) |
| 1041 | i8042_dritek = true; | 1077 | i8042_dritek = true; |
| 1042 | 1078 | ||
| 1079 | if (dmi_check_system(i8042_dmi_kbdreset_table)) | ||
| 1080 | i8042_kbdreset = true; | ||
| 1081 | |||
| 1043 | /* | 1082 | /* |
| 1044 | * A20 was already enabled during early kernel init. But some buggy | 1083 | * A20 was already enabled during early kernel init. But some buggy |
| 1045 | * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to | 1084 | * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to |
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 924e4bf357fb..986a71c614b0 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c | |||
| @@ -67,6 +67,10 @@ static bool i8042_notimeout; | |||
| 67 | module_param_named(notimeout, i8042_notimeout, bool, 0); | 67 | module_param_named(notimeout, i8042_notimeout, bool, 0); |
| 68 | MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042"); | 68 | MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042"); |
| 69 | 69 | ||
| 70 | static bool i8042_kbdreset; | ||
| 71 | module_param_named(kbdreset, i8042_kbdreset, bool, 0); | ||
| 72 | MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port"); | ||
| 73 | |||
| 70 | #ifdef CONFIG_X86 | 74 | #ifdef CONFIG_X86 |
| 71 | static bool i8042_dritek; | 75 | static bool i8042_dritek; |
| 72 | module_param_named(dritek, i8042_dritek, bool, 0); | 76 | module_param_named(dritek, i8042_dritek, bool, 0); |
| @@ -790,6 +794,16 @@ static int __init i8042_check_aux(void) | |||
| 790 | return -1; | 794 | return -1; |
| 791 | 795 | ||
| 792 | /* | 796 | /* |
| 797 | * Reset keyboard (needed on some laptops to successfully detect | ||
| 798 | * touchpad, e.g., some Gigabyte laptop models with Elantech | ||
| 799 | * touchpads). | ||
| 800 | */ | ||
| 801 | if (i8042_kbdreset) { | ||
| 802 | pr_warn("Attempting to reset device connected to KBD port\n"); | ||
| 803 | i8042_kbd_write(NULL, (unsigned char) 0xff); | ||
| 804 | } | ||
| 805 | |||
| 806 | /* | ||
| 793 | * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and | 807 | * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and |
| 794 | * used it for a PCI card or somethig else. | 808 | * used it for a PCI card or somethig else. |
| 795 | */ | 809 | */ |
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index f722a0c466cf..c48da057dbb1 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c | |||
| @@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = { | |||
| 315 | .attach_dev = gart_iommu_attach_dev, | 315 | .attach_dev = gart_iommu_attach_dev, |
| 316 | .detach_dev = gart_iommu_detach_dev, | 316 | .detach_dev = gart_iommu_detach_dev, |
| 317 | .map = gart_iommu_map, | 317 | .map = gart_iommu_map, |
| 318 | .map_sg = default_iommu_map_sg, | ||
| 318 | .unmap = gart_iommu_unmap, | 319 | .unmap = gart_iommu_unmap, |
| 319 | .iova_to_phys = gart_iommu_iova_to_phys, | 320 | .iova_to_phys = gart_iommu_iova_to_phys, |
| 320 | .pgsize_bitmap = GART_IOMMU_PGSIZES, | 321 | .pgsize_bitmap = GART_IOMMU_PGSIZES, |
| @@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev) | |||
| 395 | do_gart_setup(gart, NULL); | 396 | do_gart_setup(gart, NULL); |
| 396 | 397 | ||
| 397 | gart_handle = gart; | 398 | gart_handle = gart; |
| 398 | bus_set_iommu(&platform_bus_type, &gart_iommu_ops); | 399 | |
| 399 | return 0; | 400 | return 0; |
| 400 | } | 401 | } |
| 401 | 402 | ||
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index d111ac779c40..63cd031b2c28 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | #define AT91_AIC_IRQ_MIN_PRIORITY 0 | 28 | #define AT91_AIC_IRQ_MIN_PRIORITY 0 |
| 29 | #define AT91_AIC_IRQ_MAX_PRIORITY 7 | 29 | #define AT91_AIC_IRQ_MAX_PRIORITY 7 |
| 30 | 30 | ||
| 31 | #define AT91_AIC_SRCTYPE GENMASK(7, 6) | 31 | #define AT91_AIC_SRCTYPE GENMASK(6, 5) |
| 32 | #define AT91_AIC_SRCTYPE_LOW (0 << 5) | 32 | #define AT91_AIC_SRCTYPE_LOW (0 << 5) |
| 33 | #define AT91_AIC_SRCTYPE_FALLING (1 << 5) | 33 | #define AT91_AIC_SRCTYPE_FALLING (1 << 5) |
| 34 | #define AT91_AIC_SRCTYPE_HIGH (2 << 5) | 34 | #define AT91_AIC_SRCTYPE_HIGH (2 << 5) |
| @@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val) | |||
| 74 | return -EINVAL; | 74 | return -EINVAL; |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | *val &= AT91_AIC_SRCTYPE; | 77 | *val &= ~AT91_AIC_SRCTYPE; |
| 78 | *val |= aic_type; | 78 | *val |= aic_type; |
| 79 | 79 | ||
| 80 | return 0; | 80 | return 0; |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 86e4684adeb1..d8996bdf0f61 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
| 1053 | * of two entries. No, the architecture doesn't let you | 1053 | * of two entries. No, the architecture doesn't let you |
| 1054 | * express an ITT with a single entry. | 1054 | * express an ITT with a single entry. |
| 1055 | */ | 1055 | */ |
| 1056 | nr_ites = max(2, roundup_pow_of_two(nvecs)); | 1056 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); |
| 1057 | sz = nr_ites * its->ite_size; | 1057 | sz = nr_ites * its->ite_size; |
| 1058 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; | 1058 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
| 1059 | itt = kmalloc(sz, GFP_KERNEL); | 1059 | itt = kmalloc(sz, GFP_KERNEL); |
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 29b8f21b74d0..6bc2deb73d53 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c | |||
| @@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent) | |||
| 381 | * It will be refined as each CPU probes its ID. | 381 | * It will be refined as each CPU probes its ID. |
| 382 | */ | 382 | */ |
| 383 | for (i = 0; i < NR_HIP04_CPU_IF; i++) | 383 | for (i = 0; i < NR_HIP04_CPU_IF; i++) |
| 384 | hip04_cpu_map[i] = 0xff; | 384 | hip04_cpu_map[i] = 0xffff; |
| 385 | 385 | ||
| 386 | /* | 386 | /* |
| 387 | * Find out how many interrupts are supported. | 387 | * Find out how many interrupts are supported. |
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 7e342df6a62f..0b0d2c00a2df 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c | |||
| @@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node, | |||
| 137 | return -ENOMEM; | 137 | return -ENOMEM; |
| 138 | 138 | ||
| 139 | chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol"); | 139 | chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol"); |
| 140 | if (!chip_data->intpol_base) { | 140 | if (IS_ERR(chip_data->intpol_base)) { |
| 141 | pr_err("mtk_sysirq: unable to map sysirq register\n"); | 141 | pr_err("mtk_sysirq: unable to map sysirq register\n"); |
| 142 | ret = -ENOMEM; | 142 | ret = PTR_ERR(chip_data->intpol_base); |
| 143 | goto out_free; | 143 | goto out_free; |
| 144 | } | 144 | } |
| 145 | 145 | ||
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c index 28718d3e8281..c03f140acbae 100644 --- a/drivers/irqchip/irq-omap-intc.c +++ b/drivers/irqchip/irq-omap-intc.c | |||
| @@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node) | |||
| 263 | return ret; | 263 | return ret; |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static int __init omap_init_irq_legacy(u32 base) | 266 | static int __init omap_init_irq_legacy(u32 base, struct device_node *node) |
| 267 | { | 267 | { |
| 268 | int j, irq_base; | 268 | int j, irq_base; |
| 269 | 269 | ||
| @@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base) | |||
| 277 | irq_base = 0; | 277 | irq_base = 0; |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0, | 280 | domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0, |
| 281 | &irq_domain_simple_ops, NULL); | 281 | &irq_domain_simple_ops, NULL); |
| 282 | 282 | ||
| 283 | omap_irq_soft_reset(); | 283 | omap_irq_soft_reset(); |
| @@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node) | |||
| 301 | { | 301 | { |
| 302 | int ret; | 302 | int ret; |
| 303 | 303 | ||
| 304 | if (node) | 304 | /* |
| 305 | * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c | ||
| 306 | * depends is still not ready for linear IRQ domains; because of that | ||
| 307 | * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using | ||
| 308 | * linear IRQ Domain until that driver is finally fixed. | ||
| 309 | */ | ||
| 310 | if (of_device_is_compatible(node, "ti,omap2-intc") || | ||
| 311 | of_device_is_compatible(node, "ti,omap3-intc")) { | ||
| 312 | struct resource res; | ||
| 313 | |||
| 314 | if (of_address_to_resource(node, 0, &res)) | ||
| 315 | return -ENOMEM; | ||
| 316 | |||
| 317 | base = res.start; | ||
| 318 | ret = omap_init_irq_legacy(base, node); | ||
| 319 | } else if (node) { | ||
| 305 | ret = omap_init_irq_of(node); | 320 | ret = omap_init_irq_of(node); |
| 306 | else | 321 | } else { |
| 307 | ret = omap_init_irq_legacy(base); | 322 | ret = omap_init_irq_legacy(base, NULL); |
| 323 | } | ||
| 308 | 324 | ||
| 309 | if (ret == 0) | 325 | if (ret == 0) |
| 310 | omap_irq_enable_protection(); | 326 | omap_irq_enable_protection(); |
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 9fc616c2755e..c1c010498a21 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c | |||
| @@ -94,6 +94,9 @@ struct cache_disk_superblock { | |||
| 94 | } __packed; | 94 | } __packed; |
| 95 | 95 | ||
| 96 | struct dm_cache_metadata { | 96 | struct dm_cache_metadata { |
| 97 | atomic_t ref_count; | ||
| 98 | struct list_head list; | ||
| 99 | |||
| 97 | struct block_device *bdev; | 100 | struct block_device *bdev; |
| 98 | struct dm_block_manager *bm; | 101 | struct dm_block_manager *bm; |
| 99 | struct dm_space_map *metadata_sm; | 102 | struct dm_space_map *metadata_sm; |
| @@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags) | |||
| 669 | 672 | ||
| 670 | /*----------------------------------------------------------------*/ | 673 | /*----------------------------------------------------------------*/ |
| 671 | 674 | ||
| 672 | struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, | 675 | static struct dm_cache_metadata *metadata_open(struct block_device *bdev, |
| 673 | sector_t data_block_size, | 676 | sector_t data_block_size, |
| 674 | bool may_format_device, | 677 | bool may_format_device, |
| 675 | size_t policy_hint_size) | 678 | size_t policy_hint_size) |
| 676 | { | 679 | { |
| 677 | int r; | 680 | int r; |
| 678 | struct dm_cache_metadata *cmd; | 681 | struct dm_cache_metadata *cmd; |
| @@ -680,9 +683,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, | |||
| 680 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | 683 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); |
| 681 | if (!cmd) { | 684 | if (!cmd) { |
| 682 | DMERR("could not allocate metadata struct"); | 685 | DMERR("could not allocate metadata struct"); |
| 683 | return NULL; | 686 | return ERR_PTR(-ENOMEM); |
| 684 | } | 687 | } |
| 685 | 688 | ||
| 689 | atomic_set(&cmd->ref_count, 1); | ||
| 686 | init_rwsem(&cmd->root_lock); | 690 | init_rwsem(&cmd->root_lock); |
| 687 | cmd->bdev = bdev; | 691 | cmd->bdev = bdev; |
| 688 | cmd->data_block_size = data_block_size; | 692 | cmd->data_block_size = data_block_size; |
| @@ -705,10 +709,96 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, | |||
| 705 | return cmd; | 709 | return cmd; |
| 706 | } | 710 | } |
| 707 | 711 | ||
| 712 | /* | ||
| 713 | * We keep a little list of ref counted metadata objects to prevent two | ||
| 714 | * different target instances creating separate bufio instances. This is | ||
| 715 | * an issue if a table is reloaded before the suspend. | ||
| 716 | */ | ||
| 717 | static DEFINE_MUTEX(table_lock); | ||
| 718 | static LIST_HEAD(table); | ||
| 719 | |||
| 720 | static struct dm_cache_metadata *lookup(struct block_device *bdev) | ||
| 721 | { | ||
| 722 | struct dm_cache_metadata *cmd; | ||
| 723 | |||
| 724 | list_for_each_entry(cmd, &table, list) | ||
| 725 | if (cmd->bdev == bdev) { | ||
| 726 | atomic_inc(&cmd->ref_count); | ||
| 727 | return cmd; | ||
| 728 | } | ||
| 729 | |||
| 730 | return NULL; | ||
| 731 | } | ||
| 732 | |||
| 733 | static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, | ||
| 734 | sector_t data_block_size, | ||
| 735 | bool may_format_device, | ||
| 736 | size_t policy_hint_size) | ||
| 737 | { | ||
| 738 | struct dm_cache_metadata *cmd, *cmd2; | ||
| 739 | |||
| 740 | mutex_lock(&table_lock); | ||
| 741 | cmd = lookup(bdev); | ||
| 742 | mutex_unlock(&table_lock); | ||
| 743 | |||
| 744 | if (cmd) | ||
| 745 | return cmd; | ||
| 746 | |||
| 747 | cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); | ||
| 748 | if (!IS_ERR(cmd)) { | ||
| 749 | mutex_lock(&table_lock); | ||
| 750 | cmd2 = lookup(bdev); | ||
| 751 | if (cmd2) { | ||
| 752 | mutex_unlock(&table_lock); | ||
| 753 | __destroy_persistent_data_objects(cmd); | ||
| 754 | kfree(cmd); | ||
| 755 | return cmd2; | ||
| 756 | } | ||
| 757 | list_add(&cmd->list, &table); | ||
| 758 | mutex_unlock(&table_lock); | ||
| 759 | } | ||
| 760 | |||
| 761 | return cmd; | ||
| 762 | } | ||
| 763 | |||
| 764 | static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size) | ||
| 765 | { | ||
| 766 | if (cmd->data_block_size != data_block_size) { | ||
| 767 | DMERR("data_block_size (%llu) different from that in metadata (%llu)\n", | ||
| 768 | (unsigned long long) data_block_size, | ||
| 769 | (unsigned long long) cmd->data_block_size); | ||
| 770 | return false; | ||
| 771 | } | ||
| 772 | |||
| 773 | return true; | ||
| 774 | } | ||
| 775 | |||
| 776 | struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, | ||
| 777 | sector_t data_block_size, | ||
| 778 | bool may_format_device, | ||
| 779 | size_t policy_hint_size) | ||
| 780 | { | ||
| 781 | struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, | ||
| 782 | may_format_device, policy_hint_size); | ||
| 783 | |||
| 784 | if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) { | ||
| 785 | dm_cache_metadata_close(cmd); | ||
| 786 | return ERR_PTR(-EINVAL); | ||
| 787 | } | ||
| 788 | |||
| 789 | return cmd; | ||
| 790 | } | ||
| 791 | |||
| 708 | void dm_cache_metadata_close(struct dm_cache_metadata *cmd) | 792 | void dm_cache_metadata_close(struct dm_cache_metadata *cmd) |
| 709 | { | 793 | { |
| 710 | __destroy_persistent_data_objects(cmd); | 794 | if (atomic_dec_and_test(&cmd->ref_count)) { |
| 711 | kfree(cmd); | 795 | mutex_lock(&table_lock); |
| 796 | list_del(&cmd->list); | ||
| 797 | mutex_unlock(&table_lock); | ||
| 798 | |||
| 799 | __destroy_persistent_data_objects(cmd); | ||
| 800 | kfree(cmd); | ||
| 801 | } | ||
| 712 | } | 802 | } |
| 713 | 803 | ||
| 714 | /* | 804 | /* |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1e96d7889f51..e1650539cc2f 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
| @@ -221,7 +221,13 @@ struct cache { | |||
| 221 | struct list_head need_commit_migrations; | 221 | struct list_head need_commit_migrations; |
| 222 | sector_t migration_threshold; | 222 | sector_t migration_threshold; |
| 223 | wait_queue_head_t migration_wait; | 223 | wait_queue_head_t migration_wait; |
| 224 | atomic_t nr_migrations; | 224 | atomic_t nr_allocated_migrations; |
| 225 | |||
| 226 | /* | ||
| 227 | * The number of in flight migrations that are performing | ||
| 228 | * background io. eg, promotion, writeback. | ||
| 229 | */ | ||
| 230 | atomic_t nr_io_migrations; | ||
| 225 | 231 | ||
| 226 | wait_queue_head_t quiescing_wait; | 232 | wait_queue_head_t quiescing_wait; |
| 227 | atomic_t quiescing; | 233 | atomic_t quiescing; |
| @@ -258,7 +264,6 @@ struct cache { | |||
| 258 | struct dm_deferred_set *all_io_ds; | 264 | struct dm_deferred_set *all_io_ds; |
| 259 | 265 | ||
| 260 | mempool_t *migration_pool; | 266 | mempool_t *migration_pool; |
| 261 | struct dm_cache_migration *next_migration; | ||
| 262 | 267 | ||
| 263 | struct dm_cache_policy *policy; | 268 | struct dm_cache_policy *policy; |
| 264 | unsigned policy_nr_args; | 269 | unsigned policy_nr_args; |
| @@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel | |||
| 350 | dm_bio_prison_free_cell(cache->prison, cell); | 355 | dm_bio_prison_free_cell(cache->prison, cell); |
| 351 | } | 356 | } |
| 352 | 357 | ||
| 358 | static struct dm_cache_migration *alloc_migration(struct cache *cache) | ||
| 359 | { | ||
| 360 | struct dm_cache_migration *mg; | ||
| 361 | |||
| 362 | mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); | ||
| 363 | if (mg) { | ||
| 364 | mg->cache = cache; | ||
| 365 | atomic_inc(&mg->cache->nr_allocated_migrations); | ||
| 366 | } | ||
| 367 | |||
| 368 | return mg; | ||
| 369 | } | ||
| 370 | |||
| 371 | static void free_migration(struct dm_cache_migration *mg) | ||
| 372 | { | ||
| 373 | if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations)) | ||
| 374 | wake_up(&mg->cache->migration_wait); | ||
| 375 | |||
| 376 | mempool_free(mg, mg->cache->migration_pool); | ||
| 377 | } | ||
| 378 | |||
| 353 | static int prealloc_data_structs(struct cache *cache, struct prealloc *p) | 379 | static int prealloc_data_structs(struct cache *cache, struct prealloc *p) |
| 354 | { | 380 | { |
| 355 | if (!p->mg) { | 381 | if (!p->mg) { |
| 356 | p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); | 382 | p->mg = alloc_migration(cache); |
| 357 | if (!p->mg) | 383 | if (!p->mg) |
| 358 | return -ENOMEM; | 384 | return -ENOMEM; |
| 359 | } | 385 | } |
| @@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p) | |||
| 382 | free_prison_cell(cache, p->cell1); | 408 | free_prison_cell(cache, p->cell1); |
| 383 | 409 | ||
| 384 | if (p->mg) | 410 | if (p->mg) |
| 385 | mempool_free(p->mg, cache->migration_pool); | 411 | free_migration(p->mg); |
| 386 | } | 412 | } |
| 387 | 413 | ||
| 388 | static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) | 414 | static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) |
| @@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, | |||
| 854 | * Migration covers moving data from the origin device to the cache, or | 880 | * Migration covers moving data from the origin device to the cache, or |
| 855 | * vice versa. | 881 | * vice versa. |
| 856 | *--------------------------------------------------------------*/ | 882 | *--------------------------------------------------------------*/ |
| 857 | static void free_migration(struct dm_cache_migration *mg) | 883 | static void inc_io_migrations(struct cache *cache) |
| 858 | { | ||
| 859 | mempool_free(mg, mg->cache->migration_pool); | ||
| 860 | } | ||
| 861 | |||
| 862 | static void inc_nr_migrations(struct cache *cache) | ||
| 863 | { | 884 | { |
| 864 | atomic_inc(&cache->nr_migrations); | 885 | atomic_inc(&cache->nr_io_migrations); |
| 865 | } | 886 | } |
| 866 | 887 | ||
| 867 | static void dec_nr_migrations(struct cache *cache) | 888 | static void dec_io_migrations(struct cache *cache) |
| 868 | { | 889 | { |
| 869 | atomic_dec(&cache->nr_migrations); | 890 | atomic_dec(&cache->nr_io_migrations); |
| 870 | |||
| 871 | /* | ||
| 872 | * Wake the worker in case we're suspending the target. | ||
| 873 | */ | ||
| 874 | wake_up(&cache->migration_wait); | ||
| 875 | } | 891 | } |
| 876 | 892 | ||
| 877 | static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, | 893 | static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, |
| @@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, | |||
| 894 | wake_worker(cache); | 910 | wake_worker(cache); |
| 895 | } | 911 | } |
| 896 | 912 | ||
| 897 | static void cleanup_migration(struct dm_cache_migration *mg) | 913 | static void free_io_migration(struct dm_cache_migration *mg) |
| 898 | { | 914 | { |
| 899 | struct cache *cache = mg->cache; | 915 | dec_io_migrations(mg->cache); |
| 900 | free_migration(mg); | 916 | free_migration(mg); |
| 901 | dec_nr_migrations(cache); | ||
| 902 | } | 917 | } |
| 903 | 918 | ||
| 904 | static void migration_failure(struct dm_cache_migration *mg) | 919 | static void migration_failure(struct dm_cache_migration *mg) |
| @@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg) | |||
| 923 | cell_defer(cache, mg->new_ocell, true); | 938 | cell_defer(cache, mg->new_ocell, true); |
| 924 | } | 939 | } |
| 925 | 940 | ||
| 926 | cleanup_migration(mg); | 941 | free_io_migration(mg); |
| 927 | } | 942 | } |
| 928 | 943 | ||
| 929 | static void migration_success_pre_commit(struct dm_cache_migration *mg) | 944 | static void migration_success_pre_commit(struct dm_cache_migration *mg) |
| @@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) | |||
| 934 | if (mg->writeback) { | 949 | if (mg->writeback) { |
| 935 | clear_dirty(cache, mg->old_oblock, mg->cblock); | 950 | clear_dirty(cache, mg->old_oblock, mg->cblock); |
| 936 | cell_defer(cache, mg->old_ocell, false); | 951 | cell_defer(cache, mg->old_ocell, false); |
| 937 | cleanup_migration(mg); | 952 | free_io_migration(mg); |
| 938 | return; | 953 | return; |
| 939 | 954 | ||
| 940 | } else if (mg->demote) { | 955 | } else if (mg->demote) { |
| @@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) | |||
| 944 | mg->old_oblock); | 959 | mg->old_oblock); |
| 945 | if (mg->promote) | 960 | if (mg->promote) |
| 946 | cell_defer(cache, mg->new_ocell, true); | 961 | cell_defer(cache, mg->new_ocell, true); |
| 947 | cleanup_migration(mg); | 962 | free_io_migration(mg); |
| 948 | return; | 963 | return; |
| 949 | } | 964 | } |
| 950 | } else { | 965 | } else { |
| 951 | if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { | 966 | if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { |
| 952 | DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); | 967 | DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); |
| 953 | policy_remove_mapping(cache->policy, mg->new_oblock); | 968 | policy_remove_mapping(cache->policy, mg->new_oblock); |
| 954 | cleanup_migration(mg); | 969 | free_io_migration(mg); |
| 955 | return; | 970 | return; |
| 956 | } | 971 | } |
| 957 | } | 972 | } |
| @@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) | |||
| 984 | } else { | 999 | } else { |
| 985 | if (mg->invalidate) | 1000 | if (mg->invalidate) |
| 986 | policy_remove_mapping(cache->policy, mg->old_oblock); | 1001 | policy_remove_mapping(cache->policy, mg->old_oblock); |
| 987 | cleanup_migration(mg); | 1002 | free_io_migration(mg); |
| 988 | } | 1003 | } |
| 989 | 1004 | ||
| 990 | } else { | 1005 | } else { |
| @@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) | |||
| 999 | bio_endio(mg->new_ocell->holder, 0); | 1014 | bio_endio(mg->new_ocell->holder, 0); |
| 1000 | cell_defer(cache, mg->new_ocell, false); | 1015 | cell_defer(cache, mg->new_ocell, false); |
| 1001 | } | 1016 | } |
| 1002 | cleanup_migration(mg); | 1017 | free_io_migration(mg); |
| 1003 | } | 1018 | } |
| 1004 | } | 1019 | } |
| 1005 | 1020 | ||
| @@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs, | |||
| 1251 | mg->new_ocell = cell; | 1266 | mg->new_ocell = cell; |
| 1252 | mg->start_jiffies = jiffies; | 1267 | mg->start_jiffies = jiffies; |
| 1253 | 1268 | ||
| 1254 | inc_nr_migrations(cache); | 1269 | inc_io_migrations(cache); |
| 1255 | quiesce_migration(mg); | 1270 | quiesce_migration(mg); |
| 1256 | } | 1271 | } |
| 1257 | 1272 | ||
| @@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs, | |||
| 1275 | mg->new_ocell = NULL; | 1290 | mg->new_ocell = NULL; |
| 1276 | mg->start_jiffies = jiffies; | 1291 | mg->start_jiffies = jiffies; |
| 1277 | 1292 | ||
| 1278 | inc_nr_migrations(cache); | 1293 | inc_io_migrations(cache); |
| 1279 | quiesce_migration(mg); | 1294 | quiesce_migration(mg); |
| 1280 | } | 1295 | } |
| 1281 | 1296 | ||
| @@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs, | |||
| 1302 | mg->new_ocell = new_ocell; | 1317 | mg->new_ocell = new_ocell; |
| 1303 | mg->start_jiffies = jiffies; | 1318 | mg->start_jiffies = jiffies; |
| 1304 | 1319 | ||
| 1305 | inc_nr_migrations(cache); | 1320 | inc_io_migrations(cache); |
| 1306 | quiesce_migration(mg); | 1321 | quiesce_migration(mg); |
| 1307 | } | 1322 | } |
| 1308 | 1323 | ||
| @@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs, | |||
| 1330 | mg->new_ocell = NULL; | 1345 | mg->new_ocell = NULL; |
| 1331 | mg->start_jiffies = jiffies; | 1346 | mg->start_jiffies = jiffies; |
| 1332 | 1347 | ||
| 1333 | inc_nr_migrations(cache); | 1348 | inc_io_migrations(cache); |
| 1334 | quiesce_migration(mg); | 1349 | quiesce_migration(mg); |
| 1335 | } | 1350 | } |
| 1336 | 1351 | ||
| @@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs, | |||
| 1412 | 1427 | ||
| 1413 | static bool spare_migration_bandwidth(struct cache *cache) | 1428 | static bool spare_migration_bandwidth(struct cache *cache) |
| 1414 | { | 1429 | { |
| 1415 | sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) * | 1430 | sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * |
| 1416 | cache->sectors_per_block; | 1431 | cache->sectors_per_block; |
| 1417 | return current_volume < cache->migration_threshold; | 1432 | return current_volume < cache->migration_threshold; |
| 1418 | } | 1433 | } |
| @@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache) | |||
| 1764 | 1779 | ||
| 1765 | static void wait_for_migrations(struct cache *cache) | 1780 | static void wait_for_migrations(struct cache *cache) |
| 1766 | { | 1781 | { |
| 1767 | wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations)); | 1782 | wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); |
| 1768 | } | 1783 | } |
| 1769 | 1784 | ||
| 1770 | static void stop_worker(struct cache *cache) | 1785 | static void stop_worker(struct cache *cache) |
| @@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache) | |||
| 1876 | { | 1891 | { |
| 1877 | unsigned i; | 1892 | unsigned i; |
| 1878 | 1893 | ||
| 1879 | if (cache->next_migration) | ||
| 1880 | mempool_free(cache->next_migration, cache->migration_pool); | ||
| 1881 | |||
| 1882 | if (cache->migration_pool) | 1894 | if (cache->migration_pool) |
| 1883 | mempool_destroy(cache->migration_pool); | 1895 | mempool_destroy(cache->migration_pool); |
| 1884 | 1896 | ||
| @@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
| 2424 | INIT_LIST_HEAD(&cache->quiesced_migrations); | 2436 | INIT_LIST_HEAD(&cache->quiesced_migrations); |
| 2425 | INIT_LIST_HEAD(&cache->completed_migrations); | 2437 | INIT_LIST_HEAD(&cache->completed_migrations); |
| 2426 | INIT_LIST_HEAD(&cache->need_commit_migrations); | 2438 | INIT_LIST_HEAD(&cache->need_commit_migrations); |
| 2427 | atomic_set(&cache->nr_migrations, 0); | 2439 | atomic_set(&cache->nr_allocated_migrations, 0); |
| 2440 | atomic_set(&cache->nr_io_migrations, 0); | ||
| 2428 | init_waitqueue_head(&cache->migration_wait); | 2441 | init_waitqueue_head(&cache->migration_wait); |
| 2429 | 2442 | ||
| 2430 | init_waitqueue_head(&cache->quiescing_wait); | 2443 | init_waitqueue_head(&cache->quiescing_wait); |
| @@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
| 2487 | goto bad; | 2500 | goto bad; |
| 2488 | } | 2501 | } |
| 2489 | 2502 | ||
| 2490 | cache->next_migration = NULL; | ||
| 2491 | |||
| 2492 | cache->need_tick_bio = true; | 2503 | cache->need_tick_bio = true; |
| 2493 | cache->sized = false; | 2504 | cache->sized = false; |
| 2494 | cache->invalidate = false; | 2505 | cache->invalidate = false; |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 493478989dbd..07705ee181e3 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3385 | struct pool_c *pt = ti->private; | 3385 | struct pool_c *pt = ti->private; |
| 3386 | struct pool *pool = pt->pool; | 3386 | struct pool *pool = pt->pool; |
| 3387 | 3387 | ||
| 3388 | if (get_pool_mode(pool) >= PM_READ_ONLY) { | ||
| 3389 | DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", | ||
| 3390 | dm_device_name(pool->pool_md)); | ||
| 3391 | return -EINVAL; | ||
| 3392 | } | ||
| 3393 | |||
| 3388 | if (!strcasecmp(argv[0], "create_thin")) | 3394 | if (!strcasecmp(argv[0], "create_thin")) |
| 3389 | r = process_create_thin_mesg(argc, argv, pool); | 3395 | r = process_create_thin_mesg(argc, argv, pool); |
| 3390 | 3396 | ||
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b98cd9d84435..2caf5b374649 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -206,6 +206,9 @@ struct mapped_device { | |||
| 206 | /* zero-length flush that will be cloned and submitted to targets */ | 206 | /* zero-length flush that will be cloned and submitted to targets */ |
| 207 | struct bio flush_bio; | 207 | struct bio flush_bio; |
| 208 | 208 | ||
| 209 | /* the number of internal suspends */ | ||
| 210 | unsigned internal_suspend_count; | ||
| 211 | |||
| 209 | struct dm_stats stats; | 212 | struct dm_stats stats; |
| 210 | }; | 213 | }; |
| 211 | 214 | ||
| @@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla | |||
| 2928 | { | 2931 | { |
| 2929 | struct dm_table *map = NULL; | 2932 | struct dm_table *map = NULL; |
| 2930 | 2933 | ||
| 2931 | if (dm_suspended_internally_md(md)) | 2934 | if (md->internal_suspend_count++) |
| 2932 | return; /* nested internal suspend */ | 2935 | return; /* nested internal suspend */ |
| 2933 | 2936 | ||
| 2934 | if (dm_suspended_md(md)) { | 2937 | if (dm_suspended_md(md)) { |
| @@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla | |||
| 2953 | 2956 | ||
| 2954 | static void __dm_internal_resume(struct mapped_device *md) | 2957 | static void __dm_internal_resume(struct mapped_device *md) |
| 2955 | { | 2958 | { |
| 2956 | if (!dm_suspended_internally_md(md)) | 2959 | BUG_ON(!md->internal_suspend_count); |
| 2960 | |||
| 2961 | if (--md->internal_suspend_count) | ||
| 2957 | return; /* resume from nested internal suspend */ | 2962 | return; /* resume from nested internal suspend */ |
| 2958 | 2963 | ||
| 2959 | if (dm_suspended_md(md)) | 2964 | if (dm_suspended_md(md)) |
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c index db99ca2613ba..06931f6fa26c 100644 --- a/drivers/media/pci/cx23885/cx23885-cards.c +++ b/drivers/media/pci/cx23885/cx23885-cards.c | |||
| @@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = { | |||
| 614 | .portb = CX23885_MPEG_DVB, | 614 | .portb = CX23885_MPEG_DVB, |
| 615 | }, | 615 | }, |
| 616 | [CX23885_BOARD_HAUPPAUGE_HVR4400] = { | 616 | [CX23885_BOARD_HAUPPAUGE_HVR4400] = { |
| 617 | .name = "Hauppauge WinTV-HVR4400", | 617 | .name = "Hauppauge WinTV-HVR4400/HVR5500", |
| 618 | .porta = CX23885_ANALOG_VIDEO, | 618 | .porta = CX23885_ANALOG_VIDEO, |
| 619 | .portb = CX23885_MPEG_DVB, | 619 | .portb = CX23885_MPEG_DVB, |
| 620 | .portc = CX23885_MPEG_DVB, | 620 | .portc = CX23885_MPEG_DVB, |
| @@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = { | |||
| 622 | .tuner_addr = 0x60, /* 0xc0 >> 1 */ | 622 | .tuner_addr = 0x60, /* 0xc0 >> 1 */ |
| 623 | .tuner_bus = 1, | 623 | .tuner_bus = 1, |
| 624 | }, | 624 | }, |
| 625 | [CX23885_BOARD_HAUPPAUGE_STARBURST] = { | ||
| 626 | .name = "Hauppauge WinTV Starburst", | ||
| 627 | .portb = CX23885_MPEG_DVB, | ||
| 628 | }, | ||
| 625 | [CX23885_BOARD_AVERMEDIA_HC81R] = { | 629 | [CX23885_BOARD_AVERMEDIA_HC81R] = { |
| 626 | .name = "AVerTV Hybrid Express Slim HC81R", | 630 | .name = "AVerTV Hybrid Express Slim HC81R", |
| 627 | .tuner_type = TUNER_XC2028, | 631 | .tuner_type = TUNER_XC2028, |
| @@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = { | |||
| 936 | }, { | 940 | }, { |
| 937 | .subvendor = 0x0070, | 941 | .subvendor = 0x0070, |
| 938 | .subdevice = 0xc108, | 942 | .subdevice = 0xc108, |
| 939 | .card = CX23885_BOARD_HAUPPAUGE_HVR4400, | 943 | .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */ |
| 940 | }, { | 944 | }, { |
| 941 | .subvendor = 0x0070, | 945 | .subvendor = 0x0070, |
| 942 | .subdevice = 0xc138, | 946 | .subdevice = 0xc138, |
| 943 | .card = CX23885_BOARD_HAUPPAUGE_HVR4400, | 947 | .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */ |
| 944 | }, { | 948 | }, { |
| 945 | .subvendor = 0x0070, | 949 | .subvendor = 0x0070, |
| 946 | .subdevice = 0xc12a, | 950 | .subdevice = 0xc12a, |
| 947 | .card = CX23885_BOARD_HAUPPAUGE_HVR4400, | 951 | .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */ |
| 948 | }, { | 952 | }, { |
| 949 | .subvendor = 0x0070, | 953 | .subvendor = 0x0070, |
| 950 | .subdevice = 0xc1f8, | 954 | .subdevice = 0xc1f8, |
| 951 | .card = CX23885_BOARD_HAUPPAUGE_HVR4400, | 955 | .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */ |
| 952 | }, { | 956 | }, { |
| 953 | .subvendor = 0x1461, | 957 | .subvendor = 0x1461, |
| 954 | .subdevice = 0xd939, | 958 | .subdevice = 0xd939, |
| @@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev) | |||
| 1545 | cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/ | 1549 | cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/ |
| 1546 | break; | 1550 | break; |
| 1547 | case CX23885_BOARD_HAUPPAUGE_HVR4400: | 1551 | case CX23885_BOARD_HAUPPAUGE_HVR4400: |
| 1552 | case CX23885_BOARD_HAUPPAUGE_STARBURST: | ||
| 1548 | /* GPIO-8 tda10071 demod reset */ | 1553 | /* GPIO-8 tda10071 demod reset */ |
| 1549 | /* GPIO-9 si2165 demod reset */ | 1554 | /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/ |
| 1550 | 1555 | ||
| 1551 | /* Put the parts into reset and back */ | 1556 | /* Put the parts into reset and back */ |
| 1552 | cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1); | 1557 | cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1); |
| @@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) | |||
| 1872 | case CX23885_BOARD_HAUPPAUGE_HVR1850: | 1877 | case CX23885_BOARD_HAUPPAUGE_HVR1850: |
| 1873 | case CX23885_BOARD_HAUPPAUGE_HVR1290: | 1878 | case CX23885_BOARD_HAUPPAUGE_HVR1290: |
| 1874 | case CX23885_BOARD_HAUPPAUGE_HVR4400: | 1879 | case CX23885_BOARD_HAUPPAUGE_HVR4400: |
| 1880 | case CX23885_BOARD_HAUPPAUGE_STARBURST: | ||
| 1875 | case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE: | 1881 | case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE: |
| 1876 | if (dev->i2c_bus[0].i2c_rc == 0) | 1882 | if (dev->i2c_bus[0].i2c_rc == 0) |
| 1877 | hauppauge_eeprom(dev, eeprom+0xc0); | 1883 | hauppauge_eeprom(dev, eeprom+0xc0); |
| @@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev) | |||
| 1980 | ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ | 1986 | ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */ |
| 1981 | ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; | 1987 | ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; |
| 1982 | break; | 1988 | break; |
| 1989 | case CX23885_BOARD_HAUPPAUGE_STARBURST: | ||
| 1990 | ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */ | ||
| 1991 | ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */ | ||
| 1992 | ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO; | ||
| 1993 | break; | ||
| 1983 | case CX23885_BOARD_DVBSKY_T9580: | 1994 | case CX23885_BOARD_DVBSKY_T9580: |
| 1984 | case CX23885_BOARD_DVBSKY_T982: | 1995 | case CX23885_BOARD_DVBSKY_T982: |
| 1985 | ts1->gen_ctrl_val = 0x5; /* Parallel */ | 1996 | ts1->gen_ctrl_val = 0x5; /* Parallel */ |
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 1d9d0f86ca8c..1ad49946d7fa 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c | |||
| @@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev) | |||
| 2049 | 2049 | ||
| 2050 | cx23885_shutdown(dev); | 2050 | cx23885_shutdown(dev); |
| 2051 | 2051 | ||
| 2052 | pci_disable_device(pci_dev); | ||
| 2053 | |||
| 2054 | /* unregister stuff */ | 2052 | /* unregister stuff */ |
| 2055 | free_irq(pci_dev->irq, dev); | 2053 | free_irq(pci_dev->irq, dev); |
| 2056 | 2054 | ||
| 2055 | pci_disable_device(pci_dev); | ||
| 2056 | |||
| 2057 | cx23885_dev_unregister(dev); | 2057 | cx23885_dev_unregister(dev); |
| 2058 | vb2_dma_sg_cleanup_ctx(dev->alloc_ctx); | 2058 | vb2_dma_sg_cleanup_ctx(dev->alloc_ctx); |
| 2059 | v4l2_ctrl_handler_free(&dev->ctrl_handler); | 2059 | v4l2_ctrl_handler_free(&dev->ctrl_handler); |
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c index c47d18270cfc..a9c450d4b54e 100644 --- a/drivers/media/pci/cx23885/cx23885-dvb.c +++ b/drivers/media/pci/cx23885/cx23885-dvb.c | |||
| @@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port) | |||
| 1710 | break; | 1710 | break; |
| 1711 | } | 1711 | } |
| 1712 | break; | 1712 | break; |
| 1713 | case CX23885_BOARD_HAUPPAUGE_STARBURST: | ||
| 1714 | i2c_bus = &dev->i2c_bus[0]; | ||
| 1715 | fe0->dvb.frontend = dvb_attach(tda10071_attach, | ||
| 1716 | &hauppauge_tda10071_config, | ||
| 1717 | &i2c_bus->i2c_adap); | ||
| 1718 | if (fe0->dvb.frontend != NULL) { | ||
| 1719 | dvb_attach(a8293_attach, fe0->dvb.frontend, | ||
| 1720 | &i2c_bus->i2c_adap, | ||
| 1721 | &hauppauge_a8293_config); | ||
| 1722 | } | ||
| 1723 | break; | ||
| 1713 | case CX23885_BOARD_DVBSKY_T9580: | 1724 | case CX23885_BOARD_DVBSKY_T9580: |
| 1714 | case CX23885_BOARD_DVBSKY_S950: | 1725 | case CX23885_BOARD_DVBSKY_S950: |
| 1715 | i2c_bus = &dev->i2c_bus[0]; | 1726 | i2c_bus = &dev->i2c_bus[0]; |
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index f55cd12da0fd..36f2f96c40e4 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h | |||
| @@ -99,6 +99,7 @@ | |||
| 99 | #define CX23885_BOARD_DVBSKY_S950 49 | 99 | #define CX23885_BOARD_DVBSKY_S950 49 |
| 100 | #define CX23885_BOARD_DVBSKY_S952 50 | 100 | #define CX23885_BOARD_DVBSKY_S952 50 |
| 101 | #define CX23885_BOARD_DVBSKY_T982 51 | 101 | #define CX23885_BOARD_DVBSKY_T982 51 |
| 102 | #define CX23885_BOARD_HAUPPAUGE_STARBURST 52 | ||
| 102 | 103 | ||
| 103 | #define GPIO_0 0x00000001 | 104 | #define GPIO_0 0x00000001 |
| 104 | #define GPIO_1 0x00000002 | 105 | #define GPIO_1 0x00000002 |
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c index b463fe172d16..3fe9047ef466 100644 --- a/drivers/media/platform/omap3isp/ispvideo.c +++ b/drivers/media/platform/omap3isp/ispvideo.c | |||
| @@ -602,10 +602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap) | |||
| 602 | strlcpy(cap->card, video->video.name, sizeof(cap->card)); | 602 | strlcpy(cap->card, video->video.name, sizeof(cap->card)); |
| 603 | strlcpy(cap->bus_info, "media", sizeof(cap->bus_info)); | 603 | strlcpy(cap->bus_info, "media", sizeof(cap->bus_info)); |
| 604 | 604 | ||
| 605 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | ||
| 606 | | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS; | ||
| 607 | |||
| 605 | if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) | 608 | if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) |
| 606 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; | 609 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; |
| 607 | else | 610 | else |
| 608 | cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; | 611 | cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; |
| 609 | 612 | ||
| 610 | return 0; | 613 | return 0; |
| 611 | } | 614 | } |
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c index 8efe40337608..6d885239b16a 100644 --- a/drivers/media/platform/soc_camera/atmel-isi.c +++ b/drivers/media/platform/soc_camera/atmel-isi.c | |||
| @@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici, | |||
| 760 | { | 760 | { |
| 761 | strcpy(cap->driver, "atmel-isi"); | 761 | strcpy(cap->driver, "atmel-isi"); |
| 762 | strcpy(cap->card, "Atmel Image Sensor Interface"); | 762 | strcpy(cap->card, "Atmel Image Sensor Interface"); |
| 763 | cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE | | 763 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; |
| 764 | V4L2_CAP_STREAMING); | 764 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; |
| 765 | |||
| 765 | return 0; | 766 | return 0; |
| 766 | } | 767 | } |
| 767 | 768 | ||
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c index ce72bd26a6ac..192377f55840 100644 --- a/drivers/media/platform/soc_camera/mx2_camera.c +++ b/drivers/media/platform/soc_camera/mx2_camera.c | |||
| @@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici, | |||
| 1256 | { | 1256 | { |
| 1257 | /* cap->name is set by the friendly caller:-> */ | 1257 | /* cap->name is set by the friendly caller:-> */ |
| 1258 | strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card)); | 1258 | strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card)); |
| 1259 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; | 1259 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; |
| 1260 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; | ||
| 1260 | 1261 | ||
| 1261 | return 0; | 1262 | return 0; |
| 1262 | } | 1263 | } |
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c index a60c3bb0e4cc..0b3299dee05d 100644 --- a/drivers/media/platform/soc_camera/mx3_camera.c +++ b/drivers/media/platform/soc_camera/mx3_camera.c | |||
| @@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici, | |||
| 967 | { | 967 | { |
| 968 | /* cap->name is set by the firendly caller:-> */ | 968 | /* cap->name is set by the firendly caller:-> */ |
| 969 | strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card)); | 969 | strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card)); |
| 970 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; | 970 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; |
| 971 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; | ||
| 971 | 972 | ||
| 972 | return 0; | 973 | return 0; |
| 973 | } | 974 | } |
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c index e6b93281f246..16f65ecb70a3 100644 --- a/drivers/media/platform/soc_camera/omap1_camera.c +++ b/drivers/media/platform/soc_camera/omap1_camera.c | |||
| @@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici, | |||
| 1427 | { | 1427 | { |
| 1428 | /* cap->name is set by the friendly caller:-> */ | 1428 | /* cap->name is set by the friendly caller:-> */ |
| 1429 | strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card)); | 1429 | strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card)); |
| 1430 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; | 1430 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; |
| 1431 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; | ||
| 1431 | 1432 | ||
| 1432 | return 0; | 1433 | return 0; |
| 1433 | } | 1434 | } |
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c index 951226af0eba..8d6e343fec0f 100644 --- a/drivers/media/platform/soc_camera/pxa_camera.c +++ b/drivers/media/platform/soc_camera/pxa_camera.c | |||
| @@ -1576,7 +1576,8 @@ static int pxa_camera_querycap(struct soc_camera_host *ici, | |||
| 1576 | { | 1576 | { |
| 1577 | /* cap->name is set by the firendly caller:-> */ | 1577 | /* cap->name is set by the firendly caller:-> */ |
| 1578 | strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card)); | 1578 | strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card)); |
| 1579 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; | 1579 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; |
| 1580 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; | ||
| 1580 | 1581 | ||
| 1581 | return 0; | 1582 | return 0; |
| 1582 | } | 1583 | } |
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c index 0c1f55648106..9f1473c0a0cf 100644 --- a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c | |||
| @@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici, | |||
| 1799 | struct v4l2_capability *cap) | 1799 | struct v4l2_capability *cap) |
| 1800 | { | 1800 | { |
| 1801 | strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card)); | 1801 | strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card)); |
| 1802 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; | 1802 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; |
| 1803 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; | ||
| 1804 | |||
| 1803 | return 0; | 1805 | return 0; |
| 1804 | } | 1806 | } |
| 1805 | 1807 | ||
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c index 8b27b3eb2b25..71787702d4a2 100644 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c | |||
| @@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici, | |||
| 1652 | struct v4l2_capability *cap) | 1652 | struct v4l2_capability *cap) |
| 1653 | { | 1653 | { |
| 1654 | strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); | 1654 | strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); |
| 1655 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; | 1655 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; |
| 1656 | cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; | ||
| 1657 | |||
| 1656 | return 0; | 1658 | return 0; |
| 1657 | } | 1659 | } |
| 1658 | 1660 | ||
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index 0f345b1f9014..f327c49d7e09 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c | |||
| @@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = { | |||
| 2232 | { | 2232 | { |
| 2233 | "Mygica T230 DVB-T/T2/C", | 2233 | "Mygica T230 DVB-T/T2/C", |
| 2234 | { NULL }, | 2234 | { NULL }, |
| 2235 | { &cxusb_table[22], NULL }, | 2235 | { &cxusb_table[20], NULL }, |
| 2236 | }, | 2236 | }, |
| 2237 | } | 2237 | } |
| 2238 | }; | 2238 | }; |
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c index 1b158f1167ed..536210b39428 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c | |||
| @@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1}; | |||
| 89 | module_param_array(vbi_nr, int, NULL, 0444); | 89 | module_param_array(vbi_nr, int, NULL, 0444); |
| 90 | MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor"); | 90 | MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor"); |
| 91 | 91 | ||
| 92 | static struct v4l2_capability pvr_capability ={ | ||
| 93 | .driver = "pvrusb2", | ||
| 94 | .card = "Hauppauge WinTV pvr-usb2", | ||
| 95 | .bus_info = "usb", | ||
| 96 | .version = LINUX_VERSION_CODE, | ||
| 97 | .capabilities = (V4L2_CAP_VIDEO_CAPTURE | | ||
| 98 | V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | | ||
| 99 | V4L2_CAP_READWRITE), | ||
| 100 | }; | ||
| 101 | |||
| 102 | static struct v4l2_fmtdesc pvr_fmtdesc [] = { | 92 | static struct v4l2_fmtdesc pvr_fmtdesc [] = { |
| 103 | { | 93 | { |
| 104 | .index = 0, | 94 | .index = 0, |
| @@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability * | |||
| 160 | struct pvr2_v4l2_fh *fh = file->private_data; | 150 | struct pvr2_v4l2_fh *fh = file->private_data; |
| 161 | struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; | 151 | struct pvr2_hdw *hdw = fh->channel.mc_head->hdw; |
| 162 | 152 | ||
| 163 | memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability)); | 153 | strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver)); |
| 164 | strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw), | 154 | strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw), |
| 165 | sizeof(cap->bus_info)); | 155 | sizeof(cap->bus_info)); |
| 166 | strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card)); | 156 | strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card)); |
| 157 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | | ||
| 158 | V4L2_CAP_AUDIO | V4L2_CAP_RADIO | | ||
| 159 | V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS; | ||
| 160 | switch (fh->pdi->devbase.vfl_type) { | ||
| 161 | case VFL_TYPE_GRABBER: | ||
| 162 | cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO; | ||
| 163 | break; | ||
| 164 | case VFL_TYPE_RADIO: | ||
| 165 | cap->device_caps = V4L2_CAP_RADIO; | ||
| 166 | break; | ||
| 167 | } | ||
| 168 | cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE; | ||
| 167 | return 0; | 169 | return 0; |
| 168 | } | 170 | } |
| 169 | 171 | ||
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index d09a8916e940..bc08a829bc13 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
| @@ -3146,27 +3146,26 @@ static int vb2_thread(void *data) | |||
| 3146 | prequeue--; | 3146 | prequeue--; |
| 3147 | } else { | 3147 | } else { |
| 3148 | call_void_qop(q, wait_finish, q); | 3148 | call_void_qop(q, wait_finish, q); |
| 3149 | ret = vb2_internal_dqbuf(q, &fileio->b, 0); | 3149 | if (!threadio->stop) |
| 3150 | ret = vb2_internal_dqbuf(q, &fileio->b, 0); | ||
| 3150 | call_void_qop(q, wait_prepare, q); | 3151 | call_void_qop(q, wait_prepare, q); |
| 3151 | dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); | 3152 | dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); |
| 3152 | } | 3153 | } |
| 3153 | if (threadio->stop) | 3154 | if (ret || threadio->stop) |
| 3154 | break; | ||
| 3155 | if (ret) | ||
| 3156 | break; | 3155 | break; |
| 3157 | try_to_freeze(); | 3156 | try_to_freeze(); |
| 3158 | 3157 | ||
| 3159 | vb = q->bufs[fileio->b.index]; | 3158 | vb = q->bufs[fileio->b.index]; |
| 3160 | if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR)) | 3159 | if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR)) |
| 3161 | ret = threadio->fnc(vb, threadio->priv); | 3160 | if (threadio->fnc(vb, threadio->priv)) |
| 3162 | if (ret) | 3161 | break; |
| 3163 | break; | ||
| 3164 | call_void_qop(q, wait_finish, q); | 3162 | call_void_qop(q, wait_finish, q); |
| 3165 | if (set_timestamp) | 3163 | if (set_timestamp) |
| 3166 | v4l2_get_timestamp(&fileio->b.timestamp); | 3164 | v4l2_get_timestamp(&fileio->b.timestamp); |
| 3167 | ret = vb2_internal_qbuf(q, &fileio->b); | 3165 | if (!threadio->stop) |
| 3166 | ret = vb2_internal_qbuf(q, &fileio->b); | ||
| 3168 | call_void_qop(q, wait_prepare, q); | 3167 | call_void_qop(q, wait_prepare, q); |
| 3169 | if (ret) | 3168 | if (ret || threadio->stop) |
| 3170 | break; | 3169 | break; |
| 3171 | } | 3170 | } |
| 3172 | 3171 | ||
| @@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q) | |||
| 3235 | threadio->stop = true; | 3234 | threadio->stop = true; |
| 3236 | vb2_internal_streamoff(q, q->type); | 3235 | vb2_internal_streamoff(q, q->type); |
| 3237 | call_void_qop(q, wait_prepare, q); | 3236 | call_void_qop(q, wait_prepare, q); |
| 3237 | err = kthread_stop(threadio->thread); | ||
| 3238 | q->fileio = NULL; | 3238 | q->fileio = NULL; |
| 3239 | fileio->req.count = 0; | 3239 | fileio->req.count = 0; |
| 3240 | vb2_reqbufs(q, &fileio->req); | 3240 | vb2_reqbufs(q, &fileio->req); |
| 3241 | kfree(fileio); | 3241 | kfree(fileio); |
| 3242 | err = kthread_stop(threadio->thread); | ||
| 3243 | threadio->thread = NULL; | 3242 | threadio->thread = NULL; |
| 3244 | kfree(threadio); | 3243 | kfree(threadio); |
| 3245 | q->fileio = NULL; | 3244 | q->fileio = NULL; |
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c index 52a0c2f6264f..ae498b53ee40 100644 --- a/drivers/mfd/da9052-core.c +++ b/drivers/mfd/da9052-core.c | |||
| @@ -554,7 +554,8 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id) | |||
| 554 | return ret; | 554 | return ret; |
| 555 | } | 555 | } |
| 556 | 556 | ||
| 557 | ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info, | 557 | ret = mfd_add_devices(da9052->dev, PLATFORM_DEVID_AUTO, |
| 558 | da9052_subdev_info, | ||
| 558 | ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL); | 559 | ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL); |
| 559 | if (ret) { | 560 | if (ret) { |
| 560 | dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret); | 561 | dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret); |
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index dbdd0faeb6ce..210d1f85679e 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c | |||
| @@ -681,21 +681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) | |||
| 681 | #ifdef CONFIG_PM | 681 | #ifdef CONFIG_PM |
| 682 | static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) | 682 | static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) |
| 683 | { | 683 | { |
| 684 | struct rtsx_ucr *ucr = | ||
| 685 | (struct rtsx_ucr *)usb_get_intfdata(intf); | ||
| 686 | |||
| 687 | dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n", | 684 | dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n", |
| 688 | __func__, message.event); | 685 | __func__, message.event); |
| 689 | 686 | ||
| 690 | /* | ||
| 691 | * Call to make sure LED is off during suspend to save more power. | ||
| 692 | * It is NOT a permanent state and could be turned on anytime later. | ||
| 693 | * Thus no need to call turn_on when resunming. | ||
| 694 | */ | ||
| 695 | mutex_lock(&ucr->dev_mutex); | ||
| 696 | rtsx_usb_turn_off_led(ucr); | ||
| 697 | mutex_unlock(&ucr->dev_mutex); | ||
| 698 | |||
| 699 | return 0; | 687 | return 0; |
| 700 | } | 688 | } |
| 701 | 689 | ||
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index 0d256cb002eb..d6b764349f9d 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c | |||
| @@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg, | |||
| 125 | } | 125 | } |
| 126 | EXPORT_SYMBOL_GPL(tps65218_clear_bits); | 126 | EXPORT_SYMBOL_GPL(tps65218_clear_bits); |
| 127 | 127 | ||
| 128 | static const struct regmap_range tps65218_yes_ranges[] = { | ||
| 129 | regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2), | ||
| 130 | regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS), | ||
| 131 | }; | ||
| 132 | |||
| 133 | static const struct regmap_access_table tps65218_volatile_table = { | ||
| 134 | .yes_ranges = tps65218_yes_ranges, | ||
| 135 | .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges), | ||
| 136 | }; | ||
| 137 | |||
| 128 | static struct regmap_config tps65218_regmap_config = { | 138 | static struct regmap_config tps65218_regmap_config = { |
| 129 | .reg_bits = 8, | 139 | .reg_bits = 8, |
| 130 | .val_bits = 8, | 140 | .val_bits = 8, |
| 131 | .cache_type = REGCACHE_RBTREE, | 141 | .cache_type = REGCACHE_RBTREE, |
| 142 | .volatile_table = &tps65218_volatile_table, | ||
| 132 | }; | 143 | }; |
| 133 | 144 | ||
| 134 | static const struct regmap_irq tps65218_irqs[] = { | 145 | static const struct regmap_irq tps65218_irqs[] = { |
| @@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = { | |||
| 193 | 204 | ||
| 194 | .num_regs = 2, | 205 | .num_regs = 2, |
| 195 | .mask_base = TPS65218_REG_INT_MASK1, | 206 | .mask_base = TPS65218_REG_INT_MASK1, |
| 207 | .status_base = TPS65218_REG_INT1, | ||
| 196 | }; | 208 | }; |
| 197 | 209 | ||
| 198 | static const struct of_device_id of_tps65218_match_table[] = { | 210 | static const struct of_device_id of_tps65218_match_table[] = { |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index f94a9fa60488..c672c4dcffac 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
| @@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev) | |||
| 615 | 615 | ||
| 616 | c_can_irq_control(priv, false); | 616 | c_can_irq_control(priv, false); |
| 617 | 617 | ||
| 618 | /* put ctrl to init on stop to end ongoing transmission */ | ||
| 619 | priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT); | ||
| 620 | |||
| 618 | /* deactivate pins */ | 621 | /* deactivate pins */ |
| 619 | pinctrl_pm_select_sleep_state(dev->dev.parent); | 622 | pinctrl_pm_select_sleep_state(dev->dev.parent); |
| 620 | priv->can.state = CAN_STATE_STOPPED; | 623 | priv->can.state = CAN_STATE_STOPPED; |
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index f363972cd77d..e36d10520e24 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c | |||
| @@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable) | |||
| 103 | mask = 1 << raminit->bits.start | 1 << raminit->bits.done; | 103 | mask = 1 << raminit->bits.start | 1 << raminit->bits.done; |
| 104 | regmap_read(raminit->syscon, raminit->reg, &ctrl); | 104 | regmap_read(raminit->syscon, raminit->reg, &ctrl); |
| 105 | 105 | ||
| 106 | /* We clear the done and start bit first. The start bit is | 106 | /* We clear the start bit first. The start bit is |
| 107 | * looking at the 0 -> transition, but is not self clearing; | 107 | * looking at the 0 -> transition, but is not self clearing; |
| 108 | * And we clear the init done bit as well. | ||
| 109 | * NOTE: DONE must be written with 1 to clear it. | 108 | * NOTE: DONE must be written with 1 to clear it. |
| 109 | * We can't clear the DONE bit here using regmap_update_bits() | ||
| 110 | * as it will bypass the write if initial condition is START:0 DONE:1 | ||
| 111 | * e.g. on DRA7 which needs START pulse. | ||
| 110 | */ | 112 | */ |
| 111 | ctrl &= ~(1 << raminit->bits.start); | 113 | ctrl &= ~mask; /* START = 0, DONE = 0 */ |
| 112 | ctrl |= 1 << raminit->bits.done; | 114 | regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); |
| 113 | regmap_write(raminit->syscon, raminit->reg, ctrl); | ||
| 114 | 115 | ||
| 115 | ctrl &= ~(1 << raminit->bits.done); | 116 | /* check if START bit is 0. Ignore DONE bit for now |
| 116 | c_can_hw_raminit_wait_syscon(priv, mask, ctrl); | 117 | * as it can be either 0 or 1. |
| 118 | */ | ||
| 119 | c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl); | ||
| 117 | 120 | ||
| 118 | if (enable) { | 121 | if (enable) { |
| 119 | /* Set start bit and wait for the done bit. */ | 122 | /* Clear DONE bit & set START bit. */ |
| 120 | ctrl |= 1 << raminit->bits.start; | 123 | ctrl |= 1 << raminit->bits.start; |
| 121 | regmap_write(raminit->syscon, raminit->reg, ctrl); | 124 | /* DONE must be written with 1 to clear it */ |
| 122 | 125 | ctrl |= 1 << raminit->bits.done; | |
| 126 | regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); | ||
| 127 | /* prevent further clearing of DONE bit */ | ||
| 128 | ctrl &= ~(1 << raminit->bits.done); | ||
| 123 | /* clear START bit if start pulse is needed */ | 129 | /* clear START bit if start pulse is needed */ |
| 124 | if (raminit->needs_pulse) { | 130 | if (raminit->needs_pulse) { |
| 125 | ctrl &= ~(1 << raminit->bits.start); | 131 | ctrl &= ~(1 << raminit->bits.start); |
| 126 | regmap_write(raminit->syscon, raminit->reg, ctrl); | 132 | regmap_update_bits(raminit->syscon, raminit->reg, |
| 133 | mask, ctrl); | ||
| 127 | } | 134 | } |
| 128 | 135 | ||
| 129 | ctrl |= 1 << raminit->bits.done; | 136 | ctrl |= 1 << raminit->bits.done; |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3ec8f6f25e5f..847c1f813261 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
| @@ -807,10 +807,14 @@ static int can_changelink(struct net_device *dev, | |||
| 807 | if (dev->flags & IFF_UP) | 807 | if (dev->flags & IFF_UP) |
| 808 | return -EBUSY; | 808 | return -EBUSY; |
| 809 | cm = nla_data(data[IFLA_CAN_CTRLMODE]); | 809 | cm = nla_data(data[IFLA_CAN_CTRLMODE]); |
| 810 | if (cm->flags & ~priv->ctrlmode_supported) | 810 | |
| 811 | /* check whether changed bits are allowed to be modified */ | ||
| 812 | if (cm->mask & ~priv->ctrlmode_supported) | ||
| 811 | return -EOPNOTSUPP; | 813 | return -EOPNOTSUPP; |
| 814 | |||
| 815 | /* clear bits to be modified and copy the flag values */ | ||
| 812 | priv->ctrlmode &= ~cm->mask; | 816 | priv->ctrlmode &= ~cm->mask; |
| 813 | priv->ctrlmode |= cm->flags; | 817 | priv->ctrlmode |= (cm->flags & cm->mask); |
| 814 | 818 | ||
| 815 | /* CAN_CTRLMODE_FD can only be set when driver supports FD */ | 819 | /* CAN_CTRLMODE_FD can only be set when driver supports FD */ |
| 816 | if (priv->ctrlmode & CAN_CTRLMODE_FD) | 820 | if (priv->ctrlmode & CAN_CTRLMODE_FD) |
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index d7bc462aafdc..244529881be9 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c | |||
| @@ -955,6 +955,11 @@ static struct net_device *alloc_m_can_dev(void) | |||
| 955 | priv->can.data_bittiming_const = &m_can_data_bittiming_const; | 955 | priv->can.data_bittiming_const = &m_can_data_bittiming_const; |
| 956 | priv->can.do_set_mode = m_can_set_mode; | 956 | priv->can.do_set_mode = m_can_set_mode; |
| 957 | priv->can.do_get_berr_counter = m_can_get_berr_counter; | 957 | priv->can.do_get_berr_counter = m_can_get_berr_counter; |
| 958 | |||
| 959 | /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */ | ||
| 960 | priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; | ||
| 961 | |||
| 962 | /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */ | ||
| 958 | priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | | 963 | priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | |
| 959 | CAN_CTRLMODE_LISTENONLY | | 964 | CAN_CTRLMODE_LISTENONLY | |
| 960 | CAN_CTRLMODE_BERR_REPORTING | | 965 | CAN_CTRLMODE_BERR_REPORTING | |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 541fb7a05625..7af379ca861b 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
| @@ -520,10 +520,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, | |||
| 520 | skb = alloc_can_err_skb(priv->netdev, &cf); | 520 | skb = alloc_can_err_skb(priv->netdev, &cf); |
| 521 | if (skb) { | 521 | if (skb) { |
| 522 | cf->can_id |= CAN_ERR_RESTARTED; | 522 | cf->can_id |= CAN_ERR_RESTARTED; |
| 523 | netif_rx(skb); | ||
| 524 | 523 | ||
| 525 | stats->rx_packets++; | 524 | stats->rx_packets++; |
| 526 | stats->rx_bytes += cf->can_dlc; | 525 | stats->rx_bytes += cf->can_dlc; |
| 526 | netif_rx(skb); | ||
| 527 | } else { | 527 | } else { |
| 528 | netdev_err(priv->netdev, | 528 | netdev_err(priv->netdev, |
| 529 | "No memory left for err_skb\n"); | 529 | "No memory left for err_skb\n"); |
| @@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, | |||
| 587 | usb_sndbulkpipe(dev->udev, | 587 | usb_sndbulkpipe(dev->udev, |
| 588 | dev->bulk_out->bEndpointAddress), | 588 | dev->bulk_out->bEndpointAddress), |
| 589 | buf, msg->len, | 589 | buf, msg->len, |
| 590 | kvaser_usb_simple_msg_callback, priv); | 590 | kvaser_usb_simple_msg_callback, netdev); |
| 591 | usb_anchor_urb(urb, &priv->tx_submitted); | 591 | usb_anchor_urb(urb, &priv->tx_submitted); |
| 592 | 592 | ||
| 593 | err = usb_submit_urb(urb, GFP_ATOMIC); | 593 | err = usb_submit_urb(urb, GFP_ATOMIC); |
| @@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
| 662 | priv = dev->nets[channel]; | 662 | priv = dev->nets[channel]; |
| 663 | stats = &priv->netdev->stats; | 663 | stats = &priv->netdev->stats; |
| 664 | 664 | ||
| 665 | if (status & M16C_STATE_BUS_RESET) { | ||
| 666 | kvaser_usb_unlink_tx_urbs(priv); | ||
| 667 | return; | ||
| 668 | } | ||
| 669 | |||
| 670 | skb = alloc_can_err_skb(priv->netdev, &cf); | 665 | skb = alloc_can_err_skb(priv->netdev, &cf); |
| 671 | if (!skb) { | 666 | if (!skb) { |
| 672 | stats->rx_dropped++; | 667 | stats->rx_dropped++; |
| @@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
| 677 | 672 | ||
| 678 | netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); | 673 | netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status); |
| 679 | 674 | ||
| 680 | if (status & M16C_STATE_BUS_OFF) { | 675 | if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { |
| 681 | cf->can_id |= CAN_ERR_BUSOFF; | 676 | cf->can_id |= CAN_ERR_BUSOFF; |
| 682 | 677 | ||
| 683 | priv->can.can_stats.bus_off++; | 678 | priv->can.can_stats.bus_off++; |
| @@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
| 703 | } | 698 | } |
| 704 | 699 | ||
| 705 | new_state = CAN_STATE_ERROR_PASSIVE; | 700 | new_state = CAN_STATE_ERROR_PASSIVE; |
| 706 | } | 701 | } else if (status & M16C_STATE_BUS_ERROR) { |
| 707 | |||
| 708 | if (status == M16C_STATE_BUS_ERROR) { | ||
| 709 | if ((priv->can.state < CAN_STATE_ERROR_WARNING) && | 702 | if ((priv->can.state < CAN_STATE_ERROR_WARNING) && |
| 710 | ((txerr >= 96) || (rxerr >= 96))) { | 703 | ((txerr >= 96) || (rxerr >= 96))) { |
| 711 | cf->can_id |= CAN_ERR_CRTL; | 704 | cf->can_id |= CAN_ERR_CRTL; |
| @@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
| 715 | 708 | ||
| 716 | priv->can.can_stats.error_warning++; | 709 | priv->can.can_stats.error_warning++; |
| 717 | new_state = CAN_STATE_ERROR_WARNING; | 710 | new_state = CAN_STATE_ERROR_WARNING; |
| 718 | } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) { | 711 | } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) && |
| 712 | ((txerr < 96) && (rxerr < 96))) { | ||
| 719 | cf->can_id |= CAN_ERR_PROT; | 713 | cf->can_id |= CAN_ERR_PROT; |
| 720 | cf->data[2] = CAN_ERR_PROT_ACTIVE; | 714 | cf->data[2] = CAN_ERR_PROT_ACTIVE; |
| 721 | 715 | ||
| @@ -770,10 +764,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
| 770 | 764 | ||
| 771 | priv->can.state = new_state; | 765 | priv->can.state = new_state; |
| 772 | 766 | ||
| 773 | netif_rx(skb); | ||
| 774 | |||
| 775 | stats->rx_packets++; | 767 | stats->rx_packets++; |
| 776 | stats->rx_bytes += cf->can_dlc; | 768 | stats->rx_bytes += cf->can_dlc; |
| 769 | netif_rx(skb); | ||
| 777 | } | 770 | } |
| 778 | 771 | ||
| 779 | static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, | 772 | static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, |
| @@ -805,10 +798,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, | |||
| 805 | stats->rx_over_errors++; | 798 | stats->rx_over_errors++; |
| 806 | stats->rx_errors++; | 799 | stats->rx_errors++; |
| 807 | 800 | ||
| 808 | netif_rx(skb); | ||
| 809 | |||
| 810 | stats->rx_packets++; | 801 | stats->rx_packets++; |
| 811 | stats->rx_bytes += cf->can_dlc; | 802 | stats->rx_bytes += cf->can_dlc; |
| 803 | netif_rx(skb); | ||
| 812 | } | 804 | } |
| 813 | } | 805 | } |
| 814 | 806 | ||
| @@ -887,10 +879,9 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, | |||
| 887 | cf->can_dlc); | 879 | cf->can_dlc); |
| 888 | } | 880 | } |
| 889 | 881 | ||
| 890 | netif_rx(skb); | ||
| 891 | |||
| 892 | stats->rx_packets++; | 882 | stats->rx_packets++; |
| 893 | stats->rx_bytes += cf->can_dlc; | 883 | stats->rx_bytes += cf->can_dlc; |
| 884 | netif_rx(skb); | ||
| 894 | } | 885 | } |
| 895 | 886 | ||
| 896 | static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, | 887 | static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, |
| @@ -1246,6 +1237,9 @@ static int kvaser_usb_close(struct net_device *netdev) | |||
| 1246 | if (err) | 1237 | if (err) |
| 1247 | netdev_warn(netdev, "Cannot stop device, error %d\n", err); | 1238 | netdev_warn(netdev, "Cannot stop device, error %d\n", err); |
| 1248 | 1239 | ||
| 1240 | /* reset tx contexts */ | ||
| 1241 | kvaser_usb_unlink_tx_urbs(priv); | ||
| 1242 | |||
| 1249 | priv->can.state = CAN_STATE_STOPPED; | 1243 | priv->can.state = CAN_STATE_STOPPED; |
| 1250 | close_candev(priv->netdev); | 1244 | close_candev(priv->netdev); |
| 1251 | 1245 | ||
| @@ -1294,12 +1288,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1294 | if (!urb) { | 1288 | if (!urb) { |
| 1295 | netdev_err(netdev, "No memory left for URBs\n"); | 1289 | netdev_err(netdev, "No memory left for URBs\n"); |
| 1296 | stats->tx_dropped++; | 1290 | stats->tx_dropped++; |
| 1297 | goto nourbmem; | 1291 | dev_kfree_skb(skb); |
| 1292 | return NETDEV_TX_OK; | ||
| 1298 | } | 1293 | } |
| 1299 | 1294 | ||
| 1300 | buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); | 1295 | buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); |
| 1301 | if (!buf) { | 1296 | if (!buf) { |
| 1302 | stats->tx_dropped++; | 1297 | stats->tx_dropped++; |
| 1298 | dev_kfree_skb(skb); | ||
| 1303 | goto nobufmem; | 1299 | goto nobufmem; |
| 1304 | } | 1300 | } |
| 1305 | 1301 | ||
| @@ -1334,6 +1330,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1334 | } | 1330 | } |
| 1335 | } | 1331 | } |
| 1336 | 1332 | ||
| 1333 | /* This should never happen; it implies a flow control bug */ | ||
| 1337 | if (!context) { | 1334 | if (!context) { |
| 1338 | netdev_warn(netdev, "cannot find free context\n"); | 1335 | netdev_warn(netdev, "cannot find free context\n"); |
| 1339 | ret = NETDEV_TX_BUSY; | 1336 | ret = NETDEV_TX_BUSY; |
| @@ -1364,9 +1361,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, | |||
| 1364 | if (unlikely(err)) { | 1361 | if (unlikely(err)) { |
| 1365 | can_free_echo_skb(netdev, context->echo_index); | 1362 | can_free_echo_skb(netdev, context->echo_index); |
| 1366 | 1363 | ||
| 1367 | skb = NULL; /* set to NULL to avoid double free in | ||
| 1368 | * dev_kfree_skb(skb) */ | ||
| 1369 | |||
| 1370 | atomic_dec(&priv->active_tx_urbs); | 1364 | atomic_dec(&priv->active_tx_urbs); |
| 1371 | usb_unanchor_urb(urb); | 1365 | usb_unanchor_urb(urb); |
| 1372 | 1366 | ||
| @@ -1388,8 +1382,6 @@ releasebuf: | |||
| 1388 | kfree(buf); | 1382 | kfree(buf); |
| 1389 | nobufmem: | 1383 | nobufmem: |
| 1390 | usb_free_urb(urb); | 1384 | usb_free_urb(urb); |
| 1391 | nourbmem: | ||
| 1392 | dev_kfree_skb(skb); | ||
| 1393 | return ret; | 1385 | return ret; |
| 1394 | } | 1386 | } |
| 1395 | 1387 | ||
| @@ -1502,6 +1494,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf, | |||
| 1502 | struct kvaser_usb_net_priv *priv; | 1494 | struct kvaser_usb_net_priv *priv; |
| 1503 | int i, err; | 1495 | int i, err; |
| 1504 | 1496 | ||
| 1497 | err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); | ||
| 1498 | if (err) | ||
| 1499 | return err; | ||
| 1500 | |||
| 1505 | netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); | 1501 | netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); |
| 1506 | if (!netdev) { | 1502 | if (!netdev) { |
| 1507 | dev_err(&intf->dev, "Cannot alloc candev\n"); | 1503 | dev_err(&intf->dev, "Cannot alloc candev\n"); |
| @@ -1588,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
| 1588 | { | 1584 | { |
| 1589 | struct kvaser_usb *dev; | 1585 | struct kvaser_usb *dev; |
| 1590 | int err = -ENOMEM; | 1586 | int err = -ENOMEM; |
| 1591 | int i; | 1587 | int i, retry = 3; |
| 1592 | 1588 | ||
| 1593 | dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); | 1589 | dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); |
| 1594 | if (!dev) | 1590 | if (!dev) |
| @@ -1606,10 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf, | |||
| 1606 | 1602 | ||
| 1607 | usb_set_intfdata(intf, dev); | 1603 | usb_set_intfdata(intf, dev); |
| 1608 | 1604 | ||
| 1609 | for (i = 0; i < MAX_NET_DEVICES; i++) | 1605 | /* On some x86 laptops, plugging a Kvaser device again after |
| 1610 | kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i); | 1606 | * an unplug makes the firmware always ignore the very first |
| 1607 | * command. For such a case, provide some room for retries | ||
| 1608 | * instead of completely exiting the driver. | ||
| 1609 | */ | ||
| 1610 | do { | ||
| 1611 | err = kvaser_usb_get_software_info(dev); | ||
| 1612 | } while (--retry && err == -ETIMEDOUT); | ||
| 1611 | 1613 | ||
| 1612 | err = kvaser_usb_get_software_info(dev); | ||
| 1613 | if (err) { | 1614 | if (err) { |
| 1614 | dev_err(&intf->dev, | 1615 | dev_err(&intf->dev, |
| 1615 | "Cannot get software infos, error %d\n", err); | 1616 | "Cannot get software infos, error %d\n", err); |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 75b08c63d39f..29a09271b64a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | |||
| @@ -767,16 +767,17 @@ | |||
| 767 | #define MTL_Q_RQOMR 0x40 | 767 | #define MTL_Q_RQOMR 0x40 |
| 768 | #define MTL_Q_RQMPOCR 0x44 | 768 | #define MTL_Q_RQMPOCR 0x44 |
| 769 | #define MTL_Q_RQDR 0x4c | 769 | #define MTL_Q_RQDR 0x4c |
| 770 | #define MTL_Q_RQFCR 0x50 | ||
| 770 | #define MTL_Q_IER 0x70 | 771 | #define MTL_Q_IER 0x70 |
| 771 | #define MTL_Q_ISR 0x74 | 772 | #define MTL_Q_ISR 0x74 |
| 772 | 773 | ||
| 773 | /* MTL queue register entry bit positions and sizes */ | 774 | /* MTL queue register entry bit positions and sizes */ |
| 775 | #define MTL_Q_RQFCR_RFA_INDEX 1 | ||
| 776 | #define MTL_Q_RQFCR_RFA_WIDTH 6 | ||
| 777 | #define MTL_Q_RQFCR_RFD_INDEX 17 | ||
| 778 | #define MTL_Q_RQFCR_RFD_WIDTH 6 | ||
| 774 | #define MTL_Q_RQOMR_EHFC_INDEX 7 | 779 | #define MTL_Q_RQOMR_EHFC_INDEX 7 |
| 775 | #define MTL_Q_RQOMR_EHFC_WIDTH 1 | 780 | #define MTL_Q_RQOMR_EHFC_WIDTH 1 |
| 776 | #define MTL_Q_RQOMR_RFA_INDEX 8 | ||
| 777 | #define MTL_Q_RQOMR_RFA_WIDTH 3 | ||
| 778 | #define MTL_Q_RQOMR_RFD_INDEX 13 | ||
| 779 | #define MTL_Q_RQOMR_RFD_WIDTH 3 | ||
| 780 | #define MTL_Q_RQOMR_RQS_INDEX 16 | 781 | #define MTL_Q_RQOMR_RQS_INDEX 16 |
| 781 | #define MTL_Q_RQOMR_RQS_WIDTH 9 | 782 | #define MTL_Q_RQOMR_RQS_WIDTH 9 |
| 782 | #define MTL_Q_RQOMR_RSF_INDEX 5 | 783 | #define MTL_Q_RQOMR_RSF_INDEX 5 |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 53f5f66ec2ee..4c66cd1d1e60 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
| @@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) | |||
| 2079 | 2079 | ||
| 2080 | for (i = 0; i < pdata->rx_q_count; i++) { | 2080 | for (i = 0; i < pdata->rx_q_count; i++) { |
| 2081 | /* Activate flow control when less than 4k left in fifo */ | 2081 | /* Activate flow control when less than 4k left in fifo */ |
| 2082 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2); | 2082 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2); |
| 2083 | 2083 | ||
| 2084 | /* De-activate flow control when more than 6k left in fifo */ | 2084 | /* De-activate flow control when more than 6k left in fifo */ |
| 2085 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4); | 2085 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4); |
| 2086 | } | 2086 | } |
| 2087 | } | 2087 | } |
| 2088 | 2088 | ||
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 05c6af6c418f..3007d95fbb9f 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
| @@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight) | |||
| 1167 | bgmac->int_status = 0; | 1167 | bgmac->int_status = 0; |
| 1168 | } | 1168 | } |
| 1169 | 1169 | ||
| 1170 | if (handled < weight) | 1170 | if (handled < weight) { |
| 1171 | napi_complete(napi); | 1171 | napi_complete(napi); |
| 1172 | 1172 | bgmac_chip_intrs_on(bgmac); | |
| 1173 | bgmac_chip_intrs_on(bgmac); | 1173 | } |
| 1174 | 1174 | ||
| 1175 | return handled; | 1175 | return handled; |
| 1176 | } | 1176 | } |
| @@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core) | |||
| 1515 | if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) | 1515 | if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) |
| 1516 | bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); | 1516 | bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); |
| 1517 | 1517 | ||
| 1518 | netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); | ||
| 1519 | |||
| 1518 | err = bgmac_mii_register(bgmac); | 1520 | err = bgmac_mii_register(bgmac); |
| 1519 | if (err) { | 1521 | if (err) { |
| 1520 | bgmac_err(bgmac, "Cannot register MDIO\n"); | 1522 | bgmac_err(bgmac, "Cannot register MDIO\n"); |
| @@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core) | |||
| 1529 | 1531 | ||
| 1530 | netif_carrier_off(net_dev); | 1532 | netif_carrier_off(net_dev); |
| 1531 | 1533 | ||
| 1532 | netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); | ||
| 1533 | |||
| 1534 | return 0; | 1534 | return 0; |
| 1535 | 1535 | ||
| 1536 | err_mii_unregister: | 1536 | err_mii_unregister: |
| @@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core) | |||
| 1549 | { | 1549 | { |
| 1550 | struct bgmac *bgmac = bcma_get_drvdata(core); | 1550 | struct bgmac *bgmac = bcma_get_drvdata(core); |
| 1551 | 1551 | ||
| 1552 | netif_napi_del(&bgmac->napi); | ||
| 1553 | unregister_netdev(bgmac->net_dev); | 1552 | unregister_netdev(bgmac->net_dev); |
| 1554 | bgmac_mii_unregister(bgmac); | 1553 | bgmac_mii_unregister(bgmac); |
| 1554 | netif_napi_del(&bgmac->napi); | ||
| 1555 | bgmac_dma_free(bgmac); | 1555 | bgmac_dma_free(bgmac); |
| 1556 | bcma_set_drvdata(core, NULL); | 1556 | bcma_set_drvdata(core, NULL); |
| 1557 | free_netdev(bgmac->net_dev); | 1557 | free_netdev(bgmac->net_dev); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1d1147c93d59..e468ed3f210f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
| @@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget) | |||
| 3175 | } | 3175 | } |
| 3176 | #endif | 3176 | #endif |
| 3177 | if (!bnx2x_fp_lock_napi(fp)) | 3177 | if (!bnx2x_fp_lock_napi(fp)) |
| 3178 | return work_done; | 3178 | return budget; |
| 3179 | 3179 | ||
| 3180 | for_each_cos_in_tx_queue(fp, cos) | 3180 | for_each_cos_in_tx_queue(fp, cos) |
| 3181 | if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) | 3181 | if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b29e027c476e..e356afa44e7d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
| @@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
| 1335 | int err; | 1335 | int err; |
| 1336 | 1336 | ||
| 1337 | if (!enic_poll_lock_napi(&enic->rq[rq])) | 1337 | if (!enic_poll_lock_napi(&enic->rq[rq])) |
| 1338 | return work_done; | 1338 | return budget; |
| 1339 | /* Service RQ | 1339 | /* Service RQ |
| 1340 | */ | 1340 | */ |
| 1341 | 1341 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 41a0a5498da7..d48806b5cd88 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -4383,8 +4383,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
| 4383 | * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload | 4383 | * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload |
| 4384 | * is expected to work across all types of IP tunnels once exported. Skyhawk | 4384 | * is expected to work across all types of IP tunnels once exported. Skyhawk |
| 4385 | * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN | 4385 | * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN |
| 4386 | * offloads in hw_enc_features only when a VxLAN port is added. Note this only | 4386 | * offloads in hw_enc_features only when a VxLAN port is added. If other (non |
| 4387 | * ensures that other tunnels work fine while VxLAN offloads are not enabled. | 4387 | * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for |
| 4388 | * those other tunnels are unexported on the fly through ndo_features_check(). | ||
| 4388 | * | 4389 | * |
| 4389 | * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack | 4390 | * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack |
| 4390 | * adds more than one port, disable offloads and don't re-enable them again | 4391 | * adds more than one port, disable offloads and don't re-enable them again |
| @@ -4463,7 +4464,41 @@ static netdev_features_t be_features_check(struct sk_buff *skb, | |||
| 4463 | struct net_device *dev, | 4464 | struct net_device *dev, |
| 4464 | netdev_features_t features) | 4465 | netdev_features_t features) |
| 4465 | { | 4466 | { |
| 4466 | return vxlan_features_check(skb, features); | 4467 | struct be_adapter *adapter = netdev_priv(dev); |
| 4468 | u8 l4_hdr = 0; | ||
| 4469 | |||
| 4470 | /* The code below restricts offload features for some tunneled packets. | ||
| 4471 | * Offload features for normal (non tunnel) packets are unchanged. | ||
| 4472 | */ | ||
| 4473 | if (!skb->encapsulation || | ||
| 4474 | !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) | ||
| 4475 | return features; | ||
| 4476 | |||
| 4477 | /* It's an encapsulated packet and VxLAN offloads are enabled. We | ||
| 4478 | * should disable tunnel offload features if it's not a VxLAN packet, | ||
| 4479 | * as tunnel offloads have been enabled only for VxLAN. This is done to | ||
| 4480 | * allow other tunneled traffic like GRE work fine while VxLAN | ||
| 4481 | * offloads are configured in Skyhawk-R. | ||
| 4482 | */ | ||
| 4483 | switch (vlan_get_protocol(skb)) { | ||
| 4484 | case htons(ETH_P_IP): | ||
| 4485 | l4_hdr = ip_hdr(skb)->protocol; | ||
| 4486 | break; | ||
| 4487 | case htons(ETH_P_IPV6): | ||
| 4488 | l4_hdr = ipv6_hdr(skb)->nexthdr; | ||
| 4489 | break; | ||
| 4490 | default: | ||
| 4491 | return features; | ||
| 4492 | } | ||
| 4493 | |||
| 4494 | if (l4_hdr != IPPROTO_UDP || | ||
| 4495 | skb->inner_protocol_type != ENCAP_TYPE_ETHER || | ||
| 4496 | skb->inner_protocol != htons(ETH_P_TEB) || | ||
| 4497 | skb_inner_mac_header(skb) - skb_transport_header(skb) != | ||
| 4498 | sizeof(struct udphdr) + sizeof(struct vxlanhdr)) | ||
| 4499 | return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); | ||
| 4500 | |||
| 4501 | return features; | ||
| 4467 | } | 4502 | } |
| 4468 | #endif | 4503 | #endif |
| 4469 | 4504 | ||
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a62fc38f045e..1c75829eb166 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
| @@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4"; | |||
| 192 | #define IS_TSO_HEADER(txq, addr) \ | 192 | #define IS_TSO_HEADER(txq, addr) \ |
| 193 | ((addr >= txq->tso_hdrs_dma) && \ | 193 | ((addr >= txq->tso_hdrs_dma) && \ |
| 194 | (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) | 194 | (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) |
| 195 | |||
| 196 | #define DESC_DMA_MAP_SINGLE 0 | ||
| 197 | #define DESC_DMA_MAP_PAGE 1 | ||
| 198 | |||
| 195 | /* | 199 | /* |
| 196 | * RX/TX descriptors. | 200 | * RX/TX descriptors. |
| 197 | */ | 201 | */ |
| @@ -362,6 +366,7 @@ struct tx_queue { | |||
| 362 | dma_addr_t tso_hdrs_dma; | 366 | dma_addr_t tso_hdrs_dma; |
| 363 | 367 | ||
| 364 | struct tx_desc *tx_desc_area; | 368 | struct tx_desc *tx_desc_area; |
| 369 | char *tx_desc_mapping; /* array to track the type of the dma mapping */ | ||
| 365 | dma_addr_t tx_desc_dma; | 370 | dma_addr_t tx_desc_dma; |
| 366 | int tx_desc_area_size; | 371 | int tx_desc_area_size; |
| 367 | 372 | ||
| @@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, | |||
| 750 | if (txq->tx_curr_desc == txq->tx_ring_size) | 755 | if (txq->tx_curr_desc == txq->tx_ring_size) |
| 751 | txq->tx_curr_desc = 0; | 756 | txq->tx_curr_desc = 0; |
| 752 | desc = &txq->tx_desc_area[tx_index]; | 757 | desc = &txq->tx_desc_area[tx_index]; |
| 758 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; | ||
| 753 | 759 | ||
| 754 | desc->l4i_chk = 0; | 760 | desc->l4i_chk = 0; |
| 755 | desc->byte_cnt = length; | 761 | desc->byte_cnt = length; |
| @@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) | |||
| 879 | skb_frag_t *this_frag; | 885 | skb_frag_t *this_frag; |
| 880 | int tx_index; | 886 | int tx_index; |
| 881 | struct tx_desc *desc; | 887 | struct tx_desc *desc; |
| 882 | void *addr; | ||
| 883 | 888 | ||
| 884 | this_frag = &skb_shinfo(skb)->frags[frag]; | 889 | this_frag = &skb_shinfo(skb)->frags[frag]; |
| 885 | addr = page_address(this_frag->page.p) + this_frag->page_offset; | ||
| 886 | tx_index = txq->tx_curr_desc++; | 890 | tx_index = txq->tx_curr_desc++; |
| 887 | if (txq->tx_curr_desc == txq->tx_ring_size) | 891 | if (txq->tx_curr_desc == txq->tx_ring_size) |
| 888 | txq->tx_curr_desc = 0; | 892 | txq->tx_curr_desc = 0; |
| 889 | desc = &txq->tx_desc_area[tx_index]; | 893 | desc = &txq->tx_desc_area[tx_index]; |
| 894 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE; | ||
| 890 | 895 | ||
| 891 | /* | 896 | /* |
| 892 | * The last fragment will generate an interrupt | 897 | * The last fragment will generate an interrupt |
| @@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) | |||
| 902 | 907 | ||
| 903 | desc->l4i_chk = 0; | 908 | desc->l4i_chk = 0; |
| 904 | desc->byte_cnt = skb_frag_size(this_frag); | 909 | desc->byte_cnt = skb_frag_size(this_frag); |
| 905 | desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr, | 910 | desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, |
| 906 | desc->byte_cnt, DMA_TO_DEVICE); | 911 | this_frag, 0, desc->byte_cnt, |
| 912 | DMA_TO_DEVICE); | ||
| 907 | } | 913 | } |
| 908 | } | 914 | } |
| 909 | 915 | ||
| @@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb, | |||
| 936 | if (txq->tx_curr_desc == txq->tx_ring_size) | 942 | if (txq->tx_curr_desc == txq->tx_ring_size) |
| 937 | txq->tx_curr_desc = 0; | 943 | txq->tx_curr_desc = 0; |
| 938 | desc = &txq->tx_desc_area[tx_index]; | 944 | desc = &txq->tx_desc_area[tx_index]; |
| 945 | txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; | ||
| 939 | 946 | ||
| 940 | if (nr_frags) { | 947 | if (nr_frags) { |
| 941 | txq_submit_frag_skb(txq, skb); | 948 | txq_submit_frag_skb(txq, skb); |
| @@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
| 1047 | int tx_index; | 1054 | int tx_index; |
| 1048 | struct tx_desc *desc; | 1055 | struct tx_desc *desc; |
| 1049 | u32 cmd_sts; | 1056 | u32 cmd_sts; |
| 1057 | char desc_dma_map; | ||
| 1050 | 1058 | ||
| 1051 | tx_index = txq->tx_used_desc; | 1059 | tx_index = txq->tx_used_desc; |
| 1052 | desc = &txq->tx_desc_area[tx_index]; | 1060 | desc = &txq->tx_desc_area[tx_index]; |
| 1061 | desc_dma_map = txq->tx_desc_mapping[tx_index]; | ||
| 1062 | |||
| 1053 | cmd_sts = desc->cmd_sts; | 1063 | cmd_sts = desc->cmd_sts; |
| 1054 | 1064 | ||
| 1055 | if (cmd_sts & BUFFER_OWNED_BY_DMA) { | 1065 | if (cmd_sts & BUFFER_OWNED_BY_DMA) { |
| @@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
| 1065 | reclaimed++; | 1075 | reclaimed++; |
| 1066 | txq->tx_desc_count--; | 1076 | txq->tx_desc_count--; |
| 1067 | 1077 | ||
| 1068 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) | 1078 | if (!IS_TSO_HEADER(txq, desc->buf_ptr)) { |
| 1069 | dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr, | 1079 | |
| 1070 | desc->byte_cnt, DMA_TO_DEVICE); | 1080 | if (desc_dma_map == DESC_DMA_MAP_PAGE) |
| 1081 | dma_unmap_page(mp->dev->dev.parent, | ||
| 1082 | desc->buf_ptr, | ||
| 1083 | desc->byte_cnt, | ||
| 1084 | DMA_TO_DEVICE); | ||
| 1085 | else | ||
| 1086 | dma_unmap_single(mp->dev->dev.parent, | ||
| 1087 | desc->buf_ptr, | ||
| 1088 | desc->byte_cnt, | ||
| 1089 | DMA_TO_DEVICE); | ||
| 1090 | } | ||
| 1071 | 1091 | ||
| 1072 | if (cmd_sts & TX_ENABLE_INTERRUPT) { | 1092 | if (cmd_sts & TX_ENABLE_INTERRUPT) { |
| 1073 | struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); | 1093 | struct sk_buff *skb = __skb_dequeue(&txq->tx_skb); |
| @@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) | |||
| 1996 | struct tx_queue *txq = mp->txq + index; | 2016 | struct tx_queue *txq = mp->txq + index; |
| 1997 | struct tx_desc *tx_desc; | 2017 | struct tx_desc *tx_desc; |
| 1998 | int size; | 2018 | int size; |
| 2019 | int ret; | ||
| 1999 | int i; | 2020 | int i; |
| 2000 | 2021 | ||
| 2001 | txq->index = index; | 2022 | txq->index = index; |
| @@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) | |||
| 2048 | nexti * sizeof(struct tx_desc); | 2069 | nexti * sizeof(struct tx_desc); |
| 2049 | } | 2070 | } |
| 2050 | 2071 | ||
| 2072 | txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char), | ||
| 2073 | GFP_KERNEL); | ||
| 2074 | if (!txq->tx_desc_mapping) { | ||
| 2075 | ret = -ENOMEM; | ||
| 2076 | goto err_free_desc_area; | ||
| 2077 | } | ||
| 2078 | |||
| 2051 | /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ | 2079 | /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ |
| 2052 | txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, | 2080 | txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, |
| 2053 | txq->tx_ring_size * TSO_HEADER_SIZE, | 2081 | txq->tx_ring_size * TSO_HEADER_SIZE, |
| 2054 | &txq->tso_hdrs_dma, GFP_KERNEL); | 2082 | &txq->tso_hdrs_dma, GFP_KERNEL); |
| 2055 | if (txq->tso_hdrs == NULL) { | 2083 | if (txq->tso_hdrs == NULL) { |
| 2056 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | 2084 | ret = -ENOMEM; |
| 2057 | txq->tx_desc_area, txq->tx_desc_dma); | 2085 | goto err_free_desc_mapping; |
| 2058 | return -ENOMEM; | ||
| 2059 | } | 2086 | } |
| 2060 | skb_queue_head_init(&txq->tx_skb); | 2087 | skb_queue_head_init(&txq->tx_skb); |
| 2061 | 2088 | ||
| 2062 | return 0; | 2089 | return 0; |
| 2090 | |||
| 2091 | err_free_desc_mapping: | ||
| 2092 | kfree(txq->tx_desc_mapping); | ||
| 2093 | err_free_desc_area: | ||
| 2094 | if (index == 0 && size <= mp->tx_desc_sram_size) | ||
| 2095 | iounmap(txq->tx_desc_area); | ||
| 2096 | else | ||
| 2097 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | ||
| 2098 | txq->tx_desc_area, txq->tx_desc_dma); | ||
| 2099 | return ret; | ||
| 2063 | } | 2100 | } |
| 2064 | 2101 | ||
| 2065 | static void txq_deinit(struct tx_queue *txq) | 2102 | static void txq_deinit(struct tx_queue *txq) |
| @@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq) | |||
| 2077 | else | 2114 | else |
| 2078 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, | 2115 | dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, |
| 2079 | txq->tx_desc_area, txq->tx_desc_dma); | 2116 | txq->tx_desc_area, txq->tx_desc_dma); |
| 2117 | kfree(txq->tx_desc_mapping); | ||
| 2118 | |||
| 2080 | if (txq->tso_hdrs) | 2119 | if (txq->tso_hdrs) |
| 2081 | dma_free_coherent(mp->dev->dev.parent, | 2120 | dma_free_coherent(mp->dev->dev.parent, |
| 2082 | txq->tx_ring_size * TSO_HEADER_SIZE, | 2121 | txq->tx_ring_size * TSO_HEADER_SIZE, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d0d6dc1b8e46..ac6a8f1eea6c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad | |||
| 475 | { | 475 | { |
| 476 | int err; | 476 | int err; |
| 477 | 477 | ||
| 478 | if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) | 478 | if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || |
| 479 | priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) | ||
| 479 | return 0; /* do nothing */ | 480 | return 0; /* do nothing */ |
| 480 | 481 | ||
| 481 | err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, | 482 | err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 03e9eb0dc761..6e08352ec994 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -1744,8 +1744,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev, | |||
| 1744 | struct mlx4_dev_cap *dev_cap) | 1744 | struct mlx4_dev_cap *dev_cap) |
| 1745 | { | 1745 | { |
| 1746 | if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && | 1746 | if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && |
| 1747 | dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS && | 1747 | dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) |
| 1748 | dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) | ||
| 1749 | dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; | 1748 | dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; |
| 1750 | else | 1749 | else |
| 1751 | dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; | 1750 | dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; |
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index f5e4b820128b..db0c7a9aee60 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c | |||
| @@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp) | |||
| 6987 | if (sp->s2io_entries[i].in_use == MSIX_FLG) { | 6987 | if (sp->s2io_entries[i].in_use == MSIX_FLG) { |
| 6988 | if (sp->s2io_entries[i].type == | 6988 | if (sp->s2io_entries[i].type == |
| 6989 | MSIX_RING_TYPE) { | 6989 | MSIX_RING_TYPE) { |
| 6990 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | 6990 | snprintf(sp->desc[i], |
| 6991 | sizeof(sp->desc[i]), | ||
| 6992 | "%s:MSI-X-%d-RX", | ||
| 6991 | dev->name, i); | 6993 | dev->name, i); |
| 6992 | err = request_irq(sp->entries[i].vector, | 6994 | err = request_irq(sp->entries[i].vector, |
| 6993 | s2io_msix_ring_handle, | 6995 | s2io_msix_ring_handle, |
| @@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp) | |||
| 6996 | sp->s2io_entries[i].arg); | 6998 | sp->s2io_entries[i].arg); |
| 6997 | } else if (sp->s2io_entries[i].type == | 6999 | } else if (sp->s2io_entries[i].type == |
| 6998 | MSIX_ALARM_TYPE) { | 7000 | MSIX_ALARM_TYPE) { |
| 6999 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | 7001 | snprintf(sp->desc[i], |
| 7002 | sizeof(sp->desc[i]), | ||
| 7003 | "%s:MSI-X-%d-TX", | ||
| 7000 | dev->name, i); | 7004 | dev->name, i); |
| 7001 | err = request_irq(sp->entries[i].vector, | 7005 | err = request_irq(sp->entries[i].vector, |
| 7002 | s2io_msix_fifo_handle, | 7006 | s2io_msix_fifo_handle, |
| @@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 8154 | "%s: UDP Fragmentation Offload(UFO) enabled\n", | 8158 | "%s: UDP Fragmentation Offload(UFO) enabled\n", |
| 8155 | dev->name); | 8159 | dev->name); |
| 8156 | /* Initialize device name */ | 8160 | /* Initialize device name */ |
| 8157 | sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); | 8161 | snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, |
| 8162 | sp->product_name); | ||
| 8158 | 8163 | ||
| 8159 | if (vlan_tag_strip) | 8164 | if (vlan_tag_strip) |
| 8160 | sp->vlan_strip_flag = 1; | 8165 | sp->vlan_strip_flag = 1; |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 613037584d08..c531c8ae1be4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
| @@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget) | |||
| 2388 | 2388 | ||
| 2389 | work_done = netxen_process_rcv_ring(sds_ring, budget); | 2389 | work_done = netxen_process_rcv_ring(sds_ring, budget); |
| 2390 | 2390 | ||
| 2391 | if ((work_done < budget) && tx_complete) { | 2391 | if (!tx_complete) |
| 2392 | work_done = budget; | ||
| 2393 | |||
| 2394 | if (work_done < budget) { | ||
| 2392 | napi_complete(&sds_ring->napi); | 2395 | napi_complete(&sds_ring->napi); |
| 2393 | if (test_bit(__NX_DEV_UP, &adapter->state)) | 2396 | if (test_bit(__NX_DEV_UP, &adapter->state)) |
| 2394 | netxen_nic_enable_int(sds_ring); | 2397 | netxen_nic_enable_int(sds_ring); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 37583a9d8853..04283fe0e6a7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { | |||
| 396 | [TSU_ADRL31] = 0x01fc, | 396 | [TSU_ADRL31] = 0x01fc, |
| 397 | }; | 397 | }; |
| 398 | 398 | ||
| 399 | static void sh_eth_rcv_snd_disable(struct net_device *ndev); | ||
| 400 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); | ||
| 401 | |||
| 399 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) | 402 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) |
| 400 | { | 403 | { |
| 401 | return mdp->reg_offset == sh_eth_offset_gigabit; | 404 | return mdp->reg_offset == sh_eth_offset_gigabit; |
| @@ -498,6 +501,8 @@ static struct sh_eth_cpu_data r8a779x_data = { | |||
| 498 | EESR_ECI, | 501 | EESR_ECI, |
| 499 | .fdr_value = 0x00000f0f, | 502 | .fdr_value = 0x00000f0f, |
| 500 | 503 | ||
| 504 | .trscer_err_mask = DESC_I_RINT8, | ||
| 505 | |||
| 501 | .apr = 1, | 506 | .apr = 1, |
| 502 | .mpr = 1, | 507 | .mpr = 1, |
| 503 | .tpauser = 1, | 508 | .tpauser = 1, |
| @@ -538,8 +543,6 @@ static struct sh_eth_cpu_data sh7724_data = { | |||
| 538 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | | 543 | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | |
| 539 | EESR_ECI, | 544 | EESR_ECI, |
| 540 | 545 | ||
| 541 | .trscer_err_mask = DESC_I_RINT8, | ||
| 542 | |||
| 543 | .apr = 1, | 546 | .apr = 1, |
| 544 | .mpr = 1, | 547 | .mpr = 1, |
| 545 | .tpauser = 1, | 548 | .tpauser = 1, |
| @@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
| 1120 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; | 1123 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; |
| 1121 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; | 1124 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; |
| 1122 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | 1125 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; |
| 1126 | dma_addr_t dma_addr; | ||
| 1123 | 1127 | ||
| 1124 | mdp->cur_rx = 0; | 1128 | mdp->cur_rx = 0; |
| 1125 | mdp->cur_tx = 0; | 1129 | mdp->cur_tx = 0; |
| @@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
| 1133 | /* skb */ | 1137 | /* skb */ |
| 1134 | mdp->rx_skbuff[i] = NULL; | 1138 | mdp->rx_skbuff[i] = NULL; |
| 1135 | skb = netdev_alloc_skb(ndev, skbuff_size); | 1139 | skb = netdev_alloc_skb(ndev, skbuff_size); |
| 1136 | mdp->rx_skbuff[i] = skb; | ||
| 1137 | if (skb == NULL) | 1140 | if (skb == NULL) |
| 1138 | break; | 1141 | break; |
| 1139 | sh_eth_set_receive_align(skb); | 1142 | sh_eth_set_receive_align(skb); |
| @@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
| 1142 | rxdesc = &mdp->rx_ring[i]; | 1145 | rxdesc = &mdp->rx_ring[i]; |
| 1143 | /* The size of the buffer is a multiple of 16 bytes. */ | 1146 | /* The size of the buffer is a multiple of 16 bytes. */ |
| 1144 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | 1147 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
| 1145 | dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, | 1148 | dma_addr = dma_map_single(&ndev->dev, skb->data, |
| 1146 | DMA_FROM_DEVICE); | 1149 | rxdesc->buffer_length, |
| 1147 | rxdesc->addr = virt_to_phys(skb->data); | 1150 | DMA_FROM_DEVICE); |
| 1151 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | ||
| 1152 | kfree_skb(skb); | ||
| 1153 | break; | ||
| 1154 | } | ||
| 1155 | mdp->rx_skbuff[i] = skb; | ||
| 1156 | rxdesc->addr = dma_addr; | ||
| 1148 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 1157 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
| 1149 | 1158 | ||
| 1150 | /* Rx descriptor address set */ | 1159 | /* Rx descriptor address set */ |
| @@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) | |||
| 1316 | RFLR); | 1325 | RFLR); |
| 1317 | 1326 | ||
| 1318 | sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); | 1327 | sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); |
| 1319 | if (start) | 1328 | if (start) { |
| 1329 | mdp->irq_enabled = true; | ||
| 1320 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 1330 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); |
| 1331 | } | ||
| 1321 | 1332 | ||
| 1322 | /* PAUSE Prohibition */ | 1333 | /* PAUSE Prohibition */ |
| 1323 | val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | | 1334 | val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | |
| @@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) | |||
| 1356 | return ret; | 1367 | return ret; |
| 1357 | } | 1368 | } |
| 1358 | 1369 | ||
| 1370 | static void sh_eth_dev_exit(struct net_device *ndev) | ||
| 1371 | { | ||
| 1372 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
| 1373 | int i; | ||
| 1374 | |||
| 1375 | /* Deactivate all TX descriptors, so DMA should stop at next | ||
| 1376 | * packet boundary if it's currently running | ||
| 1377 | */ | ||
| 1378 | for (i = 0; i < mdp->num_tx_ring; i++) | ||
| 1379 | mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT); | ||
| 1380 | |||
| 1381 | /* Disable TX FIFO egress to MAC */ | ||
| 1382 | sh_eth_rcv_snd_disable(ndev); | ||
| 1383 | |||
| 1384 | /* Stop RX DMA at next packet boundary */ | ||
| 1385 | sh_eth_write(ndev, 0, EDRRR); | ||
| 1386 | |||
| 1387 | /* Aside from TX DMA, we can't tell when the hardware is | ||
| 1388 | * really stopped, so we need to reset to make sure. | ||
| 1389 | * Before doing that, wait for long enough to *probably* | ||
| 1390 | * finish transmitting the last packet and poll stats. | ||
| 1391 | */ | ||
| 1392 | msleep(2); /* max frame time at 10 Mbps < 1250 us */ | ||
| 1393 | sh_eth_get_stats(ndev); | ||
| 1394 | sh_eth_reset(ndev); | ||
| 1395 | } | ||
| 1396 | |||
| 1359 | /* free Tx skb function */ | 1397 | /* free Tx skb function */ |
| 1360 | static int sh_eth_txfree(struct net_device *ndev) | 1398 | static int sh_eth_txfree(struct net_device *ndev) |
| 1361 | { | 1399 | { |
| @@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1400 | u16 pkt_len = 0; | 1438 | u16 pkt_len = 0; |
| 1401 | u32 desc_status; | 1439 | u32 desc_status; |
| 1402 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | 1440 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; |
| 1441 | dma_addr_t dma_addr; | ||
| 1403 | 1442 | ||
| 1404 | boguscnt = min(boguscnt, *quota); | 1443 | boguscnt = min(boguscnt, *quota); |
| 1405 | limit = boguscnt; | 1444 | limit = boguscnt; |
| @@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1447 | mdp->rx_skbuff[entry] = NULL; | 1486 | mdp->rx_skbuff[entry] = NULL; |
| 1448 | if (mdp->cd->rpadir) | 1487 | if (mdp->cd->rpadir) |
| 1449 | skb_reserve(skb, NET_IP_ALIGN); | 1488 | skb_reserve(skb, NET_IP_ALIGN); |
| 1450 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, | 1489 | dma_unmap_single(&ndev->dev, rxdesc->addr, |
| 1451 | ALIGN(mdp->rx_buf_sz, 16), | 1490 | ALIGN(mdp->rx_buf_sz, 16), |
| 1452 | DMA_FROM_DEVICE); | 1491 | DMA_FROM_DEVICE); |
| 1453 | skb_put(skb, pkt_len); | 1492 | skb_put(skb, pkt_len); |
| 1454 | skb->protocol = eth_type_trans(skb, ndev); | 1493 | skb->protocol = eth_type_trans(skb, ndev); |
| 1455 | netif_receive_skb(skb); | 1494 | netif_receive_skb(skb); |
| @@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1469 | 1508 | ||
| 1470 | if (mdp->rx_skbuff[entry] == NULL) { | 1509 | if (mdp->rx_skbuff[entry] == NULL) { |
| 1471 | skb = netdev_alloc_skb(ndev, skbuff_size); | 1510 | skb = netdev_alloc_skb(ndev, skbuff_size); |
| 1472 | mdp->rx_skbuff[entry] = skb; | ||
| 1473 | if (skb == NULL) | 1511 | if (skb == NULL) |
| 1474 | break; /* Better luck next round. */ | 1512 | break; /* Better luck next round. */ |
| 1475 | sh_eth_set_receive_align(skb); | 1513 | sh_eth_set_receive_align(skb); |
| 1476 | dma_map_single(&ndev->dev, skb->data, | 1514 | dma_addr = dma_map_single(&ndev->dev, skb->data, |
| 1477 | rxdesc->buffer_length, DMA_FROM_DEVICE); | 1515 | rxdesc->buffer_length, |
| 1516 | DMA_FROM_DEVICE); | ||
| 1517 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | ||
| 1518 | kfree_skb(skb); | ||
| 1519 | break; | ||
| 1520 | } | ||
| 1521 | mdp->rx_skbuff[entry] = skb; | ||
| 1478 | 1522 | ||
| 1479 | skb_checksum_none_assert(skb); | 1523 | skb_checksum_none_assert(skb); |
| 1480 | rxdesc->addr = virt_to_phys(skb->data); | 1524 | rxdesc->addr = dma_addr; |
| 1481 | } | 1525 | } |
| 1482 | if (entry >= mdp->num_rx_ring - 1) | 1526 | if (entry >= mdp->num_rx_ring - 1) |
| 1483 | rxdesc->status |= | 1527 | rxdesc->status |= |
| @@ -1573,7 +1617,6 @@ ignore_link: | |||
| 1573 | if (intr_status & EESR_RFRMER) { | 1617 | if (intr_status & EESR_RFRMER) { |
| 1574 | /* Receive Frame Overflow int */ | 1618 | /* Receive Frame Overflow int */ |
| 1575 | ndev->stats.rx_frame_errors++; | 1619 | ndev->stats.rx_frame_errors++; |
| 1576 | netif_err(mdp, rx_err, ndev, "Receive Abort\n"); | ||
| 1577 | } | 1620 | } |
| 1578 | } | 1621 | } |
| 1579 | 1622 | ||
| @@ -1592,13 +1635,11 @@ ignore_link: | |||
| 1592 | if (intr_status & EESR_RDE) { | 1635 | if (intr_status & EESR_RDE) { |
| 1593 | /* Receive Descriptor Empty int */ | 1636 | /* Receive Descriptor Empty int */ |
| 1594 | ndev->stats.rx_over_errors++; | 1637 | ndev->stats.rx_over_errors++; |
| 1595 | netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n"); | ||
| 1596 | } | 1638 | } |
| 1597 | 1639 | ||
| 1598 | if (intr_status & EESR_RFE) { | 1640 | if (intr_status & EESR_RFE) { |
| 1599 | /* Receive FIFO Overflow int */ | 1641 | /* Receive FIFO Overflow int */ |
| 1600 | ndev->stats.rx_fifo_errors++; | 1642 | ndev->stats.rx_fifo_errors++; |
| 1601 | netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n"); | ||
| 1602 | } | 1643 | } |
| 1603 | 1644 | ||
| 1604 | if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { | 1645 | if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { |
| @@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
| 1653 | if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) | 1694 | if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check)) |
| 1654 | ret = IRQ_HANDLED; | 1695 | ret = IRQ_HANDLED; |
| 1655 | else | 1696 | else |
| 1656 | goto other_irq; | 1697 | goto out; |
| 1698 | |||
| 1699 | if (!likely(mdp->irq_enabled)) { | ||
| 1700 | sh_eth_write(ndev, 0, EESIPR); | ||
| 1701 | goto out; | ||
| 1702 | } | ||
| 1657 | 1703 | ||
| 1658 | if (intr_status & EESR_RX_CHECK) { | 1704 | if (intr_status & EESR_RX_CHECK) { |
| 1659 | if (napi_schedule_prep(&mdp->napi)) { | 1705 | if (napi_schedule_prep(&mdp->napi)) { |
| @@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
| 1684 | sh_eth_error(ndev, intr_status); | 1730 | sh_eth_error(ndev, intr_status); |
| 1685 | } | 1731 | } |
| 1686 | 1732 | ||
| 1687 | other_irq: | 1733 | out: |
| 1688 | spin_unlock(&mdp->lock); | 1734 | spin_unlock(&mdp->lock); |
| 1689 | 1735 | ||
| 1690 | return ret; | 1736 | return ret; |
| @@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget) | |||
| 1712 | napi_complete(napi); | 1758 | napi_complete(napi); |
| 1713 | 1759 | ||
| 1714 | /* Reenable Rx interrupts */ | 1760 | /* Reenable Rx interrupts */ |
| 1715 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 1761 | if (mdp->irq_enabled) |
| 1762 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | ||
| 1716 | out: | 1763 | out: |
| 1717 | return budget - quota; | 1764 | return budget - quota; |
| 1718 | } | 1765 | } |
| @@ -1827,6 +1874,9 @@ static int sh_eth_get_settings(struct net_device *ndev, | |||
| 1827 | unsigned long flags; | 1874 | unsigned long flags; |
| 1828 | int ret; | 1875 | int ret; |
| 1829 | 1876 | ||
| 1877 | if (!mdp->phydev) | ||
| 1878 | return -ENODEV; | ||
| 1879 | |||
| 1830 | spin_lock_irqsave(&mdp->lock, flags); | 1880 | spin_lock_irqsave(&mdp->lock, flags); |
| 1831 | ret = phy_ethtool_gset(mdp->phydev, ecmd); | 1881 | ret = phy_ethtool_gset(mdp->phydev, ecmd); |
| 1832 | spin_unlock_irqrestore(&mdp->lock, flags); | 1882 | spin_unlock_irqrestore(&mdp->lock, flags); |
| @@ -1841,6 +1891,9 @@ static int sh_eth_set_settings(struct net_device *ndev, | |||
| 1841 | unsigned long flags; | 1891 | unsigned long flags; |
| 1842 | int ret; | 1892 | int ret; |
| 1843 | 1893 | ||
| 1894 | if (!mdp->phydev) | ||
| 1895 | return -ENODEV; | ||
| 1896 | |||
| 1844 | spin_lock_irqsave(&mdp->lock, flags); | 1897 | spin_lock_irqsave(&mdp->lock, flags); |
| 1845 | 1898 | ||
| 1846 | /* disable tx and rx */ | 1899 | /* disable tx and rx */ |
| @@ -1875,6 +1928,9 @@ static int sh_eth_nway_reset(struct net_device *ndev) | |||
| 1875 | unsigned long flags; | 1928 | unsigned long flags; |
| 1876 | int ret; | 1929 | int ret; |
| 1877 | 1930 | ||
| 1931 | if (!mdp->phydev) | ||
| 1932 | return -ENODEV; | ||
| 1933 | |||
| 1878 | spin_lock_irqsave(&mdp->lock, flags); | 1934 | spin_lock_irqsave(&mdp->lock, flags); |
| 1879 | ret = phy_start_aneg(mdp->phydev); | 1935 | ret = phy_start_aneg(mdp->phydev); |
| 1880 | spin_unlock_irqrestore(&mdp->lock, flags); | 1936 | spin_unlock_irqrestore(&mdp->lock, flags); |
| @@ -1959,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev, | |||
| 1959 | return -EINVAL; | 2015 | return -EINVAL; |
| 1960 | 2016 | ||
| 1961 | if (netif_running(ndev)) { | 2017 | if (netif_running(ndev)) { |
| 2018 | netif_device_detach(ndev); | ||
| 1962 | netif_tx_disable(ndev); | 2019 | netif_tx_disable(ndev); |
| 1963 | /* Disable interrupts by clearing the interrupt mask. */ | 2020 | |
| 1964 | sh_eth_write(ndev, 0x0000, EESIPR); | 2021 | /* Serialise with the interrupt handler and NAPI, then |
| 1965 | /* Stop the chip's Tx and Rx processes. */ | 2022 | * disable interrupts. We have to clear the |
| 1966 | sh_eth_write(ndev, 0, EDTRR); | 2023 | * irq_enabled flag first to ensure that interrupts |
| 1967 | sh_eth_write(ndev, 0, EDRRR); | 2024 | * won't be re-enabled. |
| 2025 | */ | ||
| 2026 | mdp->irq_enabled = false; | ||
| 1968 | synchronize_irq(ndev->irq); | 2027 | synchronize_irq(ndev->irq); |
| 1969 | } | 2028 | napi_synchronize(&mdp->napi); |
| 2029 | sh_eth_write(ndev, 0x0000, EESIPR); | ||
| 1970 | 2030 | ||
| 1971 | /* Free all the skbuffs in the Rx queue. */ | 2031 | sh_eth_dev_exit(ndev); |
| 1972 | sh_eth_ring_free(ndev); | 2032 | |
| 1973 | /* Free DMA buffer */ | 2033 | /* Free all the skbuffs in the Rx queue. */ |
| 1974 | sh_eth_free_dma_buffer(mdp); | 2034 | sh_eth_ring_free(ndev); |
| 2035 | /* Free DMA buffer */ | ||
| 2036 | sh_eth_free_dma_buffer(mdp); | ||
| 2037 | } | ||
| 1975 | 2038 | ||
| 1976 | /* Set new parameters */ | 2039 | /* Set new parameters */ |
| 1977 | mdp->num_rx_ring = ring->rx_pending; | 2040 | mdp->num_rx_ring = ring->rx_pending; |
| 1978 | mdp->num_tx_ring = ring->tx_pending; | 2041 | mdp->num_tx_ring = ring->tx_pending; |
| 1979 | 2042 | ||
| 1980 | ret = sh_eth_ring_init(ndev); | ||
| 1981 | if (ret < 0) { | ||
| 1982 | netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__); | ||
| 1983 | return ret; | ||
| 1984 | } | ||
| 1985 | ret = sh_eth_dev_init(ndev, false); | ||
| 1986 | if (ret < 0) { | ||
| 1987 | netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); | ||
| 1988 | return ret; | ||
| 1989 | } | ||
| 1990 | |||
| 1991 | if (netif_running(ndev)) { | 2043 | if (netif_running(ndev)) { |
| 2044 | ret = sh_eth_ring_init(ndev); | ||
| 2045 | if (ret < 0) { | ||
| 2046 | netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", | ||
| 2047 | __func__); | ||
| 2048 | return ret; | ||
| 2049 | } | ||
| 2050 | ret = sh_eth_dev_init(ndev, false); | ||
| 2051 | if (ret < 0) { | ||
| 2052 | netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", | ||
| 2053 | __func__); | ||
| 2054 | return ret; | ||
| 2055 | } | ||
| 2056 | |||
| 2057 | mdp->irq_enabled = true; | ||
| 1992 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); | 2058 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); |
| 1993 | /* Setting the Rx mode will start the Rx process. */ | 2059 | /* Setting the Rx mode will start the Rx process. */ |
| 1994 | sh_eth_write(ndev, EDRRR_R, EDRRR); | 2060 | sh_eth_write(ndev, EDRRR_R, EDRRR); |
| 1995 | netif_wake_queue(ndev); | 2061 | netif_device_attach(ndev); |
| 1996 | } | 2062 | } |
| 1997 | 2063 | ||
| 1998 | return 0; | 2064 | return 0; |
| @@ -2108,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 2108 | } | 2174 | } |
| 2109 | spin_unlock_irqrestore(&mdp->lock, flags); | 2175 | spin_unlock_irqrestore(&mdp->lock, flags); |
| 2110 | 2176 | ||
| 2177 | if (skb_padto(skb, ETH_ZLEN)) | ||
| 2178 | return NETDEV_TX_OK; | ||
| 2179 | |||
| 2111 | entry = mdp->cur_tx % mdp->num_tx_ring; | 2180 | entry = mdp->cur_tx % mdp->num_tx_ring; |
| 2112 | mdp->tx_skbuff[entry] = skb; | 2181 | mdp->tx_skbuff[entry] = skb; |
| 2113 | txdesc = &mdp->tx_ring[entry]; | 2182 | txdesc = &mdp->tx_ring[entry]; |
| @@ -2117,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 2117 | skb->len + 2); | 2186 | skb->len + 2); |
| 2118 | txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, | 2187 | txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, |
| 2119 | DMA_TO_DEVICE); | 2188 | DMA_TO_DEVICE); |
| 2120 | if (skb->len < ETH_ZLEN) | 2189 | if (dma_mapping_error(&ndev->dev, txdesc->addr)) { |
| 2121 | txdesc->buffer_length = ETH_ZLEN; | 2190 | kfree_skb(skb); |
| 2122 | else | 2191 | return NETDEV_TX_OK; |
| 2123 | txdesc->buffer_length = skb->len; | 2192 | } |
| 2193 | txdesc->buffer_length = skb->len; | ||
| 2124 | 2194 | ||
| 2125 | if (entry >= mdp->num_tx_ring - 1) | 2195 | if (entry >= mdp->num_tx_ring - 1) |
| 2126 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); | 2196 | txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); |
| @@ -2172,24 +2242,26 @@ static int sh_eth_close(struct net_device *ndev) | |||
| 2172 | 2242 | ||
| 2173 | netif_stop_queue(ndev); | 2243 | netif_stop_queue(ndev); |
| 2174 | 2244 | ||
| 2175 | /* Disable interrupts by clearing the interrupt mask. */ | 2245 | /* Serialise with the interrupt handler and NAPI, then disable |
| 2246 | * interrupts. We have to clear the irq_enabled flag first to | ||
| 2247 | * ensure that interrupts won't be re-enabled. | ||
| 2248 | */ | ||
| 2249 | mdp->irq_enabled = false; | ||
| 2250 | synchronize_irq(ndev->irq); | ||
| 2251 | napi_disable(&mdp->napi); | ||
| 2176 | sh_eth_write(ndev, 0x0000, EESIPR); | 2252 | sh_eth_write(ndev, 0x0000, EESIPR); |
| 2177 | 2253 | ||
| 2178 | /* Stop the chip's Tx and Rx processes. */ | 2254 | sh_eth_dev_exit(ndev); |
| 2179 | sh_eth_write(ndev, 0, EDTRR); | ||
| 2180 | sh_eth_write(ndev, 0, EDRRR); | ||
| 2181 | 2255 | ||
| 2182 | sh_eth_get_stats(ndev); | ||
| 2183 | /* PHY Disconnect */ | 2256 | /* PHY Disconnect */ |
| 2184 | if (mdp->phydev) { | 2257 | if (mdp->phydev) { |
| 2185 | phy_stop(mdp->phydev); | 2258 | phy_stop(mdp->phydev); |
| 2186 | phy_disconnect(mdp->phydev); | 2259 | phy_disconnect(mdp->phydev); |
| 2260 | mdp->phydev = NULL; | ||
| 2187 | } | 2261 | } |
| 2188 | 2262 | ||
| 2189 | free_irq(ndev->irq, ndev); | 2263 | free_irq(ndev->irq, ndev); |
| 2190 | 2264 | ||
| 2191 | napi_disable(&mdp->napi); | ||
| 2192 | |||
| 2193 | /* Free all the skbuffs in the Rx queue. */ | 2265 | /* Free all the skbuffs in the Rx queue. */ |
| 2194 | sh_eth_ring_free(ndev); | 2266 | sh_eth_ring_free(ndev); |
| 2195 | 2267 | ||
| @@ -2417,7 +2489,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev) | |||
| 2417 | struct sh_eth_private *mdp = netdev_priv(ndev); | 2489 | struct sh_eth_private *mdp = netdev_priv(ndev); |
| 2418 | int i, ret; | 2490 | int i, ret; |
| 2419 | 2491 | ||
| 2420 | if (unlikely(!mdp->cd->tsu)) | 2492 | if (!mdp->cd->tsu) |
| 2421 | return 0; | 2493 | return 0; |
| 2422 | 2494 | ||
| 2423 | for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { | 2495 | for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { |
| @@ -2440,7 +2512,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev) | |||
| 2440 | void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); | 2512 | void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); |
| 2441 | int i; | 2513 | int i; |
| 2442 | 2514 | ||
| 2443 | if (unlikely(!mdp->cd->tsu)) | 2515 | if (!mdp->cd->tsu) |
| 2444 | return; | 2516 | return; |
| 2445 | 2517 | ||
| 2446 | for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { | 2518 | for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { |
| @@ -2450,8 +2522,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev) | |||
| 2450 | } | 2522 | } |
| 2451 | } | 2523 | } |
| 2452 | 2524 | ||
| 2453 | /* Multicast reception directions set */ | 2525 | /* Update promiscuous flag and multicast filter */ |
| 2454 | static void sh_eth_set_multicast_list(struct net_device *ndev) | 2526 | static void sh_eth_set_rx_mode(struct net_device *ndev) |
| 2455 | { | 2527 | { |
| 2456 | struct sh_eth_private *mdp = netdev_priv(ndev); | 2528 | struct sh_eth_private *mdp = netdev_priv(ndev); |
| 2457 | u32 ecmr_bits; | 2529 | u32 ecmr_bits; |
| @@ -2462,7 +2534,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev) | |||
| 2462 | /* Initial condition is MCT = 1, PRM = 0. | 2534 | /* Initial condition is MCT = 1, PRM = 0. |
| 2463 | * Depending on ndev->flags, set PRM or clear MCT | 2535 | * Depending on ndev->flags, set PRM or clear MCT |
| 2464 | */ | 2536 | */ |
| 2465 | ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT; | 2537 | ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM; |
| 2538 | if (mdp->cd->tsu) | ||
| 2539 | ecmr_bits |= ECMR_MCT; | ||
| 2466 | 2540 | ||
| 2467 | if (!(ndev->flags & IFF_MULTICAST)) { | 2541 | if (!(ndev->flags & IFF_MULTICAST)) { |
| 2468 | sh_eth_tsu_purge_mcast(ndev); | 2542 | sh_eth_tsu_purge_mcast(ndev); |
| @@ -2491,9 +2565,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev) | |||
| 2491 | } | 2565 | } |
| 2492 | } | 2566 | } |
| 2493 | } | 2567 | } |
| 2494 | } else { | ||
| 2495 | /* Normal, unicast/broadcast-only mode. */ | ||
| 2496 | ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT; | ||
| 2497 | } | 2568 | } |
| 2498 | 2569 | ||
| 2499 | /* update the ethernet mode */ | 2570 | /* update the ethernet mode */ |
| @@ -2701,6 +2772,7 @@ static const struct net_device_ops sh_eth_netdev_ops = { | |||
| 2701 | .ndo_stop = sh_eth_close, | 2772 | .ndo_stop = sh_eth_close, |
| 2702 | .ndo_start_xmit = sh_eth_start_xmit, | 2773 | .ndo_start_xmit = sh_eth_start_xmit, |
| 2703 | .ndo_get_stats = sh_eth_get_stats, | 2774 | .ndo_get_stats = sh_eth_get_stats, |
| 2775 | .ndo_set_rx_mode = sh_eth_set_rx_mode, | ||
| 2704 | .ndo_tx_timeout = sh_eth_tx_timeout, | 2776 | .ndo_tx_timeout = sh_eth_tx_timeout, |
| 2705 | .ndo_do_ioctl = sh_eth_do_ioctl, | 2777 | .ndo_do_ioctl = sh_eth_do_ioctl, |
| 2706 | .ndo_validate_addr = eth_validate_addr, | 2778 | .ndo_validate_addr = eth_validate_addr, |
| @@ -2713,7 +2785,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = { | |||
| 2713 | .ndo_stop = sh_eth_close, | 2785 | .ndo_stop = sh_eth_close, |
| 2714 | .ndo_start_xmit = sh_eth_start_xmit, | 2786 | .ndo_start_xmit = sh_eth_start_xmit, |
| 2715 | .ndo_get_stats = sh_eth_get_stats, | 2787 | .ndo_get_stats = sh_eth_get_stats, |
| 2716 | .ndo_set_rx_mode = sh_eth_set_multicast_list, | 2788 | .ndo_set_rx_mode = sh_eth_set_rx_mode, |
| 2717 | .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, | 2789 | .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, |
| 2718 | .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, | 2790 | .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, |
| 2719 | .ndo_tx_timeout = sh_eth_tx_timeout, | 2791 | .ndo_tx_timeout = sh_eth_tx_timeout, |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 71f5de1171bd..332d3c16d483 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
| @@ -513,6 +513,7 @@ struct sh_eth_private { | |||
| 513 | u32 rx_buf_sz; /* Based on MTU+slack. */ | 513 | u32 rx_buf_sz; /* Based on MTU+slack. */ |
| 514 | int edmac_endian; | 514 | int edmac_endian; |
| 515 | struct napi_struct napi; | 515 | struct napi_struct napi; |
| 516 | bool irq_enabled; | ||
| 516 | /* MII transceiver section. */ | 517 | /* MII transceiver section. */ |
| 517 | u32 phy_id; /* PHY ID */ | 518 | u32 phy_id; /* PHY ID */ |
| 518 | struct mii_bus *mii_bus; /* MDIO bus control */ | 519 | struct mii_bus *mii_bus; /* MDIO bus control */ |
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 698494481d18..b1a271853d85 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | |||
| @@ -474,13 +474,19 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, | |||
| 474 | /* allocate memory for RX skbuff array */ | 474 | /* allocate memory for RX skbuff array */ |
| 475 | rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, | 475 | rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, |
| 476 | sizeof(dma_addr_t), GFP_KERNEL); | 476 | sizeof(dma_addr_t), GFP_KERNEL); |
| 477 | if (rx_ring->rx_skbuff_dma == NULL) | 477 | if (!rx_ring->rx_skbuff_dma) { |
| 478 | goto dmamem_err; | 478 | dma_free_coherent(priv->device, |
| 479 | rx_rsize * sizeof(struct sxgbe_rx_norm_desc), | ||
| 480 | rx_ring->dma_rx, rx_ring->dma_rx_phy); | ||
| 481 | goto error; | ||
| 482 | } | ||
| 479 | 483 | ||
| 480 | rx_ring->rx_skbuff = kmalloc_array(rx_rsize, | 484 | rx_ring->rx_skbuff = kmalloc_array(rx_rsize, |
| 481 | sizeof(struct sk_buff *), GFP_KERNEL); | 485 | sizeof(struct sk_buff *), GFP_KERNEL); |
| 482 | if (rx_ring->rx_skbuff == NULL) | 486 | if (!rx_ring->rx_skbuff) { |
| 483 | goto rxbuff_err; | 487 | kfree(rx_ring->rx_skbuff_dma); |
| 488 | goto error; | ||
| 489 | } | ||
| 484 | 490 | ||
| 485 | /* initialise the buffers */ | 491 | /* initialise the buffers */ |
| 486 | for (desc_index = 0; desc_index < rx_rsize; desc_index++) { | 492 | for (desc_index = 0; desc_index < rx_rsize; desc_index++) { |
| @@ -502,13 +508,6 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, | |||
| 502 | err_init_rx_buffers: | 508 | err_init_rx_buffers: |
| 503 | while (--desc_index >= 0) | 509 | while (--desc_index >= 0) |
| 504 | free_rx_ring(priv->device, rx_ring, desc_index); | 510 | free_rx_ring(priv->device, rx_ring, desc_index); |
| 505 | kfree(rx_ring->rx_skbuff); | ||
| 506 | rxbuff_err: | ||
| 507 | kfree(rx_ring->rx_skbuff_dma); | ||
| 508 | dmamem_err: | ||
| 509 | dma_free_coherent(priv->device, | ||
| 510 | rx_rsize * sizeof(struct sxgbe_rx_norm_desc), | ||
| 511 | rx_ring->dma_rx, rx_ring->dma_rx_phy); | ||
| 512 | error: | 511 | error: |
| 513 | return -ENOMEM; | 512 | return -ENOMEM; |
| 514 | } | 513 | } |
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 866560ea9e18..b02eed12bfc5 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | |||
| @@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev) | |||
| 108 | } | 108 | } |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | /* Get MAC address if available (DT) */ | ||
| 112 | if (mac) | ||
| 113 | ether_addr_copy(priv->dev->dev_addr, mac); | ||
| 114 | |||
| 115 | priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr); | 111 | priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr); |
| 116 | if (!priv) { | 112 | if (!priv) { |
| 117 | pr_err("%s: main driver probe failed\n", __func__); | 113 | pr_err("%s: main driver probe failed\n", __func__); |
| @@ -125,6 +121,10 @@ static int sxgbe_platform_probe(struct platform_device *pdev) | |||
| 125 | goto err_drv_remove; | 121 | goto err_drv_remove; |
| 126 | } | 122 | } |
| 127 | 123 | ||
| 124 | /* Get MAC address if available (DT) */ | ||
| 125 | if (mac) | ||
| 126 | ether_addr_copy(priv->dev->dev_addr, mac); | ||
| 127 | |||
| 128 | /* Get the TX/RX IRQ numbers */ | 128 | /* Get the TX/RX IRQ numbers */ |
| 129 | for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { | 129 | for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { |
| 130 | priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++); | 130 | priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8c6b7c1651e5..cf62ff4c8c56 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
| 2778 | * @addr: iobase memory address | 2778 | * @addr: iobase memory address |
| 2779 | * Description: this is the main probe function used to | 2779 | * Description: this is the main probe function used to |
| 2780 | * call the alloc_etherdev, allocate the priv structure. | 2780 | * call the alloc_etherdev, allocate the priv structure. |
| 2781 | * Return: | ||
| 2782 | * on success the new private structure is returned, otherwise the error | ||
| 2783 | * pointer. | ||
| 2781 | */ | 2784 | */ |
| 2782 | struct stmmac_priv *stmmac_dvr_probe(struct device *device, | 2785 | struct stmmac_priv *stmmac_dvr_probe(struct device *device, |
| 2783 | struct plat_stmmacenet_data *plat_dat, | 2786 | struct plat_stmmacenet_data *plat_dat, |
| @@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, | |||
| 2789 | 2792 | ||
| 2790 | ndev = alloc_etherdev(sizeof(struct stmmac_priv)); | 2793 | ndev = alloc_etherdev(sizeof(struct stmmac_priv)); |
| 2791 | if (!ndev) | 2794 | if (!ndev) |
| 2792 | return NULL; | 2795 | return ERR_PTR(-ENOMEM); |
| 2793 | 2796 | ||
| 2794 | SET_NETDEV_DEV(ndev, device); | 2797 | SET_NETDEV_DEV(ndev, device); |
| 2795 | 2798 | ||
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 64d1cef4cda1..a39131f494ec 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1634,16 +1634,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, | |||
| 1634 | unsigned short vid) | 1634 | unsigned short vid) |
| 1635 | { | 1635 | { |
| 1636 | int ret; | 1636 | int ret; |
| 1637 | int unreg_mcast_mask; | 1637 | int unreg_mcast_mask = 0; |
| 1638 | u32 port_mask; | ||
| 1638 | 1639 | ||
| 1639 | if (priv->ndev->flags & IFF_ALLMULTI) | 1640 | if (priv->data.dual_emac) { |
| 1640 | unreg_mcast_mask = ALE_ALL_PORTS; | 1641 | port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST; |
| 1641 | else | ||
| 1642 | unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; | ||
| 1643 | 1642 | ||
| 1644 | ret = cpsw_ale_add_vlan(priv->ale, vid, | 1643 | if (priv->ndev->flags & IFF_ALLMULTI) |
| 1645 | ALE_ALL_PORTS << priv->host_port, | 1644 | unreg_mcast_mask = port_mask; |
| 1646 | 0, ALE_ALL_PORTS << priv->host_port, | 1645 | } else { |
| 1646 | port_mask = ALE_ALL_PORTS; | ||
| 1647 | |||
| 1648 | if (priv->ndev->flags & IFF_ALLMULTI) | ||
| 1649 | unreg_mcast_mask = ALE_ALL_PORTS; | ||
| 1650 | else | ||
| 1651 | unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; | ||
| 1652 | } | ||
| 1653 | |||
| 1654 | ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask, | ||
| 1647 | unreg_mcast_mask << priv->host_port); | 1655 | unreg_mcast_mask << priv->host_port); |
| 1648 | if (ret != 0) | 1656 | if (ret != 0) |
| 1649 | return ret; | 1657 | return ret; |
| @@ -1654,8 +1662,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, | |||
| 1654 | goto clean_vid; | 1662 | goto clean_vid; |
| 1655 | 1663 | ||
| 1656 | ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, | 1664 | ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, |
| 1657 | ALE_ALL_PORTS << priv->host_port, | 1665 | port_mask, ALE_VLAN, vid, 0); |
| 1658 | ALE_VLAN, vid, 0); | ||
| 1659 | if (ret != 0) | 1666 | if (ret != 0) |
| 1660 | goto clean_vlan_ucast; | 1667 | goto clean_vlan_ucast; |
| 1661 | return 0; | 1668 | return 0; |
| @@ -1676,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, | |||
| 1676 | if (vid == priv->data.default_vlan) | 1683 | if (vid == priv->data.default_vlan) |
| 1677 | return 0; | 1684 | return 0; |
| 1678 | 1685 | ||
| 1686 | if (priv->data.dual_emac) { | ||
| 1687 | /* In dual EMAC, reserved VLAN id should not be used for | ||
| 1688 | * creating VLAN interfaces as this can break the dual | ||
| 1689 | * EMAC port separation | ||
| 1690 | */ | ||
| 1691 | int i; | ||
| 1692 | |||
| 1693 | for (i = 0; i < priv->data.slaves; i++) { | ||
| 1694 | if (vid == priv->slaves[i].port_vlan) | ||
| 1695 | return -EINVAL; | ||
| 1696 | } | ||
| 1697 | } | ||
| 1698 | |||
| 1679 | dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); | 1699 | dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); |
| 1680 | return cpsw_add_vlan_ale_entry(priv, vid); | 1700 | return cpsw_add_vlan_ale_entry(priv, vid); |
| 1681 | } | 1701 | } |
| @@ -1689,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, | |||
| 1689 | if (vid == priv->data.default_vlan) | 1709 | if (vid == priv->data.default_vlan) |
| 1690 | return 0; | 1710 | return 0; |
| 1691 | 1711 | ||
| 1712 | if (priv->data.dual_emac) { | ||
| 1713 | int i; | ||
| 1714 | |||
| 1715 | for (i = 0; i < priv->data.slaves; i++) { | ||
| 1716 | if (vid == priv->slaves[i].port_vlan) | ||
| 1717 | return -EINVAL; | ||
| 1718 | } | ||
| 1719 | } | ||
| 1720 | |||
| 1692 | dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); | 1721 | dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); |
| 1693 | ret = cpsw_ale_del_vlan(priv->ale, vid, 0); | 1722 | ret = cpsw_ale_del_vlan(priv->ale, vid, 0); |
| 1694 | if (ret != 0) | 1723 | if (ret != 0) |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ea712512c7d1..5fae4354722c 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
| @@ -62,6 +62,7 @@ | |||
| 62 | #include <linux/of.h> | 62 | #include <linux/of.h> |
| 63 | #include <linux/of_address.h> | 63 | #include <linux/of_address.h> |
| 64 | #include <linux/of_device.h> | 64 | #include <linux/of_device.h> |
| 65 | #include <linux/of_mdio.h> | ||
| 65 | #include <linux/of_irq.h> | 66 | #include <linux/of_irq.h> |
| 66 | #include <linux/of_net.h> | 67 | #include <linux/of_net.h> |
| 67 | 68 | ||
| @@ -343,9 +344,7 @@ struct emac_priv { | |||
| 343 | u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; | 344 | u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; |
| 344 | u32 rx_addr_type; | 345 | u32 rx_addr_type; |
| 345 | const char *phy_id; | 346 | const char *phy_id; |
| 346 | #ifdef CONFIG_OF | ||
| 347 | struct device_node *phy_node; | 347 | struct device_node *phy_node; |
| 348 | #endif | ||
| 349 | struct phy_device *phydev; | 348 | struct phy_device *phydev; |
| 350 | spinlock_t lock; | 349 | spinlock_t lock; |
| 351 | /*platform specific members*/ | 350 | /*platform specific members*/ |
| @@ -922,6 +921,16 @@ static void emac_int_disable(struct emac_priv *priv) | |||
| 922 | if (priv->int_disable) | 921 | if (priv->int_disable) |
| 923 | priv->int_disable(); | 922 | priv->int_disable(); |
| 924 | 923 | ||
| 924 | /* NOTE: Rx Threshold and Misc interrupts are not enabled */ | ||
| 925 | |||
| 926 | /* ack rxen only then a new pulse will be generated */ | ||
| 927 | emac_write(EMAC_DM646X_MACEOIVECTOR, | ||
| 928 | EMAC_DM646X_MAC_EOI_C0_RXEN); | ||
| 929 | |||
| 930 | /* ack txen- only then a new pulse will be generated */ | ||
| 931 | emac_write(EMAC_DM646X_MACEOIVECTOR, | ||
| 932 | EMAC_DM646X_MAC_EOI_C0_TXEN); | ||
| 933 | |||
| 925 | local_irq_restore(flags); | 934 | local_irq_restore(flags); |
| 926 | 935 | ||
| 927 | } else { | 936 | } else { |
| @@ -951,15 +960,6 @@ static void emac_int_enable(struct emac_priv *priv) | |||
| 951 | * register */ | 960 | * register */ |
| 952 | 961 | ||
| 953 | /* NOTE: Rx Threshold and Misc interrupts are not enabled */ | 962 | /* NOTE: Rx Threshold and Misc interrupts are not enabled */ |
| 954 | |||
| 955 | /* ack rxen only then a new pulse will be generated */ | ||
| 956 | emac_write(EMAC_DM646X_MACEOIVECTOR, | ||
| 957 | EMAC_DM646X_MAC_EOI_C0_RXEN); | ||
| 958 | |||
| 959 | /* ack txen- only then a new pulse will be generated */ | ||
| 960 | emac_write(EMAC_DM646X_MACEOIVECTOR, | ||
| 961 | EMAC_DM646X_MAC_EOI_C0_TXEN); | ||
| 962 | |||
| 963 | } else { | 963 | } else { |
| 964 | /* Set DM644x control registers for interrupt control */ | 964 | /* Set DM644x control registers for interrupt control */ |
| 965 | emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1); | 965 | emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1); |
| @@ -1537,7 +1537,13 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1537 | int i = 0; | 1537 | int i = 0; |
| 1538 | struct emac_priv *priv = netdev_priv(ndev); | 1538 | struct emac_priv *priv = netdev_priv(ndev); |
| 1539 | 1539 | ||
| 1540 | pm_runtime_get(&priv->pdev->dev); | 1540 | ret = pm_runtime_get_sync(&priv->pdev->dev); |
| 1541 | if (ret < 0) { | ||
| 1542 | pm_runtime_put_noidle(&priv->pdev->dev); | ||
| 1543 | dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", | ||
| 1544 | __func__, ret); | ||
| 1545 | return ret; | ||
| 1546 | } | ||
| 1541 | 1547 | ||
| 1542 | netif_carrier_off(ndev); | 1548 | netif_carrier_off(ndev); |
| 1543 | for (cnt = 0; cnt < ETH_ALEN; cnt++) | 1549 | for (cnt = 0; cnt < ETH_ALEN; cnt++) |
| @@ -1596,8 +1602,20 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1596 | cpdma_ctlr_start(priv->dma); | 1602 | cpdma_ctlr_start(priv->dma); |
| 1597 | 1603 | ||
| 1598 | priv->phydev = NULL; | 1604 | priv->phydev = NULL; |
| 1605 | |||
| 1606 | if (priv->phy_node) { | ||
| 1607 | priv->phydev = of_phy_connect(ndev, priv->phy_node, | ||
| 1608 | &emac_adjust_link, 0, 0); | ||
| 1609 | if (!priv->phydev) { | ||
| 1610 | dev_err(emac_dev, "could not connect to phy %s\n", | ||
| 1611 | priv->phy_node->full_name); | ||
| 1612 | ret = -ENODEV; | ||
| 1613 | goto err; | ||
| 1614 | } | ||
| 1615 | } | ||
| 1616 | |||
| 1599 | /* use the first phy on the bus if pdata did not give us a phy id */ | 1617 | /* use the first phy on the bus if pdata did not give us a phy id */ |
| 1600 | if (!priv->phy_id) { | 1618 | if (!priv->phydev && !priv->phy_id) { |
| 1601 | struct device *phy; | 1619 | struct device *phy; |
| 1602 | 1620 | ||
| 1603 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, | 1621 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, |
| @@ -1606,7 +1624,7 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1606 | priv->phy_id = dev_name(phy); | 1624 | priv->phy_id = dev_name(phy); |
| 1607 | } | 1625 | } |
| 1608 | 1626 | ||
| 1609 | if (priv->phy_id && *priv->phy_id) { | 1627 | if (!priv->phydev && priv->phy_id && *priv->phy_id) { |
| 1610 | priv->phydev = phy_connect(ndev, priv->phy_id, | 1628 | priv->phydev = phy_connect(ndev, priv->phy_id, |
| 1611 | &emac_adjust_link, | 1629 | &emac_adjust_link, |
| 1612 | PHY_INTERFACE_MODE_MII); | 1630 | PHY_INTERFACE_MODE_MII); |
| @@ -1627,7 +1645,9 @@ static int emac_dev_open(struct net_device *ndev) | |||
| 1627 | "(mii_bus:phy_addr=%s, id=%x)\n", | 1645 | "(mii_bus:phy_addr=%s, id=%x)\n", |
| 1628 | priv->phydev->drv->name, dev_name(&priv->phydev->dev), | 1646 | priv->phydev->drv->name, dev_name(&priv->phydev->dev), |
| 1629 | priv->phydev->phy_id); | 1647 | priv->phydev->phy_id); |
| 1630 | } else { | 1648 | } |
| 1649 | |||
| 1650 | if (!priv->phydev) { | ||
| 1631 | /* No PHY , fix the link, speed and duplex settings */ | 1651 | /* No PHY , fix the link, speed and duplex settings */ |
| 1632 | dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); | 1652 | dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); |
| 1633 | priv->link = 1; | 1653 | priv->link = 1; |
| @@ -1724,6 +1744,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) | |||
| 1724 | struct emac_priv *priv = netdev_priv(ndev); | 1744 | struct emac_priv *priv = netdev_priv(ndev); |
| 1725 | u32 mac_control; | 1745 | u32 mac_control; |
| 1726 | u32 stats_clear_mask; | 1746 | u32 stats_clear_mask; |
| 1747 | int err; | ||
| 1748 | |||
| 1749 | err = pm_runtime_get_sync(&priv->pdev->dev); | ||
| 1750 | if (err < 0) { | ||
| 1751 | pm_runtime_put_noidle(&priv->pdev->dev); | ||
| 1752 | dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", | ||
| 1753 | __func__, err); | ||
| 1754 | return &ndev->stats; | ||
| 1755 | } | ||
| 1727 | 1756 | ||
| 1728 | /* update emac hardware stats and reset the registers*/ | 1757 | /* update emac hardware stats and reset the registers*/ |
| 1729 | 1758 | ||
| @@ -1766,6 +1795,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) | |||
| 1766 | ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN); | 1795 | ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN); |
| 1767 | emac_write(EMAC_TXUNDERRUN, stats_clear_mask); | 1796 | emac_write(EMAC_TXUNDERRUN, stats_clear_mask); |
| 1768 | 1797 | ||
| 1798 | pm_runtime_put(&priv->pdev->dev); | ||
| 1799 | |||
| 1769 | return &ndev->stats; | 1800 | return &ndev->stats; |
| 1770 | } | 1801 | } |
| 1771 | 1802 | ||
| @@ -1859,7 +1890,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) | |||
| 1859 | static int davinci_emac_probe(struct platform_device *pdev) | 1890 | static int davinci_emac_probe(struct platform_device *pdev) |
| 1860 | { | 1891 | { |
| 1861 | int rc = 0; | 1892 | int rc = 0; |
| 1862 | struct resource *res; | 1893 | struct resource *res, *res_ctrl; |
| 1863 | struct net_device *ndev; | 1894 | struct net_device *ndev; |
| 1864 | struct emac_priv *priv; | 1895 | struct emac_priv *priv; |
| 1865 | unsigned long hw_ram_addr; | 1896 | unsigned long hw_ram_addr; |
| @@ -1876,6 +1907,7 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
| 1876 | return -EBUSY; | 1907 | return -EBUSY; |
| 1877 | } | 1908 | } |
| 1878 | emac_bus_frequency = clk_get_rate(emac_clk); | 1909 | emac_bus_frequency = clk_get_rate(emac_clk); |
| 1910 | devm_clk_put(&pdev->dev, emac_clk); | ||
| 1879 | 1911 | ||
| 1880 | /* TODO: Probe PHY here if possible */ | 1912 | /* TODO: Probe PHY here if possible */ |
| 1881 | 1913 | ||
| @@ -1917,11 +1949,20 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
| 1917 | rc = PTR_ERR(priv->remap_addr); | 1949 | rc = PTR_ERR(priv->remap_addr); |
| 1918 | goto no_pdata; | 1950 | goto no_pdata; |
| 1919 | } | 1951 | } |
| 1952 | |||
| 1953 | res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 1954 | if (res_ctrl) { | ||
| 1955 | priv->ctrl_base = | ||
| 1956 | devm_ioremap_resource(&pdev->dev, res_ctrl); | ||
| 1957 | if (IS_ERR(priv->ctrl_base)) | ||
| 1958 | goto no_pdata; | ||
| 1959 | } else { | ||
| 1960 | priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; | ||
| 1961 | } | ||
| 1962 | |||
| 1920 | priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; | 1963 | priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; |
| 1921 | ndev->base_addr = (unsigned long)priv->remap_addr; | 1964 | ndev->base_addr = (unsigned long)priv->remap_addr; |
| 1922 | 1965 | ||
| 1923 | priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; | ||
| 1924 | |||
| 1925 | hw_ram_addr = pdata->hw_ram_addr; | 1966 | hw_ram_addr = pdata->hw_ram_addr; |
| 1926 | if (!hw_ram_addr) | 1967 | if (!hw_ram_addr) |
| 1927 | hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; | 1968 | hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; |
| @@ -1980,12 +2021,22 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
| 1980 | ndev->ethtool_ops = ðtool_ops; | 2021 | ndev->ethtool_ops = ðtool_ops; |
| 1981 | netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); | 2022 | netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); |
| 1982 | 2023 | ||
| 2024 | pm_runtime_enable(&pdev->dev); | ||
| 2025 | rc = pm_runtime_get_sync(&pdev->dev); | ||
| 2026 | if (rc < 0) { | ||
| 2027 | pm_runtime_put_noidle(&pdev->dev); | ||
| 2028 | dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", | ||
| 2029 | __func__, rc); | ||
| 2030 | goto no_cpdma_chan; | ||
| 2031 | } | ||
| 2032 | |||
| 1983 | /* register the network device */ | 2033 | /* register the network device */ |
| 1984 | SET_NETDEV_DEV(ndev, &pdev->dev); | 2034 | SET_NETDEV_DEV(ndev, &pdev->dev); |
| 1985 | rc = register_netdev(ndev); | 2035 | rc = register_netdev(ndev); |
| 1986 | if (rc) { | 2036 | if (rc) { |
| 1987 | dev_err(&pdev->dev, "error in register_netdev\n"); | 2037 | dev_err(&pdev->dev, "error in register_netdev\n"); |
| 1988 | rc = -ENODEV; | 2038 | rc = -ENODEV; |
| 2039 | pm_runtime_put(&pdev->dev); | ||
| 1989 | goto no_cpdma_chan; | 2040 | goto no_cpdma_chan; |
| 1990 | } | 2041 | } |
| 1991 | 2042 | ||
| @@ -1995,9 +2046,7 @@ static int davinci_emac_probe(struct platform_device *pdev) | |||
| 1995 | "(regs: %p, irq: %d)\n", | 2046 | "(regs: %p, irq: %d)\n", |
| 1996 | (void *)priv->emac_base_phys, ndev->irq); | 2047 | (void *)priv->emac_base_phys, ndev->irq); |
| 1997 | } | 2048 | } |
| 1998 | 2049 | pm_runtime_put(&pdev->dev); | |
| 1999 | pm_runtime_enable(&pdev->dev); | ||
| 2000 | pm_runtime_resume(&pdev->dev); | ||
| 2001 | 2050 | ||
| 2002 | return 0; | 2051 | return 0; |
| 2003 | 2052 | ||
| @@ -2071,9 +2120,14 @@ static const struct emac_platform_data am3517_emac_data = { | |||
| 2071 | .hw_ram_addr = 0x01e20000, | 2120 | .hw_ram_addr = 0x01e20000, |
| 2072 | }; | 2121 | }; |
| 2073 | 2122 | ||
| 2123 | static const struct emac_platform_data dm816_emac_data = { | ||
| 2124 | .version = EMAC_VERSION_2, | ||
| 2125 | }; | ||
| 2126 | |||
| 2074 | static const struct of_device_id davinci_emac_of_match[] = { | 2127 | static const struct of_device_id davinci_emac_of_match[] = { |
| 2075 | {.compatible = "ti,davinci-dm6467-emac", }, | 2128 | {.compatible = "ti,davinci-dm6467-emac", }, |
| 2076 | {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, | 2129 | {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, |
| 2130 | {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, }, | ||
| 2077 | {}, | 2131 | {}, |
| 2078 | }; | 2132 | }; |
| 2079 | MODULE_DEVICE_TABLE(of, davinci_emac_of_match); | 2133 | MODULE_DEVICE_TABLE(of, davinci_emac_of_match); |
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index a14d87783245..2e195289ddf4 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c | |||
| @@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) | |||
| 377 | }; | 377 | }; |
| 378 | 378 | ||
| 379 | dst = ip6_route_output(dev_net(dev), NULL, &fl6); | 379 | dst = ip6_route_output(dev_net(dev), NULL, &fl6); |
| 380 | if (IS_ERR(dst)) | 380 | if (dst->error) { |
| 381 | ret = dst->error; | ||
| 382 | dst_release(dst); | ||
| 381 | goto err; | 383 | goto err; |
| 382 | 384 | } | |
| 383 | skb_dst_drop(skb); | 385 | skb_dst_drop(skb); |
| 384 | skb_dst_set(skb, dst); | 386 | skb_dst_set(skb, dst); |
| 385 | err = ip6_local_out(skb); | 387 | err = ip6_local_out(skb); |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 57ec23e8ccfa..bf405f134d3a 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) | |||
| 833 | index &= ~3; | 833 | index &= ~3; |
| 834 | } | 834 | } |
| 835 | 835 | ||
| 836 | generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); | ||
| 837 | |||
| 838 | data |= __le32_to_cpu(tmp) & ~mask; | ||
| 839 | tmp = __cpu_to_le32(data); | 836 | tmp = __cpu_to_le32(data); |
| 840 | 837 | ||
| 841 | generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); | 838 | generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); |
| @@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) | |||
| 874 | index &= ~3; | 871 | index &= ~3; |
| 875 | } | 872 | } |
| 876 | 873 | ||
| 877 | generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); | ||
| 878 | |||
| 879 | data |= __le32_to_cpu(tmp) & ~mask; | ||
| 880 | tmp = __cpu_to_le32(data); | 874 | tmp = __cpu_to_le32(data); |
| 881 | 875 | ||
| 882 | generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); | 876 | generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); |
| @@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data) | |||
| 926 | ocp_reg_write(tp, OCP_SRAM_DATA, data); | 920 | ocp_reg_write(tp, OCP_SRAM_DATA, data); |
| 927 | } | 921 | } |
| 928 | 922 | ||
| 929 | static u16 sram_read(struct r8152 *tp, u16 addr) | ||
| 930 | { | ||
| 931 | ocp_reg_write(tp, OCP_SRAM_ADDR, addr); | ||
| 932 | return ocp_reg_read(tp, OCP_SRAM_DATA); | ||
| 933 | } | ||
| 934 | |||
| 935 | static int read_mii_word(struct net_device *netdev, int phy_id, int reg) | 923 | static int read_mii_word(struct net_device *netdev, int phy_id, int reg) |
| 936 | { | 924 | { |
| 937 | struct r8152 *tp = netdev_priv(netdev); | 925 | struct r8152 *tp = netdev_priv(netdev); |
| @@ -2518,24 +2506,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) | |||
| 2518 | data = ocp_reg_read(tp, OCP_POWER_CFG); | 2506 | data = ocp_reg_read(tp, OCP_POWER_CFG); |
| 2519 | data |= EN_10M_PLLOFF; | 2507 | data |= EN_10M_PLLOFF; |
| 2520 | ocp_reg_write(tp, OCP_POWER_CFG, data); | 2508 | ocp_reg_write(tp, OCP_POWER_CFG, data); |
| 2521 | data = sram_read(tp, SRAM_IMPEDANCE); | 2509 | sram_write(tp, SRAM_IMPEDANCE, 0x0b13); |
| 2522 | data &= ~RX_DRIVING_MASK; | ||
| 2523 | sram_write(tp, SRAM_IMPEDANCE, data); | ||
| 2524 | 2510 | ||
| 2525 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); | 2511 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); |
| 2526 | ocp_data |= PFM_PWM_SWITCH; | 2512 | ocp_data |= PFM_PWM_SWITCH; |
| 2527 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); | 2513 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); |
| 2528 | 2514 | ||
| 2529 | data = sram_read(tp, SRAM_LPF_CFG); | 2515 | /* Enable LPF corner auto tune */ |
| 2530 | data |= LPF_AUTO_TUNE; | 2516 | sram_write(tp, SRAM_LPF_CFG, 0xf70f); |
| 2531 | sram_write(tp, SRAM_LPF_CFG, data); | ||
| 2532 | 2517 | ||
| 2533 | data = sram_read(tp, SRAM_10M_AMP1); | 2518 | /* Adjust 10M Amplitude */ |
| 2534 | data |= GDAC_IB_UPALL; | 2519 | sram_write(tp, SRAM_10M_AMP1, 0x00af); |
| 2535 | sram_write(tp, SRAM_10M_AMP1, data); | 2520 | sram_write(tp, SRAM_10M_AMP2, 0x0208); |
| 2536 | data = sram_read(tp, SRAM_10M_AMP2); | ||
| 2537 | data |= AMP_DN; | ||
| 2538 | sram_write(tp, SRAM_10M_AMP2, data); | ||
| 2539 | 2521 | ||
| 2540 | set_bit(PHY_RESET, &tp->flags); | 2522 | set_bit(PHY_RESET, &tp->flags); |
| 2541 | } | 2523 | } |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 9a72640237cb..62b0bf4fdf6b 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
| 285 | 285 | ||
| 286 | __ath_cancel_work(sc); | 286 | __ath_cancel_work(sc); |
| 287 | 287 | ||
| 288 | disable_irq(sc->irq); | ||
| 288 | tasklet_disable(&sc->intr_tq); | 289 | tasklet_disable(&sc->intr_tq); |
| 289 | tasklet_disable(&sc->bcon_tasklet); | 290 | tasklet_disable(&sc->bcon_tasklet); |
| 290 | spin_lock_bh(&sc->sc_pcu_lock); | 291 | spin_lock_bh(&sc->sc_pcu_lock); |
| @@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
| 331 | r = -EIO; | 332 | r = -EIO; |
| 332 | 333 | ||
| 333 | out: | 334 | out: |
| 335 | enable_irq(sc->irq); | ||
| 334 | spin_unlock_bh(&sc->sc_pcu_lock); | 336 | spin_unlock_bh(&sc->sc_pcu_lock); |
| 335 | tasklet_enable(&sc->bcon_tasklet); | 337 | tasklet_enable(&sc->bcon_tasklet); |
| 336 | tasklet_enable(&sc->intr_tq); | 338 | tasklet_enable(&sc->intr_tq); |
| @@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
| 512 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) | 514 | if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags)) |
| 513 | return IRQ_NONE; | 515 | return IRQ_NONE; |
| 514 | 516 | ||
| 515 | if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) | ||
| 516 | return IRQ_NONE; | ||
| 517 | |||
| 518 | /* shared irq, not for us */ | 517 | /* shared irq, not for us */ |
| 519 | if (!ath9k_hw_intrpend(ah)) | 518 | if (!ath9k_hw_intrpend(ah)) |
| 520 | return IRQ_NONE; | 519 | return IRQ_NONE; |
| @@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev) | |||
| 529 | ath9k_debug_sync_cause(sc, sync_cause); | 528 | ath9k_debug_sync_cause(sc, sync_cause); |
| 530 | status &= ah->imask; /* discard unasked-for bits */ | 529 | status &= ah->imask; /* discard unasked-for bits */ |
| 531 | 530 | ||
| 532 | if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags)) | 531 | if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) |
| 533 | return IRQ_HANDLED; | 532 | return IRQ_HANDLED; |
| 534 | 533 | ||
| 535 | /* | 534 | /* |
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 1bbe4fc47b97..660ddb1b7d8a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h | |||
| @@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag { | |||
| 246 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, | 246 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, |
| 247 | * regardless of the band or the number of the probes. FW will calculate | 247 | * regardless of the band or the number of the probes. FW will calculate |
| 248 | * the actual dwell time. | 248 | * the actual dwell time. |
| 249 | * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. | ||
| 249 | */ | 250 | */ |
| 250 | enum iwl_ucode_tlv_api { | 251 | enum iwl_ucode_tlv_api { |
| 251 | IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), | 252 | IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), |
| @@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api { | |||
| 257 | IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), | 258 | IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), |
| 258 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), | 259 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), |
| 259 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), | 260 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), |
| 261 | IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), | ||
| 260 | }; | 262 | }; |
| 261 | 263 | ||
| 262 | /** | 264 | /** |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 201846de94e7..cfc0e65b34a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | |||
| @@ -653,8 +653,11 @@ enum iwl_scan_channel_flags { | |||
| 653 | }; | 653 | }; |
| 654 | 654 | ||
| 655 | /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S | 655 | /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S |
| 656 | * @flags: enum iwl_scan_channel_flgs | 656 | * @flags: enum iwl_scan_channel_flags |
| 657 | * @non_ebs_ratio: how many regular scan iteration before EBS | 657 | * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is |
| 658 | * involved. | ||
| 659 | * 1 - EBS is disabled. | ||
| 660 | * 2 - every second scan will be full scan(and so on). | ||
| 658 | */ | 661 | */ |
| 659 | struct iwl_scan_channel_opt { | 662 | struct iwl_scan_channel_opt { |
| 660 | __le16 flags; | 663 | __le16 flags; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index e880f9d4717b..20915587c820 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
| @@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, | |||
| 3343 | msk |= mvmsta->tfd_queue_msk; | 3343 | msk |= mvmsta->tfd_queue_msk; |
| 3344 | } | 3344 | } |
| 3345 | 3345 | ||
| 3346 | if (drop) { | 3346 | msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); |
| 3347 | if (iwl_mvm_flush_tx_path(mvm, msk, true)) | ||
| 3348 | IWL_ERR(mvm, "flush request fail\n"); | ||
| 3349 | mutex_unlock(&mvm->mutex); | ||
| 3350 | } else { | ||
| 3351 | mutex_unlock(&mvm->mutex); | ||
| 3352 | 3347 | ||
| 3353 | /* this can take a while, and we may need/want other operations | 3348 | if (iwl_mvm_flush_tx_path(mvm, msk, true)) |
| 3354 | * to succeed while doing this, so do it without the mutex held | 3349 | IWL_ERR(mvm, "flush request fail\n"); |
| 3355 | */ | 3350 | mutex_unlock(&mvm->mutex); |
| 3356 | iwl_trans_wait_tx_queue_empty(mvm->trans, msk); | 3351 | |
| 3357 | } | 3352 | /* this can take a while, and we may need/want other operations |
| 3353 | * to succeed while doing this, so do it without the mutex held | ||
| 3354 | */ | ||
| 3355 | iwl_trans_wait_tx_queue_empty(mvm->trans, msk); | ||
| 3358 | } | 3356 | } |
| 3359 | 3357 | ||
| 3360 | const struct ieee80211_ops iwl_mvm_hw_ops = { | 3358 | const struct ieee80211_ops iwl_mvm_hw_ops = { |
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index ec9a8e7bae1d..844bf7c4c8de 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
| @@ -72,6 +72,8 @@ | |||
| 72 | 72 | ||
| 73 | #define IWL_PLCP_QUIET_THRESH 1 | 73 | #define IWL_PLCP_QUIET_THRESH 1 |
| 74 | #define IWL_ACTIVE_QUIET_TIME 10 | 74 | #define IWL_ACTIVE_QUIET_TIME 10 |
| 75 | #define IWL_DENSE_EBS_SCAN_RATIO 5 | ||
| 76 | #define IWL_SPARSE_EBS_SCAN_RATIO 1 | ||
| 75 | 77 | ||
| 76 | struct iwl_mvm_scan_params { | 78 | struct iwl_mvm_scan_params { |
| 77 | u32 max_out_time; | 79 | u32 max_out_time; |
| @@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
| 1105 | return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, | 1107 | return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, |
| 1106 | notify); | 1108 | notify); |
| 1107 | 1109 | ||
| 1110 | if (mvm->scan_status == IWL_MVM_SCAN_NONE) | ||
| 1111 | return 0; | ||
| 1112 | |||
| 1113 | if (iwl_mvm_is_radio_killed(mvm)) | ||
| 1114 | goto out; | ||
| 1115 | |||
| 1108 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && | 1116 | if (mvm->scan_status != IWL_MVM_SCAN_SCHED && |
| 1109 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || | 1117 | (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || |
| 1110 | mvm->scan_status != IWL_MVM_SCAN_OS)) { | 1118 | mvm->scan_status != IWL_MVM_SCAN_OS)) { |
| @@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) | |||
| 1141 | if (mvm->scan_status == IWL_MVM_SCAN_OS) | 1149 | if (mvm->scan_status == IWL_MVM_SCAN_OS) |
| 1142 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); | 1150 | iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); |
| 1143 | 1151 | ||
| 1152 | out: | ||
| 1144 | mvm->scan_status = IWL_MVM_SCAN_NONE; | 1153 | mvm->scan_status = IWL_MVM_SCAN_NONE; |
| 1145 | 1154 | ||
| 1146 | if (notify) { | 1155 | if (notify) { |
| @@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, | |||
| 1297 | cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); | 1306 | cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); |
| 1298 | cmd->iter_num = cpu_to_le32(1); | 1307 | cmd->iter_num = cpu_to_le32(1); |
| 1299 | 1308 | ||
| 1300 | if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && | ||
| 1301 | mvm->last_ebs_successful) { | ||
| 1302 | cmd->channel_opt[0].flags = | ||
| 1303 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
| 1304 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
| 1305 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
| 1306 | cmd->channel_opt[1].flags = | ||
| 1307 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
| 1308 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
| 1309 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
| 1310 | } | ||
| 1311 | |||
| 1312 | if (iwl_mvm_rrm_scan_needed(mvm)) | 1309 | if (iwl_mvm_rrm_scan_needed(mvm)) |
| 1313 | cmd->scan_flags |= | 1310 | cmd->scan_flags |= |
| 1314 | cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); | 1311 | cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED); |
| @@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, | |||
| 1383 | cmd->schedule[1].iterations = 0; | 1380 | cmd->schedule[1].iterations = 0; |
| 1384 | cmd->schedule[1].full_scan_mul = 0; | 1381 | cmd->schedule[1].full_scan_mul = 0; |
| 1385 | 1382 | ||
| 1383 | if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS && | ||
| 1384 | mvm->last_ebs_successful) { | ||
| 1385 | cmd->channel_opt[0].flags = | ||
| 1386 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
| 1387 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
| 1388 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
| 1389 | cmd->channel_opt[0].non_ebs_ratio = | ||
| 1390 | cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); | ||
| 1391 | cmd->channel_opt[1].flags = | ||
| 1392 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
| 1393 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
| 1394 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
| 1395 | cmd->channel_opt[1].non_ebs_ratio = | ||
| 1396 | cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); | ||
| 1397 | } | ||
| 1398 | |||
| 1386 | for (i = 1; i <= req->req.n_ssids; i++) | 1399 | for (i = 1; i <= req->req.n_ssids; i++) |
| 1387 | ssid_bitmap |= BIT(i); | 1400 | ssid_bitmap |= BIT(i); |
| 1388 | 1401 | ||
| @@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, | |||
| 1483 | cmd->schedule[1].iterations = 0xff; | 1496 | cmd->schedule[1].iterations = 0xff; |
| 1484 | cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; | 1497 | cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; |
| 1485 | 1498 | ||
| 1499 | if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT && | ||
| 1500 | mvm->last_ebs_successful) { | ||
| 1501 | cmd->channel_opt[0].flags = | ||
| 1502 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
| 1503 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
| 1504 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
| 1505 | cmd->channel_opt[0].non_ebs_ratio = | ||
| 1506 | cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); | ||
| 1507 | cmd->channel_opt[1].flags = | ||
| 1508 | cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | | ||
| 1509 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | ||
| 1510 | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); | ||
| 1511 | cmd->channel_opt[1].non_ebs_ratio = | ||
| 1512 | cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); | ||
| 1513 | } | ||
| 1514 | |||
| 1486 | iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, | 1515 | iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, |
| 1487 | ssid_bitmap, cmd); | 1516 | ssid_bitmap, cmd); |
| 1488 | 1517 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4333306ccdee..c59d07567d90 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
| @@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
| 90 | 90 | ||
| 91 | if (ieee80211_is_probe_resp(fc)) | 91 | if (ieee80211_is_probe_resp(fc)) |
| 92 | tx_flags |= TX_CMD_FLG_TSF; | 92 | tx_flags |= TX_CMD_FLG_TSF; |
| 93 | else if (ieee80211_is_back_req(fc)) | ||
| 94 | tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; | ||
| 95 | 93 | ||
| 96 | if (ieee80211_has_morefrags(fc)) | 94 | if (ieee80211_has_morefrags(fc)) |
| 97 | tx_flags |= TX_CMD_FLG_MORE_FRAG; | 95 | tx_flags |= TX_CMD_FLG_MORE_FRAG; |
| @@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
| 100 | u8 *qc = ieee80211_get_qos_ctl(hdr); | 98 | u8 *qc = ieee80211_get_qos_ctl(hdr); |
| 101 | tx_cmd->tid_tspec = qc[0] & 0xf; | 99 | tx_cmd->tid_tspec = qc[0] & 0xf; |
| 102 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; | 100 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL; |
| 101 | } else if (ieee80211_is_back_req(fc)) { | ||
| 102 | struct ieee80211_bar *bar = (void *)skb->data; | ||
| 103 | u16 control = le16_to_cpu(bar->control); | ||
| 104 | |||
| 105 | tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; | ||
| 106 | tx_cmd->tid_tspec = (control & | ||
| 107 | IEEE80211_BAR_CTRL_TID_INFO_MASK) >> | ||
| 108 | IEEE80211_BAR_CTRL_TID_INFO_SHIFT; | ||
| 109 | WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); | ||
| 103 | } else { | 110 | } else { |
| 104 | tx_cmd->tid_tspec = IWL_TID_NON_QOS; | 111 | tx_cmd->tid_tspec = IWL_TID_NON_QOS; |
| 105 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) | 112 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) |
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index ea63fbd228ed..352b4f28f82c 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c | |||
| @@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, | |||
| 114 | ret = of_overlay_apply_one(ov, tchild, child); | 114 | ret = of_overlay_apply_one(ov, tchild, child); |
| 115 | if (ret) | 115 | if (ret) |
| 116 | return ret; | 116 | return ret; |
| 117 | |||
| 118 | /* The properties are already copied, now do the child nodes */ | ||
| 119 | for_each_child_of_node(child, grandchild) { | ||
| 120 | ret = of_overlay_apply_single_device_node(ov, tchild, grandchild); | ||
| 121 | if (ret) { | ||
| 122 | pr_err("%s: Failed to apply single node @%s/%s\n", | ||
| 123 | __func__, tchild->full_name, | ||
| 124 | grandchild->name); | ||
| 125 | return ret; | ||
| 126 | } | ||
| 127 | } | ||
| 128 | } | 117 | } |
| 129 | 118 | ||
| 130 | return ret; | 119 | return ret; |
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 5b33c6a21807..b0d50d70a8a1 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
| @@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev) | |||
| 188 | size = dev->coherent_dma_mask; | 188 | size = dev->coherent_dma_mask; |
| 189 | } else { | 189 | } else { |
| 190 | offset = PFN_DOWN(paddr - dma_addr); | 190 | offset = PFN_DOWN(paddr - dma_addr); |
| 191 | dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); | 191 | dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); |
| 192 | } | 192 | } |
| 193 | dev->dma_pfn_offset = offset; | 193 | dev->dma_pfn_offset = offset; |
| 194 | 194 | ||
| @@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb, | |||
| 566 | if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) | 566 | if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) |
| 567 | return NOTIFY_OK; /* not for us */ | 567 | return NOTIFY_OK; /* not for us */ |
| 568 | 568 | ||
| 569 | /* already populated? (driver using of_populate manually) */ | ||
| 570 | if (of_node_check_flag(rd->dn, OF_POPULATED)) | ||
| 571 | return NOTIFY_OK; | ||
| 572 | |||
| 569 | /* pdev_parent may be NULL when no bus platform device */ | 573 | /* pdev_parent may be NULL when no bus platform device */ |
| 570 | pdev_parent = of_find_device_by_node(rd->dn->parent); | 574 | pdev_parent = of_find_device_by_node(rd->dn->parent); |
| 571 | pdev = of_platform_device_create(rd->dn, NULL, | 575 | pdev = of_platform_device_create(rd->dn, NULL, |
| @@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb, | |||
| 581 | break; | 585 | break; |
| 582 | 586 | ||
| 583 | case OF_RECONFIG_CHANGE_REMOVE: | 587 | case OF_RECONFIG_CHANGE_REMOVE: |
| 588 | |||
| 589 | /* already depopulated? */ | ||
| 590 | if (!of_node_check_flag(rd->dn, OF_POPULATED)) | ||
| 591 | return NOTIFY_OK; | ||
| 592 | |||
| 584 | /* find our device by node */ | 593 | /* find our device by node */ |
| 585 | pdev = of_find_device_by_node(rd->dn); | 594 | pdev = of_find_device_by_node(rd->dn); |
| 586 | if (pdev == NULL) | 595 | if (pdev == NULL) |
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi index 75976da22b2e..a2b687d5f324 100644 --- a/drivers/of/unittest-data/tests-overlay.dtsi +++ b/drivers/of/unittest-data/tests-overlay.dtsi | |||
| @@ -176,5 +176,60 @@ | |||
| 176 | }; | 176 | }; |
| 177 | }; | 177 | }; |
| 178 | 178 | ||
| 179 | overlay10 { | ||
| 180 | fragment@0 { | ||
| 181 | target-path = "/testcase-data/overlay-node/test-bus"; | ||
| 182 | __overlay__ { | ||
| 183 | |||
| 184 | /* suppress DTC warning */ | ||
| 185 | #address-cells = <1>; | ||
| 186 | #size-cells = <0>; | ||
| 187 | |||
| 188 | test-selftest10 { | ||
| 189 | compatible = "selftest"; | ||
| 190 | status = "okay"; | ||
| 191 | reg = <10>; | ||
| 192 | |||
| 193 | #address-cells = <1>; | ||
| 194 | #size-cells = <0>; | ||
| 195 | |||
| 196 | test-selftest101 { | ||
| 197 | compatible = "selftest"; | ||
| 198 | status = "okay"; | ||
| 199 | reg = <1>; | ||
| 200 | }; | ||
| 201 | |||
| 202 | }; | ||
| 203 | }; | ||
| 204 | }; | ||
| 205 | }; | ||
| 206 | |||
| 207 | overlay11 { | ||
| 208 | fragment@0 { | ||
| 209 | target-path = "/testcase-data/overlay-node/test-bus"; | ||
| 210 | __overlay__ { | ||
| 211 | |||
| 212 | /* suppress DTC warning */ | ||
| 213 | #address-cells = <1>; | ||
| 214 | #size-cells = <0>; | ||
| 215 | |||
| 216 | test-selftest11 { | ||
| 217 | compatible = "selftest"; | ||
| 218 | status = "okay"; | ||
| 219 | reg = <11>; | ||
| 220 | |||
| 221 | #address-cells = <1>; | ||
| 222 | #size-cells = <0>; | ||
| 223 | |||
| 224 | test-selftest111 { | ||
| 225 | compatible = "selftest"; | ||
| 226 | status = "okay"; | ||
| 227 | reg = <1>; | ||
| 228 | }; | ||
| 229 | |||
| 230 | }; | ||
| 231 | }; | ||
| 232 | }; | ||
| 233 | }; | ||
| 179 | }; | 234 | }; |
| 180 | }; | 235 | }; |
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 844838e11ef1..41a4a138f53b 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c | |||
| @@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev) | |||
| 978 | } | 978 | } |
| 979 | 979 | ||
| 980 | dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); | 980 | dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name); |
| 981 | |||
| 982 | of_platform_populate(np, NULL, NULL, &pdev->dev); | ||
| 983 | |||
| 981 | return 0; | 984 | return 0; |
| 982 | } | 985 | } |
| 983 | 986 | ||
| @@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void) | |||
| 1385 | selftest(1, "overlay test %d passed\n", 8); | 1388 | selftest(1, "overlay test %d passed\n", 8); |
| 1386 | } | 1389 | } |
| 1387 | 1390 | ||
| 1391 | /* test insertion of a bus with parent devices */ | ||
| 1392 | static void of_selftest_overlay_10(void) | ||
| 1393 | { | ||
| 1394 | int ret; | ||
| 1395 | char *child_path; | ||
| 1396 | |||
| 1397 | /* device should disable */ | ||
| 1398 | ret = of_selftest_apply_overlay_check(10, 10, 0, 1); | ||
| 1399 | if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 10)) | ||
| 1400 | return; | ||
| 1401 | |||
| 1402 | child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101", | ||
| 1403 | selftest_path(10)); | ||
| 1404 | if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10)) | ||
| 1405 | return; | ||
| 1406 | |||
| 1407 | ret = of_path_platform_device_exists(child_path); | ||
| 1408 | kfree(child_path); | ||
| 1409 | if (selftest(ret, "overlay test %d failed; no child device\n", 10)) | ||
| 1410 | return; | ||
| 1411 | } | ||
| 1412 | |||
| 1413 | /* test insertion of a bus with parent devices (and revert) */ | ||
| 1414 | static void of_selftest_overlay_11(void) | ||
| 1415 | { | ||
| 1416 | int ret; | ||
| 1417 | |||
| 1418 | /* device should disable */ | ||
| 1419 | ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1); | ||
| 1420 | if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11)) | ||
| 1421 | return; | ||
| 1422 | } | ||
| 1423 | |||
| 1388 | static void __init of_selftest_overlay(void) | 1424 | static void __init of_selftest_overlay(void) |
| 1389 | { | 1425 | { |
| 1390 | struct device_node *bus_np = NULL; | 1426 | struct device_node *bus_np = NULL; |
| @@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void) | |||
| 1433 | of_selftest_overlay_6(); | 1469 | of_selftest_overlay_6(); |
| 1434 | of_selftest_overlay_8(); | 1470 | of_selftest_overlay_8(); |
| 1435 | 1471 | ||
| 1472 | of_selftest_overlay_10(); | ||
| 1473 | of_selftest_overlay_11(); | ||
| 1474 | |||
| 1436 | out: | 1475 | out: |
| 1437 | of_node_put(bus_np); | 1476 | of_node_put(bus_np); |
| 1438 | } | 1477 | } |
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 37e71ff6408d..dceb9ddfd99a 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c | |||
| @@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus) | |||
| 694 | int i; | 694 | int i; |
| 695 | /* PCI-PCI Bridge */ | 695 | /* PCI-PCI Bridge */ |
| 696 | pci_read_bridge_bases(bus); | 696 | pci_read_bridge_bases(bus); |
| 697 | for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { | 697 | for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) |
| 698 | pci_claim_resource(bus->self, i); | 698 | pci_claim_bridge_resource(bus->self, i); |
| 699 | } | ||
| 700 | } else { | 699 | } else { |
| 701 | /* Host-PCI Bridge */ | 700 | /* Host-PCI Bridge */ |
| 702 | int err; | 701 | int err; |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 73aef51a28f0..8fb16188cd82 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
| @@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, | |||
| 228 | } | 228 | } |
| 229 | EXPORT_SYMBOL(pci_bus_alloc_resource); | 229 | EXPORT_SYMBOL(pci_bus_alloc_resource); |
| 230 | 230 | ||
| 231 | /* | ||
| 232 | * The @idx resource of @dev should be a PCI-PCI bridge window. If this | ||
| 233 | * resource fits inside a window of an upstream bridge, do nothing. If it | ||
| 234 | * overlaps an upstream window but extends outside it, clip the resource so | ||
| 235 | * it fits completely inside. | ||
| 236 | */ | ||
| 237 | bool pci_bus_clip_resource(struct pci_dev *dev, int idx) | ||
| 238 | { | ||
| 239 | struct pci_bus *bus = dev->bus; | ||
| 240 | struct resource *res = &dev->resource[idx]; | ||
| 241 | struct resource orig_res = *res; | ||
| 242 | struct resource *r; | ||
| 243 | int i; | ||
| 244 | |||
| 245 | pci_bus_for_each_resource(bus, r, i) { | ||
| 246 | resource_size_t start, end; | ||
| 247 | |||
| 248 | if (!r) | ||
| 249 | continue; | ||
| 250 | |||
| 251 | if (resource_type(res) != resource_type(r)) | ||
| 252 | continue; | ||
| 253 | |||
| 254 | start = max(r->start, res->start); | ||
| 255 | end = min(r->end, res->end); | ||
| 256 | |||
| 257 | if (start > end) | ||
| 258 | continue; /* no overlap */ | ||
| 259 | |||
| 260 | if (res->start == start && res->end == end) | ||
| 261 | return false; /* no change */ | ||
| 262 | |||
| 263 | res->start = start; | ||
| 264 | res->end = end; | ||
| 265 | dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", | ||
| 266 | &orig_res, res); | ||
| 267 | |||
| 268 | return true; | ||
| 269 | } | ||
| 270 | |||
| 271 | return false; | ||
| 272 | } | ||
| 273 | |||
| 231 | void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } | 274 | void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } |
| 232 | 275 | ||
| 233 | /** | 276 | /** |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index cab05f31223f..e9d4fd861ba1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe) | |||
| 3271 | { | 3271 | { |
| 3272 | struct pci_dev *pdev; | 3272 | struct pci_dev *pdev; |
| 3273 | 3273 | ||
| 3274 | if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self) | 3274 | if (pci_is_root_bus(dev->bus) || dev->subordinate || |
| 3275 | !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) | ||
| 3275 | return -ENOTTY; | 3276 | return -ENOTTY; |
| 3276 | 3277 | ||
| 3277 | list_for_each_entry(pdev, &dev->bus->devices, bus_list) | 3278 | list_for_each_entry(pdev, &dev->bus->devices, bus_list) |
| @@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) | |||
| 3305 | { | 3306 | { |
| 3306 | struct pci_dev *pdev; | 3307 | struct pci_dev *pdev; |
| 3307 | 3308 | ||
| 3308 | if (dev->subordinate || !dev->slot) | 3309 | if (dev->subordinate || !dev->slot || |
| 3310 | dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) | ||
| 3309 | return -ENOTTY; | 3311 | return -ENOTTY; |
| 3310 | 3312 | ||
| 3311 | list_for_each_entry(pdev, &dev->bus->devices, bus_list) | 3313 | list_for_each_entry(pdev, &dev->bus->devices, bus_list) |
| @@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev) | |||
| 3557 | } | 3559 | } |
| 3558 | EXPORT_SYMBOL_GPL(pci_try_reset_function); | 3560 | EXPORT_SYMBOL_GPL(pci_try_reset_function); |
| 3559 | 3561 | ||
| 3562 | /* Do any devices on or below this bus prevent a bus reset? */ | ||
| 3563 | static bool pci_bus_resetable(struct pci_bus *bus) | ||
| 3564 | { | ||
| 3565 | struct pci_dev *dev; | ||
| 3566 | |||
| 3567 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
| 3568 | if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || | ||
| 3569 | (dev->subordinate && !pci_bus_resetable(dev->subordinate))) | ||
| 3570 | return false; | ||
| 3571 | } | ||
| 3572 | |||
| 3573 | return true; | ||
| 3574 | } | ||
| 3575 | |||
| 3560 | /* Lock devices from the top of the tree down */ | 3576 | /* Lock devices from the top of the tree down */ |
| 3561 | static void pci_bus_lock(struct pci_bus *bus) | 3577 | static void pci_bus_lock(struct pci_bus *bus) |
| 3562 | { | 3578 | { |
| @@ -3607,6 +3623,22 @@ unlock: | |||
| 3607 | return 0; | 3623 | return 0; |
| 3608 | } | 3624 | } |
| 3609 | 3625 | ||
| 3626 | /* Do any devices on or below this slot prevent a bus reset? */ | ||
| 3627 | static bool pci_slot_resetable(struct pci_slot *slot) | ||
| 3628 | { | ||
| 3629 | struct pci_dev *dev; | ||
| 3630 | |||
| 3631 | list_for_each_entry(dev, &slot->bus->devices, bus_list) { | ||
| 3632 | if (!dev->slot || dev->slot != slot) | ||
| 3633 | continue; | ||
| 3634 | if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || | ||
| 3635 | (dev->subordinate && !pci_bus_resetable(dev->subordinate))) | ||
| 3636 | return false; | ||
| 3637 | } | ||
| 3638 | |||
| 3639 | return true; | ||
| 3640 | } | ||
| 3641 | |||
| 3610 | /* Lock devices from the top of the tree down */ | 3642 | /* Lock devices from the top of the tree down */ |
| 3611 | static void pci_slot_lock(struct pci_slot *slot) | 3643 | static void pci_slot_lock(struct pci_slot *slot) |
| 3612 | { | 3644 | { |
| @@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe) | |||
| 3728 | { | 3760 | { |
| 3729 | int rc; | 3761 | int rc; |
| 3730 | 3762 | ||
| 3731 | if (!slot) | 3763 | if (!slot || !pci_slot_resetable(slot)) |
| 3732 | return -ENOTTY; | 3764 | return -ENOTTY; |
| 3733 | 3765 | ||
| 3734 | if (!probe) | 3766 | if (!probe) |
| @@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot); | |||
| 3820 | 3852 | ||
| 3821 | static int pci_bus_reset(struct pci_bus *bus, int probe) | 3853 | static int pci_bus_reset(struct pci_bus *bus, int probe) |
| 3822 | { | 3854 | { |
| 3823 | if (!bus->self) | 3855 | if (!bus->self || !pci_bus_resetable(bus)) |
| 3824 | return -ENOTTY; | 3856 | return -ENOTTY; |
| 3825 | 3857 | ||
| 3826 | if (probe) | 3858 | if (probe) |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 8aff29a804ff..d54632a1db43 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, | |||
| 208 | void __pci_bus_assign_resources(const struct pci_bus *bus, | 208 | void __pci_bus_assign_resources(const struct pci_bus *bus, |
| 209 | struct list_head *realloc_head, | 209 | struct list_head *realloc_head, |
| 210 | struct list_head *fail_head); | 210 | struct list_head *fail_head); |
| 211 | bool pci_bus_clip_resource(struct pci_dev *dev, int idx); | ||
| 211 | 212 | ||
| 212 | /** | 213 | /** |
| 213 | * pci_ari_enabled - query ARI forwarding status | 214 | * pci_ari_enabled - query ARI forwarding status |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ed6f89b6efe5..e52356aa09b8 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -3028,6 +3028,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, | |||
| 3028 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, | 3028 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, |
| 3029 | quirk_broken_intx_masking); | 3029 | quirk_broken_intx_masking); |
| 3030 | 3030 | ||
| 3031 | static void quirk_no_bus_reset(struct pci_dev *dev) | ||
| 3032 | { | ||
| 3033 | dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; | ||
| 3034 | } | ||
| 3035 | |||
| 3036 | /* | ||
| 3037 | * Atheros AR93xx chips do not behave after a bus reset. The device will | ||
| 3038 | * throw a Link Down error on AER-capable systems and regardless of AER, | ||
| 3039 | * config space of the device is never accessible again and typically | ||
| 3040 | * causes the system to hang or reset when access is attempted. | ||
| 3041 | * http://www.spinics.net/lists/linux-pci/msg34797.html | ||
| 3042 | */ | ||
| 3043 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); | ||
| 3044 | |||
| 3031 | #ifdef CONFIG_ACPI | 3045 | #ifdef CONFIG_ACPI |
| 3032 | /* | 3046 | /* |
| 3033 | * Apple: Shutdown Cactus Ridge Thunderbolt controller. | 3047 | * Apple: Shutdown Cactus Ridge Thunderbolt controller. |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 0482235eee92..e3e17f3c0f0f 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
| @@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus); | |||
| 530 | config space writes, so it's quite possible that an I/O window of | 530 | config space writes, so it's quite possible that an I/O window of |
| 531 | the bridge will have some undesirable address (e.g. 0) after the | 531 | the bridge will have some undesirable address (e.g. 0) after the |
| 532 | first write. Ditto 64-bit prefetchable MMIO. */ | 532 | first write. Ditto 64-bit prefetchable MMIO. */ |
| 533 | static void pci_setup_bridge_io(struct pci_bus *bus) | 533 | static void pci_setup_bridge_io(struct pci_dev *bridge) |
| 534 | { | 534 | { |
| 535 | struct pci_dev *bridge = bus->self; | ||
| 536 | struct resource *res; | 535 | struct resource *res; |
| 537 | struct pci_bus_region region; | 536 | struct pci_bus_region region; |
| 538 | unsigned long io_mask; | 537 | unsigned long io_mask; |
| @@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus) | |||
| 545 | io_mask = PCI_IO_1K_RANGE_MASK; | 544 | io_mask = PCI_IO_1K_RANGE_MASK; |
| 546 | 545 | ||
| 547 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ | 546 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ |
| 548 | res = bus->resource[0]; | 547 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0]; |
| 549 | pcibios_resource_to_bus(bridge->bus, ®ion, res); | 548 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
| 550 | if (res->flags & IORESOURCE_IO) { | 549 | if (res->flags & IORESOURCE_IO) { |
| 551 | pci_read_config_word(bridge, PCI_IO_BASE, &l); | 550 | pci_read_config_word(bridge, PCI_IO_BASE, &l); |
| @@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus) | |||
| 568 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); | 567 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); |
| 569 | } | 568 | } |
| 570 | 569 | ||
| 571 | static void pci_setup_bridge_mmio(struct pci_bus *bus) | 570 | static void pci_setup_bridge_mmio(struct pci_dev *bridge) |
| 572 | { | 571 | { |
| 573 | struct pci_dev *bridge = bus->self; | ||
| 574 | struct resource *res; | 572 | struct resource *res; |
| 575 | struct pci_bus_region region; | 573 | struct pci_bus_region region; |
| 576 | u32 l; | 574 | u32 l; |
| 577 | 575 | ||
| 578 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ | 576 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ |
| 579 | res = bus->resource[1]; | 577 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1]; |
| 580 | pcibios_resource_to_bus(bridge->bus, ®ion, res); | 578 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
| 581 | if (res->flags & IORESOURCE_MEM) { | 579 | if (res->flags & IORESOURCE_MEM) { |
| 582 | l = (region.start >> 16) & 0xfff0; | 580 | l = (region.start >> 16) & 0xfff0; |
| @@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus) | |||
| 588 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); | 586 | pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); |
| 589 | } | 587 | } |
| 590 | 588 | ||
| 591 | static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) | 589 | static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) |
| 592 | { | 590 | { |
| 593 | struct pci_dev *bridge = bus->self; | ||
| 594 | struct resource *res; | 591 | struct resource *res; |
| 595 | struct pci_bus_region region; | 592 | struct pci_bus_region region; |
| 596 | u32 l, bu, lu; | 593 | u32 l, bu, lu; |
| @@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus) | |||
| 602 | 599 | ||
| 603 | /* Set up PREF base/limit. */ | 600 | /* Set up PREF base/limit. */ |
| 604 | bu = lu = 0; | 601 | bu = lu = 0; |
| 605 | res = bus->resource[2]; | 602 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2]; |
| 606 | pcibios_resource_to_bus(bridge->bus, ®ion, res); | 603 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
| 607 | if (res->flags & IORESOURCE_PREFETCH) { | 604 | if (res->flags & IORESOURCE_PREFETCH) { |
| 608 | l = (region.start >> 16) & 0xfff0; | 605 | l = (region.start >> 16) & 0xfff0; |
| @@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) | |||
| 630 | &bus->busn_res); | 627 | &bus->busn_res); |
| 631 | 628 | ||
| 632 | if (type & IORESOURCE_IO) | 629 | if (type & IORESOURCE_IO) |
| 633 | pci_setup_bridge_io(bus); | 630 | pci_setup_bridge_io(bridge); |
| 634 | 631 | ||
| 635 | if (type & IORESOURCE_MEM) | 632 | if (type & IORESOURCE_MEM) |
| 636 | pci_setup_bridge_mmio(bus); | 633 | pci_setup_bridge_mmio(bridge); |
| 637 | 634 | ||
| 638 | if (type & IORESOURCE_PREFETCH) | 635 | if (type & IORESOURCE_PREFETCH) |
| 639 | pci_setup_bridge_mmio_pref(bus); | 636 | pci_setup_bridge_mmio_pref(bridge); |
| 640 | 637 | ||
| 641 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | 638 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); |
| 642 | } | 639 | } |
| @@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus) | |||
| 649 | __pci_setup_bridge(bus, type); | 646 | __pci_setup_bridge(bus, type); |
| 650 | } | 647 | } |
| 651 | 648 | ||
| 649 | |||
| 650 | int pci_claim_bridge_resource(struct pci_dev *bridge, int i) | ||
| 651 | { | ||
| 652 | if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END) | ||
| 653 | return 0; | ||
| 654 | |||
| 655 | if (pci_claim_resource(bridge, i) == 0) | ||
| 656 | return 0; /* claimed the window */ | ||
| 657 | |||
| 658 | if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI) | ||
| 659 | return 0; | ||
| 660 | |||
| 661 | if (!pci_bus_clip_resource(bridge, i)) | ||
| 662 | return -EINVAL; /* clipping didn't change anything */ | ||
| 663 | |||
| 664 | switch (i - PCI_BRIDGE_RESOURCES) { | ||
| 665 | case 0: | ||
| 666 | pci_setup_bridge_io(bridge); | ||
| 667 | break; | ||
| 668 | case 1: | ||
| 669 | pci_setup_bridge_mmio(bridge); | ||
| 670 | break; | ||
| 671 | case 2: | ||
| 672 | pci_setup_bridge_mmio_pref(bridge); | ||
| 673 | break; | ||
| 674 | default: | ||
| 675 | return -EINVAL; | ||
| 676 | } | ||
| 677 | |||
| 678 | if (pci_claim_resource(bridge, i) == 0) | ||
| 679 | return 0; /* claimed a smaller window */ | ||
| 680 | |||
| 681 | return -EINVAL; | ||
| 682 | } | ||
| 683 | |||
| 652 | /* Check whether the bridge supports optional I/O and | 684 | /* Check whether the bridge supports optional I/O and |
| 653 | prefetchable memory ranges. If not, the respective | 685 | prefetchable memory ranges. If not, the respective |
| 654 | base/limit registers must be read-only and read as 0. */ | 686 | base/limit registers must be read-only and read as 0. */ |
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index e4f65510c87e..89dca77ca038 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
| @@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev) | |||
| 1801 | if (pctldev == NULL) | 1801 | if (pctldev == NULL) |
| 1802 | return; | 1802 | return; |
| 1803 | 1803 | ||
| 1804 | mutex_lock(&pinctrldev_list_mutex); | ||
| 1805 | mutex_lock(&pctldev->mutex); | 1804 | mutex_lock(&pctldev->mutex); |
| 1806 | |||
| 1807 | pinctrl_remove_device_debugfs(pctldev); | 1805 | pinctrl_remove_device_debugfs(pctldev); |
| 1806 | mutex_unlock(&pctldev->mutex); | ||
| 1808 | 1807 | ||
| 1809 | if (!IS_ERR(pctldev->p)) | 1808 | if (!IS_ERR(pctldev->p)) |
| 1810 | pinctrl_put(pctldev->p); | 1809 | pinctrl_put(pctldev->p); |
| 1811 | 1810 | ||
| 1811 | mutex_lock(&pinctrldev_list_mutex); | ||
| 1812 | mutex_lock(&pctldev->mutex); | ||
| 1812 | /* TODO: check that no pinmuxes are still active? */ | 1813 | /* TODO: check that no pinmuxes are still active? */ |
| 1813 | list_del(&pctldev->node); | 1814 | list_del(&pctldev->node); |
| 1814 | /* Destroy descriptor tree */ | 1815 | /* Destroy descriptor tree */ |
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index dfd021e8268f..f4cd0b9b2438 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c | |||
| @@ -177,7 +177,7 @@ struct at91_pinctrl { | |||
| 177 | struct device *dev; | 177 | struct device *dev; |
| 178 | struct pinctrl_dev *pctl; | 178 | struct pinctrl_dev *pctl; |
| 179 | 179 | ||
| 180 | int nbanks; | 180 | int nactive_banks; |
| 181 | 181 | ||
| 182 | uint32_t *mux_mask; | 182 | uint32_t *mux_mask; |
| 183 | int nmux; | 183 | int nmux; |
| @@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name, | |||
| 653 | int mux; | 653 | int mux; |
| 654 | 654 | ||
| 655 | /* check if it's a valid config */ | 655 | /* check if it's a valid config */ |
| 656 | if (pin->bank >= info->nbanks) { | 656 | if (pin->bank >= gpio_banks) { |
| 657 | dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", | 657 | dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n", |
| 658 | name, index, pin->bank, info->nbanks); | 658 | name, index, pin->bank, gpio_banks); |
| 659 | return -EINVAL; | 659 | return -EINVAL; |
| 660 | } | 660 | } |
| 661 | 661 | ||
| 662 | if (!gpio_chips[pin->bank]) { | ||
| 663 | dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n", | ||
| 664 | name, index, pin->bank); | ||
| 665 | return -ENXIO; | ||
| 666 | } | ||
| 667 | |||
| 662 | if (pin->pin >= MAX_NB_GPIO_PER_BANK) { | 668 | if (pin->pin >= MAX_NB_GPIO_PER_BANK) { |
| 663 | dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n", | 669 | dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n", |
| 664 | name, index, pin->pin, MAX_NB_GPIO_PER_BANK); | 670 | name, index, pin->pin, MAX_NB_GPIO_PER_BANK); |
| @@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info, | |||
| 981 | 987 | ||
| 982 | for_each_child_of_node(np, child) { | 988 | for_each_child_of_node(np, child) { |
| 983 | if (of_device_is_compatible(child, gpio_compat)) { | 989 | if (of_device_is_compatible(child, gpio_compat)) { |
| 984 | info->nbanks++; | 990 | if (of_device_is_available(child)) |
| 991 | info->nactive_banks++; | ||
| 985 | } else { | 992 | } else { |
| 986 | info->nfunctions++; | 993 | info->nfunctions++; |
| 987 | info->ngroups += of_get_child_count(child); | 994 | info->ngroups += of_get_child_count(child); |
| @@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info, | |||
| 1003 | } | 1010 | } |
| 1004 | 1011 | ||
| 1005 | size /= sizeof(*list); | 1012 | size /= sizeof(*list); |
| 1006 | if (!size || size % info->nbanks) { | 1013 | if (!size || size % gpio_banks) { |
| 1007 | dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks); | 1014 | dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks); |
| 1008 | return -EINVAL; | 1015 | return -EINVAL; |
| 1009 | } | 1016 | } |
| 1010 | info->nmux = size / info->nbanks; | 1017 | info->nmux = size / gpio_banks; |
| 1011 | 1018 | ||
| 1012 | info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); | 1019 | info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL); |
| 1013 | if (!info->mux_mask) { | 1020 | if (!info->mux_mask) { |
| @@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
| 1131 | of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; | 1138 | of_match_device(at91_pinctrl_of_match, &pdev->dev)->data; |
| 1132 | at91_pinctrl_child_count(info, np); | 1139 | at91_pinctrl_child_count(info, np); |
| 1133 | 1140 | ||
| 1134 | if (info->nbanks < 1) { | 1141 | if (gpio_banks < 1) { |
| 1135 | dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); | 1142 | dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n"); |
| 1136 | return -EINVAL; | 1143 | return -EINVAL; |
| 1137 | } | 1144 | } |
| @@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
| 1144 | 1151 | ||
| 1145 | dev_dbg(&pdev->dev, "mux-mask\n"); | 1152 | dev_dbg(&pdev->dev, "mux-mask\n"); |
| 1146 | tmp = info->mux_mask; | 1153 | tmp = info->mux_mask; |
| 1147 | for (i = 0; i < info->nbanks; i++) { | 1154 | for (i = 0; i < gpio_banks; i++) { |
| 1148 | for (j = 0; j < info->nmux; j++, tmp++) { | 1155 | for (j = 0; j < info->nmux; j++, tmp++) { |
| 1149 | dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); | 1156 | dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]); |
| 1150 | } | 1157 | } |
| @@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev, | |||
| 1162 | if (!info->groups) | 1169 | if (!info->groups) |
| 1163 | return -ENOMEM; | 1170 | return -ENOMEM; |
| 1164 | 1171 | ||
| 1165 | dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks); | 1172 | dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks); |
| 1166 | dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); | 1173 | dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions); |
| 1167 | dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); | 1174 | dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups); |
| 1168 | 1175 | ||
| @@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
| 1185 | { | 1192 | { |
| 1186 | struct at91_pinctrl *info; | 1193 | struct at91_pinctrl *info; |
| 1187 | struct pinctrl_pin_desc *pdesc; | 1194 | struct pinctrl_pin_desc *pdesc; |
| 1188 | int ret, i, j, k; | 1195 | int ret, i, j, k, ngpio_chips_enabled = 0; |
| 1189 | 1196 | ||
| 1190 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); | 1197 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); |
| 1191 | if (!info) | 1198 | if (!info) |
| @@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
| 1200 | * to obtain references to the struct gpio_chip * for them, and we | 1207 | * to obtain references to the struct gpio_chip * for them, and we |
| 1201 | * need this to proceed. | 1208 | * need this to proceed. |
| 1202 | */ | 1209 | */ |
| 1203 | for (i = 0; i < info->nbanks; i++) { | 1210 | for (i = 0; i < gpio_banks; i++) |
| 1204 | if (!gpio_chips[i]) { | 1211 | if (gpio_chips[i]) |
| 1205 | dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); | 1212 | ngpio_chips_enabled++; |
| 1206 | devm_kfree(&pdev->dev, info); | 1213 | |
| 1207 | return -EPROBE_DEFER; | 1214 | if (ngpio_chips_enabled < info->nactive_banks) { |
| 1208 | } | 1215 | dev_warn(&pdev->dev, |
| 1216 | "All GPIO chips are not registered yet (%d/%d)\n", | ||
| 1217 | ngpio_chips_enabled, info->nactive_banks); | ||
| 1218 | devm_kfree(&pdev->dev, info); | ||
| 1219 | return -EPROBE_DEFER; | ||
| 1209 | } | 1220 | } |
| 1210 | 1221 | ||
| 1211 | at91_pinctrl_desc.name = dev_name(&pdev->dev); | 1222 | at91_pinctrl_desc.name = dev_name(&pdev->dev); |
| 1212 | at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK; | 1223 | at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK; |
| 1213 | at91_pinctrl_desc.pins = pdesc = | 1224 | at91_pinctrl_desc.pins = pdesc = |
| 1214 | devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); | 1225 | devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL); |
| 1215 | 1226 | ||
| 1216 | if (!at91_pinctrl_desc.pins) | 1227 | if (!at91_pinctrl_desc.pins) |
| 1217 | return -ENOMEM; | 1228 | return -ENOMEM; |
| 1218 | 1229 | ||
| 1219 | for (i = 0 , k = 0; i < info->nbanks; i++) { | 1230 | for (i = 0, k = 0; i < gpio_banks; i++) { |
| 1220 | for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { | 1231 | for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) { |
| 1221 | pdesc->number = k; | 1232 | pdesc->number = k; |
| 1222 | pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); | 1233 | pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j); |
| @@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev) | |||
| 1234 | } | 1245 | } |
| 1235 | 1246 | ||
| 1236 | /* We will handle a range of GPIO pins */ | 1247 | /* We will handle a range of GPIO pins */ |
| 1237 | for (i = 0; i < info->nbanks; i++) | 1248 | for (i = 0; i < gpio_banks; i++) |
| 1238 | pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); | 1249 | if (gpio_chips[i]) |
| 1250 | pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range); | ||
| 1239 | 1251 | ||
| 1240 | dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); | 1252 | dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n"); |
| 1241 | 1253 | ||
| @@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
| 1613 | static int at91_gpio_of_irq_setup(struct platform_device *pdev, | 1625 | static int at91_gpio_of_irq_setup(struct platform_device *pdev, |
| 1614 | struct at91_gpio_chip *at91_gpio) | 1626 | struct at91_gpio_chip *at91_gpio) |
| 1615 | { | 1627 | { |
| 1628 | struct gpio_chip *gpiochip_prev = NULL; | ||
| 1616 | struct at91_gpio_chip *prev = NULL; | 1629 | struct at91_gpio_chip *prev = NULL; |
| 1617 | struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); | 1630 | struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq); |
| 1618 | int ret; | 1631 | int ret, i; |
| 1619 | 1632 | ||
| 1620 | at91_gpio->pioc_hwirq = irqd_to_hwirq(d); | 1633 | at91_gpio->pioc_hwirq = irqd_to_hwirq(d); |
| 1621 | 1634 | ||
| @@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev, | |||
| 1641 | return ret; | 1654 | return ret; |
| 1642 | } | 1655 | } |
| 1643 | 1656 | ||
| 1644 | /* Setup chained handler */ | ||
| 1645 | if (at91_gpio->pioc_idx) | ||
| 1646 | prev = gpio_chips[at91_gpio->pioc_idx - 1]; | ||
| 1647 | |||
| 1648 | /* The top level handler handles one bank of GPIOs, except | 1657 | /* The top level handler handles one bank of GPIOs, except |
| 1649 | * on some SoC it can handle up to three... | 1658 | * on some SoC it can handle up to three... |
| 1650 | * We only set up the handler for the first of the list. | 1659 | * We only set up the handler for the first of the list. |
| 1651 | */ | 1660 | */ |
| 1652 | if (prev && prev->next == at91_gpio) | 1661 | gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq); |
| 1662 | if (!gpiochip_prev) { | ||
| 1663 | /* Then register the chain on the parent IRQ */ | ||
| 1664 | gpiochip_set_chained_irqchip(&at91_gpio->chip, | ||
| 1665 | &gpio_irqchip, | ||
| 1666 | at91_gpio->pioc_virq, | ||
| 1667 | gpio_irq_handler); | ||
| 1653 | return 0; | 1668 | return 0; |
| 1669 | } | ||
| 1654 | 1670 | ||
| 1655 | /* Then register the chain on the parent IRQ */ | 1671 | prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip); |
| 1656 | gpiochip_set_chained_irqchip(&at91_gpio->chip, | ||
| 1657 | &gpio_irqchip, | ||
| 1658 | at91_gpio->pioc_virq, | ||
| 1659 | gpio_irq_handler); | ||
| 1660 | 1672 | ||
| 1661 | return 0; | 1673 | /* we can only have 2 banks before */ |
| 1674 | for (i = 0; i < 2; i++) { | ||
| 1675 | if (prev->next) { | ||
| 1676 | prev = prev->next; | ||
| 1677 | } else { | ||
| 1678 | prev->next = at91_gpio; | ||
| 1679 | return 0; | ||
| 1680 | } | ||
| 1681 | } | ||
| 1682 | |||
| 1683 | return -EINVAL; | ||
| 1662 | } | 1684 | } |
| 1663 | 1685 | ||
| 1664 | /* This structure is replicated for each GPIO block allocated at probe time */ | 1686 | /* This structure is replicated for each GPIO block allocated at probe time */ |
| @@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = { | |||
| 1675 | .ngpio = MAX_NB_GPIO_PER_BANK, | 1697 | .ngpio = MAX_NB_GPIO_PER_BANK, |
| 1676 | }; | 1698 | }; |
| 1677 | 1699 | ||
| 1678 | static void at91_gpio_probe_fixup(void) | ||
| 1679 | { | ||
| 1680 | unsigned i; | ||
| 1681 | struct at91_gpio_chip *at91_gpio, *last = NULL; | ||
| 1682 | |||
| 1683 | for (i = 0; i < gpio_banks; i++) { | ||
| 1684 | at91_gpio = gpio_chips[i]; | ||
| 1685 | |||
| 1686 | /* | ||
| 1687 | * GPIO controller are grouped on some SoC: | ||
| 1688 | * PIOC, PIOD and PIOE can share the same IRQ line | ||
| 1689 | */ | ||
| 1690 | if (last && last->pioc_virq == at91_gpio->pioc_virq) | ||
| 1691 | last->next = at91_gpio; | ||
| 1692 | last = at91_gpio; | ||
| 1693 | } | ||
| 1694 | } | ||
| 1695 | |||
| 1696 | static struct of_device_id at91_gpio_of_match[] = { | 1700 | static struct of_device_id at91_gpio_of_match[] = { |
| 1697 | { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, | 1701 | { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, |
| 1698 | { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, | 1702 | { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, |
| @@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev) | |||
| 1805 | gpio_chips[alias_idx] = at91_chip; | 1809 | gpio_chips[alias_idx] = at91_chip; |
| 1806 | gpio_banks = max(gpio_banks, alias_idx + 1); | 1810 | gpio_banks = max(gpio_banks, alias_idx + 1); |
| 1807 | 1811 | ||
| 1808 | at91_gpio_probe_fixup(); | ||
| 1809 | |||
| 1810 | ret = at91_gpio_of_irq_setup(pdev, at91_chip); | 1812 | ret = at91_gpio_of_irq_setup(pdev, at91_chip); |
| 1811 | if (ret) | 1813 | if (ret) |
| 1812 | goto irq_setup_err; | 1814 | goto irq_setup_err; |
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 3c22dbebc80f..43eacc924b7e 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c | |||
| @@ -1398,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
| 1398 | { | 1398 | { |
| 1399 | struct irq_chip *chip = irq_get_chip(irq); | 1399 | struct irq_chip *chip = irq_get_chip(irq); |
| 1400 | struct rockchip_pin_bank *bank = irq_get_handler_data(irq); | 1400 | struct rockchip_pin_bank *bank = irq_get_handler_data(irq); |
| 1401 | u32 polarity = 0, data = 0; | ||
| 1402 | u32 pend; | 1401 | u32 pend; |
| 1403 | bool edge_changed = false; | ||
| 1404 | unsigned long flags; | ||
| 1405 | 1402 | ||
| 1406 | dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name); | 1403 | dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name); |
| 1407 | 1404 | ||
| @@ -1409,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
| 1409 | 1406 | ||
| 1410 | pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS); | 1407 | pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS); |
| 1411 | 1408 | ||
| 1412 | if (bank->toggle_edge_mode) { | ||
| 1413 | polarity = readl_relaxed(bank->reg_base + | ||
| 1414 | GPIO_INT_POLARITY); | ||
| 1415 | data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT); | ||
| 1416 | } | ||
| 1417 | |||
| 1418 | while (pend) { | 1409 | while (pend) { |
| 1419 | unsigned int virq; | 1410 | unsigned int virq; |
| 1420 | 1411 | ||
| @@ -1434,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
| 1434 | * needs manual intervention. | 1425 | * needs manual intervention. |
| 1435 | */ | 1426 | */ |
| 1436 | if (bank->toggle_edge_mode & BIT(irq)) { | 1427 | if (bank->toggle_edge_mode & BIT(irq)) { |
| 1437 | if (data & BIT(irq)) | 1428 | u32 data, data_old, polarity; |
| 1438 | polarity &= ~BIT(irq); | 1429 | unsigned long flags; |
| 1439 | else | ||
| 1440 | polarity |= BIT(irq); | ||
| 1441 | 1430 | ||
| 1442 | edge_changed = true; | 1431 | data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT); |
| 1443 | } | 1432 | do { |
| 1433 | spin_lock_irqsave(&bank->slock, flags); | ||
| 1444 | 1434 | ||
| 1445 | generic_handle_irq(virq); | 1435 | polarity = readl_relaxed(bank->reg_base + |
| 1446 | } | 1436 | GPIO_INT_POLARITY); |
| 1437 | if (data & BIT(irq)) | ||
| 1438 | polarity &= ~BIT(irq); | ||
| 1439 | else | ||
| 1440 | polarity |= BIT(irq); | ||
| 1441 | writel(polarity, | ||
| 1442 | bank->reg_base + GPIO_INT_POLARITY); | ||
| 1447 | 1443 | ||
| 1448 | if (bank->toggle_edge_mode && edge_changed) { | 1444 | spin_unlock_irqrestore(&bank->slock, flags); |
| 1449 | /* Interrupt params should only be set with ints disabled */ | ||
| 1450 | spin_lock_irqsave(&bank->slock, flags); | ||
| 1451 | 1445 | ||
| 1452 | data = readl_relaxed(bank->reg_base + GPIO_INTEN); | 1446 | data_old = data; |
| 1453 | writel_relaxed(0, bank->reg_base + GPIO_INTEN); | 1447 | data = readl_relaxed(bank->reg_base + |
| 1454 | writel(polarity, bank->reg_base + GPIO_INT_POLARITY); | 1448 | GPIO_EXT_PORT); |
| 1455 | writel(data, bank->reg_base + GPIO_INTEN); | 1449 | } while ((data & BIT(irq)) != (data_old & BIT(irq))); |
| 1450 | } | ||
| 1456 | 1451 | ||
| 1457 | spin_unlock_irqrestore(&bank->slock, flags); | 1452 | generic_handle_irq(virq); |
| 1458 | } | 1453 | } |
| 1459 | 1454 | ||
| 1460 | chained_irq_exit(chip, desc); | 1455 | chained_irq_exit(chip, desc); |
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index c5cef59f5965..779950c62e53 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c | |||
| @@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev) | |||
| 798 | 798 | ||
| 799 | /* load the gpio chip */ | 799 | /* load the gpio chip */ |
| 800 | xway_chip.dev = &pdev->dev; | 800 | xway_chip.dev = &pdev->dev; |
| 801 | of_gpiochip_add(&xway_chip); | ||
| 802 | ret = gpiochip_add(&xway_chip); | 801 | ret = gpiochip_add(&xway_chip); |
| 803 | if (ret) { | 802 | if (ret) { |
| 804 | of_gpiochip_remove(&xway_chip); | ||
| 805 | dev_err(&pdev->dev, "Failed to register gpio chip\n"); | 803 | dev_err(&pdev->dev, "Failed to register gpio chip\n"); |
| 806 | return ret; | 804 | return ret; |
| 807 | } | 805 | } |
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index e730935fa457..ed7017df065d 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c | |||
| @@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, | |||
| 865 | 865 | ||
| 866 | static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) | 866 | static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) |
| 867 | { | 867 | { |
| 868 | int i = 0; | 868 | int i; |
| 869 | const struct msm_function *func = pctrl->soc->functions; | 869 | const struct msm_function *func = pctrl->soc->functions; |
| 870 | 870 | ||
| 871 | for (; i <= pctrl->soc->nfunctions; i++) | 871 | for (i = 0; i < pctrl->soc->nfunctions; i++) |
| 872 | if (!strcmp(func[i].name, "ps_hold")) { | 872 | if (!strcmp(func[i].name, "ps_hold")) { |
| 873 | pctrl->restart_nb.notifier_call = msm_ps_hold_restart; | 873 | pctrl->restart_nb.notifier_call = msm_ps_hold_restart; |
| 874 | pctrl->restart_nb.priority = 128; | 874 | pctrl->restart_nb.priority = 128; |
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 9411eae39a4e..3d21efe11d7b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
| @@ -2,11 +2,9 @@ | |||
| 2 | * Driver for Dell laptop extras | 2 | * Driver for Dell laptop extras |
| 3 | * | 3 | * |
| 4 | * Copyright (c) Red Hat <mjg@redhat.com> | 4 | * Copyright (c) Red Hat <mjg@redhat.com> |
| 5 | * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com> | ||
| 6 | * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com> | ||
| 7 | * | 5 | * |
| 8 | * Based on documentation in the libsmbios package: | 6 | * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell |
| 9 | * Copyright (C) 2005-2014 Dell Inc. | 7 | * Inc. |
| 10 | * | 8 | * |
| 11 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
| @@ -34,13 +32,6 @@ | |||
| 34 | #include "../../firmware/dcdbas.h" | 32 | #include "../../firmware/dcdbas.h" |
| 35 | 33 | ||
| 36 | #define BRIGHTNESS_TOKEN 0x7d | 34 | #define BRIGHTNESS_TOKEN 0x7d |
| 37 | #define KBD_LED_OFF_TOKEN 0x01E1 | ||
| 38 | #define KBD_LED_ON_TOKEN 0x01E2 | ||
| 39 | #define KBD_LED_AUTO_TOKEN 0x01E3 | ||
| 40 | #define KBD_LED_AUTO_25_TOKEN 0x02EA | ||
| 41 | #define KBD_LED_AUTO_50_TOKEN 0x02EB | ||
| 42 | #define KBD_LED_AUTO_75_TOKEN 0x02EC | ||
| 43 | #define KBD_LED_AUTO_100_TOKEN 0x02F6 | ||
| 44 | 35 | ||
| 45 | /* This structure will be modified by the firmware when we enter | 36 | /* This structure will be modified by the firmware when we enter |
| 46 | * system management mode, hence the volatiles */ | 37 | * system management mode, hence the volatiles */ |
| @@ -71,13 +62,6 @@ struct calling_interface_structure { | |||
| 71 | 62 | ||
| 72 | struct quirk_entry { | 63 | struct quirk_entry { |
| 73 | u8 touchpad_led; | 64 | u8 touchpad_led; |
| 74 | |||
| 75 | int needs_kbd_timeouts; | ||
| 76 | /* | ||
| 77 | * Ordered list of timeouts expressed in seconds. | ||
| 78 | * The list must end with -1 | ||
| 79 | */ | ||
| 80 | int kbd_timeouts[]; | ||
| 81 | }; | 65 | }; |
| 82 | 66 | ||
| 83 | static struct quirk_entry *quirks; | 67 | static struct quirk_entry *quirks; |
| @@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi) | |||
| 92 | return 1; | 76 | return 1; |
| 93 | } | 77 | } |
| 94 | 78 | ||
| 95 | /* | ||
| 96 | * These values come from Windows utility provided by Dell. If any other value | ||
| 97 | * is used then BIOS silently set timeout to 0 without any error message. | ||
| 98 | */ | ||
| 99 | static struct quirk_entry quirk_dell_xps13_9333 = { | ||
| 100 | .needs_kbd_timeouts = 1, | ||
| 101 | .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, | ||
| 102 | }; | ||
| 103 | |||
| 104 | static int da_command_address; | 79 | static int da_command_address; |
| 105 | static int da_command_code; | 80 | static int da_command_code; |
| 106 | static int da_num_tokens; | 81 | static int da_num_tokens; |
| @@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = { | |||
| 292 | }, | 267 | }, |
| 293 | .driver_data = &quirk_dell_vostro_v130, | 268 | .driver_data = &quirk_dell_vostro_v130, |
| 294 | }, | 269 | }, |
| 295 | { | ||
| 296 | .callback = dmi_matched, | ||
| 297 | .ident = "Dell XPS13 9333", | ||
| 298 | .matches = { | ||
| 299 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 300 | DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"), | ||
| 301 | }, | ||
| 302 | .driver_data = &quirk_dell_xps13_9333, | ||
| 303 | }, | ||
| 304 | { } | 270 | { } |
| 305 | }; | 271 | }; |
| 306 | 272 | ||
| @@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy) | |||
| 365 | } | 331 | } |
| 366 | } | 332 | } |
| 367 | 333 | ||
| 368 | static int find_token_id(int tokenid) | 334 | static int find_token_location(int tokenid) |
| 369 | { | 335 | { |
| 370 | int i; | 336 | int i; |
| 371 | |||
| 372 | for (i = 0; i < da_num_tokens; i++) { | 337 | for (i = 0; i < da_num_tokens; i++) { |
| 373 | if (da_tokens[i].tokenID == tokenid) | 338 | if (da_tokens[i].tokenID == tokenid) |
| 374 | return i; | 339 | return da_tokens[i].location; |
| 375 | } | 340 | } |
| 376 | 341 | ||
| 377 | return -1; | 342 | return -1; |
| 378 | } | 343 | } |
| 379 | 344 | ||
| 380 | static int find_token_location(int tokenid) | ||
| 381 | { | ||
| 382 | int id; | ||
| 383 | |||
| 384 | id = find_token_id(tokenid); | ||
| 385 | if (id == -1) | ||
| 386 | return -1; | ||
| 387 | |||
| 388 | return da_tokens[id].location; | ||
| 389 | } | ||
| 390 | |||
| 391 | static struct calling_interface_buffer * | 345 | static struct calling_interface_buffer * |
| 392 | dell_send_request(struct calling_interface_buffer *buffer, int class, | 346 | dell_send_request(struct calling_interface_buffer *buffer, int class, |
| 393 | int select) | 347 | int select) |
| @@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class, | |||
| 408 | return buffer; | 362 | return buffer; |
| 409 | } | 363 | } |
| 410 | 364 | ||
| 411 | static inline int dell_smi_error(int value) | ||
| 412 | { | ||
| 413 | switch (value) { | ||
| 414 | case 0: /* Completed successfully */ | ||
| 415 | return 0; | ||
| 416 | case -1: /* Completed with error */ | ||
| 417 | return -EIO; | ||
| 418 | case -2: /* Function not supported */ | ||
| 419 | return -ENXIO; | ||
| 420 | default: /* Unknown error */ | ||
| 421 | return -EINVAL; | ||
| 422 | } | ||
| 423 | } | ||
| 424 | |||
| 425 | /* Derived from information in DellWirelessCtl.cpp: | 365 | /* Derived from information in DellWirelessCtl.cpp: |
| 426 | Class 17, select 11 is radio control. It returns an array of 32-bit values. | 366 | Class 17, select 11 is radio control. It returns an array of 32-bit values. |
| 427 | 367 | ||
| @@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd) | |||
| 776 | else | 716 | else |
| 777 | dell_send_request(buffer, 1, 1); | 717 | dell_send_request(buffer, 1, 1); |
| 778 | 718 | ||
| 779 | out: | 719 | out: |
| 780 | release_buffer(); | 720 | release_buffer(); |
| 781 | return ret; | 721 | return ret; |
| 782 | } | 722 | } |
| @@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd) | |||
| 800 | 740 | ||
| 801 | ret = buffer->output[1]; | 741 | ret = buffer->output[1]; |
| 802 | 742 | ||
| 803 | out: | 743 | out: |
| 804 | release_buffer(); | 744 | release_buffer(); |
| 805 | return ret; | 745 | return ret; |
| 806 | } | 746 | } |
| @@ -849,984 +789,6 @@ static void touchpad_led_exit(void) | |||
| 849 | led_classdev_unregister(&touchpad_led); | 789 | led_classdev_unregister(&touchpad_led); |
| 850 | } | 790 | } |
| 851 | 791 | ||
| 852 | /* | ||
| 853 | * Derived from information in smbios-keyboard-ctl: | ||
| 854 | * | ||
| 855 | * cbClass 4 | ||
| 856 | * cbSelect 11 | ||
| 857 | * Keyboard illumination | ||
| 858 | * cbArg1 determines the function to be performed | ||
| 859 | * | ||
| 860 | * cbArg1 0x0 = Get Feature Information | ||
| 861 | * cbRES1 Standard return codes (0, -1, -2) | ||
| 862 | * cbRES2, word0 Bitmap of user-selectable modes | ||
| 863 | * bit 0 Always off (All systems) | ||
| 864 | * bit 1 Always on (Travis ATG, Siberia) | ||
| 865 | * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) | ||
| 866 | * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off | ||
| 867 | * bit 4 Auto: Input-activity-based On; input-activity based Off | ||
| 868 | * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off | ||
| 869 | * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off | ||
| 870 | * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off | ||
| 871 | * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off | ||
| 872 | * bits 9-15 Reserved for future use | ||
| 873 | * cbRES2, byte2 Reserved for future use | ||
| 874 | * cbRES2, byte3 Keyboard illumination type | ||
| 875 | * 0 Reserved | ||
| 876 | * 1 Tasklight | ||
| 877 | * 2 Backlight | ||
| 878 | * 3-255 Reserved for future use | ||
| 879 | * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap. | ||
| 880 | * bit 0 Any keystroke | ||
| 881 | * bit 1 Touchpad activity | ||
| 882 | * bit 2 Pointing stick | ||
| 883 | * bit 3 Any mouse | ||
| 884 | * bits 4-7 Reserved for future use | ||
| 885 | * cbRES3, byte1 Supported timeout unit bitmap | ||
| 886 | * bit 0 Seconds | ||
| 887 | * bit 1 Minutes | ||
| 888 | * bit 2 Hours | ||
| 889 | * bit 3 Days | ||
| 890 | * bits 4-7 Reserved for future use | ||
| 891 | * cbRES3, byte2 Number of keyboard light brightness levels | ||
| 892 | * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported). | ||
| 893 | * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported). | ||
| 894 | * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported). | ||
| 895 | * cbRES4, byte3 Maximum acceptable days value (0 if days not supported) | ||
| 896 | * | ||
| 897 | * cbArg1 0x1 = Get Current State | ||
| 898 | * cbRES1 Standard return codes (0, -1, -2) | ||
| 899 | * cbRES2, word0 Bitmap of current mode state | ||
| 900 | * bit 0 Always off (All systems) | ||
| 901 | * bit 1 Always on (Travis ATG, Siberia) | ||
| 902 | * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) | ||
| 903 | * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off | ||
| 904 | * bit 4 Auto: Input-activity-based On; input-activity based Off | ||
| 905 | * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off | ||
| 906 | * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off | ||
| 907 | * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off | ||
| 908 | * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off | ||
| 909 | * bits 9-15 Reserved for future use | ||
| 910 | * Note: Only One bit can be set | ||
| 911 | * cbRES2, byte2 Currently active auto keyboard illumination triggers. | ||
| 912 | * bit 0 Any keystroke | ||
| 913 | * bit 1 Touchpad activity | ||
| 914 | * bit 2 Pointing stick | ||
| 915 | * bit 3 Any mouse | ||
| 916 | * bits 4-7 Reserved for future use | ||
| 917 | * cbRES2, byte3 Current Timeout | ||
| 918 | * bits 7:6 Timeout units indicator: | ||
| 919 | * 00b Seconds | ||
| 920 | * 01b Minutes | ||
| 921 | * 10b Hours | ||
| 922 | * 11b Days | ||
| 923 | * bits 5:0 Timeout value (0-63) in sec/min/hr/day | ||
| 924 | * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte | ||
| 925 | * are set upon return from the [Get feature information] call. | ||
| 926 | * cbRES3, byte0 Current setting of ALS value that turns the light on or off. | ||
| 927 | * cbRES3, byte1 Current ALS reading | ||
| 928 | * cbRES3, byte2 Current keyboard light level. | ||
| 929 | * | ||
| 930 | * cbArg1 0x2 = Set New State | ||
| 931 | * cbRES1 Standard return codes (0, -1, -2) | ||
| 932 | * cbArg2, word0 Bitmap of current mode state | ||
| 933 | * bit 0 Always off (All systems) | ||
| 934 | * bit 1 Always on (Travis ATG, Siberia) | ||
| 935 | * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG) | ||
| 936 | * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off | ||
| 937 | * bit 4 Auto: Input-activity-based On; input-activity based Off | ||
| 938 | * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off | ||
| 939 | * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off | ||
| 940 | * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off | ||
| 941 | * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off | ||
| 942 | * bits 9-15 Reserved for future use | ||
| 943 | * Note: Only One bit can be set | ||
| 944 | * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow | ||
| 945 | * keyboard to turn off automatically. | ||
| 946 | * bit 0 Any keystroke | ||
| 947 | * bit 1 Touchpad activity | ||
| 948 | * bit 2 Pointing stick | ||
| 949 | * bit 3 Any mouse | ||
| 950 | * bits 4-7 Reserved for future use | ||
| 951 | * cbArg2, byte3 Desired Timeout | ||
| 952 | * bits 7:6 Timeout units indicator: | ||
| 953 | * 00b Seconds | ||
| 954 | * 01b Minutes | ||
| 955 | * 10b Hours | ||
| 956 | * 11b Days | ||
| 957 | * bits 5:0 Timeout value (0-63) in sec/min/hr/day | ||
| 958 | * cbArg3, byte0 Desired setting of ALS value that turns the light on or off. | ||
| 959 | * cbArg3, byte2 Desired keyboard light level. | ||
| 960 | */ | ||
| 961 | |||
| 962 | |||
| 963 | enum kbd_timeout_unit { | ||
| 964 | KBD_TIMEOUT_SECONDS = 0, | ||
| 965 | KBD_TIMEOUT_MINUTES, | ||
| 966 | KBD_TIMEOUT_HOURS, | ||
| 967 | KBD_TIMEOUT_DAYS, | ||
| 968 | }; | ||
| 969 | |||
| 970 | enum kbd_mode_bit { | ||
| 971 | KBD_MODE_BIT_OFF = 0, | ||
| 972 | KBD_MODE_BIT_ON, | ||
| 973 | KBD_MODE_BIT_ALS, | ||
| 974 | KBD_MODE_BIT_TRIGGER_ALS, | ||
| 975 | KBD_MODE_BIT_TRIGGER, | ||
| 976 | KBD_MODE_BIT_TRIGGER_25, | ||
| 977 | KBD_MODE_BIT_TRIGGER_50, | ||
| 978 | KBD_MODE_BIT_TRIGGER_75, | ||
| 979 | KBD_MODE_BIT_TRIGGER_100, | ||
| 980 | }; | ||
| 981 | |||
| 982 | #define kbd_is_als_mode_bit(bit) \ | ||
| 983 | ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS) | ||
| 984 | #define kbd_is_trigger_mode_bit(bit) \ | ||
| 985 | ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100) | ||
| 986 | #define kbd_is_level_mode_bit(bit) \ | ||
| 987 | ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100) | ||
| 988 | |||
| 989 | struct kbd_info { | ||
| 990 | u16 modes; | ||
| 991 | u8 type; | ||
| 992 | u8 triggers; | ||
| 993 | u8 levels; | ||
| 994 | u8 seconds; | ||
| 995 | u8 minutes; | ||
| 996 | u8 hours; | ||
| 997 | u8 days; | ||
| 998 | }; | ||
| 999 | |||
| 1000 | struct kbd_state { | ||
| 1001 | u8 mode_bit; | ||
| 1002 | u8 triggers; | ||
| 1003 | u8 timeout_value; | ||
| 1004 | u8 timeout_unit; | ||
| 1005 | u8 als_setting; | ||
| 1006 | u8 als_value; | ||
| 1007 | u8 level; | ||
| 1008 | }; | ||
| 1009 | |||
| 1010 | static const int kbd_tokens[] = { | ||
| 1011 | KBD_LED_OFF_TOKEN, | ||
| 1012 | KBD_LED_AUTO_25_TOKEN, | ||
| 1013 | KBD_LED_AUTO_50_TOKEN, | ||
| 1014 | KBD_LED_AUTO_75_TOKEN, | ||
| 1015 | KBD_LED_AUTO_100_TOKEN, | ||
| 1016 | KBD_LED_ON_TOKEN, | ||
| 1017 | }; | ||
| 1018 | |||
| 1019 | static u16 kbd_token_bits; | ||
| 1020 | |||
| 1021 | static struct kbd_info kbd_info; | ||
| 1022 | static bool kbd_als_supported; | ||
| 1023 | static bool kbd_triggers_supported; | ||
| 1024 | |||
| 1025 | static u8 kbd_mode_levels[16]; | ||
| 1026 | static int kbd_mode_levels_count; | ||
| 1027 | |||
| 1028 | static u8 kbd_previous_level; | ||
| 1029 | static u8 kbd_previous_mode_bit; | ||
| 1030 | |||
| 1031 | static bool kbd_led_present; | ||
| 1032 | |||
| 1033 | /* | ||
| 1034 | * NOTE: there are three ways to set the keyboard backlight level. | ||
| 1035 | * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value). | ||
| 1036 | * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels). | ||
| 1037 | * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens) | ||
| 1038 | * | ||
| 1039 | * There are laptops which support only one of these methods. If we want to | ||
| 1040 | * support as many machines as possible we need to implement all three methods. | ||
| 1041 | * The first two methods use the kbd_state structure. The third uses SMBIOS | ||
| 1042 | * tokens. If kbd_info.levels == 0, the machine does not support setting the | ||
| 1043 | * keyboard backlight level via kbd_state.level. | ||
| 1044 | */ | ||
| 1045 | |||
| 1046 | static int kbd_get_info(struct kbd_info *info) | ||
| 1047 | { | ||
| 1048 | u8 units; | ||
| 1049 | int ret; | ||
| 1050 | |||
| 1051 | get_buffer(); | ||
| 1052 | |||
| 1053 | buffer->input[0] = 0x0; | ||
| 1054 | dell_send_request(buffer, 4, 11); | ||
| 1055 | ret = buffer->output[0]; | ||
| 1056 | |||
| 1057 | if (ret) { | ||
| 1058 | ret = dell_smi_error(ret); | ||
| 1059 | goto out; | ||
| 1060 | } | ||
| 1061 | |||
| 1062 | info->modes = buffer->output[1] & 0xFFFF; | ||
| 1063 | info->type = (buffer->output[1] >> 24) & 0xFF; | ||
| 1064 | info->triggers = buffer->output[2] & 0xFF; | ||
| 1065 | units = (buffer->output[2] >> 8) & 0xFF; | ||
| 1066 | info->levels = (buffer->output[2] >> 16) & 0xFF; | ||
| 1067 | |||
| 1068 | if (units & BIT(0)) | ||
| 1069 | info->seconds = (buffer->output[3] >> 0) & 0xFF; | ||
| 1070 | if (units & BIT(1)) | ||
| 1071 | info->minutes = (buffer->output[3] >> 8) & 0xFF; | ||
| 1072 | if (units & BIT(2)) | ||
| 1073 | info->hours = (buffer->output[3] >> 16) & 0xFF; | ||
| 1074 | if (units & BIT(3)) | ||
| 1075 | info->days = (buffer->output[3] >> 24) & 0xFF; | ||
| 1076 | |||
| 1077 | out: | ||
| 1078 | release_buffer(); | ||
| 1079 | return ret; | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | static unsigned int kbd_get_max_level(void) | ||
| 1083 | { | ||
| 1084 | if (kbd_info.levels != 0) | ||
| 1085 | return kbd_info.levels; | ||
| 1086 | if (kbd_mode_levels_count > 0) | ||
| 1087 | return kbd_mode_levels_count - 1; | ||
| 1088 | return 0; | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | static int kbd_get_level(struct kbd_state *state) | ||
| 1092 | { | ||
| 1093 | int i; | ||
| 1094 | |||
| 1095 | if (kbd_info.levels != 0) | ||
| 1096 | return state->level; | ||
| 1097 | |||
| 1098 | if (kbd_mode_levels_count > 0) { | ||
| 1099 | for (i = 0; i < kbd_mode_levels_count; ++i) | ||
| 1100 | if (kbd_mode_levels[i] == state->mode_bit) | ||
| 1101 | return i; | ||
| 1102 | return 0; | ||
| 1103 | } | ||
| 1104 | |||
| 1105 | return -EINVAL; | ||
| 1106 | } | ||
| 1107 | |||
| 1108 | static int kbd_set_level(struct kbd_state *state, u8 level) | ||
| 1109 | { | ||
| 1110 | if (kbd_info.levels != 0) { | ||
| 1111 | if (level != 0) | ||
| 1112 | kbd_previous_level = level; | ||
| 1113 | if (state->level == level) | ||
| 1114 | return 0; | ||
| 1115 | state->level = level; | ||
| 1116 | if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF) | ||
| 1117 | state->mode_bit = kbd_previous_mode_bit; | ||
| 1118 | else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) { | ||
| 1119 | kbd_previous_mode_bit = state->mode_bit; | ||
| 1120 | state->mode_bit = KBD_MODE_BIT_OFF; | ||
| 1121 | } | ||
| 1122 | return 0; | ||
| 1123 | } | ||
| 1124 | |||
| 1125 | if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) { | ||
| 1126 | if (level != 0) | ||
| 1127 | kbd_previous_level = level; | ||
| 1128 | state->mode_bit = kbd_mode_levels[level]; | ||
| 1129 | return 0; | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | return -EINVAL; | ||
| 1133 | } | ||
| 1134 | |||
| 1135 | static int kbd_get_state(struct kbd_state *state) | ||
| 1136 | { | ||
| 1137 | int ret; | ||
| 1138 | |||
| 1139 | get_buffer(); | ||
| 1140 | |||
| 1141 | buffer->input[0] = 0x1; | ||
| 1142 | dell_send_request(buffer, 4, 11); | ||
| 1143 | ret = buffer->output[0]; | ||
| 1144 | |||
| 1145 | if (ret) { | ||
| 1146 | ret = dell_smi_error(ret); | ||
| 1147 | goto out; | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | state->mode_bit = ffs(buffer->output[1] & 0xFFFF); | ||
| 1151 | if (state->mode_bit != 0) | ||
| 1152 | state->mode_bit--; | ||
| 1153 | |||
| 1154 | state->triggers = (buffer->output[1] >> 16) & 0xFF; | ||
| 1155 | state->timeout_value = (buffer->output[1] >> 24) & 0x3F; | ||
| 1156 | state->timeout_unit = (buffer->output[1] >> 30) & 0x3; | ||
| 1157 | state->als_setting = buffer->output[2] & 0xFF; | ||
| 1158 | state->als_value = (buffer->output[2] >> 8) & 0xFF; | ||
| 1159 | state->level = (buffer->output[2] >> 16) & 0xFF; | ||
| 1160 | |||
| 1161 | out: | ||
| 1162 | release_buffer(); | ||
| 1163 | return ret; | ||
| 1164 | } | ||
| 1165 | |||
| 1166 | static int kbd_set_state(struct kbd_state *state) | ||
| 1167 | { | ||
| 1168 | int ret; | ||
| 1169 | |||
| 1170 | get_buffer(); | ||
| 1171 | buffer->input[0] = 0x2; | ||
| 1172 | buffer->input[1] = BIT(state->mode_bit) & 0xFFFF; | ||
| 1173 | buffer->input[1] |= (state->triggers & 0xFF) << 16; | ||
| 1174 | buffer->input[1] |= (state->timeout_value & 0x3F) << 24; | ||
| 1175 | buffer->input[1] |= (state->timeout_unit & 0x3) << 30; | ||
| 1176 | buffer->input[2] = state->als_setting & 0xFF; | ||
| 1177 | buffer->input[2] |= (state->level & 0xFF) << 16; | ||
| 1178 | dell_send_request(buffer, 4, 11); | ||
| 1179 | ret = buffer->output[0]; | ||
| 1180 | release_buffer(); | ||
| 1181 | |||
| 1182 | return dell_smi_error(ret); | ||
| 1183 | } | ||
| 1184 | |||
| 1185 | static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old) | ||
| 1186 | { | ||
| 1187 | int ret; | ||
| 1188 | |||
| 1189 | ret = kbd_set_state(state); | ||
| 1190 | if (ret == 0) | ||
| 1191 | return 0; | ||
| 1192 | |||
| 1193 | /* | ||
| 1194 | * When setting the new state fails,try to restore the previous one. | ||
| 1195 | * This is needed on some machines where BIOS sets a default state when | ||
| 1196 | * setting a new state fails. This default state could be all off. | ||
| 1197 | */ | ||
| 1198 | |||
| 1199 | if (kbd_set_state(old)) | ||
| 1200 | pr_err("Setting old previous keyboard state failed\n"); | ||
| 1201 | |||
| 1202 | return ret; | ||
| 1203 | } | ||
| 1204 | |||
| 1205 | static int kbd_set_token_bit(u8 bit) | ||
| 1206 | { | ||
| 1207 | int id; | ||
| 1208 | int ret; | ||
| 1209 | |||
| 1210 | if (bit >= ARRAY_SIZE(kbd_tokens)) | ||
| 1211 | return -EINVAL; | ||
| 1212 | |||
| 1213 | id = find_token_id(kbd_tokens[bit]); | ||
| 1214 | if (id == -1) | ||
| 1215 | return -EINVAL; | ||
| 1216 | |||
| 1217 | get_buffer(); | ||
| 1218 | buffer->input[0] = da_tokens[id].location; | ||
| 1219 | buffer->input[1] = da_tokens[id].value; | ||
| 1220 | dell_send_request(buffer, 1, 0); | ||
| 1221 | ret = buffer->output[0]; | ||
| 1222 | release_buffer(); | ||
| 1223 | |||
| 1224 | return dell_smi_error(ret); | ||
| 1225 | } | ||
| 1226 | |||
| 1227 | static int kbd_get_token_bit(u8 bit) | ||
| 1228 | { | ||
| 1229 | int id; | ||
| 1230 | int ret; | ||
| 1231 | int val; | ||
| 1232 | |||
| 1233 | if (bit >= ARRAY_SIZE(kbd_tokens)) | ||
| 1234 | return -EINVAL; | ||
| 1235 | |||
| 1236 | id = find_token_id(kbd_tokens[bit]); | ||
| 1237 | if (id == -1) | ||
| 1238 | return -EINVAL; | ||
| 1239 | |||
| 1240 | get_buffer(); | ||
| 1241 | buffer->input[0] = da_tokens[id].location; | ||
| 1242 | dell_send_request(buffer, 0, 0); | ||
| 1243 | ret = buffer->output[0]; | ||
| 1244 | val = buffer->output[1]; | ||
| 1245 | release_buffer(); | ||
| 1246 | |||
| 1247 | if (ret) | ||
| 1248 | return dell_smi_error(ret); | ||
| 1249 | |||
| 1250 | return (val == da_tokens[id].value); | ||
| 1251 | } | ||
| 1252 | |||
| 1253 | static int kbd_get_first_active_token_bit(void) | ||
| 1254 | { | ||
| 1255 | int i; | ||
| 1256 | int ret; | ||
| 1257 | |||
| 1258 | for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) { | ||
| 1259 | ret = kbd_get_token_bit(i); | ||
| 1260 | if (ret == 1) | ||
| 1261 | return i; | ||
| 1262 | } | ||
| 1263 | |||
| 1264 | return ret; | ||
| 1265 | } | ||
| 1266 | |||
| 1267 | static int kbd_get_valid_token_counts(void) | ||
| 1268 | { | ||
| 1269 | return hweight16(kbd_token_bits); | ||
| 1270 | } | ||
| 1271 | |||
| 1272 | static inline int kbd_init_info(void) | ||
| 1273 | { | ||
| 1274 | struct kbd_state state; | ||
| 1275 | int ret; | ||
| 1276 | int i; | ||
| 1277 | |||
| 1278 | ret = kbd_get_info(&kbd_info); | ||
| 1279 | if (ret) | ||
| 1280 | return ret; | ||
| 1281 | |||
| 1282 | kbd_get_state(&state); | ||
| 1283 | |||
| 1284 | /* NOTE: timeout value is stored in 6 bits so max value is 63 */ | ||
| 1285 | if (kbd_info.seconds > 63) | ||
| 1286 | kbd_info.seconds = 63; | ||
| 1287 | if (kbd_info.minutes > 63) | ||
| 1288 | kbd_info.minutes = 63; | ||
| 1289 | if (kbd_info.hours > 63) | ||
| 1290 | kbd_info.hours = 63; | ||
| 1291 | if (kbd_info.days > 63) | ||
| 1292 | kbd_info.days = 63; | ||
| 1293 | |||
| 1294 | /* NOTE: On tested machines ON mode did not work and caused | ||
| 1295 | * problems (turned backlight off) so do not use it | ||
| 1296 | */ | ||
| 1297 | kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON); | ||
| 1298 | |||
| 1299 | kbd_previous_level = kbd_get_level(&state); | ||
| 1300 | kbd_previous_mode_bit = state.mode_bit; | ||
| 1301 | |||
| 1302 | if (kbd_previous_level == 0 && kbd_get_max_level() != 0) | ||
| 1303 | kbd_previous_level = 1; | ||
| 1304 | |||
| 1305 | if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) { | ||
| 1306 | kbd_previous_mode_bit = | ||
| 1307 | ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF)); | ||
| 1308 | if (kbd_previous_mode_bit != 0) | ||
| 1309 | kbd_previous_mode_bit--; | ||
| 1310 | } | ||
| 1311 | |||
| 1312 | if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) | | ||
| 1313 | BIT(KBD_MODE_BIT_TRIGGER_ALS))) | ||
| 1314 | kbd_als_supported = true; | ||
| 1315 | |||
| 1316 | if (kbd_info.modes & ( | ||
| 1317 | BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) | | ||
| 1318 | BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) | | ||
| 1319 | BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100) | ||
| 1320 | )) | ||
| 1321 | kbd_triggers_supported = true; | ||
| 1322 | |||
| 1323 | /* kbd_mode_levels[0] is reserved, see below */ | ||
| 1324 | for (i = 0; i < 16; ++i) | ||
| 1325 | if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes)) | ||
| 1326 | kbd_mode_levels[1 + kbd_mode_levels_count++] = i; | ||
| 1327 | |||
| 1328 | /* | ||
| 1329 | * Find the first supported mode and assign to kbd_mode_levels[0]. | ||
| 1330 | * This should be 0 (off), but we cannot depend on the BIOS to | ||
| 1331 | * support 0. | ||
| 1332 | */ | ||
| 1333 | if (kbd_mode_levels_count > 0) { | ||
| 1334 | for (i = 0; i < 16; ++i) { | ||
| 1335 | if (BIT(i) & kbd_info.modes) { | ||
| 1336 | kbd_mode_levels[0] = i; | ||
| 1337 | break; | ||
| 1338 | } | ||
| 1339 | } | ||
| 1340 | kbd_mode_levels_count++; | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | return 0; | ||
| 1344 | |||
| 1345 | } | ||
| 1346 | |||
| 1347 | static inline void kbd_init_tokens(void) | ||
| 1348 | { | ||
| 1349 | int i; | ||
| 1350 | |||
| 1351 | for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) | ||
| 1352 | if (find_token_id(kbd_tokens[i]) != -1) | ||
| 1353 | kbd_token_bits |= BIT(i); | ||
| 1354 | } | ||
| 1355 | |||
| 1356 | static void kbd_init(void) | ||
| 1357 | { | ||
| 1358 | int ret; | ||
| 1359 | |||
| 1360 | ret = kbd_init_info(); | ||
| 1361 | kbd_init_tokens(); | ||
| 1362 | |||
| 1363 | if (kbd_token_bits != 0 || ret == 0) | ||
| 1364 | kbd_led_present = true; | ||
| 1365 | } | ||
| 1366 | |||
| 1367 | static ssize_t kbd_led_timeout_store(struct device *dev, | ||
| 1368 | struct device_attribute *attr, | ||
| 1369 | const char *buf, size_t count) | ||
| 1370 | { | ||
| 1371 | struct kbd_state new_state; | ||
| 1372 | struct kbd_state state; | ||
| 1373 | bool convert; | ||
| 1374 | int value; | ||
| 1375 | int ret; | ||
| 1376 | char ch; | ||
| 1377 | u8 unit; | ||
| 1378 | int i; | ||
| 1379 | |||
| 1380 | ret = sscanf(buf, "%d %c", &value, &ch); | ||
| 1381 | if (ret < 1) | ||
| 1382 | return -EINVAL; | ||
| 1383 | else if (ret == 1) | ||
| 1384 | ch = 's'; | ||
| 1385 | |||
| 1386 | if (value < 0) | ||
| 1387 | return -EINVAL; | ||
| 1388 | |||
| 1389 | convert = false; | ||
| 1390 | |||
| 1391 | switch (ch) { | ||
| 1392 | case 's': | ||
| 1393 | if (value > kbd_info.seconds) | ||
| 1394 | convert = true; | ||
| 1395 | unit = KBD_TIMEOUT_SECONDS; | ||
| 1396 | break; | ||
| 1397 | case 'm': | ||
| 1398 | if (value > kbd_info.minutes) | ||
| 1399 | convert = true; | ||
| 1400 | unit = KBD_TIMEOUT_MINUTES; | ||
| 1401 | break; | ||
| 1402 | case 'h': | ||
| 1403 | if (value > kbd_info.hours) | ||
| 1404 | convert = true; | ||
| 1405 | unit = KBD_TIMEOUT_HOURS; | ||
| 1406 | break; | ||
| 1407 | case 'd': | ||
| 1408 | if (value > kbd_info.days) | ||
| 1409 | convert = true; | ||
| 1410 | unit = KBD_TIMEOUT_DAYS; | ||
| 1411 | break; | ||
| 1412 | default: | ||
| 1413 | return -EINVAL; | ||
| 1414 | } | ||
| 1415 | |||
| 1416 | if (quirks && quirks->needs_kbd_timeouts) | ||
| 1417 | convert = true; | ||
| 1418 | |||
| 1419 | if (convert) { | ||
| 1420 | /* Convert value from current units to seconds */ | ||
| 1421 | switch (unit) { | ||
| 1422 | case KBD_TIMEOUT_DAYS: | ||
| 1423 | value *= 24; | ||
| 1424 | case KBD_TIMEOUT_HOURS: | ||
| 1425 | value *= 60; | ||
| 1426 | case KBD_TIMEOUT_MINUTES: | ||
| 1427 | value *= 60; | ||
| 1428 | unit = KBD_TIMEOUT_SECONDS; | ||
| 1429 | } | ||
| 1430 | |||
| 1431 | if (quirks && quirks->needs_kbd_timeouts) { | ||
| 1432 | for (i = 0; quirks->kbd_timeouts[i] != -1; i++) { | ||
| 1433 | if (value <= quirks->kbd_timeouts[i]) { | ||
| 1434 | value = quirks->kbd_timeouts[i]; | ||
| 1435 | break; | ||
| 1436 | } | ||
| 1437 | } | ||
| 1438 | } | ||
| 1439 | |||
| 1440 | if (value <= kbd_info.seconds && kbd_info.seconds) { | ||
| 1441 | unit = KBD_TIMEOUT_SECONDS; | ||
| 1442 | } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) { | ||
| 1443 | value /= 60; | ||
| 1444 | unit = KBD_TIMEOUT_MINUTES; | ||
| 1445 | } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) { | ||
| 1446 | value /= (60 * 60); | ||
| 1447 | unit = KBD_TIMEOUT_HOURS; | ||
| 1448 | } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) { | ||
| 1449 | value /= (60 * 60 * 24); | ||
| 1450 | unit = KBD_TIMEOUT_DAYS; | ||
| 1451 | } else { | ||
| 1452 | return -EINVAL; | ||
| 1453 | } | ||
| 1454 | } | ||
| 1455 | |||
| 1456 | ret = kbd_get_state(&state); | ||
| 1457 | if (ret) | ||
| 1458 | return ret; | ||
| 1459 | |||
| 1460 | new_state = state; | ||
| 1461 | new_state.timeout_value = value; | ||
| 1462 | new_state.timeout_unit = unit; | ||
| 1463 | |||
| 1464 | ret = kbd_set_state_safe(&new_state, &state); | ||
| 1465 | if (ret) | ||
| 1466 | return ret; | ||
| 1467 | |||
| 1468 | return count; | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | static ssize_t kbd_led_timeout_show(struct device *dev, | ||
| 1472 | struct device_attribute *attr, char *buf) | ||
| 1473 | { | ||
| 1474 | struct kbd_state state; | ||
| 1475 | int ret; | ||
| 1476 | int len; | ||
| 1477 | |||
| 1478 | ret = kbd_get_state(&state); | ||
| 1479 | if (ret) | ||
| 1480 | return ret; | ||
| 1481 | |||
| 1482 | len = sprintf(buf, "%d", state.timeout_value); | ||
| 1483 | |||
| 1484 | switch (state.timeout_unit) { | ||
| 1485 | case KBD_TIMEOUT_SECONDS: | ||
| 1486 | return len + sprintf(buf+len, "s\n"); | ||
| 1487 | case KBD_TIMEOUT_MINUTES: | ||
| 1488 | return len + sprintf(buf+len, "m\n"); | ||
| 1489 | case KBD_TIMEOUT_HOURS: | ||
| 1490 | return len + sprintf(buf+len, "h\n"); | ||
| 1491 | case KBD_TIMEOUT_DAYS: | ||
| 1492 | return len + sprintf(buf+len, "d\n"); | ||
| 1493 | default: | ||
| 1494 | return -EINVAL; | ||
| 1495 | } | ||
| 1496 | |||
| 1497 | return len; | ||
| 1498 | } | ||
| 1499 | |||
| 1500 | static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR, | ||
| 1501 | kbd_led_timeout_show, kbd_led_timeout_store); | ||
| 1502 | |||
| 1503 | static const char * const kbd_led_triggers[] = { | ||
| 1504 | "keyboard", | ||
| 1505 | "touchpad", | ||
| 1506 | /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */ | ||
| 1507 | "mouse", | ||
| 1508 | }; | ||
| 1509 | |||
| 1510 | static ssize_t kbd_led_triggers_store(struct device *dev, | ||
| 1511 | struct device_attribute *attr, | ||
| 1512 | const char *buf, size_t count) | ||
| 1513 | { | ||
| 1514 | struct kbd_state new_state; | ||
| 1515 | struct kbd_state state; | ||
| 1516 | bool triggers_enabled = false; | ||
| 1517 | bool als_enabled = false; | ||
| 1518 | bool disable_als = false; | ||
| 1519 | bool enable_als = false; | ||
| 1520 | int trigger_bit = -1; | ||
| 1521 | char trigger[21]; | ||
| 1522 | int i, ret; | ||
| 1523 | |||
| 1524 | ret = sscanf(buf, "%20s", trigger); | ||
| 1525 | if (ret != 1) | ||
| 1526 | return -EINVAL; | ||
| 1527 | |||
| 1528 | if (trigger[0] != '+' && trigger[0] != '-') | ||
| 1529 | return -EINVAL; | ||
| 1530 | |||
| 1531 | ret = kbd_get_state(&state); | ||
| 1532 | if (ret) | ||
| 1533 | return ret; | ||
| 1534 | |||
| 1535 | if (kbd_als_supported) | ||
| 1536 | als_enabled = kbd_is_als_mode_bit(state.mode_bit); | ||
| 1537 | |||
| 1538 | if (kbd_triggers_supported) | ||
| 1539 | triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit); | ||
| 1540 | |||
| 1541 | if (kbd_als_supported) { | ||
| 1542 | if (strcmp(trigger, "+als") == 0) { | ||
| 1543 | if (als_enabled) | ||
| 1544 | return count; | ||
| 1545 | enable_als = true; | ||
| 1546 | } else if (strcmp(trigger, "-als") == 0) { | ||
| 1547 | if (!als_enabled) | ||
| 1548 | return count; | ||
| 1549 | disable_als = true; | ||
| 1550 | } | ||
| 1551 | } | ||
| 1552 | |||
| 1553 | if (enable_als || disable_als) { | ||
| 1554 | new_state = state; | ||
| 1555 | if (enable_als) { | ||
| 1556 | if (triggers_enabled) | ||
| 1557 | new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS; | ||
| 1558 | else | ||
| 1559 | new_state.mode_bit = KBD_MODE_BIT_ALS; | ||
| 1560 | } else { | ||
| 1561 | if (triggers_enabled) { | ||
| 1562 | new_state.mode_bit = KBD_MODE_BIT_TRIGGER; | ||
| 1563 | kbd_set_level(&new_state, kbd_previous_level); | ||
| 1564 | } else { | ||
| 1565 | new_state.mode_bit = KBD_MODE_BIT_ON; | ||
| 1566 | } | ||
| 1567 | } | ||
| 1568 | if (!(kbd_info.modes & BIT(new_state.mode_bit))) | ||
| 1569 | return -EINVAL; | ||
| 1570 | ret = kbd_set_state_safe(&new_state, &state); | ||
| 1571 | if (ret) | ||
| 1572 | return ret; | ||
| 1573 | kbd_previous_mode_bit = new_state.mode_bit; | ||
| 1574 | return count; | ||
| 1575 | } | ||
| 1576 | |||
| 1577 | if (kbd_triggers_supported) { | ||
| 1578 | for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) { | ||
| 1579 | if (!(kbd_info.triggers & BIT(i))) | ||
| 1580 | continue; | ||
| 1581 | if (!kbd_led_triggers[i]) | ||
| 1582 | continue; | ||
| 1583 | if (strcmp(trigger+1, kbd_led_triggers[i]) != 0) | ||
| 1584 | continue; | ||
| 1585 | if (trigger[0] == '+' && | ||
| 1586 | triggers_enabled && (state.triggers & BIT(i))) | ||
| 1587 | return count; | ||
| 1588 | if (trigger[0] == '-' && | ||
| 1589 | (!triggers_enabled || !(state.triggers & BIT(i)))) | ||
| 1590 | return count; | ||
| 1591 | trigger_bit = i; | ||
| 1592 | break; | ||
| 1593 | } | ||
| 1594 | } | ||
| 1595 | |||
| 1596 | if (trigger_bit != -1) { | ||
| 1597 | new_state = state; | ||
| 1598 | if (trigger[0] == '+') | ||
| 1599 | new_state.triggers |= BIT(trigger_bit); | ||
| 1600 | else { | ||
| 1601 | new_state.triggers &= ~BIT(trigger_bit); | ||
| 1602 | /* NOTE: trackstick bit (2) must be disabled when | ||
| 1603 | * disabling touchpad bit (1), otherwise touchpad | ||
| 1604 | * bit (1) will not be disabled */ | ||
| 1605 | if (trigger_bit == 1) | ||
| 1606 | new_state.triggers &= ~BIT(2); | ||
| 1607 | } | ||
| 1608 | if ((kbd_info.triggers & new_state.triggers) != | ||
| 1609 | new_state.triggers) | ||
| 1610 | return -EINVAL; | ||
| 1611 | if (new_state.triggers && !triggers_enabled) { | ||
| 1612 | if (als_enabled) | ||
| 1613 | new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS; | ||
| 1614 | else { | ||
| 1615 | new_state.mode_bit = KBD_MODE_BIT_TRIGGER; | ||
| 1616 | kbd_set_level(&new_state, kbd_previous_level); | ||
| 1617 | } | ||
| 1618 | } else if (new_state.triggers == 0) { | ||
| 1619 | if (als_enabled) | ||
| 1620 | new_state.mode_bit = KBD_MODE_BIT_ALS; | ||
| 1621 | else | ||
| 1622 | kbd_set_level(&new_state, 0); | ||
| 1623 | } | ||
| 1624 | if (!(kbd_info.modes & BIT(new_state.mode_bit))) | ||
| 1625 | return -EINVAL; | ||
| 1626 | ret = kbd_set_state_safe(&new_state, &state); | ||
| 1627 | if (ret) | ||
| 1628 | return ret; | ||
| 1629 | if (new_state.mode_bit != KBD_MODE_BIT_OFF) | ||
| 1630 | kbd_previous_mode_bit = new_state.mode_bit; | ||
| 1631 | return count; | ||
| 1632 | } | ||
| 1633 | |||
| 1634 | return -EINVAL; | ||
| 1635 | } | ||
| 1636 | |||
| 1637 | static ssize_t kbd_led_triggers_show(struct device *dev, | ||
| 1638 | struct device_attribute *attr, char *buf) | ||
| 1639 | { | ||
| 1640 | struct kbd_state state; | ||
| 1641 | bool triggers_enabled; | ||
| 1642 | int level, i, ret; | ||
| 1643 | int len = 0; | ||
| 1644 | |||
| 1645 | ret = kbd_get_state(&state); | ||
| 1646 | if (ret) | ||
| 1647 | return ret; | ||
| 1648 | |||
| 1649 | len = 0; | ||
| 1650 | |||
| 1651 | if (kbd_triggers_supported) { | ||
| 1652 | triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit); | ||
| 1653 | level = kbd_get_level(&state); | ||
| 1654 | for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) { | ||
| 1655 | if (!(kbd_info.triggers & BIT(i))) | ||
| 1656 | continue; | ||
| 1657 | if (!kbd_led_triggers[i]) | ||
| 1658 | continue; | ||
| 1659 | if ((triggers_enabled || level <= 0) && | ||
| 1660 | (state.triggers & BIT(i))) | ||
| 1661 | buf[len++] = '+'; | ||
| 1662 | else | ||
| 1663 | buf[len++] = '-'; | ||
| 1664 | len += sprintf(buf+len, "%s ", kbd_led_triggers[i]); | ||
| 1665 | } | ||
| 1666 | } | ||
| 1667 | |||
| 1668 | if (kbd_als_supported) { | ||
| 1669 | if (kbd_is_als_mode_bit(state.mode_bit)) | ||
| 1670 | len += sprintf(buf+len, "+als "); | ||
| 1671 | else | ||
| 1672 | len += sprintf(buf+len, "-als "); | ||
| 1673 | } | ||
| 1674 | |||
| 1675 | if (len) | ||
| 1676 | buf[len - 1] = '\n'; | ||
| 1677 | |||
| 1678 | return len; | ||
| 1679 | } | ||
| 1680 | |||
| 1681 | static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR, | ||
| 1682 | kbd_led_triggers_show, kbd_led_triggers_store); | ||
| 1683 | |||
| 1684 | static ssize_t kbd_led_als_store(struct device *dev, | ||
| 1685 | struct device_attribute *attr, | ||
| 1686 | const char *buf, size_t count) | ||
| 1687 | { | ||
| 1688 | struct kbd_state state; | ||
| 1689 | struct kbd_state new_state; | ||
| 1690 | u8 setting; | ||
| 1691 | int ret; | ||
| 1692 | |||
| 1693 | ret = kstrtou8(buf, 10, &setting); | ||
| 1694 | if (ret) | ||
| 1695 | return ret; | ||
| 1696 | |||
| 1697 | ret = kbd_get_state(&state); | ||
| 1698 | if (ret) | ||
| 1699 | return ret; | ||
| 1700 | |||
| 1701 | new_state = state; | ||
| 1702 | new_state.als_setting = setting; | ||
| 1703 | |||
| 1704 | ret = kbd_set_state_safe(&new_state, &state); | ||
| 1705 | if (ret) | ||
| 1706 | return ret; | ||
| 1707 | |||
| 1708 | return count; | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | static ssize_t kbd_led_als_show(struct device *dev, | ||
| 1712 | struct device_attribute *attr, char *buf) | ||
| 1713 | { | ||
| 1714 | struct kbd_state state; | ||
| 1715 | int ret; | ||
| 1716 | |||
| 1717 | ret = kbd_get_state(&state); | ||
| 1718 | if (ret) | ||
| 1719 | return ret; | ||
| 1720 | |||
| 1721 | return sprintf(buf, "%d\n", state.als_setting); | ||
| 1722 | } | ||
| 1723 | |||
| 1724 | static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR, | ||
| 1725 | kbd_led_als_show, kbd_led_als_store); | ||
| 1726 | |||
| 1727 | static struct attribute *kbd_led_attrs[] = { | ||
| 1728 | &dev_attr_stop_timeout.attr, | ||
| 1729 | &dev_attr_start_triggers.attr, | ||
| 1730 | &dev_attr_als_setting.attr, | ||
| 1731 | NULL, | ||
| 1732 | }; | ||
| 1733 | ATTRIBUTE_GROUPS(kbd_led); | ||
| 1734 | |||
| 1735 | static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev) | ||
| 1736 | { | ||
| 1737 | int ret; | ||
| 1738 | u16 num; | ||
| 1739 | struct kbd_state state; | ||
| 1740 | |||
| 1741 | if (kbd_get_max_level()) { | ||
| 1742 | ret = kbd_get_state(&state); | ||
| 1743 | if (ret) | ||
| 1744 | return 0; | ||
| 1745 | ret = kbd_get_level(&state); | ||
| 1746 | if (ret < 0) | ||
| 1747 | return 0; | ||
| 1748 | return ret; | ||
| 1749 | } | ||
| 1750 | |||
| 1751 | if (kbd_get_valid_token_counts()) { | ||
| 1752 | ret = kbd_get_first_active_token_bit(); | ||
| 1753 | if (ret < 0) | ||
| 1754 | return 0; | ||
| 1755 | for (num = kbd_token_bits; num != 0 && ret > 0; --ret) | ||
| 1756 | num &= num - 1; /* clear the first bit set */ | ||
| 1757 | if (num == 0) | ||
| 1758 | return 0; | ||
| 1759 | return ffs(num) - 1; | ||
| 1760 | } | ||
| 1761 | |||
| 1762 | pr_warn("Keyboard brightness level control not supported\n"); | ||
| 1763 | return 0; | ||
| 1764 | } | ||
| 1765 | |||
| 1766 | static void kbd_led_level_set(struct led_classdev *led_cdev, | ||
| 1767 | enum led_brightness value) | ||
| 1768 | { | ||
| 1769 | struct kbd_state state; | ||
| 1770 | struct kbd_state new_state; | ||
| 1771 | u16 num; | ||
| 1772 | |||
| 1773 | if (kbd_get_max_level()) { | ||
| 1774 | if (kbd_get_state(&state)) | ||
| 1775 | return; | ||
| 1776 | new_state = state; | ||
| 1777 | if (kbd_set_level(&new_state, value)) | ||
| 1778 | return; | ||
| 1779 | kbd_set_state_safe(&new_state, &state); | ||
| 1780 | return; | ||
| 1781 | } | ||
| 1782 | |||
| 1783 | if (kbd_get_valid_token_counts()) { | ||
| 1784 | for (num = kbd_token_bits; num != 0 && value > 0; --value) | ||
| 1785 | num &= num - 1; /* clear the first bit set */ | ||
| 1786 | if (num == 0) | ||
| 1787 | return; | ||
| 1788 | kbd_set_token_bit(ffs(num) - 1); | ||
| 1789 | return; | ||
| 1790 | } | ||
| 1791 | |||
| 1792 | pr_warn("Keyboard brightness level control not supported\n"); | ||
| 1793 | } | ||
| 1794 | |||
| 1795 | static struct led_classdev kbd_led = { | ||
| 1796 | .name = "dell::kbd_backlight", | ||
| 1797 | .brightness_set = kbd_led_level_set, | ||
| 1798 | .brightness_get = kbd_led_level_get, | ||
| 1799 | .groups = kbd_led_groups, | ||
| 1800 | }; | ||
| 1801 | |||
| 1802 | static int __init kbd_led_init(struct device *dev) | ||
| 1803 | { | ||
| 1804 | kbd_init(); | ||
| 1805 | if (!kbd_led_present) | ||
| 1806 | return -ENODEV; | ||
| 1807 | kbd_led.max_brightness = kbd_get_max_level(); | ||
| 1808 | if (!kbd_led.max_brightness) { | ||
| 1809 | kbd_led.max_brightness = kbd_get_valid_token_counts(); | ||
| 1810 | if (kbd_led.max_brightness) | ||
| 1811 | kbd_led.max_brightness--; | ||
| 1812 | } | ||
| 1813 | return led_classdev_register(dev, &kbd_led); | ||
| 1814 | } | ||
| 1815 | |||
| 1816 | static void brightness_set_exit(struct led_classdev *led_cdev, | ||
| 1817 | enum led_brightness value) | ||
| 1818 | { | ||
| 1819 | /* Don't change backlight level on exit */ | ||
| 1820 | }; | ||
| 1821 | |||
| 1822 | static void kbd_led_exit(void) | ||
| 1823 | { | ||
| 1824 | if (!kbd_led_present) | ||
| 1825 | return; | ||
| 1826 | kbd_led.brightness_set = brightness_set_exit; | ||
| 1827 | led_classdev_unregister(&kbd_led); | ||
| 1828 | } | ||
| 1829 | |||
| 1830 | static int __init dell_init(void) | 792 | static int __init dell_init(void) |
| 1831 | { | 793 | { |
| 1832 | int max_intensity = 0; | 794 | int max_intensity = 0; |
| @@ -1879,8 +841,6 @@ static int __init dell_init(void) | |||
| 1879 | if (quirks && quirks->touchpad_led) | 841 | if (quirks && quirks->touchpad_led) |
| 1880 | touchpad_led_init(&platform_device->dev); | 842 | touchpad_led_init(&platform_device->dev); |
| 1881 | 843 | ||
| 1882 | kbd_led_init(&platform_device->dev); | ||
| 1883 | |||
| 1884 | dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); | 844 | dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); |
| 1885 | if (dell_laptop_dir != NULL) | 845 | if (dell_laptop_dir != NULL) |
| 1886 | debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, | 846 | debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL, |
| @@ -1948,7 +908,6 @@ static void __exit dell_exit(void) | |||
| 1948 | debugfs_remove_recursive(dell_laptop_dir); | 908 | debugfs_remove_recursive(dell_laptop_dir); |
| 1949 | if (quirks && quirks->touchpad_led) | 909 | if (quirks && quirks->touchpad_led) |
| 1950 | touchpad_led_exit(); | 910 | touchpad_led_exit(); |
| 1951 | kbd_led_exit(); | ||
| 1952 | i8042_remove_filter(dell_laptop_i8042_filter); | 911 | i8042_remove_filter(dell_laptop_i8042_filter); |
| 1953 | cancel_delayed_work_sync(&dell_rfkill_work); | 912 | cancel_delayed_work_sync(&dell_rfkill_work); |
| 1954 | backlight_device_unregister(dell_backlight_device); | 913 | backlight_device_unregister(dell_backlight_device); |
| @@ -1965,7 +924,5 @@ module_init(dell_init); | |||
| 1965 | module_exit(dell_exit); | 924 | module_exit(dell_exit); |
| 1966 | 925 | ||
| 1967 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); | 926 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); |
| 1968 | MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); | ||
| 1969 | MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); | ||
| 1970 | MODULE_DESCRIPTION("Dell laptop driver"); | 927 | MODULE_DESCRIPTION("Dell laptop driver"); |
| 1971 | MODULE_LICENSE("GPL"); | 928 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index e225711bb8bc..9c48fb32f660 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
| @@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id) | |||
| 1488 | } | 1488 | } |
| 1489 | EXPORT_SYMBOL_GPL(regulator_get_optional); | 1489 | EXPORT_SYMBOL_GPL(regulator_get_optional); |
| 1490 | 1490 | ||
| 1491 | /* Locks held by regulator_put() */ | 1491 | /* regulator_list_mutex lock held by regulator_put() */ |
| 1492 | static void _regulator_put(struct regulator *regulator) | 1492 | static void _regulator_put(struct regulator *regulator) |
| 1493 | { | 1493 | { |
| 1494 | struct regulator_dev *rdev; | 1494 | struct regulator_dev *rdev; |
| @@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator) | |||
| 1503 | /* remove any sysfs entries */ | 1503 | /* remove any sysfs entries */ |
| 1504 | if (regulator->dev) | 1504 | if (regulator->dev) |
| 1505 | sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); | 1505 | sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); |
| 1506 | mutex_lock(&rdev->mutex); | ||
| 1506 | kfree(regulator->supply_name); | 1507 | kfree(regulator->supply_name); |
| 1507 | list_del(®ulator->list); | 1508 | list_del(®ulator->list); |
| 1508 | kfree(regulator); | 1509 | kfree(regulator); |
| 1509 | 1510 | ||
| 1510 | rdev->open_count--; | 1511 | rdev->open_count--; |
| 1511 | rdev->exclusive = 0; | 1512 | rdev->exclusive = 0; |
| 1513 | mutex_unlock(&rdev->mutex); | ||
| 1512 | 1514 | ||
| 1513 | module_put(rdev->owner); | 1515 | module_put(rdev->owner); |
| 1514 | } | 1516 | } |
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 2809ae0d6bcd..ff828117798f 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c | |||
| @@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops; | |||
| 405 | .enable_mask = S2MPS14_ENABLE_MASK \ | 405 | .enable_mask = S2MPS14_ENABLE_MASK \ |
| 406 | } | 406 | } |
| 407 | 407 | ||
| 408 | #define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \ | ||
| 409 | .name = "BUCK"#num, \ | ||
| 410 | .id = S2MPS13_BUCK##num, \ | ||
| 411 | .ops = &s2mps14_reg_ops, \ | ||
| 412 | .type = REGULATOR_VOLTAGE, \ | ||
| 413 | .owner = THIS_MODULE, \ | ||
| 414 | .min_uV = min, \ | ||
| 415 | .uV_step = step, \ | ||
| 416 | .linear_min_sel = min_sel, \ | ||
| 417 | .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ | ||
| 418 | .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \ | ||
| 419 | .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \ | ||
| 420 | .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ | ||
| 421 | .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \ | ||
| 422 | .enable_mask = S2MPS14_ENABLE_MASK \ | ||
| 423 | } | ||
| 424 | |||
| 425 | #define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \ | ||
| 426 | .name = "BUCK"#num, \ | ||
| 427 | .id = S2MPS13_BUCK##num, \ | ||
| 428 | .ops = &s2mps14_reg_ops, \ | ||
| 429 | .type = REGULATOR_VOLTAGE, \ | ||
| 430 | .owner = THIS_MODULE, \ | ||
| 431 | .min_uV = min, \ | ||
| 432 | .uV_step = step, \ | ||
| 433 | .linear_min_sel = min_sel, \ | ||
| 434 | .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ | ||
| 435 | .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \ | ||
| 436 | .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \ | ||
| 437 | .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ | ||
| 438 | .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \ | ||
| 439 | .enable_mask = S2MPS14_ENABLE_MASK \ | ||
| 440 | } | ||
| 441 | |||
| 408 | static const struct regulator_desc s2mps13_regulators[] = { | 442 | static const struct regulator_desc s2mps13_regulators[] = { |
| 409 | regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00), | 443 | regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00), |
| 410 | regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C), | 444 | regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C), |
| @@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = { | |||
| 452 | regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10), | 486 | regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10), |
| 453 | regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10), | 487 | regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10), |
| 454 | regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10), | 488 | regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10), |
| 455 | regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10), | 489 | regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10), |
| 456 | regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20), | 490 | regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20), |
| 457 | regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20), | 491 | regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20), |
| 458 | regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10), | 492 | regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10), |
| 459 | }; | 493 | }; |
| 460 | 494 | ||
| 461 | static int s2mps14_regulator_enable(struct regulator_dev *rdev) | 495 | static int s2mps14_regulator_enable(struct regulator_dev *rdev) |
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c index b5e7c4670205..89ac1d5083c6 100644 --- a/drivers/rtc/rtc-s5m.c +++ b/drivers/rtc/rtc-s5m.c | |||
| @@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume); | |||
| 832 | static const struct platform_device_id s5m_rtc_id[] = { | 832 | static const struct platform_device_id s5m_rtc_id[] = { |
| 833 | { "s5m-rtc", S5M8767X }, | 833 | { "s5m-rtc", S5M8767X }, |
| 834 | { "s2mps14-rtc", S2MPS14X }, | 834 | { "s2mps14-rtc", S2MPS14X }, |
| 835 | { }, | ||
| 835 | }; | 836 | }; |
| 836 | 837 | ||
| 837 | static struct platform_driver s5m_rtc_driver = { | 838 | static struct platform_driver s5m_rtc_driver = { |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f407e3763432..642c77c76b84 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
| @@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
| 1784 | QETH_DBF_TEXT(SETUP, 2, "idxanswr"); | 1784 | QETH_DBF_TEXT(SETUP, 2, "idxanswr"); |
| 1785 | card = CARD_FROM_CDEV(channel->ccwdev); | 1785 | card = CARD_FROM_CDEV(channel->ccwdev); |
| 1786 | iob = qeth_get_buffer(channel); | 1786 | iob = qeth_get_buffer(channel); |
| 1787 | if (!iob) | ||
| 1788 | return -ENOMEM; | ||
| 1787 | iob->callback = idx_reply_cb; | 1789 | iob->callback = idx_reply_cb; |
| 1788 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); | 1790 | memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); |
| 1789 | channel->ccw.count = QETH_BUFSIZE; | 1791 | channel->ccw.count = QETH_BUFSIZE; |
| @@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
| 1834 | QETH_DBF_TEXT(SETUP, 2, "idxactch"); | 1836 | QETH_DBF_TEXT(SETUP, 2, "idxactch"); |
| 1835 | 1837 | ||
| 1836 | iob = qeth_get_buffer(channel); | 1838 | iob = qeth_get_buffer(channel); |
| 1839 | if (!iob) | ||
| 1840 | return -ENOMEM; | ||
| 1837 | iob->callback = idx_reply_cb; | 1841 | iob->callback = idx_reply_cb; |
| 1838 | memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); | 1842 | memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); |
| 1839 | channel->ccw.count = IDX_ACTIVATE_SIZE; | 1843 | channel->ccw.count = IDX_ACTIVATE_SIZE; |
| @@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, | |||
| 2021 | } | 2025 | } |
| 2022 | EXPORT_SYMBOL_GPL(qeth_prepare_control_data); | 2026 | EXPORT_SYMBOL_GPL(qeth_prepare_control_data); |
| 2023 | 2027 | ||
| 2028 | /** | ||
| 2029 | * qeth_send_control_data() - send control command to the card | ||
| 2030 | * @card: qeth_card structure pointer | ||
| 2031 | * @len: size of the command buffer | ||
| 2032 | * @iob: qeth_cmd_buffer pointer | ||
| 2033 | * @reply_cb: callback function pointer | ||
| 2034 | * @cb_card: pointer to the qeth_card structure | ||
| 2035 | * @cb_reply: pointer to the qeth_reply structure | ||
| 2036 | * @cb_cmd: pointer to the original iob for non-IPA | ||
| 2037 | * commands, or to the qeth_ipa_cmd structure | ||
| 2038 | * for the IPA commands. | ||
| 2039 | * @reply_param: private pointer passed to the callback | ||
| 2040 | * | ||
| 2041 | * Returns the value of the `return_code' field of the response | ||
| 2042 | * block returned from the hardware, or other error indication. | ||
| 2043 | * Value of zero indicates successful execution of the command. | ||
| 2044 | * | ||
| 2045 | * Callback function gets called one or more times, with cb_cmd | ||
| 2046 | * pointing to the response returned by the hardware. Callback | ||
| 2047 | * function must return non-zero if more reply blocks are expected, | ||
| 2048 | * and zero if the last or only reply block is received. Callback | ||
| 2049 | * function can get the value of the reply_param pointer from the | ||
| 2050 | * field 'param' of the structure qeth_reply. | ||
| 2051 | */ | ||
| 2052 | |||
| 2024 | int qeth_send_control_data(struct qeth_card *card, int len, | 2053 | int qeth_send_control_data(struct qeth_card *card, int len, |
| 2025 | struct qeth_cmd_buffer *iob, | 2054 | struct qeth_cmd_buffer *iob, |
| 2026 | int (*reply_cb)(struct qeth_card *, struct qeth_reply *, | 2055 | int (*reply_cb)(struct qeth_card *cb_card, |
| 2027 | unsigned long), | 2056 | struct qeth_reply *cb_reply, |
| 2057 | unsigned long cb_cmd), | ||
| 2028 | void *reply_param) | 2058 | void *reply_param) |
| 2029 | { | 2059 | { |
| 2030 | int rc; | 2060 | int rc; |
| @@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, | |||
| 2914 | struct qeth_cmd_buffer *iob; | 2944 | struct qeth_cmd_buffer *iob; |
| 2915 | struct qeth_ipa_cmd *cmd; | 2945 | struct qeth_ipa_cmd *cmd; |
| 2916 | 2946 | ||
| 2917 | iob = qeth_wait_for_buffer(&card->write); | 2947 | iob = qeth_get_buffer(&card->write); |
| 2918 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 2948 | if (iob) { |
| 2919 | qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); | 2949 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 2950 | qeth_fill_ipacmd_header(card, cmd, ipacmd, prot); | ||
| 2951 | } else { | ||
| 2952 | dev_warn(&card->gdev->dev, | ||
| 2953 | "The qeth driver ran out of channel command buffers\n"); | ||
| 2954 | QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", | ||
| 2955 | dev_name(&card->gdev->dev)); | ||
| 2956 | } | ||
| 2920 | 2957 | ||
| 2921 | return iob; | 2958 | return iob; |
| 2922 | } | 2959 | } |
| @@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | |||
| 2932 | } | 2969 | } |
| 2933 | EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); | 2970 | EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); |
| 2934 | 2971 | ||
| 2972 | /** | ||
| 2973 | * qeth_send_ipa_cmd() - send an IPA command | ||
| 2974 | * | ||
| 2975 | * See qeth_send_control_data() for explanation of the arguments. | ||
| 2976 | */ | ||
| 2977 | |||
| 2935 | int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, | 2978 | int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, |
| 2936 | int (*reply_cb)(struct qeth_card *, struct qeth_reply*, | 2979 | int (*reply_cb)(struct qeth_card *, struct qeth_reply*, |
| 2937 | unsigned long), | 2980 | unsigned long), |
| @@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card) | |||
| 2968 | QETH_DBF_TEXT(SETUP, 2, "strtlan"); | 3011 | QETH_DBF_TEXT(SETUP, 2, "strtlan"); |
| 2969 | 3012 | ||
| 2970 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); | 3013 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); |
| 3014 | if (!iob) | ||
| 3015 | return -ENOMEM; | ||
| 2971 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); | 3016 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); |
| 2972 | return rc; | 3017 | return rc; |
| 2973 | } | 3018 | } |
| @@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, | |||
| 3013 | 3058 | ||
| 3014 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, | 3059 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, |
| 3015 | QETH_PROT_IPV4); | 3060 | QETH_PROT_IPV4); |
| 3016 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3061 | if (iob) { |
| 3017 | cmd->data.setadapterparms.hdr.cmdlength = cmdlen; | 3062 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 3018 | cmd->data.setadapterparms.hdr.command_code = command; | 3063 | cmd->data.setadapterparms.hdr.cmdlength = cmdlen; |
| 3019 | cmd->data.setadapterparms.hdr.used_total = 1; | 3064 | cmd->data.setadapterparms.hdr.command_code = command; |
| 3020 | cmd->data.setadapterparms.hdr.seq_no = 1; | 3065 | cmd->data.setadapterparms.hdr.used_total = 1; |
| 3066 | cmd->data.setadapterparms.hdr.seq_no = 1; | ||
| 3067 | } | ||
| 3021 | 3068 | ||
| 3022 | return iob; | 3069 | return iob; |
| 3023 | } | 3070 | } |
| @@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card) | |||
| 3030 | QETH_CARD_TEXT(card, 3, "queryadp"); | 3077 | QETH_CARD_TEXT(card, 3, "queryadp"); |
| 3031 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, | 3078 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, |
| 3032 | sizeof(struct qeth_ipacmd_setadpparms)); | 3079 | sizeof(struct qeth_ipacmd_setadpparms)); |
| 3080 | if (!iob) | ||
| 3081 | return -ENOMEM; | ||
| 3033 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); | 3082 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); |
| 3034 | return rc; | 3083 | return rc; |
| 3035 | } | 3084 | } |
| @@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) | |||
| 3080 | 3129 | ||
| 3081 | QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); | 3130 | QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); |
| 3082 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); | 3131 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); |
| 3132 | if (!iob) | ||
| 3133 | return -ENOMEM; | ||
| 3083 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); | 3134 | rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); |
| 3084 | return rc; | 3135 | return rc; |
| 3085 | } | 3136 | } |
| @@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card, | |||
| 3119 | return -ENOMEDIUM; | 3170 | return -ENOMEDIUM; |
| 3120 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, | 3171 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, |
| 3121 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); | 3172 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); |
| 3173 | if (!iob) | ||
| 3174 | return -ENOMEM; | ||
| 3122 | return qeth_send_ipa_cmd(card, iob, | 3175 | return qeth_send_ipa_cmd(card, iob, |
| 3123 | qeth_query_switch_attributes_cb, sw_info); | 3176 | qeth_query_switch_attributes_cb, sw_info); |
| 3124 | } | 3177 | } |
| @@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card) | |||
| 3146 | 3199 | ||
| 3147 | QETH_DBF_TEXT(SETUP, 2, "qdiagass"); | 3200 | QETH_DBF_TEXT(SETUP, 2, "qdiagass"); |
| 3148 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 3201 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
| 3202 | if (!iob) | ||
| 3203 | return -ENOMEM; | ||
| 3149 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3204 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 3150 | cmd->data.diagass.subcmd_len = 16; | 3205 | cmd->data.diagass.subcmd_len = 16; |
| 3151 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; | 3206 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; |
| @@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) | |||
| 3197 | 3252 | ||
| 3198 | QETH_DBF_TEXT(SETUP, 2, "diagtrap"); | 3253 | QETH_DBF_TEXT(SETUP, 2, "diagtrap"); |
| 3199 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 3254 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
| 3255 | if (!iob) | ||
| 3256 | return -ENOMEM; | ||
| 3200 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 3257 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 3201 | cmd->data.diagass.subcmd_len = 80; | 3258 | cmd->data.diagass.subcmd_len = 80; |
| 3202 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; | 3259 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; |
| @@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) | |||
| 4162 | 4219 | ||
| 4163 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, | 4220 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, |
| 4164 | sizeof(struct qeth_ipacmd_setadpparms)); | 4221 | sizeof(struct qeth_ipacmd_setadpparms)); |
| 4222 | if (!iob) | ||
| 4223 | return; | ||
| 4165 | cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); | 4224 | cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); |
| 4166 | cmd->data.setadapterparms.data.mode = mode; | 4225 | cmd->data.setadapterparms.data.mode = mode; |
| 4167 | qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); | 4226 | qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); |
| @@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card) | |||
| 4232 | 4291 | ||
| 4233 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, | 4292 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, |
| 4234 | sizeof(struct qeth_ipacmd_setadpparms)); | 4293 | sizeof(struct qeth_ipacmd_setadpparms)); |
| 4294 | if (!iob) | ||
| 4295 | return -ENOMEM; | ||
| 4235 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4296 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 4236 | cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; | 4297 | cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; |
| 4237 | cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; | 4298 | cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; |
| @@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, | |||
| 4345 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, | 4406 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, |
| 4346 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + | 4407 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + |
| 4347 | sizeof(struct qeth_set_access_ctrl)); | 4408 | sizeof(struct qeth_set_access_ctrl)); |
| 4409 | if (!iob) | ||
| 4410 | return -ENOMEM; | ||
| 4348 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4411 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 4349 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; | 4412 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; |
| 4350 | access_ctrl_req->subcmd_code = isolation; | 4413 | access_ctrl_req->subcmd_code = isolation; |
| @@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
| 4588 | 4651 | ||
| 4589 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, | 4652 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, |
| 4590 | QETH_SNMP_SETADP_CMDLENGTH + req_len); | 4653 | QETH_SNMP_SETADP_CMDLENGTH + req_len); |
| 4654 | if (!iob) { | ||
| 4655 | rc = -ENOMEM; | ||
| 4656 | goto out; | ||
| 4657 | } | ||
| 4591 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4658 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 4592 | memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); | 4659 | memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); |
| 4593 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, | 4660 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, |
| @@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
| 4599 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) | 4666 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) |
| 4600 | rc = -EFAULT; | 4667 | rc = -EFAULT; |
| 4601 | } | 4668 | } |
| 4602 | 4669 | out: | |
| 4603 | kfree(ureq); | 4670 | kfree(ureq); |
| 4604 | kfree(qinfo.udata); | 4671 | kfree(qinfo.udata); |
| 4605 | return rc; | 4672 | return rc; |
| @@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata) | |||
| 4670 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, | 4737 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, |
| 4671 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + | 4738 | sizeof(struct qeth_ipacmd_setadpparms_hdr) + |
| 4672 | sizeof(struct qeth_query_oat)); | 4739 | sizeof(struct qeth_query_oat)); |
| 4740 | if (!iob) { | ||
| 4741 | rc = -ENOMEM; | ||
| 4742 | goto out_free; | ||
| 4743 | } | ||
| 4673 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 4744 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 4674 | oat_req = &cmd->data.setadapterparms.data.query_oat; | 4745 | oat_req = &cmd->data.setadapterparms.data.query_oat; |
| 4675 | oat_req->subcmd_code = oat_data.command; | 4746 | oat_req->subcmd_code = oat_data.command; |
| @@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card, | |||
| 4735 | return -EOPNOTSUPP; | 4806 | return -EOPNOTSUPP; |
| 4736 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, | 4807 | iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, |
| 4737 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); | 4808 | sizeof(struct qeth_ipacmd_setadpparms_hdr)); |
| 4809 | if (!iob) | ||
| 4810 | return -ENOMEM; | ||
| 4738 | return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, | 4811 | return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, |
| 4739 | (void *)carrier_info); | 4812 | (void *)carrier_info); |
| 4740 | } | 4813 | } |
| @@ -5060,11 +5133,23 @@ retriable: | |||
| 5060 | card->options.adp.supported_funcs = 0; | 5133 | card->options.adp.supported_funcs = 0; |
| 5061 | card->options.sbp.supported_funcs = 0; | 5134 | card->options.sbp.supported_funcs = 0; |
| 5062 | card->info.diagass_support = 0; | 5135 | card->info.diagass_support = 0; |
| 5063 | qeth_query_ipassists(card, QETH_PROT_IPV4); | 5136 | rc = qeth_query_ipassists(card, QETH_PROT_IPV4); |
| 5064 | if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) | 5137 | if (rc == -ENOMEM) |
| 5065 | qeth_query_setadapterparms(card); | 5138 | goto out; |
| 5066 | if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) | 5139 | if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { |
| 5067 | qeth_query_setdiagass(card); | 5140 | rc = qeth_query_setadapterparms(card); |
| 5141 | if (rc < 0) { | ||
| 5142 | QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); | ||
| 5143 | goto out; | ||
| 5144 | } | ||
| 5145 | } | ||
| 5146 | if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { | ||
| 5147 | rc = qeth_query_setdiagass(card); | ||
| 5148 | if (rc < 0) { | ||
| 5149 | QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); | ||
| 5150 | goto out; | ||
| 5151 | } | ||
| 5152 | } | ||
| 5068 | return 0; | 5153 | return 0; |
| 5069 | out: | 5154 | out: |
| 5070 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " | 5155 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d02cd1a67943..ce87ae72edbd 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
| @@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); | |||
| 27 | static int qeth_l2_stop(struct net_device *); | 27 | static int qeth_l2_stop(struct net_device *); |
| 28 | static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); | 28 | static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); |
| 29 | static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, | 29 | static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *, |
| 30 | enum qeth_ipa_cmds, | 30 | enum qeth_ipa_cmds); |
| 31 | int (*reply_cb) (struct qeth_card *, | ||
| 32 | struct qeth_reply*, | ||
| 33 | unsigned long)); | ||
| 34 | static void qeth_l2_set_multicast_list(struct net_device *); | 31 | static void qeth_l2_set_multicast_list(struct net_device *); |
| 35 | static int qeth_l2_recover(void *); | 32 | static int qeth_l2_recover(void *); |
| 36 | static void qeth_bridgeport_query_support(struct qeth_card *card); | 33 | static void qeth_bridgeport_query_support(struct qeth_card *card); |
| @@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no) | |||
| 130 | return ndev; | 127 | return ndev; |
| 131 | } | 128 | } |
| 132 | 129 | ||
| 133 | static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | 130 | static int qeth_setdel_makerc(struct qeth_card *card, int retcode) |
| 134 | struct qeth_reply *reply, | ||
| 135 | unsigned long data) | ||
| 136 | { | 131 | { |
| 137 | struct qeth_ipa_cmd *cmd; | 132 | int rc; |
| 138 | __u8 *mac; | ||
| 139 | 133 | ||
| 140 | QETH_CARD_TEXT(card, 2, "L2Sgmacb"); | 134 | if (retcode) |
| 141 | cmd = (struct qeth_ipa_cmd *) data; | 135 | QETH_CARD_TEXT_(card, 2, "err%04x", retcode); |
| 142 | mac = &cmd->data.setdelmac.mac[0]; | 136 | switch (retcode) { |
| 143 | /* MAC already registered, needed in couple/uncouple case */ | 137 | case IPA_RC_SUCCESS: |
| 144 | if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) { | 138 | rc = 0; |
| 145 | QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n", | 139 | break; |
| 146 | mac, QETH_CARD_IFNAME(card)); | 140 | case IPA_RC_L2_UNSUPPORTED_CMD: |
| 147 | cmd->hdr.return_code = 0; | 141 | rc = -ENOSYS; |
| 142 | break; | ||
| 143 | case IPA_RC_L2_ADDR_TABLE_FULL: | ||
| 144 | rc = -ENOSPC; | ||
| 145 | break; | ||
| 146 | case IPA_RC_L2_DUP_MAC: | ||
| 147 | case IPA_RC_L2_DUP_LAYER3_MAC: | ||
| 148 | rc = -EEXIST; | ||
| 149 | break; | ||
| 150 | case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: | ||
| 151 | case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: | ||
| 152 | rc = -EPERM; | ||
| 153 | break; | ||
| 154 | case IPA_RC_L2_MAC_NOT_FOUND: | ||
| 155 | rc = -ENOENT; | ||
| 156 | break; | ||
| 157 | case -ENOMEM: | ||
| 158 | rc = -ENOMEM; | ||
| 159 | break; | ||
| 160 | default: | ||
| 161 | rc = -EIO; | ||
| 162 | break; | ||
| 148 | } | 163 | } |
| 149 | if (cmd->hdr.return_code) | 164 | return rc; |
| 150 | QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n", | ||
| 151 | mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); | ||
| 152 | return 0; | ||
| 153 | } | 165 | } |
| 154 | 166 | ||
| 155 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) | 167 | static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac) |
| 156 | { | 168 | { |
| 157 | QETH_CARD_TEXT(card, 2, "L2Sgmac"); | 169 | int rc; |
| 158 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, | ||
| 159 | qeth_l2_send_setgroupmac_cb); | ||
| 160 | } | ||
| 161 | |||
| 162 | static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | ||
| 163 | struct qeth_reply *reply, | ||
| 164 | unsigned long data) | ||
| 165 | { | ||
| 166 | struct qeth_ipa_cmd *cmd; | ||
| 167 | __u8 *mac; | ||
| 168 | 170 | ||
| 169 | QETH_CARD_TEXT(card, 2, "L2Dgmacb"); | 171 | QETH_CARD_TEXT(card, 2, "L2Sgmac"); |
| 170 | cmd = (struct qeth_ipa_cmd *) data; | 172 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
| 171 | mac = &cmd->data.setdelmac.mac[0]; | 173 | IPA_CMD_SETGMAC)); |
| 172 | if (cmd->hdr.return_code) | 174 | if (rc == -EEXIST) |
| 173 | QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n", | 175 | QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n", |
| 174 | mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 176 | mac, QETH_CARD_IFNAME(card)); |
| 175 | return 0; | 177 | else if (rc) |
| 178 | QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n", | ||
| 179 | mac, QETH_CARD_IFNAME(card), rc); | ||
| 180 | return rc; | ||
| 176 | } | 181 | } |
| 177 | 182 | ||
| 178 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) | 183 | static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac) |
| 179 | { | 184 | { |
| 185 | int rc; | ||
| 186 | |||
| 180 | QETH_CARD_TEXT(card, 2, "L2Dgmac"); | 187 | QETH_CARD_TEXT(card, 2, "L2Dgmac"); |
| 181 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, | 188 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
| 182 | qeth_l2_send_delgroupmac_cb); | 189 | IPA_CMD_DELGMAC)); |
| 190 | if (rc) | ||
| 191 | QETH_DBF_MESSAGE(2, | ||
| 192 | "Could not delete group MAC %pM on %s: %d\n", | ||
| 193 | mac, QETH_CARD_IFNAME(card), rc); | ||
| 194 | return rc; | ||
| 183 | } | 195 | } |
| 184 | 196 | ||
| 185 | static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) | 197 | static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) |
| @@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) | |||
| 197 | mc->is_vmac = vmac; | 209 | mc->is_vmac = vmac; |
| 198 | 210 | ||
| 199 | if (vmac) { | 211 | if (vmac) { |
| 200 | rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | 212 | rc = qeth_setdel_makerc(card, |
| 201 | NULL); | 213 | qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC)); |
| 202 | } else { | 214 | } else { |
| 203 | rc = qeth_l2_send_setgroupmac(card, mac); | 215 | rc = qeth_setdel_makerc(card, |
| 216 | qeth_l2_send_setgroupmac(card, mac)); | ||
| 204 | } | 217 | } |
| 205 | 218 | ||
| 206 | if (!rc) | 219 | if (!rc) |
| @@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del) | |||
| 218 | if (del) { | 231 | if (del) { |
| 219 | if (mc->is_vmac) | 232 | if (mc->is_vmac) |
| 220 | qeth_l2_send_setdelmac(card, mc->mc_addr, | 233 | qeth_l2_send_setdelmac(card, mc->mc_addr, |
| 221 | IPA_CMD_DELVMAC, NULL); | 234 | IPA_CMD_DELVMAC); |
| 222 | else | 235 | else |
| 223 | qeth_l2_send_delgroupmac(card, mc->mc_addr); | 236 | qeth_l2_send_delgroupmac(card, mc->mc_addr); |
| 224 | } | 237 | } |
| @@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, | |||
| 291 | 304 | ||
| 292 | QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); | 305 | QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); |
| 293 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 306 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
| 307 | if (!iob) | ||
| 308 | return -ENOMEM; | ||
| 294 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 309 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 295 | cmd->data.setdelvlan.vlan_id = i; | 310 | cmd->data.setdelvlan.vlan_id = i; |
| 296 | return qeth_send_ipa_cmd(card, iob, | 311 | return qeth_send_ipa_cmd(card, iob, |
| @@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, | |||
| 313 | { | 328 | { |
| 314 | struct qeth_card *card = dev->ml_priv; | 329 | struct qeth_card *card = dev->ml_priv; |
| 315 | struct qeth_vlan_vid *id; | 330 | struct qeth_vlan_vid *id; |
| 331 | int rc; | ||
| 316 | 332 | ||
| 317 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); | 333 | QETH_CARD_TEXT_(card, 4, "aid:%d", vid); |
| 318 | if (!vid) | 334 | if (!vid) |
| @@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, | |||
| 328 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); | 344 | id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); |
| 329 | if (id) { | 345 | if (id) { |
| 330 | id->vid = vid; | 346 | id->vid = vid; |
| 331 | qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); | 347 | rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); |
| 348 | if (rc) { | ||
| 349 | kfree(id); | ||
| 350 | return rc; | ||
| 351 | } | ||
| 332 | spin_lock_bh(&card->vlanlock); | 352 | spin_lock_bh(&card->vlanlock); |
| 333 | list_add_tail(&id->list, &card->vid_list); | 353 | list_add_tail(&id->list, &card->vid_list); |
| 334 | spin_unlock_bh(&card->vlanlock); | 354 | spin_unlock_bh(&card->vlanlock); |
| @@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, | |||
| 343 | { | 363 | { |
| 344 | struct qeth_vlan_vid *id, *tmpid = NULL; | 364 | struct qeth_vlan_vid *id, *tmpid = NULL; |
| 345 | struct qeth_card *card = dev->ml_priv; | 365 | struct qeth_card *card = dev->ml_priv; |
| 366 | int rc = 0; | ||
| 346 | 367 | ||
| 347 | QETH_CARD_TEXT_(card, 4, "kid:%d", vid); | 368 | QETH_CARD_TEXT_(card, 4, "kid:%d", vid); |
| 348 | if (card->info.type == QETH_CARD_TYPE_OSM) { | 369 | if (card->info.type == QETH_CARD_TYPE_OSM) { |
| @@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, | |||
| 363 | } | 384 | } |
| 364 | spin_unlock_bh(&card->vlanlock); | 385 | spin_unlock_bh(&card->vlanlock); |
| 365 | if (tmpid) { | 386 | if (tmpid) { |
| 366 | qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); | 387 | rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); |
| 367 | kfree(tmpid); | 388 | kfree(tmpid); |
| 368 | } | 389 | } |
| 369 | qeth_l2_set_multicast_list(card->dev); | 390 | qeth_l2_set_multicast_list(card->dev); |
| 370 | return 0; | 391 | return rc; |
| 371 | } | 392 | } |
| 372 | 393 | ||
| 373 | static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | 394 | static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) |
| @@ -539,91 +560,62 @@ out: | |||
| 539 | } | 560 | } |
| 540 | 561 | ||
| 541 | static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, | 562 | static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, |
| 542 | enum qeth_ipa_cmds ipacmd, | 563 | enum qeth_ipa_cmds ipacmd) |
| 543 | int (*reply_cb) (struct qeth_card *, | ||
| 544 | struct qeth_reply*, | ||
| 545 | unsigned long)) | ||
| 546 | { | 564 | { |
| 547 | struct qeth_ipa_cmd *cmd; | 565 | struct qeth_ipa_cmd *cmd; |
| 548 | struct qeth_cmd_buffer *iob; | 566 | struct qeth_cmd_buffer *iob; |
| 549 | 567 | ||
| 550 | QETH_CARD_TEXT(card, 2, "L2sdmac"); | 568 | QETH_CARD_TEXT(card, 2, "L2sdmac"); |
| 551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); | 569 | iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); |
| 570 | if (!iob) | ||
| 571 | return -ENOMEM; | ||
| 552 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 572 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 553 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; | 573 | cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; |
| 554 | memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); | 574 | memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); |
| 555 | return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); | 575 | return qeth_send_ipa_cmd(card, iob, NULL, NULL); |
| 556 | } | 576 | } |
| 557 | 577 | ||
| 558 | static int qeth_l2_send_setmac_cb(struct qeth_card *card, | 578 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) |
| 559 | struct qeth_reply *reply, | ||
| 560 | unsigned long data) | ||
| 561 | { | 579 | { |
| 562 | struct qeth_ipa_cmd *cmd; | 580 | int rc; |
| 563 | 581 | ||
| 564 | QETH_CARD_TEXT(card, 2, "L2Smaccb"); | 582 | QETH_CARD_TEXT(card, 2, "L2Setmac"); |
| 565 | cmd = (struct qeth_ipa_cmd *) data; | 583 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
| 566 | if (cmd->hdr.return_code) { | 584 | IPA_CMD_SETVMAC)); |
| 567 | QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code); | 585 | if (rc == 0) { |
| 586 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | ||
| 587 | memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN); | ||
| 588 | dev_info(&card->gdev->dev, | ||
| 589 | "MAC address %pM successfully registered on device %s\n", | ||
| 590 | card->dev->dev_addr, card->dev->name); | ||
| 591 | } else { | ||
| 568 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | 592 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; |
| 569 | switch (cmd->hdr.return_code) { | 593 | switch (rc) { |
| 570 | case IPA_RC_L2_DUP_MAC: | 594 | case -EEXIST: |
| 571 | case IPA_RC_L2_DUP_LAYER3_MAC: | ||
| 572 | dev_warn(&card->gdev->dev, | 595 | dev_warn(&card->gdev->dev, |
| 573 | "MAC address %pM already exists\n", | 596 | "MAC address %pM already exists\n", mac); |
| 574 | cmd->data.setdelmac.mac); | ||
| 575 | break; | 597 | break; |
| 576 | case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: | 598 | case -EPERM: |
| 577 | case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: | ||
| 578 | dev_warn(&card->gdev->dev, | 599 | dev_warn(&card->gdev->dev, |
| 579 | "MAC address %pM is not authorized\n", | 600 | "MAC address %pM is not authorized\n", mac); |
| 580 | cmd->data.setdelmac.mac); | ||
| 581 | break; | ||
| 582 | default: | ||
| 583 | break; | 601 | break; |
| 584 | } | 602 | } |
| 585 | } else { | ||
| 586 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | ||
| 587 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, | ||
| 588 | OSA_ADDR_LEN); | ||
| 589 | dev_info(&card->gdev->dev, | ||
| 590 | "MAC address %pM successfully registered on device %s\n", | ||
| 591 | card->dev->dev_addr, card->dev->name); | ||
| 592 | } | ||
| 593 | return 0; | ||
| 594 | } | ||
| 595 | |||
| 596 | static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) | ||
| 597 | { | ||
| 598 | QETH_CARD_TEXT(card, 2, "L2Setmac"); | ||
| 599 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, | ||
| 600 | qeth_l2_send_setmac_cb); | ||
| 601 | } | ||
| 602 | |||
| 603 | static int qeth_l2_send_delmac_cb(struct qeth_card *card, | ||
| 604 | struct qeth_reply *reply, | ||
| 605 | unsigned long data) | ||
| 606 | { | ||
| 607 | struct qeth_ipa_cmd *cmd; | ||
| 608 | |||
| 609 | QETH_CARD_TEXT(card, 2, "L2Dmaccb"); | ||
| 610 | cmd = (struct qeth_ipa_cmd *) data; | ||
| 611 | if (cmd->hdr.return_code) { | ||
| 612 | QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); | ||
| 613 | return 0; | ||
| 614 | } | 603 | } |
| 615 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | 604 | return rc; |
| 616 | |||
| 617 | return 0; | ||
| 618 | } | 605 | } |
| 619 | 606 | ||
| 620 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) | 607 | static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) |
| 621 | { | 608 | { |
| 609 | int rc; | ||
| 610 | |||
| 622 | QETH_CARD_TEXT(card, 2, "L2Delmac"); | 611 | QETH_CARD_TEXT(card, 2, "L2Delmac"); |
| 623 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) | 612 | if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) |
| 624 | return 0; | 613 | return 0; |
| 625 | return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC, | 614 | rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac, |
| 626 | qeth_l2_send_delmac_cb); | 615 | IPA_CMD_DELVMAC)); |
| 616 | if (rc == 0) | ||
| 617 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; | ||
| 618 | return rc; | ||
| 627 | } | 619 | } |
| 628 | 620 | ||
| 629 | static int qeth_l2_request_initial_mac(struct qeth_card *card) | 621 | static int qeth_l2_request_initial_mac(struct qeth_card *card) |
| @@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) | |||
| 651 | if (rc) { | 643 | if (rc) { |
| 652 | QETH_DBF_MESSAGE(2, "couldn't get MAC address on " | 644 | QETH_DBF_MESSAGE(2, "couldn't get MAC address on " |
| 653 | "device %s: x%x\n", CARD_BUS_ID(card), rc); | 645 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
| 654 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 646 | QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); |
| 655 | return rc; | 647 | return rc; |
| 656 | } | 648 | } |
| 657 | QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); | 649 | QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN); |
| @@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
| 687 | return -ERESTARTSYS; | 679 | return -ERESTARTSYS; |
| 688 | } | 680 | } |
| 689 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); | 681 | rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); |
| 690 | if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND)) | 682 | if (!rc || (rc == -ENOENT)) |
| 691 | rc = qeth_l2_send_setmac(card, addr->sa_data); | 683 | rc = qeth_l2_send_setmac(card, addr->sa_data); |
| 692 | return rc ? -EINVAL : 0; | 684 | return rc ? -EINVAL : 0; |
| 693 | } | 685 | } |
| @@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
| 996 | recover_flag = card->state; | 988 | recover_flag = card->state; |
| 997 | rc = qeth_core_hardsetup_card(card); | 989 | rc = qeth_core_hardsetup_card(card); |
| 998 | if (rc) { | 990 | if (rc) { |
| 999 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 991 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
| 1000 | rc = -ENODEV; | 992 | rc = -ENODEV; |
| 1001 | goto out_remove; | 993 | goto out_remove; |
| 1002 | } | 994 | } |
| @@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card) | |||
| 1730 | 1722 | ||
| 1731 | QETH_CARD_TEXT(card, 2, "brqsuppo"); | 1723 | QETH_CARD_TEXT(card, 2, "brqsuppo"); |
| 1732 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1724 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
| 1725 | if (!iob) | ||
| 1726 | return; | ||
| 1733 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1727 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 1734 | cmd->data.sbp.hdr.cmdlength = | 1728 | cmd->data.sbp.hdr.cmdlength = |
| 1735 | sizeof(struct qeth_ipacmd_sbp_hdr) + | 1729 | sizeof(struct qeth_ipacmd_sbp_hdr) + |
| @@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, | |||
| 1805 | if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) | 1799 | if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) |
| 1806 | return -EOPNOTSUPP; | 1800 | return -EOPNOTSUPP; |
| 1807 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1801 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
| 1802 | if (!iob) | ||
| 1803 | return -ENOMEM; | ||
| 1808 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1804 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 1809 | cmd->data.sbp.hdr.cmdlength = | 1805 | cmd->data.sbp.hdr.cmdlength = |
| 1810 | sizeof(struct qeth_ipacmd_sbp_hdr); | 1806 | sizeof(struct qeth_ipacmd_sbp_hdr); |
| @@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, | |||
| 1817 | if (rc) | 1813 | if (rc) |
| 1818 | return rc; | 1814 | return rc; |
| 1819 | rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); | 1815 | rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); |
| 1820 | if (rc) | 1816 | return rc; |
| 1821 | return rc; | ||
| 1822 | return 0; | ||
| 1823 | } | 1817 | } |
| 1824 | EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); | 1818 | EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports); |
| 1825 | 1819 | ||
| @@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) | |||
| 1873 | if (!(card->options.sbp.supported_funcs & setcmd)) | 1867 | if (!(card->options.sbp.supported_funcs & setcmd)) |
| 1874 | return -EOPNOTSUPP; | 1868 | return -EOPNOTSUPP; |
| 1875 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); | 1869 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0); |
| 1870 | if (!iob) | ||
| 1871 | return -ENOMEM; | ||
| 1876 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1872 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 1877 | cmd->data.sbp.hdr.cmdlength = cmdlength; | 1873 | cmd->data.sbp.hdr.cmdlength = cmdlength; |
| 1878 | cmd->data.sbp.hdr.command_code = setcmd; | 1874 | cmd->data.sbp.hdr.command_code = setcmd; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 625227ad16ee..e2a0ee845399 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
| @@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, | |||
| 549 | QETH_CARD_TEXT(card, 4, "setdelmc"); | 549 | QETH_CARD_TEXT(card, 4, "setdelmc"); |
| 550 | 550 | ||
| 551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 551 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
| 552 | if (!iob) | ||
| 553 | return -ENOMEM; | ||
| 552 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 554 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 553 | memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); | 555 | memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN); |
| 554 | if (addr->proto == QETH_PROT_IPV6) | 556 | if (addr->proto == QETH_PROT_IPV6) |
| @@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, | |||
| 588 | QETH_CARD_TEXT_(card, 4, "flags%02X", flags); | 590 | QETH_CARD_TEXT_(card, 4, "flags%02X", flags); |
| 589 | 591 | ||
| 590 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); | 592 | iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); |
| 593 | if (!iob) | ||
| 594 | return -ENOMEM; | ||
| 591 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 595 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 592 | if (addr->proto == QETH_PROT_IPV6) { | 596 | if (addr->proto == QETH_PROT_IPV6) { |
| 593 | memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, | 597 | memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, |
| @@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, | |||
| 616 | 620 | ||
| 617 | QETH_CARD_TEXT(card, 4, "setroutg"); | 621 | QETH_CARD_TEXT(card, 4, "setroutg"); |
| 618 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); | 622 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); |
| 623 | if (!iob) | ||
| 624 | return -ENOMEM; | ||
| 619 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 625 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 620 | cmd->data.setrtg.type = (type); | 626 | cmd->data.setrtg.type = (type); |
| 621 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); | 627 | rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); |
| @@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd( | |||
| 1049 | QETH_CARD_TEXT(card, 4, "getasscm"); | 1055 | QETH_CARD_TEXT(card, 4, "getasscm"); |
| 1050 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); | 1056 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); |
| 1051 | 1057 | ||
| 1052 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1058 | if (iob) { |
| 1053 | cmd->data.setassparms.hdr.assist_no = ipa_func; | 1059 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 1054 | cmd->data.setassparms.hdr.length = 8 + len; | 1060 | cmd->data.setassparms.hdr.assist_no = ipa_func; |
| 1055 | cmd->data.setassparms.hdr.command_code = cmd_code; | 1061 | cmd->data.setassparms.hdr.length = 8 + len; |
| 1056 | cmd->data.setassparms.hdr.return_code = 0; | 1062 | cmd->data.setassparms.hdr.command_code = cmd_code; |
| 1057 | cmd->data.setassparms.hdr.seq_no = 0; | 1063 | cmd->data.setassparms.hdr.return_code = 0; |
| 1064 | cmd->data.setassparms.hdr.seq_no = 0; | ||
| 1065 | } | ||
| 1058 | 1066 | ||
| 1059 | return iob; | 1067 | return iob; |
| 1060 | } | 1068 | } |
| @@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, | |||
| 1090 | QETH_CARD_TEXT(card, 4, "simassp6"); | 1098 | QETH_CARD_TEXT(card, 4, "simassp6"); |
| 1091 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1099 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
| 1092 | 0, QETH_PROT_IPV6); | 1100 | 0, QETH_PROT_IPV6); |
| 1101 | if (!iob) | ||
| 1102 | return -ENOMEM; | ||
| 1093 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, | 1103 | rc = qeth_l3_send_setassparms(card, iob, 0, 0, |
| 1094 | qeth_l3_default_setassparms_cb, NULL); | 1104 | qeth_l3_default_setassparms_cb, NULL); |
| 1095 | return rc; | 1105 | return rc; |
| @@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card, | |||
| 1108 | length = sizeof(__u32); | 1118 | length = sizeof(__u32); |
| 1109 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, | 1119 | iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code, |
| 1110 | length, QETH_PROT_IPV4); | 1120 | length, QETH_PROT_IPV4); |
| 1121 | if (!iob) | ||
| 1122 | return -ENOMEM; | ||
| 1111 | rc = qeth_l3_send_setassparms(card, iob, length, data, | 1123 | rc = qeth_l3_send_setassparms(card, iob, length, data, |
| 1112 | qeth_l3_default_setassparms_cb, NULL); | 1124 | qeth_l3_default_setassparms_cb, NULL); |
| 1113 | return rc; | 1125 | return rc; |
| @@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) | |||
| 1494 | 1506 | ||
| 1495 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, | 1507 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, |
| 1496 | QETH_PROT_IPV6); | 1508 | QETH_PROT_IPV6); |
| 1509 | if (!iob) | ||
| 1510 | return -ENOMEM; | ||
| 1497 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1511 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 1498 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = | 1512 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = |
| 1499 | card->info.unique_id; | 1513 | card->info.unique_id; |
| @@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card) | |||
| 1537 | 1551 | ||
| 1538 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, | 1552 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, |
| 1539 | QETH_PROT_IPV6); | 1553 | QETH_PROT_IPV6); |
| 1554 | if (!iob) | ||
| 1555 | return -ENOMEM; | ||
| 1540 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1556 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 1541 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = | 1557 | *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = |
| 1542 | card->info.unique_id; | 1558 | card->info.unique_id; |
| @@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) | |||
| 1611 | QETH_DBF_TEXT(SETUP, 2, "diagtrac"); | 1627 | QETH_DBF_TEXT(SETUP, 2, "diagtrac"); |
| 1612 | 1628 | ||
| 1613 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); | 1629 | iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); |
| 1630 | if (!iob) | ||
| 1631 | return -ENOMEM; | ||
| 1614 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 1632 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 1615 | cmd->data.diagass.subcmd_len = 16; | 1633 | cmd->data.diagass.subcmd_len = 16; |
| 1616 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; | 1634 | cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; |
| @@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, | |||
| 2442 | IPA_CMD_ASS_ARP_QUERY_INFO, | 2460 | IPA_CMD_ASS_ARP_QUERY_INFO, |
| 2443 | sizeof(struct qeth_arp_query_data) - sizeof(char), | 2461 | sizeof(struct qeth_arp_query_data) - sizeof(char), |
| 2444 | prot); | 2462 | prot); |
| 2463 | if (!iob) | ||
| 2464 | return -ENOMEM; | ||
| 2445 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); | 2465 | cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); |
| 2446 | cmd->data.setassparms.data.query_arp.request_bits = 0x000F; | 2466 | cmd->data.setassparms.data.query_arp.request_bits = 0x000F; |
| 2447 | cmd->data.setassparms.data.query_arp.reply_bits = 0; | 2467 | cmd->data.setassparms.data.query_arp.reply_bits = 0; |
| @@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
| 2535 | IPA_CMD_ASS_ARP_ADD_ENTRY, | 2555 | IPA_CMD_ASS_ARP_ADD_ENTRY, |
| 2536 | sizeof(struct qeth_arp_cache_entry), | 2556 | sizeof(struct qeth_arp_cache_entry), |
| 2537 | QETH_PROT_IPV4); | 2557 | QETH_PROT_IPV4); |
| 2558 | if (!iob) | ||
| 2559 | return -ENOMEM; | ||
| 2538 | rc = qeth_l3_send_setassparms(card, iob, | 2560 | rc = qeth_l3_send_setassparms(card, iob, |
| 2539 | sizeof(struct qeth_arp_cache_entry), | 2561 | sizeof(struct qeth_arp_cache_entry), |
| 2540 | (unsigned long) entry, | 2562 | (unsigned long) entry, |
| @@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
| 2574 | IPA_CMD_ASS_ARP_REMOVE_ENTRY, | 2596 | IPA_CMD_ASS_ARP_REMOVE_ENTRY, |
| 2575 | 12, | 2597 | 12, |
| 2576 | QETH_PROT_IPV4); | 2598 | QETH_PROT_IPV4); |
| 2599 | if (!iob) | ||
| 2600 | return -ENOMEM; | ||
| 2577 | rc = qeth_l3_send_setassparms(card, iob, | 2601 | rc = qeth_l3_send_setassparms(card, iob, |
| 2578 | 12, (unsigned long)buf, | 2602 | 12, (unsigned long)buf, |
| 2579 | qeth_l3_default_setassparms_cb, NULL); | 2603 | qeth_l3_default_setassparms_cb, NULL); |
| @@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { | |||
| 3262 | 3286 | ||
| 3263 | static int qeth_l3_setup_netdev(struct qeth_card *card) | 3287 | static int qeth_l3_setup_netdev(struct qeth_card *card) |
| 3264 | { | 3288 | { |
| 3289 | int rc; | ||
| 3290 | |||
| 3265 | if (card->info.type == QETH_CARD_TYPE_OSD || | 3291 | if (card->info.type == QETH_CARD_TYPE_OSD || |
| 3266 | card->info.type == QETH_CARD_TYPE_OSX) { | 3292 | card->info.type == QETH_CARD_TYPE_OSX) { |
| 3267 | if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || | 3293 | if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || |
| @@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) | |||
| 3293 | return -ENODEV; | 3319 | return -ENODEV; |
| 3294 | card->dev->flags |= IFF_NOARP; | 3320 | card->dev->flags |= IFF_NOARP; |
| 3295 | card->dev->netdev_ops = &qeth_l3_netdev_ops; | 3321 | card->dev->netdev_ops = &qeth_l3_netdev_ops; |
| 3296 | qeth_l3_iqd_read_initial_mac(card); | 3322 | rc = qeth_l3_iqd_read_initial_mac(card); |
| 3323 | if (rc) | ||
| 3324 | return rc; | ||
| 3297 | if (card->options.hsuid[0]) | 3325 | if (card->options.hsuid[0]) |
| 3298 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); | 3326 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); |
| 3299 | } else | 3327 | } else |
| @@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
| 3360 | recover_flag = card->state; | 3388 | recover_flag = card->state; |
| 3361 | rc = qeth_core_hardsetup_card(card); | 3389 | rc = qeth_core_hardsetup_card(card); |
| 3362 | if (rc) { | 3390 | if (rc) { |
| 3363 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 3391 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
| 3364 | rc = -ENODEV; | 3392 | rc = -ENODEV; |
| 3365 | goto out_remove; | 3393 | goto out_remove; |
| 3366 | } | 3394 | } |
| @@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
| 3401 | contin: | 3429 | contin: |
| 3402 | rc = qeth_l3_setadapter_parms(card); | 3430 | rc = qeth_l3_setadapter_parms(card); |
| 3403 | if (rc) | 3431 | if (rc) |
| 3404 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 3432 | QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); |
| 3405 | if (!card->options.sniffer) { | 3433 | if (!card->options.sniffer) { |
| 3406 | rc = qeth_l3_start_ipassists(card); | 3434 | rc = qeth_l3_start_ipassists(card); |
| 3407 | if (rc) { | 3435 | if (rc) { |
| @@ -3410,10 +3438,10 @@ contin: | |||
| 3410 | } | 3438 | } |
| 3411 | rc = qeth_l3_setrouting_v4(card); | 3439 | rc = qeth_l3_setrouting_v4(card); |
| 3412 | if (rc) | 3440 | if (rc) |
| 3413 | QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); | 3441 | QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc); |
| 3414 | rc = qeth_l3_setrouting_v6(card); | 3442 | rc = qeth_l3_setrouting_v6(card); |
| 3415 | if (rc) | 3443 | if (rc) |
| 3416 | QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); | 3444 | QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc); |
| 3417 | } | 3445 | } |
| 3418 | netif_tx_disable(card->dev); | 3446 | netif_tx_disable(card->dev); |
| 3419 | 3447 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index df4e27cd996a..9219953ee949 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd, | |||
| 683 | ipr_reinit_ipr_cmnd(ipr_cmd); | 683 | ipr_reinit_ipr_cmnd(ipr_cmd); |
| 684 | ipr_cmd->u.scratch = 0; | 684 | ipr_cmd->u.scratch = 0; |
| 685 | ipr_cmd->sibling = NULL; | 685 | ipr_cmd->sibling = NULL; |
| 686 | ipr_cmd->eh_comp = NULL; | ||
| 686 | ipr_cmd->fast_done = fast_done; | 687 | ipr_cmd->fast_done = fast_done; |
| 687 | init_timer(&ipr_cmd->timer); | 688 | init_timer(&ipr_cmd->timer); |
| 688 | } | 689 | } |
| @@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) | |||
| 848 | 849 | ||
| 849 | scsi_dma_unmap(ipr_cmd->scsi_cmd); | 850 | scsi_dma_unmap(ipr_cmd->scsi_cmd); |
| 850 | scsi_cmd->scsi_done(scsi_cmd); | 851 | scsi_cmd->scsi_done(scsi_cmd); |
| 852 | if (ipr_cmd->eh_comp) | ||
| 853 | complete(ipr_cmd->eh_comp); | ||
| 851 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); | 854 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); |
| 852 | } | 855 | } |
| 853 | 856 | ||
| @@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev) | |||
| 4811 | return rc; | 4814 | return rc; |
| 4812 | } | 4815 | } |
| 4813 | 4816 | ||
| 4817 | /** | ||
| 4818 | * ipr_match_lun - Match function for specified LUN | ||
| 4819 | * @ipr_cmd: ipr command struct | ||
| 4820 | * @device: device to match (sdev) | ||
| 4821 | * | ||
| 4822 | * Returns: | ||
| 4823 | * 1 if command matches sdev / 0 if command does not match sdev | ||
| 4824 | **/ | ||
| 4825 | static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) | ||
| 4826 | { | ||
| 4827 | if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) | ||
| 4828 | return 1; | ||
| 4829 | return 0; | ||
| 4830 | } | ||
| 4831 | |||
| 4832 | /** | ||
| 4833 | * ipr_wait_for_ops - Wait for matching commands to complete | ||
| 4834 | * @ipr_cmd: ipr command struct | ||
| 4835 | * @device: device to match (sdev) | ||
| 4836 | * @match: match function to use | ||
| 4837 | * | ||
| 4838 | * Returns: | ||
| 4839 | * SUCCESS / FAILED | ||
| 4840 | **/ | ||
| 4841 | static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, | ||
| 4842 | int (*match)(struct ipr_cmnd *, void *)) | ||
| 4843 | { | ||
| 4844 | struct ipr_cmnd *ipr_cmd; | ||
| 4845 | int wait; | ||
| 4846 | unsigned long flags; | ||
| 4847 | struct ipr_hrr_queue *hrrq; | ||
| 4848 | signed long timeout = IPR_ABORT_TASK_TIMEOUT; | ||
| 4849 | DECLARE_COMPLETION_ONSTACK(comp); | ||
| 4850 | |||
| 4851 | ENTER; | ||
| 4852 | do { | ||
| 4853 | wait = 0; | ||
| 4854 | |||
| 4855 | for_each_hrrq(hrrq, ioa_cfg) { | ||
| 4856 | spin_lock_irqsave(hrrq->lock, flags); | ||
| 4857 | list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { | ||
| 4858 | if (match(ipr_cmd, device)) { | ||
| 4859 | ipr_cmd->eh_comp = ∁ | ||
| 4860 | wait++; | ||
| 4861 | } | ||
| 4862 | } | ||
| 4863 | spin_unlock_irqrestore(hrrq->lock, flags); | ||
| 4864 | } | ||
| 4865 | |||
| 4866 | if (wait) { | ||
| 4867 | timeout = wait_for_completion_timeout(&comp, timeout); | ||
| 4868 | |||
| 4869 | if (!timeout) { | ||
| 4870 | wait = 0; | ||
| 4871 | |||
| 4872 | for_each_hrrq(hrrq, ioa_cfg) { | ||
| 4873 | spin_lock_irqsave(hrrq->lock, flags); | ||
| 4874 | list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { | ||
| 4875 | if (match(ipr_cmd, device)) { | ||
| 4876 | ipr_cmd->eh_comp = NULL; | ||
| 4877 | wait++; | ||
| 4878 | } | ||
| 4879 | } | ||
| 4880 | spin_unlock_irqrestore(hrrq->lock, flags); | ||
| 4881 | } | ||
| 4882 | |||
| 4883 | if (wait) | ||
| 4884 | dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); | ||
| 4885 | LEAVE; | ||
| 4886 | return wait ? FAILED : SUCCESS; | ||
| 4887 | } | ||
| 4888 | } | ||
| 4889 | } while (wait); | ||
| 4890 | |||
| 4891 | LEAVE; | ||
| 4892 | return SUCCESS; | ||
| 4893 | } | ||
| 4894 | |||
| 4814 | static int ipr_eh_host_reset(struct scsi_cmnd *cmd) | 4895 | static int ipr_eh_host_reset(struct scsi_cmnd *cmd) |
| 4815 | { | 4896 | { |
| 4816 | struct ipr_ioa_cfg *ioa_cfg; | 4897 | struct ipr_ioa_cfg *ioa_cfg; |
| @@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) | |||
| 5030 | static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) | 5111 | static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) |
| 5031 | { | 5112 | { |
| 5032 | int rc; | 5113 | int rc; |
| 5114 | struct ipr_ioa_cfg *ioa_cfg; | ||
| 5115 | |||
| 5116 | ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; | ||
| 5033 | 5117 | ||
| 5034 | spin_lock_irq(cmd->device->host->host_lock); | 5118 | spin_lock_irq(cmd->device->host->host_lock); |
| 5035 | rc = __ipr_eh_dev_reset(cmd); | 5119 | rc = __ipr_eh_dev_reset(cmd); |
| 5036 | spin_unlock_irq(cmd->device->host->host_lock); | 5120 | spin_unlock_irq(cmd->device->host->host_lock); |
| 5037 | 5121 | ||
| 5122 | if (rc == SUCCESS) | ||
| 5123 | rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); | ||
| 5124 | |||
| 5038 | return rc; | 5125 | return rc; |
| 5039 | } | 5126 | } |
| 5040 | 5127 | ||
| @@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) | |||
| 5234 | { | 5321 | { |
| 5235 | unsigned long flags; | 5322 | unsigned long flags; |
| 5236 | int rc; | 5323 | int rc; |
| 5324 | struct ipr_ioa_cfg *ioa_cfg; | ||
| 5237 | 5325 | ||
| 5238 | ENTER; | 5326 | ENTER; |
| 5239 | 5327 | ||
| 5328 | ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; | ||
| 5329 | |||
| 5240 | spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); | 5330 | spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); |
| 5241 | rc = ipr_cancel_op(scsi_cmd); | 5331 | rc = ipr_cancel_op(scsi_cmd); |
| 5242 | spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); | 5332 | spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); |
| 5243 | 5333 | ||
| 5334 | if (rc == SUCCESS) | ||
| 5335 | rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); | ||
| 5244 | LEAVE; | 5336 | LEAVE; |
| 5245 | return rc; | 5337 | return rc; |
| 5246 | } | 5338 | } |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index b4f3eec51bc9..ec03b42fa2b9 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
| @@ -1606,6 +1606,7 @@ struct ipr_cmnd { | |||
| 1606 | struct scsi_device *sdev; | 1606 | struct scsi_device *sdev; |
| 1607 | } u; | 1607 | } u; |
| 1608 | 1608 | ||
| 1609 | struct completion *eh_comp; | ||
| 1609 | struct ipr_hrr_queue *hrrq; | 1610 | struct ipr_hrr_queue *hrrq; |
| 1610 | struct ipr_ioa_cfg *ioa_cfg; | 1611 | struct ipr_ioa_cfg *ioa_cfg; |
| 1611 | }; | 1612 | }; |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index e02885451425..9b3829931f40 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
| @@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev) | |||
| 986 | return -ENXIO; | 986 | return -ENXIO; |
| 987 | if (!get_device(&sdev->sdev_gendev)) | 987 | if (!get_device(&sdev->sdev_gendev)) |
| 988 | return -ENXIO; | 988 | return -ENXIO; |
| 989 | /* We can fail this if we're doing SCSI operations | 989 | /* We can fail try_module_get if we're doing SCSI operations |
| 990 | * from module exit (like cache flush) */ | 990 | * from module exit (like cache flush) */ |
| 991 | try_module_get(sdev->host->hostt->module); | 991 | __module_get(sdev->host->hostt->module); |
| 992 | 992 | ||
| 993 | return 0; | 993 | return 0; |
| 994 | } | 994 | } |
| @@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get); | |||
| 1004 | */ | 1004 | */ |
| 1005 | void scsi_device_put(struct scsi_device *sdev) | 1005 | void scsi_device_put(struct scsi_device *sdev) |
| 1006 | { | 1006 | { |
| 1007 | #ifdef CONFIG_MODULE_UNLOAD | 1007 | module_put(sdev->host->hostt->module); |
| 1008 | struct module *module = sdev->host->hostt->module; | ||
| 1009 | |||
| 1010 | /* The module refcount will be zero if scsi_device_get() | ||
| 1011 | * was called from a module removal routine */ | ||
| 1012 | if (module && module_refcount(module) != 0) | ||
| 1013 | module_put(module); | ||
| 1014 | #endif | ||
| 1015 | put_device(&sdev->sdev_gendev); | 1008 | put_device(&sdev->sdev_gendev); |
| 1016 | } | 1009 | } |
| 1017 | EXPORT_SYMBOL(scsi_device_put); | 1010 | EXPORT_SYMBOL(scsi_device_put); |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 7b8b51bc29b4..4aca1b0378c2 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
| @@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
| 1623 | req_opcode = cmd[3]; | 1623 | req_opcode = cmd[3]; |
| 1624 | req_sa = get_unaligned_be16(cmd + 4); | 1624 | req_sa = get_unaligned_be16(cmd + 4); |
| 1625 | alloc_len = get_unaligned_be32(cmd + 6); | 1625 | alloc_len = get_unaligned_be32(cmd + 6); |
| 1626 | if (alloc_len < 4 && alloc_len > 0xffff) { | 1626 | if (alloc_len < 4 || alloc_len > 0xffff) { |
| 1627 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); | 1627 | mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1); |
| 1628 | return check_condition_result; | 1628 | return check_condition_result; |
| 1629 | } | 1629 | } |
| @@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) | |||
| 1631 | a_len = 8192; | 1631 | a_len = 8192; |
| 1632 | else | 1632 | else |
| 1633 | a_len = alloc_len; | 1633 | a_len = alloc_len; |
| 1634 | arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL); | 1634 | arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC); |
| 1635 | if (NULL == arr) { | 1635 | if (NULL == arr) { |
| 1636 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, | 1636 | mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, |
| 1637 | INSUFF_RES_ASCQ); | 1637 | INSUFF_RES_ASCQ); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6d5c0b8cb0bb..17bb541f7cc2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1143,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd) | |||
| 1143 | struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; | 1143 | struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; |
| 1144 | int ivecs, count; | 1144 | int ivecs, count; |
| 1145 | 1145 | ||
| 1146 | BUG_ON(prot_sdb == NULL); | 1146 | if (prot_sdb == NULL) { |
| 1147 | /* | ||
| 1148 | * This can happen if someone (e.g. multipath) | ||
| 1149 | * queues a command to a device on an adapter | ||
| 1150 | * that does not support DIX. | ||
| 1151 | */ | ||
| 1152 | WARN_ON_ONCE(1); | ||
| 1153 | error = BLKPREP_KILL; | ||
| 1154 | goto err_exit; | ||
| 1155 | } | ||
| 1156 | |||
| 1147 | ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); | 1157 | ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); |
| 1148 | 1158 | ||
| 1149 | if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { | 1159 | if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) { |
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index 7281316a5ecb..a67d37c7e3c0 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
| @@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws) | |||
| 271 | iounmap(clk_reg); | 271 | iounmap(clk_reg); |
| 272 | 272 | ||
| 273 | dws->num_cs = 16; | 273 | dws->num_cs = 16; |
| 274 | dws->fifo_len = 40; /* FIFO has 40 words buffer */ | ||
| 275 | 274 | ||
| 276 | #ifdef CONFIG_SPI_DW_MID_DMA | 275 | #ifdef CONFIG_SPI_DW_MID_DMA |
| 277 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); | 276 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); |
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index d0d5542efc06..8edcd1b84562 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c | |||
| @@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws) | |||
| 621 | if (!dws->fifo_len) { | 621 | if (!dws->fifo_len) { |
| 622 | u32 fifo; | 622 | u32 fifo; |
| 623 | 623 | ||
| 624 | for (fifo = 2; fifo <= 257; fifo++) { | 624 | for (fifo = 2; fifo <= 256; fifo++) { |
| 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); | 625 | dw_writew(dws, DW_SPI_TXFLTR, fifo); |
| 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) | 626 | if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) |
| 627 | break; | 627 | break; |
| 628 | } | 628 | } |
| 629 | 629 | ||
| 630 | dws->fifo_len = (fifo == 257) ? 0 : fifo; | 630 | dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; |
| 631 | dw_writew(dws, DW_SPI_TXFLTR, 0); | 631 | dw_writew(dws, DW_SPI_TXFLTR, 0); |
| 632 | } | 632 | } |
| 633 | } | 633 | } |
| @@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) | |||
| 673 | if (dws->dma_ops && dws->dma_ops->dma_init) { | 673 | if (dws->dma_ops && dws->dma_ops->dma_init) { |
| 674 | ret = dws->dma_ops->dma_init(dws); | 674 | ret = dws->dma_ops->dma_init(dws); |
| 675 | if (ret) { | 675 | if (ret) { |
| 676 | dev_warn(&master->dev, "DMA init failed\n"); | 676 | dev_warn(dev, "DMA init failed\n"); |
| 677 | dws->dma_inited = 0; | 677 | dws->dma_inited = 0; |
| 678 | } | 678 | } |
| 679 | } | 679 | } |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 05c623cfb078..23822e7df6c1 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
| @@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data) | |||
| 546 | cs_deassert(drv_data); | 546 | cs_deassert(drv_data); |
| 547 | } | 547 | } |
| 548 | 548 | ||
| 549 | spi_finalize_current_message(drv_data->master); | ||
| 550 | drv_data->cur_chip = NULL; | 549 | drv_data->cur_chip = NULL; |
| 550 | spi_finalize_current_message(drv_data->master); | ||
| 551 | } | 551 | } |
| 552 | 552 | ||
| 553 | static void reset_sccr1(struct driver_data *drv_data) | 553 | static void reset_sccr1(struct driver_data *drv_data) |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 96a5fc0878d8..3ab7a21445fc 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
| @@ -82,7 +82,7 @@ struct sh_msiof_spi_priv { | |||
| 82 | #define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ | 82 | #define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */ |
| 83 | #define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ | 83 | #define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ |
| 84 | #define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ | 84 | #define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ |
| 85 | #define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */ | 85 | #define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */ |
| 86 | #define MDR1_FLD_SHIFT 2 | 86 | #define MDR1_FLD_SHIFT 2 |
| 87 | #define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ | 87 | #define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */ |
| 88 | /* TMDR1 */ | 88 | /* TMDR1 */ |
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index 930f6010203e..65d610abe06e 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c | |||
| @@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) | |||
| 632 | return 0; | 632 | return 0; |
| 633 | } | 633 | } |
| 634 | 634 | ||
| 635 | if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { | 635 | if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { |
| 636 | CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); | 636 | CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); |
| 637 | return -EFAULT; | 637 | return -EFAULT; |
| 638 | } | 638 | } |
diff --git a/drivers/staging/media/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig index 81784c6f7b88..77d8753f6ba4 100644 --- a/drivers/staging/media/tlg2300/Kconfig +++ b/drivers/staging/media/tlg2300/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config VIDEO_TLG2300 | 1 | config VIDEO_TLG2300 |
| 2 | tristate "Telegent TLG2300 USB video capture support (Deprecated)" | 2 | tristate "Telegent TLG2300 USB video capture support (Deprecated)" |
| 3 | depends on VIDEO_DEV && I2C && SND && DVB_CORE | 3 | depends on VIDEO_DEV && I2C && SND && DVB_CORE |
| 4 | depends on MEDIA_USB_SUPPORT | ||
| 4 | select VIDEO_TUNER | 5 | select VIDEO_TUNER |
| 5 | select VIDEO_TVEEPROM | 6 | select VIDEO_TVEEPROM |
| 6 | depends on RC_CORE | 7 | depends on RC_CORE |
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c index 093535c6217b..120b70d72d79 100644 --- a/drivers/staging/nvec/nvec.c +++ b/drivers/staging/nvec/nvec.c | |||
| @@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle; | |||
| 85 | static const struct mfd_cell nvec_devices[] = { | 85 | static const struct mfd_cell nvec_devices[] = { |
| 86 | { | 86 | { |
| 87 | .name = "nvec-kbd", | 87 | .name = "nvec-kbd", |
| 88 | .id = 1, | ||
| 89 | }, | 88 | }, |
| 90 | { | 89 | { |
| 91 | .name = "nvec-mouse", | 90 | .name = "nvec-mouse", |
| 92 | .id = 1, | ||
| 93 | }, | 91 | }, |
| 94 | { | 92 | { |
| 95 | .name = "nvec-power", | 93 | .name = "nvec-power", |
| 96 | .id = 1, | 94 | .id = 0, |
| 97 | }, | 95 | }, |
| 98 | { | 96 | { |
| 99 | .name = "nvec-power", | 97 | .name = "nvec-power", |
| 100 | .id = 2, | 98 | .id = 1, |
| 101 | }, | 99 | }, |
| 102 | { | 100 | { |
| 103 | .name = "nvec-paz00", | 101 | .name = "nvec-paz00", |
| 104 | .id = 1, | ||
| 105 | }, | 102 | }, |
| 106 | }; | 103 | }; |
| 107 | 104 | ||
| @@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev) | |||
| 891 | nvec_msg_free(nvec, msg); | 888 | nvec_msg_free(nvec, msg); |
| 892 | } | 889 | } |
| 893 | 890 | ||
| 894 | ret = mfd_add_devices(nvec->dev, -1, nvec_devices, | 891 | ret = mfd_add_devices(nvec->dev, 0, nvec_devices, |
| 895 | ARRAY_SIZE(nvec_devices), NULL, 0, NULL); | 892 | ARRAY_SIZE(nvec_devices), NULL, 0, NULL); |
| 896 | if (ret) | 893 | if (ret) |
| 897 | dev_err(nvec->dev, "error adding subdevices\n"); | 894 | dev_err(nvec->dev, "error adding subdevices\n"); |
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h index de0c9c9d7091..a6315abe7b7c 100644 --- a/drivers/usb/core/otg_whitelist.h +++ b/drivers/usb/core/otg_whitelist.h | |||
| @@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev) | |||
| 55 | le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) | 55 | le16_to_cpu(dev->descriptor.idProduct) == 0xbadd)) |
| 56 | return 0; | 56 | return 0; |
| 57 | 57 | ||
| 58 | /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */ | ||
| 59 | if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a && | ||
| 60 | le16_to_cpu(dev->descriptor.idProduct) == 0x0200)) | ||
| 61 | return 1; | ||
| 62 | |||
| 58 | /* NOTE: can't use usb_match_id() since interface caches | 63 | /* NOTE: can't use usb_match_id() since interface caches |
| 59 | * aren't set up yet. this is cut/paste from that code. | 64 | * aren't set up yet. this is cut/paste from that code. |
| 60 | */ | 65 | */ |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 0ffb4ed0a945..41e510ae8c83 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 179 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = | 179 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = |
| 180 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, | 180 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, |
| 181 | 181 | ||
| 182 | /* Protocol and OTG Electrical Test Device */ | ||
| 183 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = | ||
| 184 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, | ||
| 185 | |||
| 182 | { } /* terminating entry must be last */ | 186 | { } /* terminating entry must be last */ |
| 183 | }; | 187 | }; |
| 184 | 188 | ||
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c index ad43c5bc1ef1..02e3e2d4ea56 100644 --- a/drivers/usb/dwc2/core_intr.c +++ b/drivers/usb/dwc2/core_intr.c | |||
| @@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev) | |||
| 476 | u32 gintsts; | 476 | u32 gintsts; |
| 477 | irqreturn_t retval = IRQ_NONE; | 477 | irqreturn_t retval = IRQ_NONE; |
| 478 | 478 | ||
| 479 | spin_lock(&hsotg->lock); | ||
| 480 | |||
| 479 | if (!dwc2_is_controller_alive(hsotg)) { | 481 | if (!dwc2_is_controller_alive(hsotg)) { |
| 480 | dev_warn(hsotg->dev, "Controller is dead\n"); | 482 | dev_warn(hsotg->dev, "Controller is dead\n"); |
| 481 | goto out; | 483 | goto out; |
| 482 | } | 484 | } |
| 483 | 485 | ||
| 484 | spin_lock(&hsotg->lock); | ||
| 485 | |||
| 486 | gintsts = dwc2_read_common_intr(hsotg); | 486 | gintsts = dwc2_read_common_intr(hsotg); |
| 487 | if (gintsts & ~GINTSTS_PRTINT) | 487 | if (gintsts & ~GINTSTS_PRTINT) |
| 488 | retval = IRQ_HANDLED; | 488 | retval = IRQ_HANDLED; |
| @@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev) | |||
| 515 | } | 515 | } |
| 516 | } | 516 | } |
| 517 | 517 | ||
| 518 | spin_unlock(&hsotg->lock); | ||
| 519 | out: | 518 | out: |
| 519 | spin_unlock(&hsotg->lock); | ||
| 520 | return retval; | 520 | return retval; |
| 521 | } | 521 | } |
| 522 | EXPORT_SYMBOL_GPL(dwc2_handle_common_intr); | 522 | EXPORT_SYMBOL_GPL(dwc2_handle_common_intr); |
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index ccfdfb24b240..2f9735b35338 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c | |||
| @@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list, | |||
| 34 | return phy; | 34 | return phy; |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | return ERR_PTR(-EPROBE_DEFER); | 37 | return ERR_PTR(-ENODEV); |
| 38 | } | 38 | } |
| 39 | 39 | ||
| 40 | static struct usb_phy *__usb_find_phy_dev(struct device *dev, | 40 | static struct usb_phy *__usb_find_phy_dev(struct device *dev, |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 11c7a9676441..d684b4b8108f 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
| @@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100, | |||
| 507 | UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, | 507 | UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999, |
| 508 | "SCM Microsystems", | 508 | "SCM Microsystems", |
| 509 | "eUSB SCSI Adapter (Bus Powered)", | 509 | "eUSB SCSI Adapter (Bus Powered)", |
| 510 | USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, | 510 | USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init, |
| 511 | US_FL_SCM_MULT_TARG ), | 511 | US_FL_SCM_MULT_TARG ), |
| 512 | 512 | ||
| 513 | UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, | 513 | UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200, |
| @@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100, | |||
| 1995 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 1995 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 1996 | US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), | 1996 | US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), |
| 1997 | 1997 | ||
| 1998 | /* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */ | ||
| 1999 | UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, | ||
| 2000 | "JMicron", | ||
| 2001 | "USB to ATA/ATAPI Bridge", | ||
| 2002 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 2003 | US_FL_BROKEN_FUA ), | ||
| 2004 | |||
| 1998 | /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) | 2005 | /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) |
| 1999 | * and Mac USB Dock USB-SCSI */ | 2006 | * and Mac USB Dock USB-SCSI */ |
| 2000 | UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, | 2007 | UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, |
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 6df4357d9ee3..dbc00e56c7f5 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
| @@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, | |||
| 140 | "External HDD", | 140 | "External HDD", |
| 141 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 141 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 142 | US_FL_IGNORE_UAS), | 142 | US_FL_IGNORE_UAS), |
| 143 | |||
| 144 | /* Reported-by: Richard Henderson <rth@redhat.com> */ | ||
| 145 | UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999, | ||
| 146 | "SimpleTech", | ||
| 147 | "External HDD", | ||
| 148 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 149 | US_FL_NO_REPORT_OPCODES), | ||
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 5927c0a98a74..bcfd2a22208f 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c | |||
| @@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = { | |||
| 503 | .shutdown = cdns_wdt_shutdown, | 503 | .shutdown = cdns_wdt_shutdown, |
| 504 | .driver = { | 504 | .driver = { |
| 505 | .name = "cdns-wdt", | 505 | .name = "cdns-wdt", |
| 506 | .owner = THIS_MODULE, | ||
| 507 | .of_match_table = cdns_wdt_of_match, | 506 | .of_match_table = cdns_wdt_of_match, |
| 508 | .pm = &cdns_wdt_pm_ops, | 507 | .pm = &cdns_wdt_pm_ops, |
| 509 | }, | 508 | }, |
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index d6add516a7a7..5142bbabe027 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c | |||
| @@ -52,6 +52,8 @@ | |||
| 52 | #define IMX2_WDT_WRSR 0x04 /* Reset Status Register */ | 52 | #define IMX2_WDT_WRSR 0x04 /* Reset Status Register */ |
| 53 | #define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */ | 53 | #define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */ |
| 54 | 54 | ||
| 55 | #define IMX2_WDT_WMCR 0x08 /* Misc Register */ | ||
| 56 | |||
| 55 | #define IMX2_WDT_MAX_TIME 128 | 57 | #define IMX2_WDT_MAX_TIME 128 |
| 56 | #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */ | 58 | #define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */ |
| 57 | 59 | ||
| @@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) | |||
| 274 | 276 | ||
| 275 | imx2_wdt_ping_if_active(wdog); | 277 | imx2_wdt_ping_if_active(wdog); |
| 276 | 278 | ||
| 279 | /* | ||
| 280 | * Disable the watchdog power down counter at boot. Otherwise the power | ||
| 281 | * down counter will pull down the #WDOG interrupt line for one clock | ||
| 282 | * cycle. | ||
| 283 | */ | ||
| 284 | regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0); | ||
| 285 | |||
| 277 | ret = watchdog_register_device(wdog); | 286 | ret = watchdog_register_device(wdog); |
| 278 | if (ret) { | 287 | if (ret) { |
| 279 | dev_err(&pdev->dev, "cannot register watchdog device\n"); | 288 | dev_err(&pdev->dev, "cannot register watchdog device\n"); |
| @@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev) | |||
| 327 | } | 336 | } |
| 328 | 337 | ||
| 329 | #ifdef CONFIG_PM_SLEEP | 338 | #ifdef CONFIG_PM_SLEEP |
| 330 | /* Disable watchdog if it is active during suspend */ | 339 | /* Disable watchdog if it is active or non-active but still running */ |
| 331 | static int imx2_wdt_suspend(struct device *dev) | 340 | static int imx2_wdt_suspend(struct device *dev) |
| 332 | { | 341 | { |
| 333 | struct watchdog_device *wdog = dev_get_drvdata(dev); | 342 | struct watchdog_device *wdog = dev_get_drvdata(dev); |
| 334 | struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); | 343 | struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); |
| 335 | 344 | ||
| 336 | imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); | 345 | /* The watchdog IP block is running */ |
| 337 | imx2_wdt_ping(wdog); | 346 | if (imx2_wdt_is_running(wdev)) { |
| 347 | imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME); | ||
| 348 | imx2_wdt_ping(wdog); | ||
| 338 | 349 | ||
| 339 | /* Watchdog has been stopped but IP block is still running */ | 350 | /* The watchdog is not active */ |
| 340 | if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev)) | 351 | if (!watchdog_active(wdog)) |
| 341 | del_timer_sync(&wdev->timer); | 352 | del_timer_sync(&wdev->timer); |
| 353 | } | ||
| 342 | 354 | ||
| 343 | clk_disable_unprepare(wdev->clk); | 355 | clk_disable_unprepare(wdev->clk); |
| 344 | 356 | ||
| @@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev) | |||
| 354 | clk_prepare_enable(wdev->clk); | 366 | clk_prepare_enable(wdev->clk); |
| 355 | 367 | ||
| 356 | if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) { | 368 | if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) { |
| 357 | /* Resumes from deep sleep we need restart | 369 | /* |
| 358 | * the watchdog again. | 370 | * If the watchdog is still active and resumes |
| 371 | * from deep sleep state, need to restart the | ||
| 372 | * watchdog again. | ||
| 359 | */ | 373 | */ |
| 360 | imx2_wdt_setup(wdog); | 374 | imx2_wdt_setup(wdog); |
| 361 | imx2_wdt_set_timeout(wdog, wdog->timeout); | 375 | imx2_wdt_set_timeout(wdog, wdog->timeout); |
| 362 | imx2_wdt_ping(wdog); | 376 | imx2_wdt_ping(wdog); |
| 363 | } else if (imx2_wdt_is_running(wdev)) { | 377 | } else if (imx2_wdt_is_running(wdev)) { |
| 378 | /* Resuming from non-deep sleep state. */ | ||
| 379 | imx2_wdt_set_timeout(wdog, wdog->timeout); | ||
| 364 | imx2_wdt_ping(wdog); | 380 | imx2_wdt_ping(wdog); |
| 365 | mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2); | 381 | /* |
| 382 | * But the watchdog is not active, then start | ||
| 383 | * the timer again. | ||
| 384 | */ | ||
| 385 | if (!watchdog_active(wdog)) | ||
| 386 | mod_timer(&wdev->timer, | ||
| 387 | jiffies + wdog->timeout * HZ / 2); | ||
| 366 | } | 388 | } |
| 367 | 389 | ||
| 368 | return 0; | 390 | return 0; |
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c index ef6a298e8c45..1f4155ee3404 100644 --- a/drivers/watchdog/meson_wdt.c +++ b/drivers/watchdog/meson_wdt.c | |||
| @@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = { | |||
| 215 | .remove = meson_wdt_remove, | 215 | .remove = meson_wdt_remove, |
| 216 | .shutdown = meson_wdt_shutdown, | 216 | .shutdown = meson_wdt_shutdown, |
| 217 | .driver = { | 217 | .driver = { |
| 218 | .owner = THIS_MODULE, | ||
| 219 | .name = DRV_NAME, | 218 | .name = DRV_NAME, |
| 220 | .of_match_table = meson_wdt_dt_ids, | 219 | .of_match_table = meson_wdt_dt_ids, |
| 221 | }, | 220 | }, |
