diff options
| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-02-22 17:46:24 -0500 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-02-22 17:46:24 -0500 |
| commit | bb27d4998a9e8767674e8eda225c82cc149e5bc8 (patch) | |
| tree | dcabb8b8cc510003637323b30ed414b1a1ec86f6 /drivers | |
| parent | 9676e84dfd641e3366a41f2c45ac5c55dbac820f (diff) | |
| parent | 35bf7692e765c2275bf93fe573f7ca868ab73453 (diff) | |
Merge char-misc-next into staging-next
This resolves the merge issues and confusion people were having with
the goldfish drivers due to changes for them showing up in two different
trees.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
254 files changed, 10376 insertions, 6215 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index f080a8b7659b..796301a7c4fb 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
| @@ -1321,6 +1321,7 @@ static void binder_transaction(struct binder_proc *proc, | |||
| 1321 | struct binder_transaction *t; | 1321 | struct binder_transaction *t; |
| 1322 | struct binder_work *tcomplete; | 1322 | struct binder_work *tcomplete; |
| 1323 | binder_size_t *offp, *off_end; | 1323 | binder_size_t *offp, *off_end; |
| 1324 | binder_size_t off_min; | ||
| 1324 | struct binder_proc *target_proc; | 1325 | struct binder_proc *target_proc; |
| 1325 | struct binder_thread *target_thread = NULL; | 1326 | struct binder_thread *target_thread = NULL; |
| 1326 | struct binder_node *target_node = NULL; | 1327 | struct binder_node *target_node = NULL; |
| @@ -1522,18 +1523,24 @@ static void binder_transaction(struct binder_proc *proc, | |||
| 1522 | goto err_bad_offset; | 1523 | goto err_bad_offset; |
| 1523 | } | 1524 | } |
| 1524 | off_end = (void *)offp + tr->offsets_size; | 1525 | off_end = (void *)offp + tr->offsets_size; |
| 1526 | off_min = 0; | ||
| 1525 | for (; offp < off_end; offp++) { | 1527 | for (; offp < off_end; offp++) { |
| 1526 | struct flat_binder_object *fp; | 1528 | struct flat_binder_object *fp; |
| 1527 | 1529 | ||
| 1528 | if (*offp > t->buffer->data_size - sizeof(*fp) || | 1530 | if (*offp > t->buffer->data_size - sizeof(*fp) || |
| 1531 | *offp < off_min || | ||
| 1529 | t->buffer->data_size < sizeof(*fp) || | 1532 | t->buffer->data_size < sizeof(*fp) || |
| 1530 | !IS_ALIGNED(*offp, sizeof(u32))) { | 1533 | !IS_ALIGNED(*offp, sizeof(u32))) { |
| 1531 | binder_user_error("%d:%d got transaction with invalid offset, %lld\n", | 1534 | binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n", |
| 1532 | proc->pid, thread->pid, (u64)*offp); | 1535 | proc->pid, thread->pid, (u64)*offp, |
| 1536 | (u64)off_min, | ||
| 1537 | (u64)(t->buffer->data_size - | ||
| 1538 | sizeof(*fp))); | ||
| 1533 | return_error = BR_FAILED_REPLY; | 1539 | return_error = BR_FAILED_REPLY; |
| 1534 | goto err_bad_offset; | 1540 | goto err_bad_offset; |
| 1535 | } | 1541 | } |
| 1536 | fp = (struct flat_binder_object *)(t->buffer->data + *offp); | 1542 | fp = (struct flat_binder_object *)(t->buffer->data + *offp); |
| 1543 | off_min = *offp + sizeof(struct flat_binder_object); | ||
| 1537 | switch (fp->type) { | 1544 | switch (fp->type) { |
| 1538 | case BINDER_TYPE_BINDER: | 1545 | case BINDER_TYPE_BINDER: |
| 1539 | case BINDER_TYPE_WEAK_BINDER: { | 1546 | case BINDER_TYPE_WEAK_BINDER: { |
| @@ -3598,13 +3605,24 @@ static int binder_transactions_show(struct seq_file *m, void *unused) | |||
| 3598 | 3605 | ||
| 3599 | static int binder_proc_show(struct seq_file *m, void *unused) | 3606 | static int binder_proc_show(struct seq_file *m, void *unused) |
| 3600 | { | 3607 | { |
| 3608 | struct binder_proc *itr; | ||
| 3601 | struct binder_proc *proc = m->private; | 3609 | struct binder_proc *proc = m->private; |
| 3602 | int do_lock = !binder_debug_no_lock; | 3610 | int do_lock = !binder_debug_no_lock; |
| 3611 | bool valid_proc = false; | ||
| 3603 | 3612 | ||
| 3604 | if (do_lock) | 3613 | if (do_lock) |
| 3605 | binder_lock(__func__); | 3614 | binder_lock(__func__); |
| 3606 | seq_puts(m, "binder proc state:\n"); | 3615 | |
| 3607 | print_binder_proc(m, proc, 1); | 3616 | hlist_for_each_entry(itr, &binder_procs, proc_node) { |
| 3617 | if (itr == proc) { | ||
| 3618 | valid_proc = true; | ||
| 3619 | break; | ||
| 3620 | } | ||
| 3621 | } | ||
| 3622 | if (valid_proc) { | ||
| 3623 | seq_puts(m, "binder proc state:\n"); | ||
| 3624 | print_binder_proc(m, proc, 1); | ||
| 3625 | } | ||
| 3608 | if (do_lock) | 3626 | if (do_lock) |
| 3609 | binder_unlock(__func__); | 3627 | binder_unlock(__func__); |
| 3610 | return 0; | 3628 | return 0; |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 594fcabd22cd..546a3692774f 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
| @@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
| 264 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ | 264 | { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ |
| 265 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ | 265 | { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ |
| 266 | { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ | 266 | { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ |
| 267 | { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ | ||
| 268 | { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ | ||
| 269 | { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */ | ||
| 270 | { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */ | ||
| 271 | { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */ | ||
| 272 | { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */ | ||
| 273 | { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */ | ||
| 274 | { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */ | ||
| 275 | { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */ | ||
| 276 | { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */ | ||
| 277 | { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */ | ||
| 278 | { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */ | ||
| 279 | { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */ | ||
| 280 | { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */ | ||
| 281 | { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */ | ||
| 282 | { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */ | ||
| 283 | { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */ | ||
| 284 | { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */ | ||
| 285 | { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ | ||
| 286 | { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ | ||
| 267 | { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ | 287 | { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ |
| 268 | { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ | 288 | { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ |
| 269 | { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ | 289 | { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ |
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index a4faa438889c..a44c75d4c284 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h | |||
| @@ -250,6 +250,7 @@ enum { | |||
| 250 | AHCI_HFLAG_MULTI_MSI = 0, | 250 | AHCI_HFLAG_MULTI_MSI = 0, |
| 251 | AHCI_HFLAG_MULTI_MSIX = 0, | 251 | AHCI_HFLAG_MULTI_MSIX = 0, |
| 252 | #endif | 252 | #endif |
| 253 | AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */ | ||
| 253 | 254 | ||
| 254 | /* ap->flags bits */ | 255 | /* ap->flags bits */ |
| 255 | 256 | ||
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c index b36cae2fd04b..e87bcec0fd7c 100644 --- a/drivers/ata/ahci_brcmstb.c +++ b/drivers/ata/ahci_brcmstb.c | |||
| @@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) | |||
| 317 | if (IS_ERR(hpriv)) | 317 | if (IS_ERR(hpriv)) |
| 318 | return PTR_ERR(hpriv); | 318 | return PTR_ERR(hpriv); |
| 319 | hpriv->plat_data = priv; | 319 | hpriv->plat_data = priv; |
| 320 | hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP; | ||
| 320 | 321 | ||
| 321 | brcm_sata_alpm_init(hpriv); | 322 | brcm_sata_alpm_init(hpriv); |
| 322 | 323 | ||
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index d61740e78d6d..402967902cbe 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
| @@ -496,8 +496,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) | |||
| 496 | } | 496 | } |
| 497 | } | 497 | } |
| 498 | 498 | ||
| 499 | /* fabricate port_map from cap.nr_ports */ | 499 | /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */ |
| 500 | if (!port_map) { | 500 | if (!port_map && vers < 0x10300) { |
| 501 | port_map = (1 << ahci_nr_ports(cap)) - 1; | 501 | port_map = (1 << ahci_nr_ports(cap)) - 1; |
| 502 | dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map); | 502 | dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map); |
| 503 | 503 | ||
| @@ -593,8 +593,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine); | |||
| 593 | int ahci_stop_engine(struct ata_port *ap) | 593 | int ahci_stop_engine(struct ata_port *ap) |
| 594 | { | 594 | { |
| 595 | void __iomem *port_mmio = ahci_port_base(ap); | 595 | void __iomem *port_mmio = ahci_port_base(ap); |
| 596 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
| 596 | u32 tmp; | 597 | u32 tmp; |
| 597 | 598 | ||
| 599 | /* | ||
| 600 | * On some controllers, stopping a port's DMA engine while the port | ||
| 601 | * is in ALPM state (partial or slumber) results in failures on | ||
| 602 | * subsequent DMA engine starts. For those controllers, put the | ||
| 603 | * port back in active state before stopping its DMA engine. | ||
| 604 | */ | ||
| 605 | if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) && | ||
| 606 | (ap->link.lpm_policy > ATA_LPM_MAX_POWER) && | ||
| 607 | ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) { | ||
| 608 | dev_err(ap->host->dev, "Failed to wake up port before engine stop\n"); | ||
| 609 | return -EIO; | ||
| 610 | } | ||
| 611 | |||
| 598 | tmp = readl(port_mmio + PORT_CMD); | 612 | tmp = readl(port_mmio + PORT_CMD); |
| 599 | 613 | ||
| 600 | /* check if the HBA is idle */ | 614 | /* check if the HBA is idle */ |
| @@ -689,6 +703,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, | |||
| 689 | void __iomem *port_mmio = ahci_port_base(ap); | 703 | void __iomem *port_mmio = ahci_port_base(ap); |
| 690 | 704 | ||
| 691 | if (policy != ATA_LPM_MAX_POWER) { | 705 | if (policy != ATA_LPM_MAX_POWER) { |
| 706 | /* wakeup flag only applies to the max power policy */ | ||
| 707 | hints &= ~ATA_LPM_WAKE_ONLY; | ||
| 708 | |||
| 692 | /* | 709 | /* |
| 693 | * Disable interrupts on Phy Ready. This keeps us from | 710 | * Disable interrupts on Phy Ready. This keeps us from |
| 694 | * getting woken up due to spurious phy ready | 711 | * getting woken up due to spurious phy ready |
| @@ -704,7 +721,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, | |||
| 704 | u32 cmd = readl(port_mmio + PORT_CMD); | 721 | u32 cmd = readl(port_mmio + PORT_CMD); |
| 705 | 722 | ||
| 706 | if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) { | 723 | if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) { |
| 707 | cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); | 724 | if (!(hints & ATA_LPM_WAKE_ONLY)) |
| 725 | cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); | ||
| 708 | cmd |= PORT_CMD_ICC_ACTIVE; | 726 | cmd |= PORT_CMD_ICC_ACTIVE; |
| 709 | 727 | ||
| 710 | writel(cmd, port_mmio + PORT_CMD); | 728 | writel(cmd, port_mmio + PORT_CMD); |
| @@ -712,6 +730,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, | |||
| 712 | 730 | ||
| 713 | /* wait 10ms to be sure we've come out of LPM state */ | 731 | /* wait 10ms to be sure we've come out of LPM state */ |
| 714 | ata_msleep(ap, 10); | 732 | ata_msleep(ap, 10); |
| 733 | |||
| 734 | if (hints & ATA_LPM_WAKE_ONLY) | ||
| 735 | return 0; | ||
| 715 | } else { | 736 | } else { |
| 716 | cmd |= PORT_CMD_ALPE; | 737 | cmd |= PORT_CMD_ALPE; |
| 717 | if (policy == ATA_LPM_MIN_POWER) | 738 | if (policy == ATA_LPM_MIN_POWER) |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index cbb74719d2c1..55e257c268dd 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
| 4125 | { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, | 4125 | { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, |
| 4126 | { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, | 4126 | { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, |
| 4127 | { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, | 4127 | { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, |
| 4128 | { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, | ||
| 4128 | /* Odd clown on sil3726/4726 PMPs */ | 4129 | /* Odd clown on sil3726/4726 PMPs */ |
| 4129 | { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, | 4130 | { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, |
| 4130 | 4131 | ||
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index cdf6215a9a22..051b6158d1b7 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
| @@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, | |||
| 997 | static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) | 997 | static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) |
| 998 | { | 998 | { |
| 999 | struct ata_port *ap = qc->ap; | 999 | struct ata_port *ap = qc->ap; |
| 1000 | unsigned long flags; | ||
| 1001 | 1000 | ||
| 1002 | if (ap->ops->error_handler) { | 1001 | if (ap->ops->error_handler) { |
| 1003 | if (in_wq) { | 1002 | if (in_wq) { |
| 1004 | spin_lock_irqsave(ap->lock, flags); | ||
| 1005 | |||
| 1006 | /* EH might have kicked in while host lock is | 1003 | /* EH might have kicked in while host lock is |
| 1007 | * released. | 1004 | * released. |
| 1008 | */ | 1005 | */ |
| @@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) | |||
| 1014 | } else | 1011 | } else |
| 1015 | ata_port_freeze(ap); | 1012 | ata_port_freeze(ap); |
| 1016 | } | 1013 | } |
| 1017 | |||
| 1018 | spin_unlock_irqrestore(ap->lock, flags); | ||
| 1019 | } else { | 1014 | } else { |
| 1020 | if (likely(!(qc->err_mask & AC_ERR_HSM))) | 1015 | if (likely(!(qc->err_mask & AC_ERR_HSM))) |
| 1021 | ata_qc_complete(qc); | 1016 | ata_qc_complete(qc); |
| @@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) | |||
| 1024 | } | 1019 | } |
| 1025 | } else { | 1020 | } else { |
| 1026 | if (in_wq) { | 1021 | if (in_wq) { |
| 1027 | spin_lock_irqsave(ap->lock, flags); | ||
| 1028 | ata_sff_irq_on(ap); | 1022 | ata_sff_irq_on(ap); |
| 1029 | ata_qc_complete(qc); | 1023 | ata_qc_complete(qc); |
| 1030 | spin_unlock_irqrestore(ap->lock, flags); | ||
| 1031 | } else | 1024 | } else |
| 1032 | ata_qc_complete(qc); | 1025 | ata_qc_complete(qc); |
| 1033 | } | 1026 | } |
| @@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | |||
| 1048 | { | 1041 | { |
| 1049 | struct ata_link *link = qc->dev->link; | 1042 | struct ata_link *link = qc->dev->link; |
| 1050 | struct ata_eh_info *ehi = &link->eh_info; | 1043 | struct ata_eh_info *ehi = &link->eh_info; |
| 1051 | unsigned long flags = 0; | ||
| 1052 | int poll_next; | 1044 | int poll_next; |
| 1053 | 1045 | ||
| 1046 | lockdep_assert_held(ap->lock); | ||
| 1047 | |||
| 1054 | WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); | 1048 | WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); |
| 1055 | 1049 | ||
| 1056 | /* Make sure ata_sff_qc_issue() does not throw things | 1050 | /* Make sure ata_sff_qc_issue() does not throw things |
| @@ -1112,14 +1106,6 @@ fsm_start: | |||
| 1112 | } | 1106 | } |
| 1113 | } | 1107 | } |
| 1114 | 1108 | ||
| 1115 | /* Send the CDB (atapi) or the first data block (ata pio out). | ||
| 1116 | * During the state transition, interrupt handler shouldn't | ||
| 1117 | * be invoked before the data transfer is complete and | ||
| 1118 | * hsm_task_state is changed. Hence, the following locking. | ||
| 1119 | */ | ||
| 1120 | if (in_wq) | ||
| 1121 | spin_lock_irqsave(ap->lock, flags); | ||
| 1122 | |||
| 1123 | if (qc->tf.protocol == ATA_PROT_PIO) { | 1109 | if (qc->tf.protocol == ATA_PROT_PIO) { |
| 1124 | /* PIO data out protocol. | 1110 | /* PIO data out protocol. |
| 1125 | * send first data block. | 1111 | * send first data block. |
| @@ -1135,9 +1121,6 @@ fsm_start: | |||
| 1135 | /* send CDB */ | 1121 | /* send CDB */ |
| 1136 | atapi_send_cdb(ap, qc); | 1122 | atapi_send_cdb(ap, qc); |
| 1137 | 1123 | ||
| 1138 | if (in_wq) | ||
| 1139 | spin_unlock_irqrestore(ap->lock, flags); | ||
| 1140 | |||
| 1141 | /* if polling, ata_sff_pio_task() handles the rest. | 1124 | /* if polling, ata_sff_pio_task() handles the rest. |
| 1142 | * otherwise, interrupt handler takes over from here. | 1125 | * otherwise, interrupt handler takes over from here. |
| 1143 | */ | 1126 | */ |
| @@ -1296,7 +1279,8 @@ fsm_start: | |||
| 1296 | break; | 1279 | break; |
| 1297 | default: | 1280 | default: |
| 1298 | poll_next = 0; | 1281 | poll_next = 0; |
| 1299 | BUG(); | 1282 | WARN(true, "ata%d: SFF host state machine in invalid state %d", |
| 1283 | ap->print_id, ap->hsm_task_state); | ||
| 1300 | } | 1284 | } |
| 1301 | 1285 | ||
| 1302 | return poll_next; | 1286 | return poll_next; |
| @@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work) | |||
| 1361 | u8 status; | 1345 | u8 status; |
| 1362 | int poll_next; | 1346 | int poll_next; |
| 1363 | 1347 | ||
| 1348 | spin_lock_irq(ap->lock); | ||
| 1349 | |||
| 1364 | BUG_ON(ap->sff_pio_task_link == NULL); | 1350 | BUG_ON(ap->sff_pio_task_link == NULL); |
| 1365 | /* qc can be NULL if timeout occurred */ | 1351 | /* qc can be NULL if timeout occurred */ |
| 1366 | qc = ata_qc_from_tag(ap, link->active_tag); | 1352 | qc = ata_qc_from_tag(ap, link->active_tag); |
| 1367 | if (!qc) { | 1353 | if (!qc) { |
| 1368 | ap->sff_pio_task_link = NULL; | 1354 | ap->sff_pio_task_link = NULL; |
| 1369 | return; | 1355 | goto out_unlock; |
| 1370 | } | 1356 | } |
| 1371 | 1357 | ||
| 1372 | fsm_start: | 1358 | fsm_start: |
| @@ -1381,11 +1367,14 @@ fsm_start: | |||
| 1381 | */ | 1367 | */ |
| 1382 | status = ata_sff_busy_wait(ap, ATA_BUSY, 5); | 1368 | status = ata_sff_busy_wait(ap, ATA_BUSY, 5); |
| 1383 | if (status & ATA_BUSY) { | 1369 | if (status & ATA_BUSY) { |
| 1370 | spin_unlock_irq(ap->lock); | ||
| 1384 | ata_msleep(ap, 2); | 1371 | ata_msleep(ap, 2); |
| 1372 | spin_lock_irq(ap->lock); | ||
| 1373 | |||
| 1385 | status = ata_sff_busy_wait(ap, ATA_BUSY, 10); | 1374 | status = ata_sff_busy_wait(ap, ATA_BUSY, 10); |
| 1386 | if (status & ATA_BUSY) { | 1375 | if (status & ATA_BUSY) { |
| 1387 | ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); | 1376 | ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); |
| 1388 | return; | 1377 | goto out_unlock; |
| 1389 | } | 1378 | } |
| 1390 | } | 1379 | } |
| 1391 | 1380 | ||
| @@ -1402,6 +1391,8 @@ fsm_start: | |||
| 1402 | */ | 1391 | */ |
| 1403 | if (poll_next) | 1392 | if (poll_next) |
| 1404 | goto fsm_start; | 1393 | goto fsm_start; |
| 1394 | out_unlock: | ||
| 1395 | spin_unlock_irq(ap->lock); | ||
| 1405 | } | 1396 | } |
| 1406 | 1397 | ||
| 1407 | /** | 1398 | /** |
diff --git a/drivers/base/component.c b/drivers/base/component.c index 89f5cf68d80a..04a1582e80bb 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c | |||
| @@ -206,6 +206,8 @@ static void component_match_release(struct device *master, | |||
| 206 | if (mc->release) | 206 | if (mc->release) |
| 207 | mc->release(master, mc->data); | 207 | mc->release(master, mc->data); |
| 208 | } | 208 | } |
| 209 | |||
| 210 | kfree(match->compare); | ||
| 209 | } | 211 | } |
| 210 | 212 | ||
| 211 | static void devm_component_match_release(struct device *dev, void *res) | 213 | static void devm_component_match_release(struct device *dev, void *res) |
| @@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev, | |||
| 221 | if (match->alloc == num) | 223 | if (match->alloc == num) |
| 222 | return 0; | 224 | return 0; |
| 223 | 225 | ||
| 224 | new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL); | 226 | new = kmalloc_array(num, sizeof(*new), GFP_KERNEL); |
| 225 | if (!new) | 227 | if (!new) |
| 226 | return -ENOMEM; | 228 | return -ENOMEM; |
| 227 | 229 | ||
| 228 | if (match->compare) { | 230 | if (match->compare) { |
| 229 | memcpy(new, match->compare, sizeof(*new) * | 231 | memcpy(new, match->compare, sizeof(*new) * |
| 230 | min(match->num, num)); | 232 | min(match->num, num)); |
| 231 | devm_kfree(dev, match->compare); | 233 | kfree(match->compare); |
| 232 | } | 234 | } |
| 233 | match->compare = new; | 235 | match->compare = new; |
| 234 | match->alloc = num; | 236 | match->alloc = num; |
| @@ -283,6 +285,24 @@ void component_match_add_release(struct device *master, | |||
| 283 | } | 285 | } |
| 284 | EXPORT_SYMBOL(component_match_add_release); | 286 | EXPORT_SYMBOL(component_match_add_release); |
| 285 | 287 | ||
| 288 | static void free_master(struct master *master) | ||
| 289 | { | ||
| 290 | struct component_match *match = master->match; | ||
| 291 | int i; | ||
| 292 | |||
| 293 | list_del(&master->node); | ||
| 294 | |||
| 295 | if (match) { | ||
| 296 | for (i = 0; i < match->num; i++) { | ||
| 297 | struct component *c = match->compare[i].component; | ||
| 298 | if (c) | ||
| 299 | c->master = NULL; | ||
| 300 | } | ||
| 301 | } | ||
| 302 | |||
| 303 | kfree(master); | ||
| 304 | } | ||
| 305 | |||
| 286 | int component_master_add_with_match(struct device *dev, | 306 | int component_master_add_with_match(struct device *dev, |
| 287 | const struct component_master_ops *ops, | 307 | const struct component_master_ops *ops, |
| 288 | struct component_match *match) | 308 | struct component_match *match) |
| @@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev, | |||
| 309 | 329 | ||
| 310 | ret = try_to_bring_up_master(master, NULL); | 330 | ret = try_to_bring_up_master(master, NULL); |
| 311 | 331 | ||
| 312 | if (ret < 0) { | 332 | if (ret < 0) |
| 313 | /* Delete off the list if we weren't successful */ | 333 | free_master(master); |
| 314 | list_del(&master->node); | 334 | |
| 315 | kfree(master); | ||
| 316 | } | ||
| 317 | mutex_unlock(&component_mutex); | 335 | mutex_unlock(&component_mutex); |
| 318 | 336 | ||
| 319 | return ret < 0 ? ret : 0; | 337 | return ret < 0 ? ret : 0; |
| @@ -324,25 +342,12 @@ void component_master_del(struct device *dev, | |||
| 324 | const struct component_master_ops *ops) | 342 | const struct component_master_ops *ops) |
| 325 | { | 343 | { |
| 326 | struct master *master; | 344 | struct master *master; |
| 327 | int i; | ||
| 328 | 345 | ||
| 329 | mutex_lock(&component_mutex); | 346 | mutex_lock(&component_mutex); |
| 330 | master = __master_find(dev, ops); | 347 | master = __master_find(dev, ops); |
| 331 | if (master) { | 348 | if (master) { |
| 332 | struct component_match *match = master->match; | ||
| 333 | |||
| 334 | take_down_master(master); | 349 | take_down_master(master); |
| 335 | 350 | free_master(master); | |
| 336 | list_del(&master->node); | ||
| 337 | |||
| 338 | if (match) { | ||
| 339 | for (i = 0; i < match->num; i++) { | ||
| 340 | struct component *c = match->compare[i].component; | ||
| 341 | if (c) | ||
| 342 | c->master = NULL; | ||
| 343 | } | ||
| 344 | } | ||
| 345 | kfree(master); | ||
| 346 | } | 351 | } |
| 347 | mutex_unlock(&component_mutex); | 352 | mutex_unlock(&component_mutex); |
| 348 | } | 353 | } |
| @@ -486,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops) | |||
| 486 | 491 | ||
| 487 | ret = try_to_bring_up_masters(component); | 492 | ret = try_to_bring_up_masters(component); |
| 488 | if (ret < 0) { | 493 | if (ret < 0) { |
| 494 | if (component->master) | ||
| 495 | remove_component(component->master, component); | ||
| 489 | list_del(&component->node); | 496 | list_del(&component->node); |
| 490 | 497 | ||
| 491 | kfree(component); | 498 | kfree(component); |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index b9250e564ebf..a7f4aa3f6b32 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
| @@ -257,7 +257,7 @@ static void __fw_free_buf(struct kref *ref) | |||
| 257 | vunmap(buf->data); | 257 | vunmap(buf->data); |
| 258 | for (i = 0; i < buf->nr_pages; i++) | 258 | for (i = 0; i < buf->nr_pages; i++) |
| 259 | __free_page(buf->pages[i]); | 259 | __free_page(buf->pages[i]); |
| 260 | kfree(buf->pages); | 260 | vfree(buf->pages); |
| 261 | } else | 261 | } else |
| 262 | #endif | 262 | #endif |
| 263 | vfree(buf->data); | 263 | vfree(buf->data); |
| @@ -353,15 +353,15 @@ static int fw_get_filesystem_firmware(struct device *device, | |||
| 353 | rc = fw_read_file_contents(file, buf); | 353 | rc = fw_read_file_contents(file, buf); |
| 354 | fput(file); | 354 | fput(file); |
| 355 | if (rc) | 355 | if (rc) |
| 356 | dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n", | 356 | dev_warn(device, "loading %s failed with error %d\n", |
| 357 | path, rc); | 357 | path, rc); |
| 358 | else | 358 | else |
| 359 | break; | 359 | break; |
| 360 | } | 360 | } |
| 361 | __putname(path); | 361 | __putname(path); |
| 362 | 362 | ||
| 363 | if (!rc) { | 363 | if (!rc) { |
| 364 | dev_dbg(device, "firmware: direct-loading firmware %s\n", | 364 | dev_dbg(device, "direct-loading %s\n", |
| 365 | buf->fw_id); | 365 | buf->fw_id); |
| 366 | mutex_lock(&fw_lock); | 366 | mutex_lock(&fw_lock); |
| 367 | set_bit(FW_STATUS_DONE, &buf->status); | 367 | set_bit(FW_STATUS_DONE, &buf->status); |
| @@ -660,7 +660,7 @@ static ssize_t firmware_loading_store(struct device *dev, | |||
| 660 | if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) { | 660 | if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) { |
| 661 | for (i = 0; i < fw_buf->nr_pages; i++) | 661 | for (i = 0; i < fw_buf->nr_pages; i++) |
| 662 | __free_page(fw_buf->pages[i]); | 662 | __free_page(fw_buf->pages[i]); |
| 663 | kfree(fw_buf->pages); | 663 | vfree(fw_buf->pages); |
| 664 | fw_buf->pages = NULL; | 664 | fw_buf->pages = NULL; |
| 665 | fw_buf->page_array_size = 0; | 665 | fw_buf->page_array_size = 0; |
| 666 | fw_buf->nr_pages = 0; | 666 | fw_buf->nr_pages = 0; |
| @@ -770,8 +770,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) | |||
| 770 | buf->page_array_size * 2); | 770 | buf->page_array_size * 2); |
| 771 | struct page **new_pages; | 771 | struct page **new_pages; |
| 772 | 772 | ||
| 773 | new_pages = kmalloc(new_array_size * sizeof(void *), | 773 | new_pages = vmalloc(new_array_size * sizeof(void *)); |
| 774 | GFP_KERNEL); | ||
| 775 | if (!new_pages) { | 774 | if (!new_pages) { |
| 776 | fw_load_abort(fw_priv); | 775 | fw_load_abort(fw_priv); |
| 777 | return -ENOMEM; | 776 | return -ENOMEM; |
| @@ -780,7 +779,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) | |||
| 780 | buf->page_array_size * sizeof(void *)); | 779 | buf->page_array_size * sizeof(void *)); |
| 781 | memset(&new_pages[buf->page_array_size], 0, sizeof(void *) * | 780 | memset(&new_pages[buf->page_array_size], 0, sizeof(void *) * |
| 782 | (new_array_size - buf->page_array_size)); | 781 | (new_array_size - buf->page_array_size)); |
| 783 | kfree(buf->pages); | 782 | vfree(buf->pages); |
| 784 | buf->pages = new_pages; | 783 | buf->pages = new_pages; |
| 785 | buf->page_array_size = new_array_size; | 784 | buf->page_array_size = new_array_size; |
| 786 | } | 785 | } |
| @@ -1051,7 +1050,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, | |||
| 1051 | } | 1050 | } |
| 1052 | 1051 | ||
| 1053 | if (fw_get_builtin_firmware(firmware, name)) { | 1052 | if (fw_get_builtin_firmware(firmware, name)) { |
| 1054 | dev_dbg(device, "firmware: using built-in firmware %s\n", name); | 1053 | dev_dbg(device, "using built-in %s\n", name); |
| 1055 | return 0; /* assigned */ | 1054 | return 0; /* assigned */ |
| 1056 | } | 1055 | } |
| 1057 | 1056 | ||
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 8812bfb9e3b8..eea51569f0eb 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c | |||
| @@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context, | |||
| 133 | while (val_size) { | 133 | while (val_size) { |
| 134 | switch (ctx->val_bytes) { | 134 | switch (ctx->val_bytes) { |
| 135 | case 1: | 135 | case 1: |
| 136 | __raw_writeb(*(u8 *)val, ctx->regs + offset); | 136 | writeb(*(u8 *)val, ctx->regs + offset); |
| 137 | break; | 137 | break; |
| 138 | case 2: | 138 | case 2: |
| 139 | __raw_writew(*(u16 *)val, ctx->regs + offset); | 139 | writew(*(u16 *)val, ctx->regs + offset); |
| 140 | break; | 140 | break; |
| 141 | case 4: | 141 | case 4: |
| 142 | __raw_writel(*(u32 *)val, ctx->regs + offset); | 142 | writel(*(u32 *)val, ctx->regs + offset); |
| 143 | break; | 143 | break; |
| 144 | #ifdef CONFIG_64BIT | 144 | #ifdef CONFIG_64BIT |
| 145 | case 8: | 145 | case 8: |
| 146 | __raw_writeq(*(u64 *)val, ctx->regs + offset); | 146 | writeq(*(u64 *)val, ctx->regs + offset); |
| 147 | break; | 147 | break; |
| 148 | #endif | 148 | #endif |
| 149 | default: | 149 | default: |
| @@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context, | |||
| 193 | while (val_size) { | 193 | while (val_size) { |
| 194 | switch (ctx->val_bytes) { | 194 | switch (ctx->val_bytes) { |
| 195 | case 1: | 195 | case 1: |
| 196 | *(u8 *)val = __raw_readb(ctx->regs + offset); | 196 | *(u8 *)val = readb(ctx->regs + offset); |
| 197 | break; | 197 | break; |
| 198 | case 2: | 198 | case 2: |
| 199 | *(u16 *)val = __raw_readw(ctx->regs + offset); | 199 | *(u16 *)val = readw(ctx->regs + offset); |
| 200 | break; | 200 | break; |
| 201 | case 4: | 201 | case 4: |
| 202 | *(u32 *)val = __raw_readl(ctx->regs + offset); | 202 | *(u32 *)val = readl(ctx->regs + offset); |
| 203 | break; | 203 | break; |
| 204 | #ifdef CONFIG_64BIT | 204 | #ifdef CONFIG_64BIT |
| 205 | case 8: | 205 | case 8: |
| 206 | *(u64 *)val = __raw_readq(ctx->regs + offset); | 206 | *(u64 *)val = readq(ctx->regs + offset); |
| 207 | break; | 207 | break; |
| 208 | #endif | 208 | #endif |
| 209 | default: | 209 | default: |
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 01292328a456..678fa97e41fb 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c | |||
| @@ -496,12 +496,12 @@ static void pc_set_checksum(void) | |||
| 496 | 496 | ||
| 497 | #ifdef CONFIG_PROC_FS | 497 | #ifdef CONFIG_PROC_FS |
| 498 | 498 | ||
| 499 | static char *floppy_types[] = { | 499 | static const char * const floppy_types[] = { |
| 500 | "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", | 500 | "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", |
| 501 | "3.5'' 2.88M", "3.5'' 2.88M" | 501 | "3.5'' 2.88M", "3.5'' 2.88M" |
| 502 | }; | 502 | }; |
| 503 | 503 | ||
| 504 | static char *gfx_types[] = { | 504 | static const char * const gfx_types[] = { |
| 505 | "EGA, VGA, ... (with BIOS)", | 505 | "EGA, VGA, ... (with BIOS)", |
| 506 | "CGA (40 cols)", | 506 | "CGA (40 cols)", |
| 507 | "CGA (80 cols)", | 507 | "CGA (80 cols)", |
| @@ -602,7 +602,7 @@ static void atari_set_checksum(void) | |||
| 602 | 602 | ||
| 603 | static struct { | 603 | static struct { |
| 604 | unsigned char val; | 604 | unsigned char val; |
| 605 | char *name; | 605 | const char *name; |
| 606 | } boot_prefs[] = { | 606 | } boot_prefs[] = { |
| 607 | { 0x80, "TOS" }, | 607 | { 0x80, "TOS" }, |
| 608 | { 0x40, "ASV" }, | 608 | { 0x40, "ASV" }, |
| @@ -611,7 +611,7 @@ static struct { | |||
| 611 | { 0x00, "unspecified" } | 611 | { 0x00, "unspecified" } |
| 612 | }; | 612 | }; |
| 613 | 613 | ||
| 614 | static char *languages[] = { | 614 | static const char * const languages[] = { |
| 615 | "English (US)", | 615 | "English (US)", |
| 616 | "German", | 616 | "German", |
| 617 | "French", | 617 | "French", |
| @@ -623,7 +623,7 @@ static char *languages[] = { | |||
| 623 | "Swiss (German)" | 623 | "Swiss (German)" |
| 624 | }; | 624 | }; |
| 625 | 625 | ||
| 626 | static char *dateformat[] = { | 626 | static const char * const dateformat[] = { |
| 627 | "MM%cDD%cYY", | 627 | "MM%cDD%cYY", |
| 628 | "DD%cMM%cYY", | 628 | "DD%cMM%cYY", |
| 629 | "YY%cMM%cDD", | 629 | "YY%cMM%cDD", |
| @@ -634,7 +634,7 @@ static char *dateformat[] = { | |||
| 634 | "7 (undefined)" | 634 | "7 (undefined)" |
| 635 | }; | 635 | }; |
| 636 | 636 | ||
| 637 | static char *colors[] = { | 637 | static const char * const colors[] = { |
| 638 | "2", "4", "16", "256", "65536", "??", "??", "??" | 638 | "2", "4", "16", "256", "65536", "??", "??", "??" |
| 639 | }; | 639 | }; |
| 640 | 640 | ||
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index 76c490fa0511..0e184426db98 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c | |||
| @@ -129,10 +129,9 @@ static void button_consume_callbacks (int bpcount) | |||
| 129 | 129 | ||
| 130 | static void button_sequence_finished (unsigned long parameters) | 130 | static void button_sequence_finished (unsigned long parameters) |
| 131 | { | 131 | { |
| 132 | #ifdef CONFIG_NWBUTTON_REBOOT /* Reboot using button is enabled */ | 132 | if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) && |
| 133 | if (button_press_count == reboot_count) | 133 | button_press_count == reboot_count) |
| 134 | kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */ | 134 | kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */ |
| 135 | #endif /* CONFIG_NWBUTTON_REBOOT */ | ||
| 136 | button_consume_callbacks (button_press_count); | 135 | button_consume_callbacks (button_press_count); |
| 137 | bcount = sprintf (button_output_buffer, "%d\n", button_press_count); | 136 | bcount = sprintf (button_output_buffer, "%d\n", button_press_count); |
| 138 | button_press_count = 0; /* Reset the button press counter */ | 137 | button_press_count = 0; /* Reset the button press counter */ |
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index ae0b42b66e55..d23368874710 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c | |||
| @@ -69,12 +69,13 @@ | |||
| 69 | #include <linux/ppdev.h> | 69 | #include <linux/ppdev.h> |
| 70 | #include <linux/mutex.h> | 70 | #include <linux/mutex.h> |
| 71 | #include <linux/uaccess.h> | 71 | #include <linux/uaccess.h> |
| 72 | #include <linux/compat.h> | ||
| 72 | 73 | ||
| 73 | #define PP_VERSION "ppdev: user-space parallel port driver" | 74 | #define PP_VERSION "ppdev: user-space parallel port driver" |
| 74 | #define CHRDEV "ppdev" | 75 | #define CHRDEV "ppdev" |
| 75 | 76 | ||
| 76 | struct pp_struct { | 77 | struct pp_struct { |
| 77 | struct pardevice * pdev; | 78 | struct pardevice *pdev; |
| 78 | wait_queue_head_t irq_wait; | 79 | wait_queue_head_t irq_wait; |
| 79 | atomic_t irqc; | 80 | atomic_t irqc; |
| 80 | unsigned int flags; | 81 | unsigned int flags; |
| @@ -98,18 +99,26 @@ struct pp_struct { | |||
| 98 | #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) | 99 | #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) |
| 99 | 100 | ||
| 100 | static DEFINE_MUTEX(pp_do_mutex); | 101 | static DEFINE_MUTEX(pp_do_mutex); |
| 101 | static inline void pp_enable_irq (struct pp_struct *pp) | 102 | |
| 103 | /* define fixed sized ioctl cmd for y2038 migration */ | ||
| 104 | #define PPGETTIME32 _IOR(PP_IOCTL, 0x95, s32[2]) | ||
| 105 | #define PPSETTIME32 _IOW(PP_IOCTL, 0x96, s32[2]) | ||
| 106 | #define PPGETTIME64 _IOR(PP_IOCTL, 0x95, s64[2]) | ||
| 107 | #define PPSETTIME64 _IOW(PP_IOCTL, 0x96, s64[2]) | ||
| 108 | |||
| 109 | static inline void pp_enable_irq(struct pp_struct *pp) | ||
| 102 | { | 110 | { |
| 103 | struct parport *port = pp->pdev->port; | 111 | struct parport *port = pp->pdev->port; |
| 104 | port->ops->enable_irq (port); | 112 | |
| 113 | port->ops->enable_irq(port); | ||
| 105 | } | 114 | } |
| 106 | 115 | ||
| 107 | static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | 116 | static ssize_t pp_read(struct file *file, char __user *buf, size_t count, |
| 108 | loff_t * ppos) | 117 | loff_t *ppos) |
| 109 | { | 118 | { |
| 110 | unsigned int minor = iminor(file_inode(file)); | 119 | unsigned int minor = iminor(file_inode(file)); |
| 111 | struct pp_struct *pp = file->private_data; | 120 | struct pp_struct *pp = file->private_data; |
| 112 | char * kbuffer; | 121 | char *kbuffer; |
| 113 | ssize_t bytes_read = 0; | 122 | ssize_t bytes_read = 0; |
| 114 | struct parport *pport; | 123 | struct parport *pport; |
| 115 | int mode; | 124 | int mode; |
| @@ -125,16 +134,15 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | |||
| 125 | return 0; | 134 | return 0; |
| 126 | 135 | ||
| 127 | kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); | 136 | kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); |
| 128 | if (!kbuffer) { | 137 | if (!kbuffer) |
| 129 | return -ENOMEM; | 138 | return -ENOMEM; |
| 130 | } | ||
| 131 | pport = pp->pdev->port; | 139 | pport = pp->pdev->port; |
| 132 | mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); | 140 | mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); |
| 133 | 141 | ||
| 134 | parport_set_timeout (pp->pdev, | 142 | parport_set_timeout(pp->pdev, |
| 135 | (file->f_flags & O_NONBLOCK) ? | 143 | (file->f_flags & O_NONBLOCK) ? |
| 136 | PARPORT_INACTIVITY_O_NONBLOCK : | 144 | PARPORT_INACTIVITY_O_NONBLOCK : |
| 137 | pp->default_inactivity); | 145 | pp->default_inactivity); |
| 138 | 146 | ||
| 139 | while (bytes_read == 0) { | 147 | while (bytes_read == 0) { |
| 140 | ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); | 148 | ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); |
| @@ -144,20 +152,17 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | |||
| 144 | int flags = 0; | 152 | int flags = 0; |
| 145 | size_t (*fn)(struct parport *, void *, size_t, int); | 153 | size_t (*fn)(struct parport *, void *, size_t, int); |
| 146 | 154 | ||
| 147 | if (pp->flags & PP_W91284PIC) { | 155 | if (pp->flags & PP_W91284PIC) |
| 148 | flags |= PARPORT_W91284PIC; | 156 | flags |= PARPORT_W91284PIC; |
| 149 | } | 157 | if (pp->flags & PP_FASTREAD) |
| 150 | if (pp->flags & PP_FASTREAD) { | ||
| 151 | flags |= PARPORT_EPP_FAST; | 158 | flags |= PARPORT_EPP_FAST; |
| 152 | } | 159 | if (pport->ieee1284.mode & IEEE1284_ADDR) |
| 153 | if (pport->ieee1284.mode & IEEE1284_ADDR) { | ||
| 154 | fn = pport->ops->epp_read_addr; | 160 | fn = pport->ops->epp_read_addr; |
| 155 | } else { | 161 | else |
| 156 | fn = pport->ops->epp_read_data; | 162 | fn = pport->ops->epp_read_data; |
| 157 | } | ||
| 158 | bytes_read = (*fn)(pport, kbuffer, need, flags); | 163 | bytes_read = (*fn)(pport, kbuffer, need, flags); |
| 159 | } else { | 164 | } else { |
| 160 | bytes_read = parport_read (pport, kbuffer, need); | 165 | bytes_read = parport_read(pport, kbuffer, need); |
| 161 | } | 166 | } |
| 162 | 167 | ||
| 163 | if (bytes_read != 0) | 168 | if (bytes_read != 0) |
| @@ -168,7 +173,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | |||
| 168 | break; | 173 | break; |
| 169 | } | 174 | } |
| 170 | 175 | ||
| 171 | if (signal_pending (current)) { | 176 | if (signal_pending(current)) { |
| 172 | bytes_read = -ERESTARTSYS; | 177 | bytes_read = -ERESTARTSYS; |
| 173 | break; | 178 | break; |
| 174 | } | 179 | } |
| @@ -176,22 +181,22 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count, | |||
| 176 | cond_resched(); | 181 | cond_resched(); |
| 177 | } | 182 | } |
| 178 | 183 | ||
| 179 | parport_set_timeout (pp->pdev, pp->default_inactivity); | 184 | parport_set_timeout(pp->pdev, pp->default_inactivity); |
| 180 | 185 | ||
| 181 | if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) | 186 | if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read)) |
| 182 | bytes_read = -EFAULT; | 187 | bytes_read = -EFAULT; |
| 183 | 188 | ||
| 184 | kfree (kbuffer); | 189 | kfree(kbuffer); |
| 185 | pp_enable_irq (pp); | 190 | pp_enable_irq(pp); |
| 186 | return bytes_read; | 191 | return bytes_read; |
| 187 | } | 192 | } |
| 188 | 193 | ||
| 189 | static ssize_t pp_write (struct file * file, const char __user * buf, | 194 | static ssize_t pp_write(struct file *file, const char __user *buf, |
| 190 | size_t count, loff_t * ppos) | 195 | size_t count, loff_t *ppos) |
| 191 | { | 196 | { |
| 192 | unsigned int minor = iminor(file_inode(file)); | 197 | unsigned int minor = iminor(file_inode(file)); |
| 193 | struct pp_struct *pp = file->private_data; | 198 | struct pp_struct *pp = file->private_data; |
| 194 | char * kbuffer; | 199 | char *kbuffer; |
| 195 | ssize_t bytes_written = 0; | 200 | ssize_t bytes_written = 0; |
| 196 | ssize_t wrote; | 201 | ssize_t wrote; |
| 197 | int mode; | 202 | int mode; |
| @@ -204,21 +209,21 @@ static ssize_t pp_write (struct file * file, const char __user * buf, | |||
| 204 | } | 209 | } |
| 205 | 210 | ||
| 206 | kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); | 211 | kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); |
| 207 | if (!kbuffer) { | 212 | if (!kbuffer) |
| 208 | return -ENOMEM; | 213 | return -ENOMEM; |
| 209 | } | 214 | |
| 210 | pport = pp->pdev->port; | 215 | pport = pp->pdev->port; |
| 211 | mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); | 216 | mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); |
| 212 | 217 | ||
| 213 | parport_set_timeout (pp->pdev, | 218 | parport_set_timeout(pp->pdev, |
| 214 | (file->f_flags & O_NONBLOCK) ? | 219 | (file->f_flags & O_NONBLOCK) ? |
| 215 | PARPORT_INACTIVITY_O_NONBLOCK : | 220 | PARPORT_INACTIVITY_O_NONBLOCK : |
| 216 | pp->default_inactivity); | 221 | pp->default_inactivity); |
| 217 | 222 | ||
| 218 | while (bytes_written < count) { | 223 | while (bytes_written < count) { |
| 219 | ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); | 224 | ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); |
| 220 | 225 | ||
| 221 | if (copy_from_user (kbuffer, buf + bytes_written, n)) { | 226 | if (copy_from_user(kbuffer, buf + bytes_written, n)) { |
| 222 | bytes_written = -EFAULT; | 227 | bytes_written = -EFAULT; |
| 223 | break; | 228 | break; |
| 224 | } | 229 | } |
| @@ -226,20 +231,19 @@ static ssize_t pp_write (struct file * file, const char __user * buf, | |||
| 226 | if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { | 231 | if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { |
| 227 | /* do a fast EPP write */ | 232 | /* do a fast EPP write */ |
| 228 | if (pport->ieee1284.mode & IEEE1284_ADDR) { | 233 | if (pport->ieee1284.mode & IEEE1284_ADDR) { |
| 229 | wrote = pport->ops->epp_write_addr (pport, | 234 | wrote = pport->ops->epp_write_addr(pport, |
| 230 | kbuffer, n, PARPORT_EPP_FAST); | 235 | kbuffer, n, PARPORT_EPP_FAST); |
| 231 | } else { | 236 | } else { |
| 232 | wrote = pport->ops->epp_write_data (pport, | 237 | wrote = pport->ops->epp_write_data(pport, |
| 233 | kbuffer, n, PARPORT_EPP_FAST); | 238 | kbuffer, n, PARPORT_EPP_FAST); |
| 234 | } | 239 | } |
| 235 | } else { | 240 | } else { |
| 236 | wrote = parport_write (pp->pdev->port, kbuffer, n); | 241 | wrote = parport_write(pp->pdev->port, kbuffer, n); |
| 237 | } | 242 | } |
| 238 | 243 | ||
| 239 | if (wrote <= 0) { | 244 | if (wrote <= 0) { |
| 240 | if (!bytes_written) { | 245 | if (!bytes_written) |
| 241 | bytes_written = wrote; | 246 | bytes_written = wrote; |
| 242 | } | ||
| 243 | break; | 247 | break; |
| 244 | } | 248 | } |
| 245 | 249 | ||
| @@ -251,67 +255,69 @@ static ssize_t pp_write (struct file * file, const char __user * buf, | |||
| 251 | break; | 255 | break; |
| 252 | } | 256 | } |
| 253 | 257 | ||
| 254 | if (signal_pending (current)) | 258 | if (signal_pending(current)) |
| 255 | break; | 259 | break; |
| 256 | 260 | ||
| 257 | cond_resched(); | 261 | cond_resched(); |
| 258 | } | 262 | } |
| 259 | 263 | ||
| 260 | parport_set_timeout (pp->pdev, pp->default_inactivity); | 264 | parport_set_timeout(pp->pdev, pp->default_inactivity); |
| 261 | 265 | ||
| 262 | kfree (kbuffer); | 266 | kfree(kbuffer); |
| 263 | pp_enable_irq (pp); | 267 | pp_enable_irq(pp); |
| 264 | return bytes_written; | 268 | return bytes_written; |
| 265 | } | 269 | } |
| 266 | 270 | ||
| 267 | static void pp_irq (void *private) | 271 | static void pp_irq(void *private) |
| 268 | { | 272 | { |
| 269 | struct pp_struct *pp = private; | 273 | struct pp_struct *pp = private; |
| 270 | 274 | ||
| 271 | if (pp->irqresponse) { | 275 | if (pp->irqresponse) { |
| 272 | parport_write_control (pp->pdev->port, pp->irqctl); | 276 | parport_write_control(pp->pdev->port, pp->irqctl); |
| 273 | pp->irqresponse = 0; | 277 | pp->irqresponse = 0; |
| 274 | } | 278 | } |
| 275 | 279 | ||
| 276 | atomic_inc (&pp->irqc); | 280 | atomic_inc(&pp->irqc); |
| 277 | wake_up_interruptible (&pp->irq_wait); | 281 | wake_up_interruptible(&pp->irq_wait); |
| 278 | } | 282 | } |
| 279 | 283 | ||
| 280 | static int register_device (int minor, struct pp_struct *pp) | 284 | static int register_device(int minor, struct pp_struct *pp) |
| 281 | { | 285 | { |
| 282 | struct parport *port; | 286 | struct parport *port; |
| 283 | struct pardevice * pdev = NULL; | 287 | struct pardevice *pdev = NULL; |
| 284 | char *name; | 288 | char *name; |
| 285 | int fl; | 289 | struct pardev_cb ppdev_cb; |
| 286 | 290 | ||
| 287 | name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); | 291 | name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); |
| 288 | if (name == NULL) | 292 | if (name == NULL) |
| 289 | return -ENOMEM; | 293 | return -ENOMEM; |
| 290 | 294 | ||
| 291 | port = parport_find_number (minor); | 295 | port = parport_find_number(minor); |
| 292 | if (!port) { | 296 | if (!port) { |
| 293 | printk (KERN_WARNING "%s: no associated port!\n", name); | 297 | printk(KERN_WARNING "%s: no associated port!\n", name); |
| 294 | kfree (name); | 298 | kfree(name); |
| 295 | return -ENXIO; | 299 | return -ENXIO; |
| 296 | } | 300 | } |
| 297 | 301 | ||
| 298 | fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; | 302 | memset(&ppdev_cb, 0, sizeof(ppdev_cb)); |
| 299 | pdev = parport_register_device (port, name, NULL, | 303 | ppdev_cb.irq_func = pp_irq; |
| 300 | NULL, pp_irq, fl, pp); | 304 | ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; |
| 301 | parport_put_port (port); | 305 | ppdev_cb.private = pp; |
| 306 | pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); | ||
| 307 | parport_put_port(port); | ||
| 302 | 308 | ||
| 303 | if (!pdev) { | 309 | if (!pdev) { |
| 304 | printk (KERN_WARNING "%s: failed to register device!\n", name); | 310 | printk(KERN_WARNING "%s: failed to register device!\n", name); |
| 305 | kfree (name); | 311 | kfree(name); |
| 306 | return -ENXIO; | 312 | return -ENXIO; |
| 307 | } | 313 | } |
| 308 | 314 | ||
| 309 | pp->pdev = pdev; | 315 | pp->pdev = pdev; |
| 310 | pr_debug("%s: registered pardevice\n", name); | 316 | dev_dbg(&pdev->dev, "registered pardevice\n"); |
| 311 | return 0; | 317 | return 0; |
| 312 | } | 318 | } |
| 313 | 319 | ||
| 314 | static enum ieee1284_phase init_phase (int mode) | 320 | static enum ieee1284_phase init_phase(int mode) |
| 315 | { | 321 | { |
| 316 | switch (mode & ~(IEEE1284_DEVICEID | 322 | switch (mode & ~(IEEE1284_DEVICEID |
| 317 | | IEEE1284_ADDR)) { | 323 | | IEEE1284_ADDR)) { |
| @@ -322,11 +328,27 @@ static enum ieee1284_phase init_phase (int mode) | |||
| 322 | return IEEE1284_PH_FWD_IDLE; | 328 | return IEEE1284_PH_FWD_IDLE; |
| 323 | } | 329 | } |
| 324 | 330 | ||
| 331 | static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec) | ||
| 332 | { | ||
| 333 | long to_jiffies; | ||
| 334 | |||
| 335 | if ((tv_sec < 0) || (tv_usec < 0)) | ||
| 336 | return -EINVAL; | ||
| 337 | |||
| 338 | to_jiffies = usecs_to_jiffies(tv_usec); | ||
| 339 | to_jiffies += tv_sec * HZ; | ||
| 340 | if (to_jiffies <= 0) | ||
| 341 | return -EINVAL; | ||
| 342 | |||
| 343 | pdev->timeout = to_jiffies; | ||
| 344 | return 0; | ||
| 345 | } | ||
| 346 | |||
| 325 | static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 347 | static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 326 | { | 348 | { |
| 327 | unsigned int minor = iminor(file_inode(file)); | 349 | unsigned int minor = iminor(file_inode(file)); |
| 328 | struct pp_struct *pp = file->private_data; | 350 | struct pp_struct *pp = file->private_data; |
| 329 | struct parport * port; | 351 | struct parport *port; |
| 330 | void __user *argp = (void __user *)arg; | 352 | void __user *argp = (void __user *)arg; |
| 331 | 353 | ||
| 332 | /* First handle the cases that don't take arguments. */ | 354 | /* First handle the cases that don't take arguments. */ |
| @@ -337,19 +359,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 337 | int ret; | 359 | int ret; |
| 338 | 360 | ||
| 339 | if (pp->flags & PP_CLAIMED) { | 361 | if (pp->flags & PP_CLAIMED) { |
| 340 | pr_debug(CHRDEV "%x: you've already got it!\n", minor); | 362 | dev_dbg(&pp->pdev->dev, "you've already got it!\n"); |
| 341 | return -EINVAL; | 363 | return -EINVAL; |
| 342 | } | 364 | } |
| 343 | 365 | ||
| 344 | /* Deferred device registration. */ | 366 | /* Deferred device registration. */ |
| 345 | if (!pp->pdev) { | 367 | if (!pp->pdev) { |
| 346 | int err = register_device (minor, pp); | 368 | int err = register_device(minor, pp); |
| 347 | if (err) { | 369 | |
| 370 | if (err) | ||
| 348 | return err; | 371 | return err; |
| 349 | } | ||
| 350 | } | 372 | } |
| 351 | 373 | ||
| 352 | ret = parport_claim_or_block (pp->pdev); | 374 | ret = parport_claim_or_block(pp->pdev); |
| 353 | if (ret < 0) | 375 | if (ret < 0) |
| 354 | return ret; | 376 | return ret; |
| 355 | 377 | ||
| @@ -357,7 +379,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 357 | 379 | ||
| 358 | /* For interrupt-reporting to work, we need to be | 380 | /* For interrupt-reporting to work, we need to be |
| 359 | * informed of each interrupt. */ | 381 | * informed of each interrupt. */ |
| 360 | pp_enable_irq (pp); | 382 | pp_enable_irq(pp); |
| 361 | 383 | ||
| 362 | /* We may need to fix up the state machine. */ | 384 | /* We may need to fix up the state machine. */ |
| 363 | info = &pp->pdev->port->ieee1284; | 385 | info = &pp->pdev->port->ieee1284; |
| @@ -365,15 +387,15 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 365 | pp->saved_state.phase = info->phase; | 387 | pp->saved_state.phase = info->phase; |
| 366 | info->mode = pp->state.mode; | 388 | info->mode = pp->state.mode; |
| 367 | info->phase = pp->state.phase; | 389 | info->phase = pp->state.phase; |
| 368 | pp->default_inactivity = parport_set_timeout (pp->pdev, 0); | 390 | pp->default_inactivity = parport_set_timeout(pp->pdev, 0); |
| 369 | parport_set_timeout (pp->pdev, pp->default_inactivity); | 391 | parport_set_timeout(pp->pdev, pp->default_inactivity); |
| 370 | 392 | ||
| 371 | return 0; | 393 | return 0; |
| 372 | } | 394 | } |
| 373 | case PPEXCL: | 395 | case PPEXCL: |
| 374 | if (pp->pdev) { | 396 | if (pp->pdev) { |
| 375 | pr_debug(CHRDEV "%x: too late for PPEXCL; " | 397 | dev_dbg(&pp->pdev->dev, |
| 376 | "already registered\n", minor); | 398 | "too late for PPEXCL; already registered\n"); |
| 377 | if (pp->flags & PP_EXCL) | 399 | if (pp->flags & PP_EXCL) |
| 378 | /* But it's not really an error. */ | 400 | /* But it's not really an error. */ |
| 379 | return 0; | 401 | return 0; |
| @@ -388,11 +410,12 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 388 | case PPSETMODE: | 410 | case PPSETMODE: |
| 389 | { | 411 | { |
| 390 | int mode; | 412 | int mode; |
| 391 | if (copy_from_user (&mode, argp, sizeof (mode))) | 413 | |
| 414 | if (copy_from_user(&mode, argp, sizeof(mode))) | ||
| 392 | return -EFAULT; | 415 | return -EFAULT; |
| 393 | /* FIXME: validate mode */ | 416 | /* FIXME: validate mode */ |
| 394 | pp->state.mode = mode; | 417 | pp->state.mode = mode; |
| 395 | pp->state.phase = init_phase (mode); | 418 | pp->state.phase = init_phase(mode); |
| 396 | 419 | ||
| 397 | if (pp->flags & PP_CLAIMED) { | 420 | if (pp->flags & PP_CLAIMED) { |
| 398 | pp->pdev->port->ieee1284.mode = mode; | 421 | pp->pdev->port->ieee1284.mode = mode; |
| @@ -405,28 +428,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 405 | { | 428 | { |
| 406 | int mode; | 429 | int mode; |
| 407 | 430 | ||
| 408 | if (pp->flags & PP_CLAIMED) { | 431 | if (pp->flags & PP_CLAIMED) |
| 409 | mode = pp->pdev->port->ieee1284.mode; | 432 | mode = pp->pdev->port->ieee1284.mode; |
| 410 | } else { | 433 | else |
| 411 | mode = pp->state.mode; | 434 | mode = pp->state.mode; |
| 412 | } | 435 | |
| 413 | if (copy_to_user (argp, &mode, sizeof (mode))) { | 436 | if (copy_to_user(argp, &mode, sizeof(mode))) |
| 414 | return -EFAULT; | 437 | return -EFAULT; |
| 415 | } | ||
| 416 | return 0; | 438 | return 0; |
| 417 | } | 439 | } |
| 418 | case PPSETPHASE: | 440 | case PPSETPHASE: |
| 419 | { | 441 | { |
| 420 | int phase; | 442 | int phase; |
| 421 | if (copy_from_user (&phase, argp, sizeof (phase))) { | 443 | |
| 444 | if (copy_from_user(&phase, argp, sizeof(phase))) | ||
| 422 | return -EFAULT; | 445 | return -EFAULT; |
| 423 | } | 446 | |
| 424 | /* FIXME: validate phase */ | 447 | /* FIXME: validate phase */ |
| 425 | pp->state.phase = phase; | 448 | pp->state.phase = phase; |
| 426 | 449 | ||
| 427 | if (pp->flags & PP_CLAIMED) { | 450 | if (pp->flags & PP_CLAIMED) |
| 428 | pp->pdev->port->ieee1284.phase = phase; | 451 | pp->pdev->port->ieee1284.phase = phase; |
| 429 | } | ||
| 430 | 452 | ||
| 431 | return 0; | 453 | return 0; |
| 432 | } | 454 | } |
| @@ -434,38 +456,34 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 434 | { | 456 | { |
| 435 | int phase; | 457 | int phase; |
| 436 | 458 | ||
| 437 | if (pp->flags & PP_CLAIMED) { | 459 | if (pp->flags & PP_CLAIMED) |
| 438 | phase = pp->pdev->port->ieee1284.phase; | 460 | phase = pp->pdev->port->ieee1284.phase; |
| 439 | } else { | 461 | else |
| 440 | phase = pp->state.phase; | 462 | phase = pp->state.phase; |
| 441 | } | 463 | if (copy_to_user(argp, &phase, sizeof(phase))) |
| 442 | if (copy_to_user (argp, &phase, sizeof (phase))) { | ||
| 443 | return -EFAULT; | 464 | return -EFAULT; |
| 444 | } | ||
| 445 | return 0; | 465 | return 0; |
| 446 | } | 466 | } |
| 447 | case PPGETMODES: | 467 | case PPGETMODES: |
| 448 | { | 468 | { |
| 449 | unsigned int modes; | 469 | unsigned int modes; |
| 450 | 470 | ||
| 451 | port = parport_find_number (minor); | 471 | port = parport_find_number(minor); |
| 452 | if (!port) | 472 | if (!port) |
| 453 | return -ENODEV; | 473 | return -ENODEV; |
| 454 | 474 | ||
| 455 | modes = port->modes; | 475 | modes = port->modes; |
| 456 | parport_put_port(port); | 476 | parport_put_port(port); |
| 457 | if (copy_to_user (argp, &modes, sizeof (modes))) { | 477 | if (copy_to_user(argp, &modes, sizeof(modes))) |
| 458 | return -EFAULT; | 478 | return -EFAULT; |
| 459 | } | ||
| 460 | return 0; | 479 | return 0; |
| 461 | } | 480 | } |
| 462 | case PPSETFLAGS: | 481 | case PPSETFLAGS: |
| 463 | { | 482 | { |
| 464 | int uflags; | 483 | int uflags; |
| 465 | 484 | ||
| 466 | if (copy_from_user (&uflags, argp, sizeof (uflags))) { | 485 | if (copy_from_user(&uflags, argp, sizeof(uflags))) |
| 467 | return -EFAULT; | 486 | return -EFAULT; |
| 468 | } | ||
| 469 | pp->flags &= ~PP_FLAGMASK; | 487 | pp->flags &= ~PP_FLAGMASK; |
| 470 | pp->flags |= (uflags & PP_FLAGMASK); | 488 | pp->flags |= (uflags & PP_FLAGMASK); |
| 471 | return 0; | 489 | return 0; |
| @@ -475,9 +493,8 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 475 | int uflags; | 493 | int uflags; |
| 476 | 494 | ||
| 477 | uflags = pp->flags & PP_FLAGMASK; | 495 | uflags = pp->flags & PP_FLAGMASK; |
| 478 | if (copy_to_user (argp, &uflags, sizeof (uflags))) { | 496 | if (copy_to_user(argp, &uflags, sizeof(uflags))) |
| 479 | return -EFAULT; | 497 | return -EFAULT; |
| 480 | } | ||
| 481 | return 0; | 498 | return 0; |
| 482 | } | 499 | } |
| 483 | } /* end switch() */ | 500 | } /* end switch() */ |
| @@ -495,27 +512,28 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 495 | unsigned char reg; | 512 | unsigned char reg; |
| 496 | unsigned char mask; | 513 | unsigned char mask; |
| 497 | int mode; | 514 | int mode; |
| 515 | s32 time32[2]; | ||
| 516 | s64 time64[2]; | ||
| 517 | struct timespec64 ts; | ||
| 498 | int ret; | 518 | int ret; |
| 499 | struct timeval par_timeout; | ||
| 500 | long to_jiffies; | ||
| 501 | 519 | ||
| 502 | case PPRSTATUS: | 520 | case PPRSTATUS: |
| 503 | reg = parport_read_status (port); | 521 | reg = parport_read_status(port); |
| 504 | if (copy_to_user (argp, ®, sizeof (reg))) | 522 | if (copy_to_user(argp, ®, sizeof(reg))) |
| 505 | return -EFAULT; | 523 | return -EFAULT; |
| 506 | return 0; | 524 | return 0; |
| 507 | case PPRDATA: | 525 | case PPRDATA: |
| 508 | reg = parport_read_data (port); | 526 | reg = parport_read_data(port); |
| 509 | if (copy_to_user (argp, ®, sizeof (reg))) | 527 | if (copy_to_user(argp, ®, sizeof(reg))) |
| 510 | return -EFAULT; | 528 | return -EFAULT; |
| 511 | return 0; | 529 | return 0; |
| 512 | case PPRCONTROL: | 530 | case PPRCONTROL: |
| 513 | reg = parport_read_control (port); | 531 | reg = parport_read_control(port); |
| 514 | if (copy_to_user (argp, ®, sizeof (reg))) | 532 | if (copy_to_user(argp, ®, sizeof(reg))) |
| 515 | return -EFAULT; | 533 | return -EFAULT; |
| 516 | return 0; | 534 | return 0; |
| 517 | case PPYIELD: | 535 | case PPYIELD: |
| 518 | parport_yield_blocking (pp->pdev); | 536 | parport_yield_blocking(pp->pdev); |
| 519 | return 0; | 537 | return 0; |
| 520 | 538 | ||
| 521 | case PPRELEASE: | 539 | case PPRELEASE: |
| @@ -525,45 +543,45 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 525 | pp->state.phase = info->phase; | 543 | pp->state.phase = info->phase; |
| 526 | info->mode = pp->saved_state.mode; | 544 | info->mode = pp->saved_state.mode; |
| 527 | info->phase = pp->saved_state.phase; | 545 | info->phase = pp->saved_state.phase; |
| 528 | parport_release (pp->pdev); | 546 | parport_release(pp->pdev); |
| 529 | pp->flags &= ~PP_CLAIMED; | 547 | pp->flags &= ~PP_CLAIMED; |
| 530 | return 0; | 548 | return 0; |
| 531 | 549 | ||
| 532 | case PPWCONTROL: | 550 | case PPWCONTROL: |
| 533 | if (copy_from_user (®, argp, sizeof (reg))) | 551 | if (copy_from_user(®, argp, sizeof(reg))) |
| 534 | return -EFAULT; | 552 | return -EFAULT; |
| 535 | parport_write_control (port, reg); | 553 | parport_write_control(port, reg); |
| 536 | return 0; | 554 | return 0; |
| 537 | 555 | ||
| 538 | case PPWDATA: | 556 | case PPWDATA: |
| 539 | if (copy_from_user (®, argp, sizeof (reg))) | 557 | if (copy_from_user(®, argp, sizeof(reg))) |
| 540 | return -EFAULT; | 558 | return -EFAULT; |
| 541 | parport_write_data (port, reg); | 559 | parport_write_data(port, reg); |
| 542 | return 0; | 560 | return 0; |
| 543 | 561 | ||
| 544 | case PPFCONTROL: | 562 | case PPFCONTROL: |
| 545 | if (copy_from_user (&mask, argp, | 563 | if (copy_from_user(&mask, argp, |
| 546 | sizeof (mask))) | 564 | sizeof(mask))) |
| 547 | return -EFAULT; | 565 | return -EFAULT; |
| 548 | if (copy_from_user (®, 1 + (unsigned char __user *) arg, | 566 | if (copy_from_user(®, 1 + (unsigned char __user *) arg, |
| 549 | sizeof (reg))) | 567 | sizeof(reg))) |
| 550 | return -EFAULT; | 568 | return -EFAULT; |
| 551 | parport_frob_control (port, mask, reg); | 569 | parport_frob_control(port, mask, reg); |
| 552 | return 0; | 570 | return 0; |
| 553 | 571 | ||
| 554 | case PPDATADIR: | 572 | case PPDATADIR: |
| 555 | if (copy_from_user (&mode, argp, sizeof (mode))) | 573 | if (copy_from_user(&mode, argp, sizeof(mode))) |
| 556 | return -EFAULT; | 574 | return -EFAULT; |
| 557 | if (mode) | 575 | if (mode) |
| 558 | port->ops->data_reverse (port); | 576 | port->ops->data_reverse(port); |
| 559 | else | 577 | else |
| 560 | port->ops->data_forward (port); | 578 | port->ops->data_forward(port); |
| 561 | return 0; | 579 | return 0; |
| 562 | 580 | ||
| 563 | case PPNEGOT: | 581 | case PPNEGOT: |
| 564 | if (copy_from_user (&mode, argp, sizeof (mode))) | 582 | if (copy_from_user(&mode, argp, sizeof(mode))) |
| 565 | return -EFAULT; | 583 | return -EFAULT; |
| 566 | switch ((ret = parport_negotiate (port, mode))) { | 584 | switch ((ret = parport_negotiate(port, mode))) { |
| 567 | case 0: break; | 585 | case 0: break; |
| 568 | case -1: /* handshake failed, peripheral not IEEE 1284 */ | 586 | case -1: /* handshake failed, peripheral not IEEE 1284 */ |
| 569 | ret = -EIO; | 587 | ret = -EIO; |
| @@ -572,11 +590,11 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 572 | ret = -ENXIO; | 590 | ret = -ENXIO; |
| 573 | break; | 591 | break; |
| 574 | } | 592 | } |
| 575 | pp_enable_irq (pp); | 593 | pp_enable_irq(pp); |
| 576 | return ret; | 594 | return ret; |
| 577 | 595 | ||
| 578 | case PPWCTLONIRQ: | 596 | case PPWCTLONIRQ: |
| 579 | if (copy_from_user (®, argp, sizeof (reg))) | 597 | if (copy_from_user(®, argp, sizeof(reg))) |
| 580 | return -EFAULT; | 598 | return -EFAULT; |
| 581 | 599 | ||
| 582 | /* Remember what to set the control lines to, for next | 600 | /* Remember what to set the control lines to, for next |
| @@ -586,39 +604,50 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 586 | return 0; | 604 | return 0; |
| 587 | 605 | ||
| 588 | case PPCLRIRQ: | 606 | case PPCLRIRQ: |
| 589 | ret = atomic_read (&pp->irqc); | 607 | ret = atomic_read(&pp->irqc); |
| 590 | if (copy_to_user (argp, &ret, sizeof (ret))) | 608 | if (copy_to_user(argp, &ret, sizeof(ret))) |
| 591 | return -EFAULT; | 609 | return -EFAULT; |
| 592 | atomic_sub (ret, &pp->irqc); | 610 | atomic_sub(ret, &pp->irqc); |
| 593 | return 0; | 611 | return 0; |
| 594 | 612 | ||
| 595 | case PPSETTIME: | 613 | case PPSETTIME32: |
| 596 | if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { | 614 | if (copy_from_user(time32, argp, sizeof(time32))) |
| 597 | return -EFAULT; | 615 | return -EFAULT; |
| 598 | } | 616 | |
| 599 | /* Convert to jiffies, place in pp->pdev->timeout */ | 617 | return pp_set_timeout(pp->pdev, time32[0], time32[1]); |
| 600 | if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { | 618 | |
| 601 | return -EINVAL; | 619 | case PPSETTIME64: |
| 602 | } | 620 | if (copy_from_user(time64, argp, sizeof(time64))) |
| 603 | to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); | 621 | return -EFAULT; |
| 604 | to_jiffies += par_timeout.tv_sec * (long)HZ; | 622 | |
| 605 | if (to_jiffies <= 0) { | 623 | return pp_set_timeout(pp->pdev, time64[0], time64[1]); |
| 624 | |||
| 625 | case PPGETTIME32: | ||
| 626 | jiffies_to_timespec64(pp->pdev->timeout, &ts); | ||
| 627 | time32[0] = ts.tv_sec; | ||
| 628 | time32[1] = ts.tv_nsec / NSEC_PER_USEC; | ||
| 629 | if ((time32[0] < 0) || (time32[1] < 0)) | ||
| 606 | return -EINVAL; | 630 | return -EINVAL; |
| 607 | } | 631 | |
| 608 | pp->pdev->timeout = to_jiffies; | 632 | if (copy_to_user(argp, time32, sizeof(time32))) |
| 633 | return -EFAULT; | ||
| 634 | |||
| 609 | return 0; | 635 | return 0; |
| 610 | 636 | ||
| 611 | case PPGETTIME: | 637 | case PPGETTIME64: |
| 612 | to_jiffies = pp->pdev->timeout; | 638 | jiffies_to_timespec64(pp->pdev->timeout, &ts); |
| 613 | memset(&par_timeout, 0, sizeof(par_timeout)); | 639 | time64[0] = ts.tv_sec; |
| 614 | par_timeout.tv_sec = to_jiffies / HZ; | 640 | time64[1] = ts.tv_nsec / NSEC_PER_USEC; |
| 615 | par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); | 641 | if ((time64[0] < 0) || (time64[1] < 0)) |
| 616 | if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) | 642 | return -EINVAL; |
| 643 | |||
| 644 | if (copy_to_user(argp, time64, sizeof(time64))) | ||
| 617 | return -EFAULT; | 645 | return -EFAULT; |
| 646 | |||
| 618 | return 0; | 647 | return 0; |
| 619 | 648 | ||
| 620 | default: | 649 | default: |
| 621 | pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); | 650 | dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd); |
| 622 | return -EINVAL; | 651 | return -EINVAL; |
| 623 | } | 652 | } |
| 624 | 653 | ||
| @@ -629,13 +658,22 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 629 | static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 658 | static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 630 | { | 659 | { |
| 631 | long ret; | 660 | long ret; |
| 661 | |||
| 632 | mutex_lock(&pp_do_mutex); | 662 | mutex_lock(&pp_do_mutex); |
| 633 | ret = pp_do_ioctl(file, cmd, arg); | 663 | ret = pp_do_ioctl(file, cmd, arg); |
| 634 | mutex_unlock(&pp_do_mutex); | 664 | mutex_unlock(&pp_do_mutex); |
| 635 | return ret; | 665 | return ret; |
| 636 | } | 666 | } |
| 637 | 667 | ||
| 638 | static int pp_open (struct inode * inode, struct file * file) | 668 | #ifdef CONFIG_COMPAT |
| 669 | static long pp_compat_ioctl(struct file *file, unsigned int cmd, | ||
| 670 | unsigned long arg) | ||
| 671 | { | ||
| 672 | return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); | ||
| 673 | } | ||
| 674 | #endif | ||
| 675 | |||
| 676 | static int pp_open(struct inode *inode, struct file *file) | ||
| 639 | { | 677 | { |
| 640 | unsigned int minor = iminor(inode); | 678 | unsigned int minor = iminor(inode); |
| 641 | struct pp_struct *pp; | 679 | struct pp_struct *pp; |
| @@ -643,16 +681,16 @@ static int pp_open (struct inode * inode, struct file * file) | |||
| 643 | if (minor >= PARPORT_MAX) | 681 | if (minor >= PARPORT_MAX) |
| 644 | return -ENXIO; | 682 | return -ENXIO; |
| 645 | 683 | ||
| 646 | pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); | 684 | pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL); |
| 647 | if (!pp) | 685 | if (!pp) |
| 648 | return -ENOMEM; | 686 | return -ENOMEM; |
| 649 | 687 | ||
| 650 | pp->state.mode = IEEE1284_MODE_COMPAT; | 688 | pp->state.mode = IEEE1284_MODE_COMPAT; |
| 651 | pp->state.phase = init_phase (pp->state.mode); | 689 | pp->state.phase = init_phase(pp->state.mode); |
| 652 | pp->flags = 0; | 690 | pp->flags = 0; |
| 653 | pp->irqresponse = 0; | 691 | pp->irqresponse = 0; |
| 654 | atomic_set (&pp->irqc, 0); | 692 | atomic_set(&pp->irqc, 0); |
| 655 | init_waitqueue_head (&pp->irq_wait); | 693 | init_waitqueue_head(&pp->irq_wait); |
| 656 | 694 | ||
| 657 | /* Defer the actual device registration until the first claim. | 695 | /* Defer the actual device registration until the first claim. |
| 658 | * That way, we know whether or not the driver wants to have | 696 | * That way, we know whether or not the driver wants to have |
| @@ -664,7 +702,7 @@ static int pp_open (struct inode * inode, struct file * file) | |||
| 664 | return 0; | 702 | return 0; |
| 665 | } | 703 | } |
| 666 | 704 | ||
| 667 | static int pp_release (struct inode * inode, struct file * file) | 705 | static int pp_release(struct inode *inode, struct file *file) |
| 668 | { | 706 | { |
| 669 | unsigned int minor = iminor(inode); | 707 | unsigned int minor = iminor(inode); |
| 670 | struct pp_struct *pp = file->private_data; | 708 | struct pp_struct *pp = file->private_data; |
| @@ -673,10 +711,10 @@ static int pp_release (struct inode * inode, struct file * file) | |||
| 673 | compat_negot = 0; | 711 | compat_negot = 0; |
| 674 | if (!(pp->flags & PP_CLAIMED) && pp->pdev && | 712 | if (!(pp->flags & PP_CLAIMED) && pp->pdev && |
| 675 | (pp->state.mode != IEEE1284_MODE_COMPAT)) { | 713 | (pp->state.mode != IEEE1284_MODE_COMPAT)) { |
| 676 | struct ieee1284_info *info; | 714 | struct ieee1284_info *info; |
| 677 | 715 | ||
| 678 | /* parport released, but not in compatibility mode */ | 716 | /* parport released, but not in compatibility mode */ |
| 679 | parport_claim_or_block (pp->pdev); | 717 | parport_claim_or_block(pp->pdev); |
| 680 | pp->flags |= PP_CLAIMED; | 718 | pp->flags |= PP_CLAIMED; |
| 681 | info = &pp->pdev->port->ieee1284; | 719 | info = &pp->pdev->port->ieee1284; |
| 682 | pp->saved_state.mode = info->mode; | 720 | pp->saved_state.mode = info->mode; |
| @@ -689,9 +727,9 @@ static int pp_release (struct inode * inode, struct file * file) | |||
| 689 | compat_negot = 2; | 727 | compat_negot = 2; |
| 690 | } | 728 | } |
| 691 | if (compat_negot) { | 729 | if (compat_negot) { |
| 692 | parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); | 730 | parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT); |
| 693 | pr_debug(CHRDEV "%x: negotiated back to compatibility " | 731 | dev_dbg(&pp->pdev->dev, |
| 694 | "mode because user-space forgot\n", minor); | 732 | "negotiated back to compatibility mode because user-space forgot\n"); |
| 695 | } | 733 | } |
| 696 | 734 | ||
| 697 | if (pp->flags & PP_CLAIMED) { | 735 | if (pp->flags & PP_CLAIMED) { |
| @@ -702,7 +740,7 @@ static int pp_release (struct inode * inode, struct file * file) | |||
| 702 | pp->state.phase = info->phase; | 740 | pp->state.phase = info->phase; |
| 703 | info->mode = pp->saved_state.mode; | 741 | info->mode = pp->saved_state.mode; |
| 704 | info->phase = pp->saved_state.phase; | 742 | info->phase = pp->saved_state.phase; |
| 705 | parport_release (pp->pdev); | 743 | parport_release(pp->pdev); |
| 706 | if (compat_negot != 1) { | 744 | if (compat_negot != 1) { |
| 707 | pr_debug(CHRDEV "%x: released pardevice " | 745 | pr_debug(CHRDEV "%x: released pardevice " |
| 708 | "because user-space forgot\n", minor); | 746 | "because user-space forgot\n", minor); |
| @@ -711,25 +749,26 @@ static int pp_release (struct inode * inode, struct file * file) | |||
| 711 | 749 | ||
| 712 | if (pp->pdev) { | 750 | if (pp->pdev) { |
| 713 | const char *name = pp->pdev->name; | 751 | const char *name = pp->pdev->name; |
| 714 | parport_unregister_device (pp->pdev); | 752 | |
| 715 | kfree (name); | 753 | parport_unregister_device(pp->pdev); |
| 754 | kfree(name); | ||
| 716 | pp->pdev = NULL; | 755 | pp->pdev = NULL; |
| 717 | pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); | 756 | pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); |
| 718 | } | 757 | } |
| 719 | 758 | ||
| 720 | kfree (pp); | 759 | kfree(pp); |
| 721 | 760 | ||
| 722 | return 0; | 761 | return 0; |
| 723 | } | 762 | } |
| 724 | 763 | ||
| 725 | /* No kernel lock held - fine */ | 764 | /* No kernel lock held - fine */ |
| 726 | static unsigned int pp_poll (struct file * file, poll_table * wait) | 765 | static unsigned int pp_poll(struct file *file, poll_table *wait) |
| 727 | { | 766 | { |
| 728 | struct pp_struct *pp = file->private_data; | 767 | struct pp_struct *pp = file->private_data; |
| 729 | unsigned int mask = 0; | 768 | unsigned int mask = 0; |
| 730 | 769 | ||
| 731 | poll_wait (file, &pp->irq_wait, wait); | 770 | poll_wait(file, &pp->irq_wait, wait); |
| 732 | if (atomic_read (&pp->irqc)) | 771 | if (atomic_read(&pp->irqc)) |
| 733 | mask |= POLLIN | POLLRDNORM; | 772 | mask |= POLLIN | POLLRDNORM; |
| 734 | 773 | ||
| 735 | return mask; | 774 | return mask; |
| @@ -744,6 +783,9 @@ static const struct file_operations pp_fops = { | |||
| 744 | .write = pp_write, | 783 | .write = pp_write, |
| 745 | .poll = pp_poll, | 784 | .poll = pp_poll, |
| 746 | .unlocked_ioctl = pp_ioctl, | 785 | .unlocked_ioctl = pp_ioctl, |
| 786 | #ifdef CONFIG_COMPAT | ||
| 787 | .compat_ioctl = pp_compat_ioctl, | ||
| 788 | #endif | ||
| 747 | .open = pp_open, | 789 | .open = pp_open, |
| 748 | .release = pp_release, | 790 | .release = pp_release, |
| 749 | }; | 791 | }; |
| @@ -759,19 +801,32 @@ static void pp_detach(struct parport *port) | |||
| 759 | device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); | 801 | device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); |
| 760 | } | 802 | } |
| 761 | 803 | ||
| 804 | static int pp_probe(struct pardevice *par_dev) | ||
| 805 | { | ||
| 806 | struct device_driver *drv = par_dev->dev.driver; | ||
| 807 | int len = strlen(drv->name); | ||
| 808 | |||
| 809 | if (strncmp(par_dev->name, drv->name, len)) | ||
| 810 | return -ENODEV; | ||
| 811 | |||
| 812 | return 0; | ||
| 813 | } | ||
| 814 | |||
| 762 | static struct parport_driver pp_driver = { | 815 | static struct parport_driver pp_driver = { |
| 763 | .name = CHRDEV, | 816 | .name = CHRDEV, |
| 764 | .attach = pp_attach, | 817 | .probe = pp_probe, |
| 818 | .match_port = pp_attach, | ||
| 765 | .detach = pp_detach, | 819 | .detach = pp_detach, |
| 820 | .devmodel = true, | ||
| 766 | }; | 821 | }; |
| 767 | 822 | ||
| 768 | static int __init ppdev_init (void) | 823 | static int __init ppdev_init(void) |
| 769 | { | 824 | { |
| 770 | int err = 0; | 825 | int err = 0; |
| 771 | 826 | ||
| 772 | if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { | 827 | if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) { |
| 773 | printk (KERN_WARNING CHRDEV ": unable to get major %d\n", | 828 | printk(KERN_WARNING CHRDEV ": unable to get major %d\n", |
| 774 | PP_MAJOR); | 829 | PP_MAJOR); |
| 775 | return -EIO; | 830 | return -EIO; |
| 776 | } | 831 | } |
| 777 | ppdev_class = class_create(THIS_MODULE, CHRDEV); | 832 | ppdev_class = class_create(THIS_MODULE, CHRDEV); |
| @@ -781,11 +836,11 @@ static int __init ppdev_init (void) | |||
| 781 | } | 836 | } |
| 782 | err = parport_register_driver(&pp_driver); | 837 | err = parport_register_driver(&pp_driver); |
| 783 | if (err < 0) { | 838 | if (err < 0) { |
| 784 | printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); | 839 | printk(KERN_WARNING CHRDEV ": unable to register with parport\n"); |
| 785 | goto out_class; | 840 | goto out_class; |
| 786 | } | 841 | } |
| 787 | 842 | ||
| 788 | printk (KERN_INFO PP_VERSION "\n"); | 843 | printk(KERN_INFO PP_VERSION "\n"); |
| 789 | goto out; | 844 | goto out; |
| 790 | 845 | ||
| 791 | out_class: | 846 | out_class: |
| @@ -796,12 +851,12 @@ out: | |||
| 796 | return err; | 851 | return err; |
| 797 | } | 852 | } |
| 798 | 853 | ||
| 799 | static void __exit ppdev_cleanup (void) | 854 | static void __exit ppdev_cleanup(void) |
| 800 | { | 855 | { |
| 801 | /* Clean up all parport stuff */ | 856 | /* Clean up all parport stuff */ |
| 802 | parport_unregister_driver(&pp_driver); | 857 | parport_unregister_driver(&pp_driver); |
| 803 | class_destroy(ppdev_class); | 858 | class_destroy(ppdev_class); |
| 804 | unregister_chrdev (PP_MAJOR, CHRDEV); | 859 | unregister_chrdev(PP_MAJOR, CHRDEV); |
| 805 | } | 860 | } |
| 806 | 861 | ||
| 807 | module_init(ppdev_init); | 862 | module_init(ppdev_init); |
diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 9b9809b709a5..e83b2adc014a 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c | |||
| @@ -334,10 +334,8 @@ static int __init raw_init(void) | |||
| 334 | 334 | ||
| 335 | cdev_init(&raw_cdev, &raw_fops); | 335 | cdev_init(&raw_cdev, &raw_fops); |
| 336 | ret = cdev_add(&raw_cdev, dev, max_raw_minors); | 336 | ret = cdev_add(&raw_cdev, dev, max_raw_minors); |
| 337 | if (ret) { | 337 | if (ret) |
| 338 | goto error_region; | 338 | goto error_region; |
| 339 | } | ||
| 340 | |||
| 341 | raw_class = class_create(THIS_MODULE, "raw"); | 339 | raw_class = class_create(THIS_MODULE, "raw"); |
| 342 | if (IS_ERR(raw_class)) { | 340 | if (IS_ERR(raw_class)) { |
| 343 | printk(KERN_ERR "Error creating raw class.\n"); | 341 | printk(KERN_ERR "Error creating raw class.\n"); |
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 20de861aa0ea..8bf9914d4d15 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c | |||
| @@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err) | |||
| 782 | dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | | 782 | dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | |
| 783 | SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); | 783 | SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); |
| 784 | 784 | ||
| 785 | clk_disable_unprepare(dd->iclk); | 785 | clk_disable(dd->iclk); |
| 786 | 786 | ||
| 787 | if (req->base.complete) | 787 | if (req->base.complete) |
| 788 | req->base.complete(&req->base, err); | 788 | req->base.complete(&req->base, err); |
| @@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd) | |||
| 795 | { | 795 | { |
| 796 | int err; | 796 | int err; |
| 797 | 797 | ||
| 798 | err = clk_prepare_enable(dd->iclk); | 798 | err = clk_enable(dd->iclk); |
| 799 | if (err) | 799 | if (err) |
| 800 | return err; | 800 | return err; |
| 801 | 801 | ||
| @@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd) | |||
| 822 | dev_info(dd->dev, | 822 | dev_info(dd->dev, |
| 823 | "version: 0x%x\n", dd->hw_version); | 823 | "version: 0x%x\n", dd->hw_version); |
| 824 | 824 | ||
| 825 | clk_disable_unprepare(dd->iclk); | 825 | clk_disable(dd->iclk); |
| 826 | } | 826 | } |
| 827 | 827 | ||
| 828 | static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, | 828 | static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, |
| @@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev) | |||
| 1410 | goto res_err; | 1410 | goto res_err; |
| 1411 | } | 1411 | } |
| 1412 | 1412 | ||
| 1413 | err = clk_prepare(sha_dd->iclk); | ||
| 1414 | if (err) | ||
| 1415 | goto res_err; | ||
| 1416 | |||
| 1413 | atmel_sha_hw_version_init(sha_dd); | 1417 | atmel_sha_hw_version_init(sha_dd); |
| 1414 | 1418 | ||
| 1415 | atmel_sha_get_cap(sha_dd); | 1419 | atmel_sha_get_cap(sha_dd); |
| @@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev) | |||
| 1421 | if (IS_ERR(pdata)) { | 1425 | if (IS_ERR(pdata)) { |
| 1422 | dev_err(&pdev->dev, "platform data not available\n"); | 1426 | dev_err(&pdev->dev, "platform data not available\n"); |
| 1423 | err = PTR_ERR(pdata); | 1427 | err = PTR_ERR(pdata); |
| 1424 | goto res_err; | 1428 | goto iclk_unprepare; |
| 1425 | } | 1429 | } |
| 1426 | } | 1430 | } |
| 1427 | if (!pdata->dma_slave) { | 1431 | if (!pdata->dma_slave) { |
| 1428 | err = -ENXIO; | 1432 | err = -ENXIO; |
| 1429 | goto res_err; | 1433 | goto iclk_unprepare; |
| 1430 | } | 1434 | } |
| 1431 | err = atmel_sha_dma_init(sha_dd, pdata); | 1435 | err = atmel_sha_dma_init(sha_dd, pdata); |
| 1432 | if (err) | 1436 | if (err) |
| @@ -1457,6 +1461,8 @@ err_algs: | |||
| 1457 | if (sha_dd->caps.has_dma) | 1461 | if (sha_dd->caps.has_dma) |
| 1458 | atmel_sha_dma_cleanup(sha_dd); | 1462 | atmel_sha_dma_cleanup(sha_dd); |
| 1459 | err_sha_dma: | 1463 | err_sha_dma: |
| 1464 | iclk_unprepare: | ||
| 1465 | clk_unprepare(sha_dd->iclk); | ||
| 1460 | res_err: | 1466 | res_err: |
| 1461 | tasklet_kill(&sha_dd->done_task); | 1467 | tasklet_kill(&sha_dd->done_task); |
| 1462 | sha_dd_err: | 1468 | sha_dd_err: |
| @@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev) | |||
| 1483 | if (sha_dd->caps.has_dma) | 1489 | if (sha_dd->caps.has_dma) |
| 1484 | atmel_sha_dma_cleanup(sha_dd); | 1490 | atmel_sha_dma_cleanup(sha_dd); |
| 1485 | 1491 | ||
| 1486 | iounmap(sha_dd->io_base); | 1492 | clk_unprepare(sha_dd->iclk); |
| 1487 | |||
| 1488 | clk_put(sha_dd->iclk); | ||
| 1489 | |||
| 1490 | if (sha_dd->irq >= 0) | ||
| 1491 | free_irq(sha_dd->irq, sha_dd); | ||
| 1492 | 1493 | ||
| 1493 | return 0; | 1494 | return 0; |
| 1494 | } | 1495 | } |
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 0643e3366e33..c0656e7f37b5 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c | |||
| @@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) | |||
| 306 | return -ENOMEM; | 306 | return -ENOMEM; |
| 307 | 307 | ||
| 308 | dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); | 308 | dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); |
| 309 | if (!dma->cache_pool) | 309 | if (!dma->padding_pool) |
| 310 | return -ENOMEM; | 310 | return -ENOMEM; |
| 311 | 311 | ||
| 312 | cesa->dma = dma; | 312 | cesa->dma = dma; |
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 2aeaebd1c6e7..3f87a03abc22 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c | |||
| @@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev) | |||
| 312 | handle_simple_irq, IRQ_TYPE_NONE); | 312 | handle_simple_irq, IRQ_TYPE_NONE); |
| 313 | 313 | ||
| 314 | if (ret) { | 314 | if (ret) { |
| 315 | dev_info(&pdev->dev, "could not add irqchip\n"); | 315 | dev_err(&pdev->dev, "could not add irqchip\n"); |
| 316 | return ret; | 316 | goto teardown; |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc, | 319 | gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc, |
| @@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev) | |||
| 326 | skip_irq: | 326 | skip_irq: |
| 327 | return 0; | 327 | return 0; |
| 328 | teardown: | 328 | teardown: |
| 329 | of_mm_gpiochip_remove(&altera_gc->mmchip); | ||
| 329 | pr_err("%s: registration failed with status %d\n", | 330 | pr_err("%s: registration failed with status %d\n", |
| 330 | node->full_name, ret); | 331 | node->full_name, ret); |
| 331 | 332 | ||
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index ec58f4288649..cd007a67b302 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c | |||
| @@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc, | |||
| 195 | static int davinci_gpio_probe(struct platform_device *pdev) | 195 | static int davinci_gpio_probe(struct platform_device *pdev) |
| 196 | { | 196 | { |
| 197 | int i, base; | 197 | int i, base; |
| 198 | unsigned ngpio; | 198 | unsigned ngpio, nbank; |
| 199 | struct davinci_gpio_controller *chips; | 199 | struct davinci_gpio_controller *chips; |
| 200 | struct davinci_gpio_platform_data *pdata; | 200 | struct davinci_gpio_platform_data *pdata; |
| 201 | struct davinci_gpio_regs __iomem *regs; | 201 | struct davinci_gpio_regs __iomem *regs; |
| @@ -224,8 +224,9 @@ static int davinci_gpio_probe(struct platform_device *pdev) | |||
| 224 | if (WARN_ON(ARCH_NR_GPIOS < ngpio)) | 224 | if (WARN_ON(ARCH_NR_GPIOS < ngpio)) |
| 225 | ngpio = ARCH_NR_GPIOS; | 225 | ngpio = ARCH_NR_GPIOS; |
| 226 | 226 | ||
| 227 | nbank = DIV_ROUND_UP(ngpio, 32); | ||
| 227 | chips = devm_kzalloc(dev, | 228 | chips = devm_kzalloc(dev, |
| 228 | ngpio * sizeof(struct davinci_gpio_controller), | 229 | nbank * sizeof(struct davinci_gpio_controller), |
| 229 | GFP_KERNEL); | 230 | GFP_KERNEL); |
| 230 | if (!chips) | 231 | if (!chips) |
| 231 | return -ENOMEM; | 232 | return -ENOMEM; |
| @@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) | |||
| 511 | return irq; | 512 | return irq; |
| 512 | } | 513 | } |
| 513 | 514 | ||
| 514 | irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0, | 515 | irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0, |
| 515 | &davinci_gpio_irq_ops, | 516 | &davinci_gpio_irq_ops, |
| 516 | chips); | 517 | chips); |
| 517 | if (!irq_domain) { | 518 | if (!irq_domain) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 82edf95b7740..5e7770f9a415 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs; | |||
| 87 | extern int amdgpu_sched_hw_submission; | 87 | extern int amdgpu_sched_hw_submission; |
| 88 | extern int amdgpu_enable_semaphores; | 88 | extern int amdgpu_enable_semaphores; |
| 89 | extern int amdgpu_powerplay; | 89 | extern int amdgpu_powerplay; |
| 90 | extern unsigned amdgpu_pcie_gen_cap; | ||
| 91 | extern unsigned amdgpu_pcie_lane_cap; | ||
| 90 | 92 | ||
| 91 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 | 93 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 |
| 92 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ | 94 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
| @@ -132,47 +134,6 @@ extern int amdgpu_powerplay; | |||
| 132 | #define AMDGPU_RESET_VCE (1 << 13) | 134 | #define AMDGPU_RESET_VCE (1 << 13) |
| 133 | #define AMDGPU_RESET_VCE1 (1 << 14) | 135 | #define AMDGPU_RESET_VCE1 (1 << 14) |
| 134 | 136 | ||
| 135 | /* CG block flags */ | ||
| 136 | #define AMDGPU_CG_BLOCK_GFX (1 << 0) | ||
| 137 | #define AMDGPU_CG_BLOCK_MC (1 << 1) | ||
| 138 | #define AMDGPU_CG_BLOCK_SDMA (1 << 2) | ||
| 139 | #define AMDGPU_CG_BLOCK_UVD (1 << 3) | ||
| 140 | #define AMDGPU_CG_BLOCK_VCE (1 << 4) | ||
| 141 | #define AMDGPU_CG_BLOCK_HDP (1 << 5) | ||
| 142 | #define AMDGPU_CG_BLOCK_BIF (1 << 6) | ||
| 143 | |||
| 144 | /* CG flags */ | ||
| 145 | #define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0) | ||
| 146 | #define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1) | ||
| 147 | #define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2) | ||
| 148 | #define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3) | ||
| 149 | #define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4) | ||
| 150 | #define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5) | ||
| 151 | #define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6) | ||
| 152 | #define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7) | ||
| 153 | #define AMDGPU_CG_SUPPORT_MC_LS (1 << 8) | ||
| 154 | #define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9) | ||
| 155 | #define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10) | ||
| 156 | #define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11) | ||
| 157 | #define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12) | ||
| 158 | #define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13) | ||
| 159 | #define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14) | ||
| 160 | #define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15) | ||
| 161 | #define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16) | ||
| 162 | |||
| 163 | /* PG flags */ | ||
| 164 | #define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0) | ||
| 165 | #define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1) | ||
| 166 | #define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2) | ||
| 167 | #define AMDGPU_PG_SUPPORT_UVD (1 << 3) | ||
| 168 | #define AMDGPU_PG_SUPPORT_VCE (1 << 4) | ||
| 169 | #define AMDGPU_PG_SUPPORT_CP (1 << 5) | ||
| 170 | #define AMDGPU_PG_SUPPORT_GDS (1 << 6) | ||
| 171 | #define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7) | ||
| 172 | #define AMDGPU_PG_SUPPORT_SDMA (1 << 8) | ||
| 173 | #define AMDGPU_PG_SUPPORT_ACP (1 << 9) | ||
| 174 | #define AMDGPU_PG_SUPPORT_SAMU (1 << 10) | ||
| 175 | |||
| 176 | /* GFX current status */ | 137 | /* GFX current status */ |
| 177 | #define AMDGPU_GFX_NORMAL_MODE 0x00000000L | 138 | #define AMDGPU_GFX_NORMAL_MODE 0x00000000L |
| 178 | #define AMDGPU_GFX_SAFE_MODE 0x00000001L | 139 | #define AMDGPU_GFX_SAFE_MODE 0x00000001L |
| @@ -606,8 +567,6 @@ struct amdgpu_sa_manager { | |||
| 606 | uint32_t align; | 567 | uint32_t align; |
| 607 | }; | 568 | }; |
| 608 | 569 | ||
| 609 | struct amdgpu_sa_bo; | ||
| 610 | |||
| 611 | /* sub-allocation buffer */ | 570 | /* sub-allocation buffer */ |
| 612 | struct amdgpu_sa_bo { | 571 | struct amdgpu_sa_bo { |
| 613 | struct list_head olist; | 572 | struct list_head olist; |
| @@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); | |||
| 2360 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, | 2319 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, |
| 2361 | uint32_t flags); | 2320 | uint32_t flags); |
| 2362 | bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); | 2321 | bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); |
| 2322 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, | ||
| 2323 | unsigned long end); | ||
| 2363 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); | 2324 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); |
| 2364 | uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | 2325 | uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, |
| 2365 | struct ttm_mem_reg *mem); | 2326 | struct ttm_mem_reg *mem); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a081dda9fa2f..7a4b101e10c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
| @@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device, | |||
| 795 | case CGS_SYSTEM_INFO_PCIE_MLW: | 795 | case CGS_SYSTEM_INFO_PCIE_MLW: |
| 796 | sys_info->value = adev->pm.pcie_mlw_mask; | 796 | sys_info->value = adev->pm.pcie_mlw_mask; |
| 797 | break; | 797 | break; |
| 798 | case CGS_SYSTEM_INFO_CG_FLAGS: | ||
| 799 | sys_info->value = adev->cg_flags; | ||
| 800 | break; | ||
| 801 | case CGS_SYSTEM_INFO_PG_FLAGS: | ||
| 802 | sys_info->value = adev->pg_flags; | ||
| 803 | break; | ||
| 798 | default: | 804 | default: |
| 799 | return -ENODEV; | 805 | return -ENODEV; |
| 800 | } | 806 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 65531463f88e..51bfc114584e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) | |||
| 1795 | } | 1795 | } |
| 1796 | 1796 | ||
| 1797 | /* post card */ | 1797 | /* post card */ |
| 1798 | amdgpu_atom_asic_init(adev->mode_info.atom_context); | 1798 | if (!amdgpu_card_posted(adev)) |
| 1799 | amdgpu_atom_asic_init(adev->mode_info.atom_context); | ||
| 1799 | 1800 | ||
| 1800 | r = amdgpu_resume(adev); | 1801 | r = amdgpu_resume(adev); |
| 1802 | if (r) | ||
| 1803 | DRM_ERROR("amdgpu_resume failed (%d).\n", r); | ||
| 1801 | 1804 | ||
| 1802 | amdgpu_fence_driver_resume(adev); | 1805 | amdgpu_fence_driver_resume(adev); |
| 1803 | 1806 | ||
| 1804 | r = amdgpu_ib_ring_tests(adev); | 1807 | if (resume) { |
| 1805 | if (r) | 1808 | r = amdgpu_ib_ring_tests(adev); |
| 1806 | DRM_ERROR("ib ring test failed (%d).\n", r); | 1809 | if (r) |
| 1810 | DRM_ERROR("ib ring test failed (%d).\n", r); | ||
| 1811 | } | ||
| 1807 | 1812 | ||
| 1808 | r = amdgpu_late_init(adev); | 1813 | r = amdgpu_late_init(adev); |
| 1809 | if (r) | 1814 | if (r) |
| @@ -1933,80 +1938,97 @@ retry: | |||
| 1933 | return r; | 1938 | return r; |
| 1934 | } | 1939 | } |
| 1935 | 1940 | ||
| 1941 | #define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */ | ||
| 1942 | #define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */ | ||
| 1943 | |||
| 1936 | void amdgpu_get_pcie_info(struct amdgpu_device *adev) | 1944 | void amdgpu_get_pcie_info(struct amdgpu_device *adev) |
| 1937 | { | 1945 | { |
| 1938 | u32 mask; | 1946 | u32 mask; |
| 1939 | int ret; | 1947 | int ret; |
| 1940 | 1948 | ||
| 1941 | if (pci_is_root_bus(adev->pdev->bus)) | 1949 | if (amdgpu_pcie_gen_cap) |
| 1942 | return; | 1950 | adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; |
| 1943 | 1951 | ||
| 1944 | if (amdgpu_pcie_gen2 == 0) | 1952 | if (amdgpu_pcie_lane_cap) |
| 1945 | return; | 1953 | adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; |
| 1946 | 1954 | ||
| 1947 | if (adev->flags & AMD_IS_APU) | 1955 | /* covers APUs as well */ |
| 1956 | if (pci_is_root_bus(adev->pdev->bus)) { | ||
| 1957 | if (adev->pm.pcie_gen_mask == 0) | ||
| 1958 | adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; | ||
| 1959 | if (adev->pm.pcie_mlw_mask == 0) | ||
| 1960 | adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; | ||
| 1948 | return; | 1961 | return; |
| 1962 | } | ||
| 1949 | 1963 | ||
| 1950 | ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); | 1964 | if (adev->pm.pcie_gen_mask == 0) { |
| 1951 | if (!ret) { | 1965 | ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); |
| 1952 | adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | | 1966 | if (!ret) { |
| 1953 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | | 1967 | adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | |
| 1954 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); | 1968 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | |
| 1955 | 1969 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); | |
| 1956 | if (mask & DRM_PCIE_SPEED_25) | 1970 | |
| 1957 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; | 1971 | if (mask & DRM_PCIE_SPEED_25) |
| 1958 | if (mask & DRM_PCIE_SPEED_50) | 1972 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; |
| 1959 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; | 1973 | if (mask & DRM_PCIE_SPEED_50) |
| 1960 | if (mask & DRM_PCIE_SPEED_80) | 1974 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; |
| 1961 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; | 1975 | if (mask & DRM_PCIE_SPEED_80) |
| 1962 | } | 1976 | adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; |
| 1963 | ret = drm_pcie_get_max_link_width(adev->ddev, &mask); | 1977 | } else { |
| 1964 | if (!ret) { | 1978 | adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; |
| 1965 | switch (mask) { | 1979 | } |
| 1966 | case 32: | 1980 | } |
| 1967 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | | 1981 | if (adev->pm.pcie_mlw_mask == 0) { |
| 1968 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | | 1982 | ret = drm_pcie_get_max_link_width(adev->ddev, &mask); |
| 1969 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | | 1983 | if (!ret) { |
| 1970 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | | 1984 | switch (mask) { |
| 1971 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | 1985 | case 32: |
| 1972 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | 1986 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | |
| 1973 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | 1987 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | |
| 1974 | break; | 1988 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | |
| 1975 | case 16: | 1989 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | |
| 1976 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | | 1990 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | |
| 1977 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | | 1991 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | |
| 1978 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | | 1992 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); |
| 1979 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | 1993 | break; |
| 1980 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | 1994 | case 16: |
| 1981 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | 1995 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | |
| 1982 | break; | 1996 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | |
| 1983 | case 12: | 1997 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | |
| 1984 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | | 1998 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | |
| 1985 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | | 1999 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | |
| 1986 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | 2000 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); |
| 1987 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | 2001 | break; |
| 1988 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | 2002 | case 12: |
| 1989 | break; | 2003 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | |
| 1990 | case 8: | 2004 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | |
| 1991 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | | 2005 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | |
| 1992 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | 2006 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | |
| 1993 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | 2007 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); |
| 1994 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | 2008 | break; |
| 1995 | break; | 2009 | case 8: |
| 1996 | case 4: | 2010 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | |
| 1997 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | | 2011 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | |
| 1998 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | 2012 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | |
| 1999 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | 2013 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); |
| 2000 | break; | 2014 | break; |
| 2001 | case 2: | 2015 | case 4: |
| 2002 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | | 2016 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | |
| 2003 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); | 2017 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | |
| 2004 | break; | 2018 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); |
| 2005 | case 1: | 2019 | break; |
| 2006 | adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; | 2020 | case 2: |
| 2007 | break; | 2021 | adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | |
| 2008 | default: | 2022 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); |
| 2009 | break; | 2023 | break; |
| 2024 | case 1: | ||
| 2025 | adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; | ||
| 2026 | break; | ||
| 2027 | default: | ||
| 2028 | break; | ||
| 2029 | } | ||
| 2030 | } else { | ||
| 2031 | adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; | ||
| 2010 | } | 2032 | } |
| 2011 | } | 2033 | } |
| 2012 | } | 2034 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9c1af8976bef..9ef1db87cf26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
| @@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32; | |||
| 83 | int amdgpu_sched_hw_submission = 2; | 83 | int amdgpu_sched_hw_submission = 2; |
| 84 | int amdgpu_enable_semaphores = 0; | 84 | int amdgpu_enable_semaphores = 0; |
| 85 | int amdgpu_powerplay = -1; | 85 | int amdgpu_powerplay = -1; |
| 86 | unsigned amdgpu_pcie_gen_cap = 0; | ||
| 87 | unsigned amdgpu_pcie_lane_cap = 0; | ||
| 86 | 88 | ||
| 87 | MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); | 89 | MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); |
| 88 | module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); | 90 | module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); |
| @@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = | |||
| 170 | module_param_named(powerplay, amdgpu_powerplay, int, 0444); | 172 | module_param_named(powerplay, amdgpu_powerplay, int, 0444); |
| 171 | #endif | 173 | #endif |
| 172 | 174 | ||
| 175 | MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); | ||
| 176 | module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); | ||
| 177 | |||
| 178 | MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))"); | ||
| 179 | module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444); | ||
| 180 | |||
| 173 | static struct pci_device_id pciidlist[] = { | 181 | static struct pci_device_id pciidlist[] = { |
| 174 | #ifdef CONFIG_DRM_AMDGPU_CIK | 182 | #ifdef CONFIG_DRM_AMDGPU_CIK |
| 175 | /* Kaveri */ | 183 | /* Kaveri */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index b1969f2b2038..d4e2780c0796 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |||
| @@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
| 142 | 142 | ||
| 143 | list_for_each_entry(bo, &node->bos, mn_list) { | 143 | list_for_each_entry(bo, &node->bos, mn_list) { |
| 144 | 144 | ||
| 145 | if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) | 145 | if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, |
| 146 | end)) | ||
| 146 | continue; | 147 | continue; |
| 147 | 148 | ||
| 148 | r = amdgpu_bo_reserve(bo, true); | 149 | r = amdgpu_bo_reserve(bo, true); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8b88edb0434b..ca72a2e487b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
| @@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, | |||
| 354 | 354 | ||
| 355 | for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) | 355 | for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) |
| 356 | if (fences[i]) | 356 | if (fences[i]) |
| 357 | fences[count++] = fences[i]; | 357 | fences[count++] = fence_get(fences[i]); |
| 358 | 358 | ||
| 359 | if (count) { | 359 | if (count) { |
| 360 | spin_unlock(&sa_manager->wq.lock); | 360 | spin_unlock(&sa_manager->wq.lock); |
| 361 | t = fence_wait_any_timeout(fences, count, false, | 361 | t = fence_wait_any_timeout(fences, count, false, |
| 362 | MAX_SCHEDULE_TIMEOUT); | 362 | MAX_SCHEDULE_TIMEOUT); |
| 363 | for (i = 0; i < count; ++i) | ||
| 364 | fence_put(fences[i]); | ||
| 365 | |||
| 363 | r = (t > 0) ? 0 : t; | 366 | r = (t > 0) ? 0 : t; |
| 364 | spin_lock(&sa_manager->wq.lock); | 367 | spin_lock(&sa_manager->wq.lock); |
| 365 | } else { | 368 | } else { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 55cf05e1c81c..6442a06d6fdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
| @@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) | |||
| 783 | return !!gtt->userptr; | 783 | return !!gtt->userptr; |
| 784 | } | 784 | } |
| 785 | 785 | ||
| 786 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, | ||
| 787 | unsigned long end) | ||
| 788 | { | ||
| 789 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | ||
| 790 | unsigned long size; | ||
| 791 | |||
| 792 | if (gtt == NULL) | ||
| 793 | return false; | ||
| 794 | |||
| 795 | if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr) | ||
| 796 | return false; | ||
| 797 | |||
| 798 | size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; | ||
| 799 | if (gtt->userptr > end || gtt->userptr + size <= start) | ||
| 800 | return false; | ||
| 801 | |||
| 802 | return true; | ||
| 803 | } | ||
| 804 | |||
| 786 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) | 805 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) |
| 787 | { | 806 | { |
| 788 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | 807 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 8b4731d4e10e..474ca02b0949 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #include "ci_dpm.h" | 31 | #include "ci_dpm.h" |
| 32 | #include "gfx_v7_0.h" | 32 | #include "gfx_v7_0.h" |
| 33 | #include "atom.h" | 33 | #include "atom.h" |
| 34 | #include "amd_pcie.h" | ||
| 34 | #include <linux/seq_file.h> | 35 | #include <linux/seq_file.h> |
| 35 | 36 | ||
| 36 | #include "smu/smu_7_0_1_d.h" | 37 | #include "smu/smu_7_0_1_d.h" |
| @@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev) | |||
| 5835 | u8 frev, crev; | 5836 | u8 frev, crev; |
| 5836 | struct ci_power_info *pi; | 5837 | struct ci_power_info *pi; |
| 5837 | int ret; | 5838 | int ret; |
| 5838 | u32 mask; | ||
| 5839 | 5839 | ||
| 5840 | pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); | 5840 | pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); |
| 5841 | if (pi == NULL) | 5841 | if (pi == NULL) |
| 5842 | return -ENOMEM; | 5842 | return -ENOMEM; |
| 5843 | adev->pm.dpm.priv = pi; | 5843 | adev->pm.dpm.priv = pi; |
| 5844 | 5844 | ||
| 5845 | ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); | 5845 | pi->sys_pcie_mask = |
| 5846 | if (ret) | 5846 | (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> |
| 5847 | pi->sys_pcie_mask = 0; | 5847 | CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT; |
| 5848 | else | 5848 | |
| 5849 | pi->sys_pcie_mask = mask; | ||
| 5850 | pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; | 5849 | pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; |
| 5851 | 5850 | ||
| 5852 | pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; | 5851 | pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index fd9c9588ef46..155965ed14a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c | |||
| @@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev) | |||
| 1762 | if (amdgpu_aspm == 0) | 1762 | if (amdgpu_aspm == 0) |
| 1763 | return; | 1763 | return; |
| 1764 | 1764 | ||
| 1765 | if (pci_is_root_bus(adev->pdev->bus)) | ||
| 1766 | return; | ||
| 1767 | |||
| 1765 | /* XXX double check APUs */ | 1768 | /* XXX double check APUs */ |
| 1766 | if (adev->flags & AMD_IS_APU) | 1769 | if (adev->flags & AMD_IS_APU) |
| 1767 | return; | 1770 | return; |
| @@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle) | |||
| 2332 | switch (adev->asic_type) { | 2335 | switch (adev->asic_type) { |
| 2333 | case CHIP_BONAIRE: | 2336 | case CHIP_BONAIRE: |
| 2334 | adev->cg_flags = | 2337 | adev->cg_flags = |
| 2335 | AMDGPU_CG_SUPPORT_GFX_MGCG | | 2338 | AMD_CG_SUPPORT_GFX_MGCG | |
| 2336 | AMDGPU_CG_SUPPORT_GFX_MGLS | | 2339 | AMD_CG_SUPPORT_GFX_MGLS | |
| 2337 | /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ | 2340 | /*AMD_CG_SUPPORT_GFX_CGCG |*/ |
| 2338 | AMDGPU_CG_SUPPORT_GFX_CGLS | | 2341 | AMD_CG_SUPPORT_GFX_CGLS | |
| 2339 | AMDGPU_CG_SUPPORT_GFX_CGTS | | 2342 | AMD_CG_SUPPORT_GFX_CGTS | |
| 2340 | AMDGPU_CG_SUPPORT_GFX_CGTS_LS | | 2343 | AMD_CG_SUPPORT_GFX_CGTS_LS | |
| 2341 | AMDGPU_CG_SUPPORT_GFX_CP_LS | | 2344 | AMD_CG_SUPPORT_GFX_CP_LS | |
| 2342 | AMDGPU_CG_SUPPORT_MC_LS | | 2345 | AMD_CG_SUPPORT_MC_LS | |
| 2343 | AMDGPU_CG_SUPPORT_MC_MGCG | | 2346 | AMD_CG_SUPPORT_MC_MGCG | |
| 2344 | AMDGPU_CG_SUPPORT_SDMA_MGCG | | 2347 | AMD_CG_SUPPORT_SDMA_MGCG | |
| 2345 | AMDGPU_CG_SUPPORT_SDMA_LS | | 2348 | AMD_CG_SUPPORT_SDMA_LS | |
| 2346 | AMDGPU_CG_SUPPORT_BIF_LS | | 2349 | AMD_CG_SUPPORT_BIF_LS | |
| 2347 | AMDGPU_CG_SUPPORT_VCE_MGCG | | 2350 | AMD_CG_SUPPORT_VCE_MGCG | |
| 2348 | AMDGPU_CG_SUPPORT_UVD_MGCG | | 2351 | AMD_CG_SUPPORT_UVD_MGCG | |
| 2349 | AMDGPU_CG_SUPPORT_HDP_LS | | 2352 | AMD_CG_SUPPORT_HDP_LS | |
| 2350 | AMDGPU_CG_SUPPORT_HDP_MGCG; | 2353 | AMD_CG_SUPPORT_HDP_MGCG; |
| 2351 | adev->pg_flags = 0; | 2354 | adev->pg_flags = 0; |
| 2352 | adev->external_rev_id = adev->rev_id + 0x14; | 2355 | adev->external_rev_id = adev->rev_id + 0x14; |
| 2353 | break; | 2356 | break; |
| 2354 | case CHIP_HAWAII: | 2357 | case CHIP_HAWAII: |
| 2355 | adev->cg_flags = | 2358 | adev->cg_flags = |
| 2356 | AMDGPU_CG_SUPPORT_GFX_MGCG | | 2359 | AMD_CG_SUPPORT_GFX_MGCG | |
| 2357 | AMDGPU_CG_SUPPORT_GFX_MGLS | | 2360 | AMD_CG_SUPPORT_GFX_MGLS | |
| 2358 | /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ | 2361 | /*AMD_CG_SUPPORT_GFX_CGCG |*/ |
| 2359 | AMDGPU_CG_SUPPORT_GFX_CGLS | | 2362 | AMD_CG_SUPPORT_GFX_CGLS | |
| 2360 | AMDGPU_CG_SUPPORT_GFX_CGTS | | 2363 | AMD_CG_SUPPORT_GFX_CGTS | |
| 2361 | AMDGPU_CG_SUPPORT_GFX_CP_LS | | 2364 | AMD_CG_SUPPORT_GFX_CP_LS | |
| 2362 | AMDGPU_CG_SUPPORT_MC_LS | | 2365 | AMD_CG_SUPPORT_MC_LS | |
| 2363 | AMDGPU_CG_SUPPORT_MC_MGCG | | 2366 | AMD_CG_SUPPORT_MC_MGCG | |
| 2364 | AMDGPU_CG_SUPPORT_SDMA_MGCG | | 2367 | AMD_CG_SUPPORT_SDMA_MGCG | |
| 2365 | AMDGPU_CG_SUPPORT_SDMA_LS | | 2368 | AMD_CG_SUPPORT_SDMA_LS | |
| 2366 | AMDGPU_CG_SUPPORT_BIF_LS | | 2369 | AMD_CG_SUPPORT_BIF_LS | |
| 2367 | AMDGPU_CG_SUPPORT_VCE_MGCG | | 2370 | AMD_CG_SUPPORT_VCE_MGCG | |
| 2368 | AMDGPU_CG_SUPPORT_UVD_MGCG | | 2371 | AMD_CG_SUPPORT_UVD_MGCG | |
| 2369 | AMDGPU_CG_SUPPORT_HDP_LS | | 2372 | AMD_CG_SUPPORT_HDP_LS | |
| 2370 | AMDGPU_CG_SUPPORT_HDP_MGCG; | 2373 | AMD_CG_SUPPORT_HDP_MGCG; |
| 2371 | adev->pg_flags = 0; | 2374 | adev->pg_flags = 0; |
| 2372 | adev->external_rev_id = 0x28; | 2375 | adev->external_rev_id = 0x28; |
| 2373 | break; | 2376 | break; |
| 2374 | case CHIP_KAVERI: | 2377 | case CHIP_KAVERI: |
| 2375 | adev->cg_flags = | 2378 | adev->cg_flags = |
| 2376 | AMDGPU_CG_SUPPORT_GFX_MGCG | | 2379 | AMD_CG_SUPPORT_GFX_MGCG | |
| 2377 | AMDGPU_CG_SUPPORT_GFX_MGLS | | 2380 | AMD_CG_SUPPORT_GFX_MGLS | |
| 2378 | /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ | 2381 | /*AMD_CG_SUPPORT_GFX_CGCG |*/ |
| 2379 | AMDGPU_CG_SUPPORT_GFX_CGLS | | 2382 | AMD_CG_SUPPORT_GFX_CGLS | |
| 2380 | AMDGPU_CG_SUPPORT_GFX_CGTS | | 2383 | AMD_CG_SUPPORT_GFX_CGTS | |
| 2381 | AMDGPU_CG_SUPPORT_GFX_CGTS_LS | | 2384 | AMD_CG_SUPPORT_GFX_CGTS_LS | |
| 2382 | AMDGPU_CG_SUPPORT_GFX_CP_LS | | 2385 | AMD_CG_SUPPORT_GFX_CP_LS | |
| 2383 | AMDGPU_CG_SUPPORT_SDMA_MGCG | | 2386 | AMD_CG_SUPPORT_SDMA_MGCG | |
| 2384 | AMDGPU_CG_SUPPORT_SDMA_LS | | 2387 | AMD_CG_SUPPORT_SDMA_LS | |
| 2385 | AMDGPU_CG_SUPPORT_BIF_LS | | 2388 | AMD_CG_SUPPORT_BIF_LS | |
| 2386 | AMDGPU_CG_SUPPORT_VCE_MGCG | | 2389 | AMD_CG_SUPPORT_VCE_MGCG | |
| 2387 | AMDGPU_CG_SUPPORT_UVD_MGCG | | 2390 | AMD_CG_SUPPORT_UVD_MGCG | |
| 2388 | AMDGPU_CG_SUPPORT_HDP_LS | | 2391 | AMD_CG_SUPPORT_HDP_LS | |
| 2389 | AMDGPU_CG_SUPPORT_HDP_MGCG; | 2392 | AMD_CG_SUPPORT_HDP_MGCG; |
| 2390 | adev->pg_flags = | 2393 | adev->pg_flags = |
| 2391 | /*AMDGPU_PG_SUPPORT_GFX_PG | | 2394 | /*AMD_PG_SUPPORT_GFX_PG | |
| 2392 | AMDGPU_PG_SUPPORT_GFX_SMG | | 2395 | AMD_PG_SUPPORT_GFX_SMG | |
| 2393 | AMDGPU_PG_SUPPORT_GFX_DMG |*/ | 2396 | AMD_PG_SUPPORT_GFX_DMG |*/ |
| 2394 | AMDGPU_PG_SUPPORT_UVD | | 2397 | AMD_PG_SUPPORT_UVD | |
| 2395 | /*AMDGPU_PG_SUPPORT_VCE | | 2398 | /*AMD_PG_SUPPORT_VCE | |
| 2396 | AMDGPU_PG_SUPPORT_CP | | 2399 | AMD_PG_SUPPORT_CP | |
| 2397 | AMDGPU_PG_SUPPORT_GDS | | 2400 | AMD_PG_SUPPORT_GDS | |
| 2398 | AMDGPU_PG_SUPPORT_RLC_SMU_HS | | 2401 | AMD_PG_SUPPORT_RLC_SMU_HS | |
| 2399 | AMDGPU_PG_SUPPORT_ACP | | 2402 | AMD_PG_SUPPORT_ACP | |
| 2400 | AMDGPU_PG_SUPPORT_SAMU |*/ | 2403 | AMD_PG_SUPPORT_SAMU |*/ |
| 2401 | 0; | 2404 | 0; |
| 2402 | if (adev->pdev->device == 0x1312 || | 2405 | if (adev->pdev->device == 0x1312 || |
| 2403 | adev->pdev->device == 0x1316 || | 2406 | adev->pdev->device == 0x1316 || |
| @@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle) | |||
| 2409 | case CHIP_KABINI: | 2412 | case CHIP_KABINI: |
| 2410 | case CHIP_MULLINS: | 2413 | case CHIP_MULLINS: |
| 2411 | adev->cg_flags = | 2414 | adev->cg_flags = |
| 2412 | AMDGPU_CG_SUPPORT_GFX_MGCG | | 2415 | AMD_CG_SUPPORT_GFX_MGCG | |
| 2413 | AMDGPU_CG_SUPPORT_GFX_MGLS | | 2416 | AMD_CG_SUPPORT_GFX_MGLS | |
| 2414 | /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ | 2417 | /*AMD_CG_SUPPORT_GFX_CGCG |*/ |
| 2415 | AMDGPU_CG_SUPPORT_GFX_CGLS | | 2418 | AMD_CG_SUPPORT_GFX_CGLS | |
| 2416 | AMDGPU_CG_SUPPORT_GFX_CGTS | | 2419 | AMD_CG_SUPPORT_GFX_CGTS | |
| 2417 | AMDGPU_CG_SUPPORT_GFX_CGTS_LS | | 2420 | AMD_CG_SUPPORT_GFX_CGTS_LS | |
| 2418 | AMDGPU_CG_SUPPORT_GFX_CP_LS | | 2421 | AMD_CG_SUPPORT_GFX_CP_LS | |
| 2419 | AMDGPU_CG_SUPPORT_SDMA_MGCG | | 2422 | AMD_CG_SUPPORT_SDMA_MGCG | |
| 2420 | AMDGPU_CG_SUPPORT_SDMA_LS | | 2423 | AMD_CG_SUPPORT_SDMA_LS | |
| 2421 | AMDGPU_CG_SUPPORT_BIF_LS | | 2424 | AMD_CG_SUPPORT_BIF_LS | |
| 2422 | AMDGPU_CG_SUPPORT_VCE_MGCG | | 2425 | AMD_CG_SUPPORT_VCE_MGCG | |
| 2423 | AMDGPU_CG_SUPPORT_UVD_MGCG | | 2426 | AMD_CG_SUPPORT_UVD_MGCG | |
| 2424 | AMDGPU_CG_SUPPORT_HDP_LS | | 2427 | AMD_CG_SUPPORT_HDP_LS | |
| 2425 | AMDGPU_CG_SUPPORT_HDP_MGCG; | 2428 | AMD_CG_SUPPORT_HDP_MGCG; |
| 2426 | adev->pg_flags = | 2429 | adev->pg_flags = |
| 2427 | /*AMDGPU_PG_SUPPORT_GFX_PG | | 2430 | /*AMD_PG_SUPPORT_GFX_PG | |
| 2428 | AMDGPU_PG_SUPPORT_GFX_SMG | */ | 2431 | AMD_PG_SUPPORT_GFX_SMG | */ |
| 2429 | AMDGPU_PG_SUPPORT_UVD | | 2432 | AMD_PG_SUPPORT_UVD | |
| 2430 | /*AMDGPU_PG_SUPPORT_VCE | | 2433 | /*AMD_PG_SUPPORT_VCE | |
| 2431 | AMDGPU_PG_SUPPORT_CP | | 2434 | AMD_PG_SUPPORT_CP | |
| 2432 | AMDGPU_PG_SUPPORT_GDS | | 2435 | AMD_PG_SUPPORT_GDS | |
| 2433 | AMDGPU_PG_SUPPORT_RLC_SMU_HS | | 2436 | AMD_PG_SUPPORT_RLC_SMU_HS | |
| 2434 | AMDGPU_PG_SUPPORT_SAMU |*/ | 2437 | AMD_PG_SUPPORT_SAMU |*/ |
| 2435 | 0; | 2438 | 0; |
| 2436 | if (adev->asic_type == CHIP_KABINI) { | 2439 | if (adev->asic_type == CHIP_KABINI) { |
| 2437 | if (adev->rev_id == 0) | 2440 | if (adev->rev_id == 0) |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 5f712ceddf08..c55ecf0ea845 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
| @@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, | |||
| 885 | { | 885 | { |
| 886 | u32 orig, data; | 886 | u32 orig, data; |
| 887 | 887 | ||
| 888 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) { | 888 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { |
| 889 | WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); | 889 | WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); |
| 890 | WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); | 890 | WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); |
| 891 | } else { | 891 | } else { |
| @@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, | |||
| 906 | { | 906 | { |
| 907 | u32 orig, data; | 907 | u32 orig, data; |
| 908 | 908 | ||
| 909 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) { | 909 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { |
| 910 | orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); | 910 | orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); |
| 911 | data |= 0x100; | 911 | data |= 0x100; |
| 912 | if (orig != data) | 912 | if (orig != data) |
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 4dd17f2dd905..9056355309d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c | |||
| @@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev) | |||
| 445 | pi->gfx_pg_threshold = 500; | 445 | pi->gfx_pg_threshold = 500; |
| 446 | pi->caps_fps = true; | 446 | pi->caps_fps = true; |
| 447 | /* uvd */ | 447 | /* uvd */ |
| 448 | pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; | 448 | pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; |
| 449 | pi->caps_uvd_dpm = true; | 449 | pi->caps_uvd_dpm = true; |
| 450 | /* vce */ | 450 | /* vce */ |
| 451 | pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; | 451 | pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; |
| 452 | pi->caps_vce_dpm = true; | 452 | pi->caps_vce_dpm = true; |
| 453 | /* acp */ | 453 | /* acp */ |
| 454 | pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; | 454 | pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; |
| 455 | pi->caps_acp_dpm = true; | 455 | pi->caps_acp_dpm = true; |
| 456 | 456 | ||
| 457 | pi->caps_stable_power_state = false; | 457 | pi->caps_stable_power_state = false; |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 6c76139de1c9..7732059ae30f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
| @@ -4109,7 +4109,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) | |||
| 4109 | 4109 | ||
| 4110 | orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); | 4110 | orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); |
| 4111 | 4111 | ||
| 4112 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) { | 4112 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { |
| 4113 | gfx_v7_0_enable_gui_idle_interrupt(adev, true); | 4113 | gfx_v7_0_enable_gui_idle_interrupt(adev, true); |
| 4114 | 4114 | ||
| 4115 | tmp = gfx_v7_0_halt_rlc(adev); | 4115 | tmp = gfx_v7_0_halt_rlc(adev); |
| @@ -4147,9 +4147,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) | |||
| 4147 | { | 4147 | { |
| 4148 | u32 data, orig, tmp = 0; | 4148 | u32 data, orig, tmp = 0; |
| 4149 | 4149 | ||
| 4150 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) { | 4150 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { |
| 4151 | if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) { | 4151 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { |
| 4152 | if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) { | 4152 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { |
| 4153 | orig = data = RREG32(mmCP_MEM_SLP_CNTL); | 4153 | orig = data = RREG32(mmCP_MEM_SLP_CNTL); |
| 4154 | data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; | 4154 | data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; |
| 4155 | if (orig != data) | 4155 | if (orig != data) |
| @@ -4176,14 +4176,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) | |||
| 4176 | 4176 | ||
| 4177 | gfx_v7_0_update_rlc(adev, tmp); | 4177 | gfx_v7_0_update_rlc(adev, tmp); |
| 4178 | 4178 | ||
| 4179 | if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) { | 4179 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) { |
| 4180 | orig = data = RREG32(mmCGTS_SM_CTRL_REG); | 4180 | orig = data = RREG32(mmCGTS_SM_CTRL_REG); |
| 4181 | data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; | 4181 | data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; |
| 4182 | data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); | 4182 | data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); |
| 4183 | data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; | 4183 | data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; |
| 4184 | data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; | 4184 | data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; |
| 4185 | if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) && | 4185 | if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) && |
| 4186 | (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS)) | 4186 | (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS)) |
| 4187 | data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; | 4187 | data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; |
| 4188 | data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; | 4188 | data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; |
| 4189 | data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; | 4189 | data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; |
| @@ -4249,7 +4249,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev, | |||
| 4249 | u32 data, orig; | 4249 | u32 data, orig; |
| 4250 | 4250 | ||
| 4251 | orig = data = RREG32(mmRLC_PG_CNTL); | 4251 | orig = data = RREG32(mmRLC_PG_CNTL); |
| 4252 | if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) | 4252 | if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS)) |
| 4253 | data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; | 4253 | data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; |
| 4254 | else | 4254 | else |
| 4255 | data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; | 4255 | data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; |
| @@ -4263,7 +4263,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev, | |||
| 4263 | u32 data, orig; | 4263 | u32 data, orig; |
| 4264 | 4264 | ||
| 4265 | orig = data = RREG32(mmRLC_PG_CNTL); | 4265 | orig = data = RREG32(mmRLC_PG_CNTL); |
| 4266 | if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) | 4266 | if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS)) |
| 4267 | data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; | 4267 | data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; |
| 4268 | else | 4268 | else |
| 4269 | data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; | 4269 | data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; |
| @@ -4276,7 +4276,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable) | |||
| 4276 | u32 data, orig; | 4276 | u32 data, orig; |
| 4277 | 4277 | ||
| 4278 | orig = data = RREG32(mmRLC_PG_CNTL); | 4278 | orig = data = RREG32(mmRLC_PG_CNTL); |
| 4279 | if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP)) | 4279 | if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP)) |
| 4280 | data &= ~0x8000; | 4280 | data &= ~0x8000; |
| 4281 | else | 4281 | else |
| 4282 | data |= 0x8000; | 4282 | data |= 0x8000; |
| @@ -4289,7 +4289,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) | |||
| 4289 | u32 data, orig; | 4289 | u32 data, orig; |
| 4290 | 4290 | ||
| 4291 | orig = data = RREG32(mmRLC_PG_CNTL); | 4291 | orig = data = RREG32(mmRLC_PG_CNTL); |
| 4292 | if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS)) | 4292 | if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS)) |
| 4293 | data &= ~0x2000; | 4293 | data &= ~0x2000; |
| 4294 | else | 4294 | else |
| 4295 | data |= 0x2000; | 4295 | data |= 0x2000; |
| @@ -4370,7 +4370,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, | |||
| 4370 | { | 4370 | { |
| 4371 | u32 data, orig; | 4371 | u32 data, orig; |
| 4372 | 4372 | ||
| 4373 | if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) { | 4373 | if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { |
| 4374 | orig = data = RREG32(mmRLC_PG_CNTL); | 4374 | orig = data = RREG32(mmRLC_PG_CNTL); |
| 4375 | data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; | 4375 | data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; |
| 4376 | if (orig != data) | 4376 | if (orig != data) |
| @@ -4442,7 +4442,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, | |||
| 4442 | u32 data, orig; | 4442 | u32 data, orig; |
| 4443 | 4443 | ||
| 4444 | orig = data = RREG32(mmRLC_PG_CNTL); | 4444 | orig = data = RREG32(mmRLC_PG_CNTL); |
| 4445 | if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) | 4445 | if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)) |
| 4446 | data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; | 4446 | data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; |
| 4447 | else | 4447 | else |
| 4448 | data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; | 4448 | data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; |
| @@ -4456,7 +4456,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev, | |||
| 4456 | u32 data, orig; | 4456 | u32 data, orig; |
| 4457 | 4457 | ||
| 4458 | orig = data = RREG32(mmRLC_PG_CNTL); | 4458 | orig = data = RREG32(mmRLC_PG_CNTL); |
| 4459 | if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) | 4459 | if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)) |
| 4460 | data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; | 4460 | data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; |
| 4461 | else | 4461 | else |
| 4462 | data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; | 4462 | data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; |
| @@ -4623,15 +4623,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, | |||
| 4623 | 4623 | ||
| 4624 | static void gfx_v7_0_init_pg(struct amdgpu_device *adev) | 4624 | static void gfx_v7_0_init_pg(struct amdgpu_device *adev) |
| 4625 | { | 4625 | { |
| 4626 | if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | | 4626 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | |
| 4627 | AMDGPU_PG_SUPPORT_GFX_SMG | | 4627 | AMD_PG_SUPPORT_GFX_SMG | |
| 4628 | AMDGPU_PG_SUPPORT_GFX_DMG | | 4628 | AMD_PG_SUPPORT_GFX_DMG | |
| 4629 | AMDGPU_PG_SUPPORT_CP | | 4629 | AMD_PG_SUPPORT_CP | |
| 4630 | AMDGPU_PG_SUPPORT_GDS | | 4630 | AMD_PG_SUPPORT_GDS | |
| 4631 | AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { | 4631 | AMD_PG_SUPPORT_RLC_SMU_HS)) { |
| 4632 | gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); | 4632 | gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); |
| 4633 | gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); | 4633 | gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); |
| 4634 | if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { | 4634 | if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { |
| 4635 | gfx_v7_0_init_gfx_cgpg(adev); | 4635 | gfx_v7_0_init_gfx_cgpg(adev); |
| 4636 | gfx_v7_0_enable_cp_pg(adev, true); | 4636 | gfx_v7_0_enable_cp_pg(adev, true); |
| 4637 | gfx_v7_0_enable_gds_pg(adev, true); | 4637 | gfx_v7_0_enable_gds_pg(adev, true); |
| @@ -4643,14 +4643,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev) | |||
| 4643 | 4643 | ||
| 4644 | static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) | 4644 | static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) |
| 4645 | { | 4645 | { |
| 4646 | if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | | 4646 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | |
| 4647 | AMDGPU_PG_SUPPORT_GFX_SMG | | 4647 | AMD_PG_SUPPORT_GFX_SMG | |
| 4648 | AMDGPU_PG_SUPPORT_GFX_DMG | | 4648 | AMD_PG_SUPPORT_GFX_DMG | |
| 4649 | AMDGPU_PG_SUPPORT_CP | | 4649 | AMD_PG_SUPPORT_CP | |
| 4650 | AMDGPU_PG_SUPPORT_GDS | | 4650 | AMD_PG_SUPPORT_GDS | |
| 4651 | AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { | 4651 | AMD_PG_SUPPORT_RLC_SMU_HS)) { |
| 4652 | gfx_v7_0_update_gfx_pg(adev, false); | 4652 | gfx_v7_0_update_gfx_pg(adev, false); |
| 4653 | if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { | 4653 | if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { |
| 4654 | gfx_v7_0_enable_cp_pg(adev, false); | 4654 | gfx_v7_0_enable_cp_pg(adev, false); |
| 4655 | gfx_v7_0_enable_gds_pg(adev, false); | 4655 | gfx_v7_0_enable_gds_pg(adev, false); |
| 4656 | } | 4656 | } |
| @@ -5527,14 +5527,14 @@ static int gfx_v7_0_set_powergating_state(void *handle, | |||
| 5527 | if (state == AMD_PG_STATE_GATE) | 5527 | if (state == AMD_PG_STATE_GATE) |
| 5528 | gate = true; | 5528 | gate = true; |
| 5529 | 5529 | ||
| 5530 | if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | | 5530 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | |
| 5531 | AMDGPU_PG_SUPPORT_GFX_SMG | | 5531 | AMD_PG_SUPPORT_GFX_SMG | |
| 5532 | AMDGPU_PG_SUPPORT_GFX_DMG | | 5532 | AMD_PG_SUPPORT_GFX_DMG | |
| 5533 | AMDGPU_PG_SUPPORT_CP | | 5533 | AMD_PG_SUPPORT_CP | |
| 5534 | AMDGPU_PG_SUPPORT_GDS | | 5534 | AMD_PG_SUPPORT_GDS | |
| 5535 | AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { | 5535 | AMD_PG_SUPPORT_RLC_SMU_HS)) { |
| 5536 | gfx_v7_0_update_gfx_pg(adev, gate); | 5536 | gfx_v7_0_update_gfx_pg(adev, gate); |
| 5537 | if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { | 5537 | if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { |
| 5538 | gfx_v7_0_enable_cp_pg(adev, gate); | 5538 | gfx_v7_0_enable_cp_pg(adev, gate); |
| 5539 | gfx_v7_0_enable_gds_pg(adev, gate); | 5539 | gfx_v7_0_enable_gds_pg(adev, gate); |
| 5540 | } | 5540 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 8aa2991ab379..b8060795b27b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
| @@ -792,7 +792,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, | |||
| 792 | 792 | ||
| 793 | for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { | 793 | for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { |
| 794 | orig = data = RREG32(mc_cg_registers[i]); | 794 | orig = data = RREG32(mc_cg_registers[i]); |
| 795 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) | 795 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) |
| 796 | data |= mc_cg_ls_en[i]; | 796 | data |= mc_cg_ls_en[i]; |
| 797 | else | 797 | else |
| 798 | data &= ~mc_cg_ls_en[i]; | 798 | data &= ~mc_cg_ls_en[i]; |
| @@ -809,7 +809,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, | |||
| 809 | 809 | ||
| 810 | for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { | 810 | for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { |
| 811 | orig = data = RREG32(mc_cg_registers[i]); | 811 | orig = data = RREG32(mc_cg_registers[i]); |
| 812 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) | 812 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) |
| 813 | data |= mc_cg_en[i]; | 813 | data |= mc_cg_en[i]; |
| 814 | else | 814 | else |
| 815 | data &= ~mc_cg_en[i]; | 815 | data &= ~mc_cg_en[i]; |
| @@ -825,7 +825,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, | |||
| 825 | 825 | ||
| 826 | orig = data = RREG32_PCIE(ixPCIE_CNTL2); | 826 | orig = data = RREG32_PCIE(ixPCIE_CNTL2); |
| 827 | 827 | ||
| 828 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { | 828 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { |
| 829 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); | 829 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); |
| 830 | data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); | 830 | data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); |
| 831 | data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); | 831 | data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); |
| @@ -848,7 +848,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, | |||
| 848 | 848 | ||
| 849 | orig = data = RREG32(mmHDP_HOST_PATH_CNTL); | 849 | orig = data = RREG32(mmHDP_HOST_PATH_CNTL); |
| 850 | 850 | ||
| 851 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) | 851 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) |
| 852 | data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); | 852 | data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); |
| 853 | else | 853 | else |
| 854 | data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); | 854 | data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); |
| @@ -864,7 +864,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, | |||
| 864 | 864 | ||
| 865 | orig = data = RREG32(mmHDP_MEM_POWER_LS); | 865 | orig = data = RREG32(mmHDP_MEM_POWER_LS); |
| 866 | 866 | ||
| 867 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) | 867 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) |
| 868 | data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); | 868 | data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); |
| 869 | else | 869 | else |
| 870 | data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); | 870 | data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 7e9154c7f1db..654d76723bc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
| @@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev) | |||
| 2859 | pi->voltage_drop_t = 0; | 2859 | pi->voltage_drop_t = 0; |
| 2860 | pi->caps_sclk_throttle_low_notification = false; | 2860 | pi->caps_sclk_throttle_low_notification = false; |
| 2861 | pi->caps_fps = false; /* true? */ | 2861 | pi->caps_fps = false; /* true? */ |
| 2862 | pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; | 2862 | pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; |
| 2863 | pi->caps_uvd_dpm = true; | 2863 | pi->caps_uvd_dpm = true; |
| 2864 | pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; | 2864 | pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; |
| 2865 | pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false; | 2865 | pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; |
| 2866 | pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; | 2866 | pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; |
| 2867 | pi->caps_stable_p_state = false; | 2867 | pi->caps_stable_p_state = false; |
| 2868 | 2868 | ||
| 2869 | ret = kv_parse_sys_info_table(adev); | 2869 | ret = kv_parse_sys_info_table(adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 5e9f73af83a8..fbd3767671bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
| @@ -611,7 +611,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, | |||
| 611 | { | 611 | { |
| 612 | u32 orig, data; | 612 | u32 orig, data; |
| 613 | 613 | ||
| 614 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) { | 614 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) { |
| 615 | data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); | 615 | data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); |
| 616 | data = 0xfff; | 616 | data = 0xfff; |
| 617 | WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); | 617 | WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); |
| @@ -830,6 +830,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle, | |||
| 830 | bool gate = false; | 830 | bool gate = false; |
| 831 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 831 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 832 | 832 | ||
| 833 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
| 834 | return 0; | ||
| 835 | |||
| 833 | if (state == AMD_CG_STATE_GATE) | 836 | if (state == AMD_CG_STATE_GATE) |
| 834 | gate = true; | 837 | gate = true; |
| 835 | 838 | ||
| @@ -848,7 +851,10 @@ static int uvd_v4_2_set_powergating_state(void *handle, | |||
| 848 | * revisit this when there is a cleaner line between | 851 | * revisit this when there is a cleaner line between |
| 849 | * the smc and the hw blocks | 852 | * the smc and the hw blocks |
| 850 | */ | 853 | */ |
| 851 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 854 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 855 | |||
| 856 | if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) | ||
| 857 | return 0; | ||
| 852 | 858 | ||
| 853 | if (state == AMD_PG_STATE_GATE) { | 859 | if (state == AMD_PG_STATE_GATE) { |
| 854 | uvd_v4_2_stop(adev); | 860 | uvd_v4_2_stop(adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 38864f562981..57f1c5bf3bf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
| @@ -774,6 +774,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, | |||
| 774 | static int uvd_v5_0_set_clockgating_state(void *handle, | 774 | static int uvd_v5_0_set_clockgating_state(void *handle, |
| 775 | enum amd_clockgating_state state) | 775 | enum amd_clockgating_state state) |
| 776 | { | 776 | { |
| 777 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 778 | |||
| 779 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
| 780 | return 0; | ||
| 781 | |||
| 777 | return 0; | 782 | return 0; |
| 778 | } | 783 | } |
| 779 | 784 | ||
| @@ -789,6 +794,9 @@ static int uvd_v5_0_set_powergating_state(void *handle, | |||
| 789 | */ | 794 | */ |
| 790 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 795 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 791 | 796 | ||
| 797 | if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) | ||
| 798 | return 0; | ||
| 799 | |||
| 792 | if (state == AMD_PG_STATE_GATE) { | 800 | if (state == AMD_PG_STATE_GATE) { |
| 793 | uvd_v5_0_stop(adev); | 801 | uvd_v5_0_stop(adev); |
| 794 | return 0; | 802 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 3d5913926436..0b365b7651ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
| @@ -532,7 +532,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) | |||
| 532 | uvd_v6_0_mc_resume(adev); | 532 | uvd_v6_0_mc_resume(adev); |
| 533 | 533 | ||
| 534 | /* Set dynamic clock gating in S/W control mode */ | 534 | /* Set dynamic clock gating in S/W control mode */ |
| 535 | if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) { | 535 | if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) { |
| 536 | if (adev->flags & AMD_IS_APU) | 536 | if (adev->flags & AMD_IS_APU) |
| 537 | cz_set_uvd_clock_gating_branches(adev, false); | 537 | cz_set_uvd_clock_gating_branches(adev, false); |
| 538 | else | 538 | else |
| @@ -1000,7 +1000,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle, | |||
| 1000 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1000 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1001 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | 1001 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; |
| 1002 | 1002 | ||
| 1003 | if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) | 1003 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) |
| 1004 | return 0; | 1004 | return 0; |
| 1005 | 1005 | ||
| 1006 | if (enable) { | 1006 | if (enable) { |
| @@ -1030,6 +1030,9 @@ static int uvd_v6_0_set_powergating_state(void *handle, | |||
| 1030 | */ | 1030 | */ |
| 1031 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1031 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1032 | 1032 | ||
| 1033 | if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) | ||
| 1034 | return 0; | ||
| 1035 | |||
| 1033 | if (state == AMD_PG_STATE_GATE) { | 1036 | if (state == AMD_PG_STATE_GATE) { |
| 1034 | uvd_v6_0_stop(adev); | 1037 | uvd_v6_0_stop(adev); |
| 1035 | return 0; | 1038 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 52ac7a8f1e58..a822edacfa95 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | |||
| @@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable) | |||
| 373 | { | 373 | { |
| 374 | bool sw_cg = false; | 374 | bool sw_cg = false; |
| 375 | 375 | ||
| 376 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) { | 376 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { |
| 377 | if (sw_cg) | 377 | if (sw_cg) |
| 378 | vce_v2_0_set_sw_cg(adev, true); | 378 | vce_v2_0_set_sw_cg(adev, true); |
| 379 | else | 379 | else |
| @@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle, | |||
| 608 | */ | 608 | */ |
| 609 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 609 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 610 | 610 | ||
| 611 | if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) | ||
| 612 | return 0; | ||
| 613 | |||
| 611 | if (state == AMD_PG_STATE_GATE) | 614 | if (state == AMD_PG_STATE_GATE) |
| 612 | /* XXX do we need a vce_v2_0_stop()? */ | 615 | /* XXX do we need a vce_v2_0_stop()? */ |
| 613 | return 0; | 616 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index e99af81e4aec..d662fa9f9091 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
| @@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
| 277 | WREG32_P(mmVCE_STATUS, 0, ~1); | 277 | WREG32_P(mmVCE_STATUS, 0, ~1); |
| 278 | 278 | ||
| 279 | /* Set Clock-Gating off */ | 279 | /* Set Clock-Gating off */ |
| 280 | if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG) | 280 | if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) |
| 281 | vce_v3_0_set_vce_sw_clock_gating(adev, false); | 281 | vce_v3_0_set_vce_sw_clock_gating(adev, false); |
| 282 | 282 | ||
| 283 | if (r) { | 283 | if (r) { |
| @@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, | |||
| 676 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | 676 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; |
| 677 | int i; | 677 | int i; |
| 678 | 678 | ||
| 679 | if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) | 679 | if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) |
| 680 | return 0; | 680 | return 0; |
| 681 | 681 | ||
| 682 | mutex_lock(&adev->grbm_idx_mutex); | 682 | mutex_lock(&adev->grbm_idx_mutex); |
| @@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle, | |||
| 728 | */ | 728 | */ |
| 729 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 729 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 730 | 730 | ||
| 731 | if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) | ||
| 732 | return 0; | ||
| 733 | |||
| 731 | if (state == AMD_PG_STATE_GATE) | 734 | if (state == AMD_PG_STATE_GATE) |
| 732 | /* XXX do we need a vce_v3_0_stop()? */ | 735 | /* XXX do we need a vce_v3_0_stop()? */ |
| 733 | return 0; | 736 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 89f5a1ff6f43..0d14d108a6c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
| @@ -1457,8 +1457,7 @@ static int vi_common_early_init(void *handle) | |||
| 1457 | case CHIP_STONEY: | 1457 | case CHIP_STONEY: |
| 1458 | adev->has_uvd = true; | 1458 | adev->has_uvd = true; |
| 1459 | adev->cg_flags = 0; | 1459 | adev->cg_flags = 0; |
| 1460 | /* Disable UVD pg */ | 1460 | adev->pg_flags = 0; |
| 1461 | adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE; | ||
| 1462 | adev->external_rev_id = adev->rev_id + 0x1; | 1461 | adev->external_rev_id = adev->rev_id + 0x1; |
| 1463 | break; | 1462 | break; |
| 1464 | default: | 1463 | default: |
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 1195d06f55bc..dbf7e6413cab 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h | |||
| @@ -85,6 +85,38 @@ enum amd_powergating_state { | |||
| 85 | AMD_PG_STATE_UNGATE, | 85 | AMD_PG_STATE_UNGATE, |
| 86 | }; | 86 | }; |
| 87 | 87 | ||
| 88 | /* CG flags */ | ||
| 89 | #define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) | ||
| 90 | #define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) | ||
| 91 | #define AMD_CG_SUPPORT_GFX_CGCG (1 << 2) | ||
| 92 | #define AMD_CG_SUPPORT_GFX_CGLS (1 << 3) | ||
| 93 | #define AMD_CG_SUPPORT_GFX_CGTS (1 << 4) | ||
| 94 | #define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5) | ||
| 95 | #define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6) | ||
| 96 | #define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7) | ||
| 97 | #define AMD_CG_SUPPORT_MC_LS (1 << 8) | ||
| 98 | #define AMD_CG_SUPPORT_MC_MGCG (1 << 9) | ||
| 99 | #define AMD_CG_SUPPORT_SDMA_LS (1 << 10) | ||
| 100 | #define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11) | ||
| 101 | #define AMD_CG_SUPPORT_BIF_LS (1 << 12) | ||
| 102 | #define AMD_CG_SUPPORT_UVD_MGCG (1 << 13) | ||
| 103 | #define AMD_CG_SUPPORT_VCE_MGCG (1 << 14) | ||
| 104 | #define AMD_CG_SUPPORT_HDP_LS (1 << 15) | ||
| 105 | #define AMD_CG_SUPPORT_HDP_MGCG (1 << 16) | ||
| 106 | |||
| 107 | /* PG flags */ | ||
| 108 | #define AMD_PG_SUPPORT_GFX_PG (1 << 0) | ||
| 109 | #define AMD_PG_SUPPORT_GFX_SMG (1 << 1) | ||
| 110 | #define AMD_PG_SUPPORT_GFX_DMG (1 << 2) | ||
| 111 | #define AMD_PG_SUPPORT_UVD (1 << 3) | ||
| 112 | #define AMD_PG_SUPPORT_VCE (1 << 4) | ||
| 113 | #define AMD_PG_SUPPORT_CP (1 << 5) | ||
| 114 | #define AMD_PG_SUPPORT_GDS (1 << 6) | ||
| 115 | #define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7) | ||
| 116 | #define AMD_PG_SUPPORT_SDMA (1 << 8) | ||
| 117 | #define AMD_PG_SUPPORT_ACP (1 << 9) | ||
| 118 | #define AMD_PG_SUPPORT_SAMU (1 << 10) | ||
| 119 | |||
| 88 | enum amd_pm_state_type { | 120 | enum amd_pm_state_type { |
| 89 | /* not used for dpm */ | 121 | /* not used for dpm */ |
| 90 | POWER_STATE_TYPE_DEFAULT, | 122 | POWER_STATE_TYPE_DEFAULT, |
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 713aec954692..aec38fc3834f 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h | |||
| @@ -109,6 +109,8 @@ enum cgs_system_info_id { | |||
| 109 | CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, | 109 | CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, |
| 110 | CGS_SYSTEM_INFO_PCIE_GEN_INFO, | 110 | CGS_SYSTEM_INFO_PCIE_GEN_INFO, |
| 111 | CGS_SYSTEM_INFO_PCIE_MLW, | 111 | CGS_SYSTEM_INFO_PCIE_MLW, |
| 112 | CGS_SYSTEM_INFO_CG_FLAGS, | ||
| 113 | CGS_SYSTEM_INFO_PG_FLAGS, | ||
| 112 | CGS_SYSTEM_INFO_ID_MAXIMUM, | 114 | CGS_SYSTEM_INFO_ID_MAXIMUM, |
| 113 | }; | 115 | }; |
| 114 | 116 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 0874ab42ee95..cf01177ca3b5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | |||
| @@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
| 174 | { | 174 | { |
| 175 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 175 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
| 176 | uint32_t i; | 176 | uint32_t i; |
| 177 | struct cgs_system_info sys_info = {0}; | ||
| 178 | int result; | ||
| 177 | 179 | ||
| 178 | cz_hwmgr->gfx_ramp_step = 256*25/100; | 180 | cz_hwmgr->gfx_ramp_step = 256*25/100; |
| 179 | 181 | ||
| @@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
| 247 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 249 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 248 | PHM_PlatformCaps_DisableVoltageIsland); | 250 | PHM_PlatformCaps_DisableVoltageIsland); |
| 249 | 251 | ||
| 252 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | ||
| 253 | PHM_PlatformCaps_UVDPowerGating); | ||
| 254 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | ||
| 255 | PHM_PlatformCaps_VCEPowerGating); | ||
| 256 | sys_info.size = sizeof(struct cgs_system_info); | ||
| 257 | sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; | ||
| 258 | result = cgs_query_system_info(hwmgr->device, &sys_info); | ||
| 259 | if (!result) { | ||
| 260 | if (sys_info.value & AMD_PG_SUPPORT_UVD) | ||
| 261 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
| 262 | PHM_PlatformCaps_UVDPowerGating); | ||
| 263 | if (sys_info.value & AMD_PG_SUPPORT_VCE) | ||
| 264 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
| 265 | PHM_PlatformCaps_VCEPowerGating); | ||
| 266 | } | ||
| 267 | |||
| 250 | return 0; | 268 | return 0; |
| 251 | } | 269 | } |
| 252 | 270 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 44a925006479..980d3bf8ea76 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | |||
| @@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 4451 | pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; | 4451 | pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; |
| 4452 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); | 4452 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 4453 | phw_tonga_ulv_parm *ulv; | 4453 | phw_tonga_ulv_parm *ulv; |
| 4454 | struct cgs_system_info sys_info = {0}; | ||
| 4454 | 4455 | ||
| 4455 | PP_ASSERT_WITH_CODE((NULL != hwmgr), | 4456 | PP_ASSERT_WITH_CODE((NULL != hwmgr), |
| 4456 | "Invalid Parameter!", return -1;); | 4457 | "Invalid Parameter!", return -1;); |
| @@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 4615 | 4616 | ||
| 4616 | data->vddc_phase_shed_control = 0; | 4617 | data->vddc_phase_shed_control = 0; |
| 4617 | 4618 | ||
| 4618 | if (0 == result) { | 4619 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
| 4619 | struct cgs_system_info sys_info = {0}; | 4620 | PHM_PlatformCaps_UVDPowerGating); |
| 4621 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | ||
| 4622 | PHM_PlatformCaps_VCEPowerGating); | ||
| 4623 | sys_info.size = sizeof(struct cgs_system_info); | ||
| 4624 | sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; | ||
| 4625 | result = cgs_query_system_info(hwmgr->device, &sys_info); | ||
| 4626 | if (!result) { | ||
| 4627 | if (sys_info.value & AMD_PG_SUPPORT_UVD) | ||
| 4628 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
| 4629 | PHM_PlatformCaps_UVDPowerGating); | ||
| 4630 | if (sys_info.value & AMD_PG_SUPPORT_VCE) | ||
| 4631 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
| 4632 | PHM_PlatformCaps_VCEPowerGating); | ||
| 4633 | } | ||
| 4620 | 4634 | ||
| 4635 | if (0 == result) { | ||
| 4621 | data->is_tlu_enabled = 0; | 4636 | data->is_tlu_enabled = 0; |
| 4622 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = | 4637 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = |
| 4623 | TONGA_MAX_HARDWARE_POWERLEVELS; | 4638 | TONGA_MAX_HARDWARE_POWERLEVELS; |
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index c507896aca45..197b157b73d0 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c | |||
| @@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev, | |||
| 349 | /* see if we can skip over some allocations */ | 349 | /* see if we can skip over some allocations */ |
| 350 | } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); | 350 | } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); |
| 351 | 351 | ||
| 352 | for (i = 0; i < RADEON_NUM_RINGS; ++i) | ||
| 353 | radeon_fence_ref(fences[i]); | ||
| 354 | |||
| 352 | spin_unlock(&sa_manager->wq.lock); | 355 | spin_unlock(&sa_manager->wq.lock); |
| 353 | r = radeon_fence_wait_any(rdev, fences, false); | 356 | r = radeon_fence_wait_any(rdev, fences, false); |
| 357 | for (i = 0; i < RADEON_NUM_RINGS; ++i) | ||
| 358 | radeon_fence_unref(&fences[i]); | ||
| 354 | spin_lock(&sa_manager->wq.lock); | 359 | spin_lock(&sa_manager->wq.lock); |
| 355 | /* if we have nothing to wait for block */ | 360 | /* if we have nothing to wait for block */ |
| 356 | if (r == -ENOENT) { | 361 | if (r == -ENOENT) { |
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 1161d68a1863..56dd261f7142 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
| @@ -219,6 +219,21 @@ error0: | |||
| 219 | } | 219 | } |
| 220 | EXPORT_SYMBOL_GPL(vmbus_open); | 220 | EXPORT_SYMBOL_GPL(vmbus_open); |
| 221 | 221 | ||
| 222 | /* Used for Hyper-V Socket: a guest client's connect() to the host */ | ||
| 223 | int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, | ||
| 224 | const uuid_le *shv_host_servie_id) | ||
| 225 | { | ||
| 226 | struct vmbus_channel_tl_connect_request conn_msg; | ||
| 227 | |||
| 228 | memset(&conn_msg, 0, sizeof(conn_msg)); | ||
| 229 | conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST; | ||
| 230 | conn_msg.guest_endpoint_id = *shv_guest_servie_id; | ||
| 231 | conn_msg.host_service_id = *shv_host_servie_id; | ||
| 232 | |||
| 233 | return vmbus_post_msg(&conn_msg, sizeof(conn_msg)); | ||
| 234 | } | ||
| 235 | EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request); | ||
| 236 | |||
| 222 | /* | 237 | /* |
| 223 | * create_gpadl_header - Creates a gpadl for the specified buffer | 238 | * create_gpadl_header - Creates a gpadl for the specified buffer |
| 224 | */ | 239 | */ |
| @@ -624,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, | |||
| 624 | u64 aligned_data = 0; | 639 | u64 aligned_data = 0; |
| 625 | int ret; | 640 | int ret; |
| 626 | bool signal = false; | 641 | bool signal = false; |
| 642 | bool lock = channel->acquire_ring_lock; | ||
| 627 | int num_vecs = ((bufferlen != 0) ? 3 : 1); | 643 | int num_vecs = ((bufferlen != 0) ? 3 : 1); |
| 628 | 644 | ||
| 629 | 645 | ||
| @@ -643,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, | |||
| 643 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); | 659 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); |
| 644 | 660 | ||
| 645 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs, | 661 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs, |
| 646 | &signal); | 662 | &signal, lock); |
| 647 | 663 | ||
| 648 | /* | 664 | /* |
| 649 | * Signalling the host is conditional on many factors: | 665 | * Signalling the host is conditional on many factors: |
| @@ -659,6 +675,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, | |||
| 659 | * If we cannot write to the ring-buffer; signal the host | 675 | * If we cannot write to the ring-buffer; signal the host |
| 660 | * even if we may not have written anything. This is a rare | 676 | * even if we may not have written anything. This is a rare |
| 661 | * enough condition that it should not matter. | 677 | * enough condition that it should not matter. |
| 678 | * NOTE: in this case, the hvsock channel is an exception, because | ||
| 679 | * it looks the host side's hvsock implementation has a throttling | ||
| 680 | * mechanism which can hurt the performance otherwise. | ||
| 662 | */ | 681 | */ |
| 663 | 682 | ||
| 664 | if (channel->signal_policy) | 683 | if (channel->signal_policy) |
| @@ -666,7 +685,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, | |||
| 666 | else | 685 | else |
| 667 | kick_q = true; | 686 | kick_q = true; |
| 668 | 687 | ||
| 669 | if (((ret == 0) && kick_q && signal) || (ret)) | 688 | if (((ret == 0) && kick_q && signal) || |
| 689 | (ret && !is_hvsock_channel(channel))) | ||
| 670 | vmbus_setevent(channel); | 690 | vmbus_setevent(channel); |
| 671 | 691 | ||
| 672 | return ret; | 692 | return ret; |
| @@ -719,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, | |||
| 719 | struct kvec bufferlist[3]; | 739 | struct kvec bufferlist[3]; |
| 720 | u64 aligned_data = 0; | 740 | u64 aligned_data = 0; |
| 721 | bool signal = false; | 741 | bool signal = false; |
| 742 | bool lock = channel->acquire_ring_lock; | ||
| 722 | 743 | ||
| 723 | if (pagecount > MAX_PAGE_BUFFER_COUNT) | 744 | if (pagecount > MAX_PAGE_BUFFER_COUNT) |
| 724 | return -EINVAL; | 745 | return -EINVAL; |
| @@ -755,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, | |||
| 755 | bufferlist[2].iov_base = &aligned_data; | 776 | bufferlist[2].iov_base = &aligned_data; |
| 756 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); | 777 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); |
| 757 | 778 | ||
| 758 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); | 779 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, |
| 780 | &signal, lock); | ||
| 759 | 781 | ||
| 760 | /* | 782 | /* |
| 761 | * Signalling the host is conditional on many factors: | 783 | * Signalling the host is conditional on many factors: |
| @@ -818,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, | |||
| 818 | struct kvec bufferlist[3]; | 840 | struct kvec bufferlist[3]; |
| 819 | u64 aligned_data = 0; | 841 | u64 aligned_data = 0; |
| 820 | bool signal = false; | 842 | bool signal = false; |
| 843 | bool lock = channel->acquire_ring_lock; | ||
| 821 | 844 | ||
| 822 | packetlen = desc_size + bufferlen; | 845 | packetlen = desc_size + bufferlen; |
| 823 | packetlen_aligned = ALIGN(packetlen, sizeof(u64)); | 846 | packetlen_aligned = ALIGN(packetlen, sizeof(u64)); |
| @@ -837,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, | |||
| 837 | bufferlist[2].iov_base = &aligned_data; | 860 | bufferlist[2].iov_base = &aligned_data; |
| 838 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); | 861 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); |
| 839 | 862 | ||
| 840 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); | 863 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, |
| 864 | &signal, lock); | ||
| 841 | 865 | ||
| 842 | if (ret == 0 && signal) | 866 | if (ret == 0 && signal) |
| 843 | vmbus_setevent(channel); | 867 | vmbus_setevent(channel); |
| @@ -862,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, | |||
| 862 | struct kvec bufferlist[3]; | 886 | struct kvec bufferlist[3]; |
| 863 | u64 aligned_data = 0; | 887 | u64 aligned_data = 0; |
| 864 | bool signal = false; | 888 | bool signal = false; |
| 889 | bool lock = channel->acquire_ring_lock; | ||
| 865 | u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, | 890 | u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, |
| 866 | multi_pagebuffer->len); | 891 | multi_pagebuffer->len); |
| 867 | 892 | ||
| @@ -900,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, | |||
| 900 | bufferlist[2].iov_base = &aligned_data; | 925 | bufferlist[2].iov_base = &aligned_data; |
| 901 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); | 926 | bufferlist[2].iov_len = (packetlen_aligned - packetlen); |
| 902 | 927 | ||
| 903 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal); | 928 | ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, |
| 929 | &signal, lock); | ||
| 904 | 930 | ||
| 905 | if (ret == 0 && signal) | 931 | if (ret == 0 && signal) |
| 906 | vmbus_setevent(channel); | 932 | vmbus_setevent(channel); |
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 1c1ad47042c5..b40f429aaa13 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
| @@ -28,12 +28,127 @@ | |||
| 28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
| 29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
| 30 | #include <linux/completion.h> | 30 | #include <linux/completion.h> |
| 31 | #include <linux/delay.h> | ||
| 31 | #include <linux/hyperv.h> | 32 | #include <linux/hyperv.h> |
| 32 | 33 | ||
| 33 | #include "hyperv_vmbus.h" | 34 | #include "hyperv_vmbus.h" |
| 34 | 35 | ||
| 35 | static void init_vp_index(struct vmbus_channel *channel, | 36 | static void init_vp_index(struct vmbus_channel *channel, u16 dev_type); |
| 36 | const uuid_le *type_guid); | 37 | |
| 38 | static const struct vmbus_device vmbus_devs[] = { | ||
| 39 | /* IDE */ | ||
| 40 | { .dev_type = HV_IDE, | ||
| 41 | HV_IDE_GUID, | ||
| 42 | .perf_device = true, | ||
| 43 | }, | ||
| 44 | |||
| 45 | /* SCSI */ | ||
| 46 | { .dev_type = HV_SCSI, | ||
| 47 | HV_SCSI_GUID, | ||
| 48 | .perf_device = true, | ||
| 49 | }, | ||
| 50 | |||
| 51 | /* Fibre Channel */ | ||
| 52 | { .dev_type = HV_FC, | ||
| 53 | HV_SYNTHFC_GUID, | ||
| 54 | .perf_device = true, | ||
| 55 | }, | ||
| 56 | |||
| 57 | /* Synthetic NIC */ | ||
| 58 | { .dev_type = HV_NIC, | ||
| 59 | HV_NIC_GUID, | ||
| 60 | .perf_device = true, | ||
| 61 | }, | ||
| 62 | |||
| 63 | /* Network Direct */ | ||
| 64 | { .dev_type = HV_ND, | ||
| 65 | HV_ND_GUID, | ||
| 66 | .perf_device = true, | ||
| 67 | }, | ||
| 68 | |||
| 69 | /* PCIE */ | ||
| 70 | { .dev_type = HV_PCIE, | ||
| 71 | HV_PCIE_GUID, | ||
| 72 | .perf_device = true, | ||
| 73 | }, | ||
| 74 | |||
| 75 | /* Synthetic Frame Buffer */ | ||
| 76 | { .dev_type = HV_FB, | ||
| 77 | HV_SYNTHVID_GUID, | ||
| 78 | .perf_device = false, | ||
| 79 | }, | ||
| 80 | |||
| 81 | /* Synthetic Keyboard */ | ||
| 82 | { .dev_type = HV_KBD, | ||
| 83 | HV_KBD_GUID, | ||
| 84 | .perf_device = false, | ||
| 85 | }, | ||
| 86 | |||
| 87 | /* Synthetic MOUSE */ | ||
| 88 | { .dev_type = HV_MOUSE, | ||
| 89 | HV_MOUSE_GUID, | ||
| 90 | .perf_device = false, | ||
| 91 | }, | ||
| 92 | |||
| 93 | /* KVP */ | ||
| 94 | { .dev_type = HV_KVP, | ||
| 95 | HV_KVP_GUID, | ||
| 96 | .perf_device = false, | ||
| 97 | }, | ||
| 98 | |||
| 99 | /* Time Synch */ | ||
| 100 | { .dev_type = HV_TS, | ||
| 101 | HV_TS_GUID, | ||
| 102 | .perf_device = false, | ||
| 103 | }, | ||
| 104 | |||
| 105 | /* Heartbeat */ | ||
| 106 | { .dev_type = HV_HB, | ||
| 107 | HV_HEART_BEAT_GUID, | ||
| 108 | .perf_device = false, | ||
| 109 | }, | ||
| 110 | |||
| 111 | /* Shutdown */ | ||
| 112 | { .dev_type = HV_SHUTDOWN, | ||
| 113 | HV_SHUTDOWN_GUID, | ||
| 114 | .perf_device = false, | ||
| 115 | }, | ||
| 116 | |||
| 117 | /* File copy */ | ||
| 118 | { .dev_type = HV_FCOPY, | ||
| 119 | HV_FCOPY_GUID, | ||
| 120 | .perf_device = false, | ||
| 121 | }, | ||
| 122 | |||
| 123 | /* Backup */ | ||
| 124 | { .dev_type = HV_BACKUP, | ||
| 125 | HV_VSS_GUID, | ||
| 126 | .perf_device = false, | ||
| 127 | }, | ||
| 128 | |||
| 129 | /* Dynamic Memory */ | ||
| 130 | { .dev_type = HV_DM, | ||
| 131 | HV_DM_GUID, | ||
| 132 | .perf_device = false, | ||
| 133 | }, | ||
| 134 | |||
| 135 | /* Unknown GUID */ | ||
| 136 | { .dev_type = HV_UNKOWN, | ||
| 137 | .perf_device = false, | ||
| 138 | }, | ||
| 139 | }; | ||
| 140 | |||
| 141 | static u16 hv_get_dev_type(const uuid_le *guid) | ||
| 142 | { | ||
| 143 | u16 i; | ||
| 144 | |||
| 145 | for (i = HV_IDE; i < HV_UNKOWN; i++) { | ||
| 146 | if (!uuid_le_cmp(*guid, vmbus_devs[i].guid)) | ||
| 147 | return i; | ||
| 148 | } | ||
| 149 | pr_info("Unknown GUID: %pUl\n", guid); | ||
| 150 | return i; | ||
| 151 | } | ||
| 37 | 152 | ||
| 38 | /** | 153 | /** |
| 39 | * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message | 154 | * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message |
| @@ -144,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void) | |||
| 144 | return NULL; | 259 | return NULL; |
| 145 | 260 | ||
| 146 | channel->id = atomic_inc_return(&chan_num); | 261 | channel->id = atomic_inc_return(&chan_num); |
| 262 | channel->acquire_ring_lock = true; | ||
| 147 | spin_lock_init(&channel->inbound_lock); | 263 | spin_lock_init(&channel->inbound_lock); |
| 148 | spin_lock_init(&channel->lock); | 264 | spin_lock_init(&channel->lock); |
| 149 | 265 | ||
| @@ -195,6 +311,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | |||
| 195 | vmbus_release_relid(relid); | 311 | vmbus_release_relid(relid); |
| 196 | 312 | ||
| 197 | BUG_ON(!channel->rescind); | 313 | BUG_ON(!channel->rescind); |
| 314 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); | ||
| 198 | 315 | ||
| 199 | if (channel->target_cpu != get_cpu()) { | 316 | if (channel->target_cpu != get_cpu()) { |
| 200 | put_cpu(); | 317 | put_cpu(); |
| @@ -206,9 +323,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | |||
| 206 | } | 323 | } |
| 207 | 324 | ||
| 208 | if (channel->primary_channel == NULL) { | 325 | if (channel->primary_channel == NULL) { |
| 209 | mutex_lock(&vmbus_connection.channel_mutex); | ||
| 210 | list_del(&channel->listentry); | 326 | list_del(&channel->listentry); |
| 211 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 212 | 327 | ||
| 213 | primary_channel = channel; | 328 | primary_channel = channel; |
| 214 | } else { | 329 | } else { |
| @@ -251,6 +366,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
| 251 | struct vmbus_channel *channel; | 366 | struct vmbus_channel *channel; |
| 252 | bool fnew = true; | 367 | bool fnew = true; |
| 253 | unsigned long flags; | 368 | unsigned long flags; |
| 369 | u16 dev_type; | ||
| 370 | int ret; | ||
| 254 | 371 | ||
| 255 | /* Make sure this is a new offer */ | 372 | /* Make sure this is a new offer */ |
| 256 | mutex_lock(&vmbus_connection.channel_mutex); | 373 | mutex_lock(&vmbus_connection.channel_mutex); |
| @@ -288,7 +405,9 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
| 288 | goto err_free_chan; | 405 | goto err_free_chan; |
| 289 | } | 406 | } |
| 290 | 407 | ||
| 291 | init_vp_index(newchannel, &newchannel->offermsg.offer.if_type); | 408 | dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type); |
| 409 | |||
| 410 | init_vp_index(newchannel, dev_type); | ||
| 292 | 411 | ||
| 293 | if (newchannel->target_cpu != get_cpu()) { | 412 | if (newchannel->target_cpu != get_cpu()) { |
| 294 | put_cpu(); | 413 | put_cpu(); |
| @@ -325,12 +444,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
| 325 | if (!newchannel->device_obj) | 444 | if (!newchannel->device_obj) |
| 326 | goto err_deq_chan; | 445 | goto err_deq_chan; |
| 327 | 446 | ||
| 447 | newchannel->device_obj->device_id = dev_type; | ||
| 328 | /* | 448 | /* |
| 329 | * Add the new device to the bus. This will kick off device-driver | 449 | * Add the new device to the bus. This will kick off device-driver |
| 330 | * binding which eventually invokes the device driver's AddDevice() | 450 | * binding which eventually invokes the device driver's AddDevice() |
| 331 | * method. | 451 | * method. |
| 332 | */ | 452 | */ |
| 333 | if (vmbus_device_register(newchannel->device_obj) != 0) { | 453 | mutex_lock(&vmbus_connection.channel_mutex); |
| 454 | ret = vmbus_device_register(newchannel->device_obj); | ||
| 455 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 456 | |||
| 457 | if (ret != 0) { | ||
| 334 | pr_err("unable to add child device object (relid %d)\n", | 458 | pr_err("unable to add child device object (relid %d)\n", |
| 335 | newchannel->offermsg.child_relid); | 459 | newchannel->offermsg.child_relid); |
| 336 | kfree(newchannel->device_obj); | 460 | kfree(newchannel->device_obj); |
| @@ -358,37 +482,6 @@ err_free_chan: | |||
| 358 | free_channel(newchannel); | 482 | free_channel(newchannel); |
| 359 | } | 483 | } |
| 360 | 484 | ||
| 361 | enum { | ||
| 362 | IDE = 0, | ||
| 363 | SCSI, | ||
| 364 | FC, | ||
| 365 | NIC, | ||
| 366 | ND_NIC, | ||
| 367 | PCIE, | ||
| 368 | MAX_PERF_CHN, | ||
| 369 | }; | ||
| 370 | |||
| 371 | /* | ||
| 372 | * This is an array of device_ids (device types) that are performance critical. | ||
| 373 | * We attempt to distribute the interrupt load for these devices across | ||
| 374 | * all available CPUs. | ||
| 375 | */ | ||
| 376 | static const struct hv_vmbus_device_id hp_devs[] = { | ||
| 377 | /* IDE */ | ||
| 378 | { HV_IDE_GUID, }, | ||
| 379 | /* Storage - SCSI */ | ||
| 380 | { HV_SCSI_GUID, }, | ||
| 381 | /* Storage - FC */ | ||
| 382 | { HV_SYNTHFC_GUID, }, | ||
| 383 | /* Network */ | ||
| 384 | { HV_NIC_GUID, }, | ||
| 385 | /* NetworkDirect Guest RDMA */ | ||
| 386 | { HV_ND_GUID, }, | ||
| 387 | /* PCI Express Pass Through */ | ||
| 388 | { HV_PCIE_GUID, }, | ||
| 389 | }; | ||
| 390 | |||
| 391 | |||
| 392 | /* | 485 | /* |
| 393 | * We use this state to statically distribute the channel interrupt load. | 486 | * We use this state to statically distribute the channel interrupt load. |
| 394 | */ | 487 | */ |
| @@ -405,22 +498,15 @@ static int next_numa_node_id; | |||
| 405 | * For pre-win8 hosts or non-performance critical channels we assign the | 498 | * For pre-win8 hosts or non-performance critical channels we assign the |
| 406 | * first CPU in the first NUMA node. | 499 | * first CPU in the first NUMA node. |
| 407 | */ | 500 | */ |
| 408 | static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid) | 501 | static void init_vp_index(struct vmbus_channel *channel, u16 dev_type) |
| 409 | { | 502 | { |
| 410 | u32 cur_cpu; | 503 | u32 cur_cpu; |
| 411 | int i; | 504 | bool perf_chn = vmbus_devs[dev_type].perf_device; |
| 412 | bool perf_chn = false; | ||
| 413 | struct vmbus_channel *primary = channel->primary_channel; | 505 | struct vmbus_channel *primary = channel->primary_channel; |
| 414 | int next_node; | 506 | int next_node; |
| 415 | struct cpumask available_mask; | 507 | struct cpumask available_mask; |
| 416 | struct cpumask *alloced_mask; | 508 | struct cpumask *alloced_mask; |
| 417 | 509 | ||
| 418 | for (i = IDE; i < MAX_PERF_CHN; i++) { | ||
| 419 | if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) { | ||
| 420 | perf_chn = true; | ||
| 421 | break; | ||
| 422 | } | ||
| 423 | } | ||
| 424 | if ((vmbus_proto_version == VERSION_WS2008) || | 510 | if ((vmbus_proto_version == VERSION_WS2008) || |
| 425 | (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) { | 511 | (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) { |
| 426 | /* | 512 | /* |
| @@ -469,6 +555,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui | |||
| 469 | cpumask_of_node(primary->numa_node)); | 555 | cpumask_of_node(primary->numa_node)); |
| 470 | 556 | ||
| 471 | cur_cpu = -1; | 557 | cur_cpu = -1; |
| 558 | |||
| 559 | /* | ||
| 560 | * Normally Hyper-V host doesn't create more subchannels than there | ||
| 561 | * are VCPUs on the node but it is possible when not all present VCPUs | ||
| 562 | * on the node are initialized by guest. Clear the alloced_cpus_in_node | ||
| 563 | * to start over. | ||
| 564 | */ | ||
| 565 | if (cpumask_equal(&primary->alloced_cpus_in_node, | ||
| 566 | cpumask_of_node(primary->numa_node))) | ||
| 567 | cpumask_clear(&primary->alloced_cpus_in_node); | ||
| 568 | |||
| 472 | while (true) { | 569 | while (true) { |
| 473 | cur_cpu = cpumask_next(cur_cpu, &available_mask); | 570 | cur_cpu = cpumask_next(cur_cpu, &available_mask); |
| 474 | if (cur_cpu >= nr_cpu_ids) { | 571 | if (cur_cpu >= nr_cpu_ids) { |
| @@ -498,6 +595,40 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui | |||
| 498 | channel->target_vp = hv_context.vp_index[cur_cpu]; | 595 | channel->target_vp = hv_context.vp_index[cur_cpu]; |
| 499 | } | 596 | } |
| 500 | 597 | ||
| 598 | static void vmbus_wait_for_unload(void) | ||
| 599 | { | ||
| 600 | int cpu = smp_processor_id(); | ||
| 601 | void *page_addr = hv_context.synic_message_page[cpu]; | ||
| 602 | struct hv_message *msg = (struct hv_message *)page_addr + | ||
| 603 | VMBUS_MESSAGE_SINT; | ||
| 604 | struct vmbus_channel_message_header *hdr; | ||
| 605 | bool unloaded = false; | ||
| 606 | |||
| 607 | while (1) { | ||
| 608 | if (msg->header.message_type == HVMSG_NONE) { | ||
| 609 | mdelay(10); | ||
| 610 | continue; | ||
| 611 | } | ||
| 612 | |||
| 613 | hdr = (struct vmbus_channel_message_header *)msg->u.payload; | ||
| 614 | if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE) | ||
| 615 | unloaded = true; | ||
| 616 | |||
| 617 | msg->header.message_type = HVMSG_NONE; | ||
| 618 | /* | ||
| 619 | * header.message_type needs to be written before we do | ||
| 620 | * wrmsrl() below. | ||
| 621 | */ | ||
| 622 | mb(); | ||
| 623 | |||
| 624 | if (msg->header.message_flags.msg_pending) | ||
| 625 | wrmsrl(HV_X64_MSR_EOM, 0); | ||
| 626 | |||
| 627 | if (unloaded) | ||
| 628 | break; | ||
| 629 | } | ||
| 630 | } | ||
| 631 | |||
| 501 | /* | 632 | /* |
| 502 | * vmbus_unload_response - Handler for the unload response. | 633 | * vmbus_unload_response - Handler for the unload response. |
| 503 | */ | 634 | */ |
| @@ -523,7 +654,14 @@ void vmbus_initiate_unload(void) | |||
| 523 | hdr.msgtype = CHANNELMSG_UNLOAD; | 654 | hdr.msgtype = CHANNELMSG_UNLOAD; |
| 524 | vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header)); | 655 | vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header)); |
| 525 | 656 | ||
| 526 | wait_for_completion(&vmbus_connection.unload_event); | 657 | /* |
| 658 | * vmbus_initiate_unload() is also called on crash and the crash can be | ||
| 659 | * happening in an interrupt context, where scheduling is impossible. | ||
| 660 | */ | ||
| 661 | if (!in_interrupt()) | ||
| 662 | wait_for_completion(&vmbus_connection.unload_event); | ||
| 663 | else | ||
| 664 | vmbus_wait_for_unload(); | ||
| 527 | } | 665 | } |
| 528 | 666 | ||
| 529 | /* | 667 | /* |
| @@ -592,6 +730,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 592 | struct device *dev; | 730 | struct device *dev; |
| 593 | 731 | ||
| 594 | rescind = (struct vmbus_channel_rescind_offer *)hdr; | 732 | rescind = (struct vmbus_channel_rescind_offer *)hdr; |
| 733 | |||
| 734 | mutex_lock(&vmbus_connection.channel_mutex); | ||
| 595 | channel = relid2channel(rescind->child_relid); | 735 | channel = relid2channel(rescind->child_relid); |
| 596 | 736 | ||
| 597 | if (channel == NULL) { | 737 | if (channel == NULL) { |
| @@ -600,7 +740,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 600 | * vmbus_process_offer(), we have already invoked | 740 | * vmbus_process_offer(), we have already invoked |
| 601 | * vmbus_release_relid() on error. | 741 | * vmbus_release_relid() on error. |
| 602 | */ | 742 | */ |
| 603 | return; | 743 | goto out; |
| 604 | } | 744 | } |
| 605 | 745 | ||
| 606 | spin_lock_irqsave(&channel->lock, flags); | 746 | spin_lock_irqsave(&channel->lock, flags); |
| @@ -608,6 +748,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 608 | spin_unlock_irqrestore(&channel->lock, flags); | 748 | spin_unlock_irqrestore(&channel->lock, flags); |
| 609 | 749 | ||
| 610 | if (channel->device_obj) { | 750 | if (channel->device_obj) { |
| 751 | if (channel->chn_rescind_callback) { | ||
| 752 | channel->chn_rescind_callback(channel); | ||
| 753 | goto out; | ||
| 754 | } | ||
| 611 | /* | 755 | /* |
| 612 | * We will have to unregister this device from the | 756 | * We will have to unregister this device from the |
| 613 | * driver core. | 757 | * driver core. |
| @@ -621,8 +765,25 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
| 621 | hv_process_channel_removal(channel, | 765 | hv_process_channel_removal(channel, |
| 622 | channel->offermsg.child_relid); | 766 | channel->offermsg.child_relid); |
| 623 | } | 767 | } |
| 768 | |||
| 769 | out: | ||
| 770 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 624 | } | 771 | } |
| 625 | 772 | ||
| 773 | void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) | ||
| 774 | { | ||
| 775 | mutex_lock(&vmbus_connection.channel_mutex); | ||
| 776 | |||
| 777 | BUG_ON(!is_hvsock_channel(channel)); | ||
| 778 | |||
| 779 | channel->rescind = true; | ||
| 780 | vmbus_device_unregister(channel->device_obj); | ||
| 781 | |||
| 782 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 783 | } | ||
| 784 | EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); | ||
| 785 | |||
| 786 | |||
| 626 | /* | 787 | /* |
| 627 | * vmbus_onoffers_delivered - | 788 | * vmbus_onoffers_delivered - |
| 628 | * This is invoked when all offers have been delivered. | 789 | * This is invoked when all offers have been delivered. |
| @@ -825,6 +986,10 @@ struct vmbus_channel_message_table_entry | |||
| 825 | {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response}, | 986 | {CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response}, |
| 826 | {CHANNELMSG_UNLOAD, 0, NULL}, | 987 | {CHANNELMSG_UNLOAD, 0, NULL}, |
| 827 | {CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response}, | 988 | {CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response}, |
| 989 | {CHANNELMSG_18, 0, NULL}, | ||
| 990 | {CHANNELMSG_19, 0, NULL}, | ||
| 991 | {CHANNELMSG_20, 0, NULL}, | ||
| 992 | {CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL}, | ||
| 828 | }; | 993 | }; |
| 829 | 994 | ||
| 830 | /* | 995 | /* |
| @@ -973,3 +1138,10 @@ bool vmbus_are_subchannels_present(struct vmbus_channel *primary) | |||
| 973 | return ret; | 1138 | return ret; |
| 974 | } | 1139 | } |
| 975 | EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present); | 1140 | EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present); |
| 1141 | |||
| 1142 | void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, | ||
| 1143 | void (*chn_rescind_cb)(struct vmbus_channel *)) | ||
| 1144 | { | ||
| 1145 | channel->chn_rescind_callback = chn_rescind_cb; | ||
| 1146 | } | ||
| 1147 | EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback); | ||
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 3dc5a9c7fad6..fa86b2cb28b8 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c | |||
| @@ -288,7 +288,8 @@ struct vmbus_channel *relid2channel(u32 relid) | |||
| 288 | struct list_head *cur, *tmp; | 288 | struct list_head *cur, *tmp; |
| 289 | struct vmbus_channel *cur_sc; | 289 | struct vmbus_channel *cur_sc; |
| 290 | 290 | ||
| 291 | mutex_lock(&vmbus_connection.channel_mutex); | 291 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); |
| 292 | |||
| 292 | list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { | 293 | list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { |
| 293 | if (channel->offermsg.child_relid == relid) { | 294 | if (channel->offermsg.child_relid == relid) { |
| 294 | found_channel = channel; | 295 | found_channel = channel; |
| @@ -307,7 +308,6 @@ struct vmbus_channel *relid2channel(u32 relid) | |||
| 307 | } | 308 | } |
| 308 | } | 309 | } |
| 309 | } | 310 | } |
| 310 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
| 311 | 311 | ||
| 312 | return found_channel; | 312 | return found_channel; |
| 313 | } | 313 | } |
| @@ -474,7 +474,7 @@ int vmbus_post_msg(void *buffer, size_t buflen) | |||
| 474 | /* | 474 | /* |
| 475 | * vmbus_set_event - Send an event notification to the parent | 475 | * vmbus_set_event - Send an event notification to the parent |
| 476 | */ | 476 | */ |
| 477 | int vmbus_set_event(struct vmbus_channel *channel) | 477 | void vmbus_set_event(struct vmbus_channel *channel) |
| 478 | { | 478 | { |
| 479 | u32 child_relid = channel->offermsg.child_relid; | 479 | u32 child_relid = channel->offermsg.child_relid; |
| 480 | 480 | ||
| @@ -485,5 +485,5 @@ int vmbus_set_event(struct vmbus_channel *channel) | |||
| 485 | (child_relid >> 5)); | 485 | (child_relid >> 5)); |
| 486 | } | 486 | } |
| 487 | 487 | ||
| 488 | return hv_signal_event(channel->sig_event); | 488 | hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL); |
| 489 | } | 489 | } |
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 11bca51ef5ff..ccb335f57c88 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c | |||
| @@ -295,8 +295,14 @@ void hv_cleanup(void) | |||
| 295 | * Cleanup the TSC page based CS. | 295 | * Cleanup the TSC page based CS. |
| 296 | */ | 296 | */ |
| 297 | if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { | 297 | if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) { |
| 298 | clocksource_change_rating(&hyperv_cs_tsc, 10); | 298 | /* |
| 299 | clocksource_unregister(&hyperv_cs_tsc); | 299 | * Crash can happen in an interrupt context and unregistering |
| 300 | * a clocksource is impossible and redundant in this case. | ||
| 301 | */ | ||
| 302 | if (!oops_in_progress) { | ||
| 303 | clocksource_change_rating(&hyperv_cs_tsc, 10); | ||
| 304 | clocksource_unregister(&hyperv_cs_tsc); | ||
| 305 | } | ||
| 300 | 306 | ||
| 301 | hypercall_msr.as_uint64 = 0; | 307 | hypercall_msr.as_uint64 = 0; |
| 302 | wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); | 308 | wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64); |
| @@ -337,22 +343,6 @@ int hv_post_message(union hv_connection_id connection_id, | |||
| 337 | return status & 0xFFFF; | 343 | return status & 0xFFFF; |
| 338 | } | 344 | } |
| 339 | 345 | ||
| 340 | |||
| 341 | /* | ||
| 342 | * hv_signal_event - | ||
| 343 | * Signal an event on the specified connection using the hypervisor event IPC. | ||
| 344 | * | ||
| 345 | * This involves a hypercall. | ||
| 346 | */ | ||
| 347 | int hv_signal_event(void *con_id) | ||
| 348 | { | ||
| 349 | u64 status; | ||
| 350 | |||
| 351 | status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL); | ||
| 352 | |||
| 353 | return status & 0xFFFF; | ||
| 354 | } | ||
| 355 | |||
| 356 | static int hv_ce_set_next_event(unsigned long delta, | 346 | static int hv_ce_set_next_event(unsigned long delta, |
| 357 | struct clock_event_device *evt) | 347 | struct clock_event_device *evt) |
| 358 | { | 348 | { |
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 4ebc796b4f33..b9ea7f59036b 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h | |||
| @@ -501,8 +501,6 @@ extern int hv_post_message(union hv_connection_id connection_id, | |||
| 501 | enum hv_message_type message_type, | 501 | enum hv_message_type message_type, |
| 502 | void *payload, size_t payload_size); | 502 | void *payload, size_t payload_size); |
| 503 | 503 | ||
| 504 | extern int hv_signal_event(void *con_id); | ||
| 505 | |||
| 506 | extern int hv_synic_alloc(void); | 504 | extern int hv_synic_alloc(void); |
| 507 | 505 | ||
| 508 | extern void hv_synic_free(void); | 506 | extern void hv_synic_free(void); |
| @@ -531,7 +529,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info); | |||
| 531 | 529 | ||
| 532 | int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, | 530 | int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info, |
| 533 | struct kvec *kv_list, | 531 | struct kvec *kv_list, |
| 534 | u32 kv_count, bool *signal); | 532 | u32 kv_count, bool *signal, bool lock); |
| 535 | 533 | ||
| 536 | int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | 534 | int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, |
| 537 | void *buffer, u32 buflen, u32 *buffer_actual_len, | 535 | void *buffer, u32 buflen, u32 *buffer_actual_len, |
| @@ -650,7 +648,7 @@ void vmbus_disconnect(void); | |||
| 650 | 648 | ||
| 651 | int vmbus_post_msg(void *buffer, size_t buflen); | 649 | int vmbus_post_msg(void *buffer, size_t buflen); |
| 652 | 650 | ||
| 653 | int vmbus_set_event(struct vmbus_channel *channel); | 651 | void vmbus_set_event(struct vmbus_channel *channel); |
| 654 | 652 | ||
| 655 | void vmbus_on_event(unsigned long data); | 653 | void vmbus_on_event(unsigned long data); |
| 656 | 654 | ||
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index b53702ce692f..5613e2b5cff7 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c | |||
| @@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) | |||
| 314 | 314 | ||
| 315 | /* Write to the ring buffer. */ | 315 | /* Write to the ring buffer. */ |
| 316 | int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, | 316 | int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, |
| 317 | struct kvec *kv_list, u32 kv_count, bool *signal) | 317 | struct kvec *kv_list, u32 kv_count, bool *signal, bool lock) |
| 318 | { | 318 | { |
| 319 | int i = 0; | 319 | int i = 0; |
| 320 | u32 bytes_avail_towrite; | 320 | u32 bytes_avail_towrite; |
| @@ -324,14 +324,15 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, | |||
| 324 | u32 next_write_location; | 324 | u32 next_write_location; |
| 325 | u32 old_write; | 325 | u32 old_write; |
| 326 | u64 prev_indices = 0; | 326 | u64 prev_indices = 0; |
| 327 | unsigned long flags; | 327 | unsigned long flags = 0; |
| 328 | 328 | ||
| 329 | for (i = 0; i < kv_count; i++) | 329 | for (i = 0; i < kv_count; i++) |
| 330 | totalbytes_towrite += kv_list[i].iov_len; | 330 | totalbytes_towrite += kv_list[i].iov_len; |
| 331 | 331 | ||
| 332 | totalbytes_towrite += sizeof(u64); | 332 | totalbytes_towrite += sizeof(u64); |
| 333 | 333 | ||
| 334 | spin_lock_irqsave(&outring_info->ring_lock, flags); | 334 | if (lock) |
| 335 | spin_lock_irqsave(&outring_info->ring_lock, flags); | ||
| 335 | 336 | ||
| 336 | hv_get_ringbuffer_availbytes(outring_info, | 337 | hv_get_ringbuffer_availbytes(outring_info, |
| 337 | &bytes_avail_toread, | 338 | &bytes_avail_toread, |
| @@ -343,7 +344,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, | |||
| 343 | * is empty since the read index == write index. | 344 | * is empty since the read index == write index. |
| 344 | */ | 345 | */ |
| 345 | if (bytes_avail_towrite <= totalbytes_towrite) { | 346 | if (bytes_avail_towrite <= totalbytes_towrite) { |
| 346 | spin_unlock_irqrestore(&outring_info->ring_lock, flags); | 347 | if (lock) |
| 348 | spin_unlock_irqrestore(&outring_info->ring_lock, flags); | ||
| 347 | return -EAGAIN; | 349 | return -EAGAIN; |
| 348 | } | 350 | } |
| 349 | 351 | ||
| @@ -374,7 +376,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, | |||
| 374 | hv_set_next_write_location(outring_info, next_write_location); | 376 | hv_set_next_write_location(outring_info, next_write_location); |
| 375 | 377 | ||
| 376 | 378 | ||
| 377 | spin_unlock_irqrestore(&outring_info->ring_lock, flags); | 379 | if (lock) |
| 380 | spin_unlock_irqrestore(&outring_info->ring_lock, flags); | ||
| 378 | 381 | ||
| 379 | *signal = hv_need_to_signal(old_write, outring_info); | 382 | *signal = hv_need_to_signal(old_write, outring_info); |
| 380 | return 0; | 383 | return 0; |
| @@ -388,7 +391,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
| 388 | u32 bytes_avail_toread; | 391 | u32 bytes_avail_toread; |
| 389 | u32 next_read_location = 0; | 392 | u32 next_read_location = 0; |
| 390 | u64 prev_indices = 0; | 393 | u64 prev_indices = 0; |
| 391 | unsigned long flags; | ||
| 392 | struct vmpacket_descriptor desc; | 394 | struct vmpacket_descriptor desc; |
| 393 | u32 offset; | 395 | u32 offset; |
| 394 | u32 packetlen; | 396 | u32 packetlen; |
| @@ -397,7 +399,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
| 397 | if (buflen <= 0) | 399 | if (buflen <= 0) |
| 398 | return -EINVAL; | 400 | return -EINVAL; |
| 399 | 401 | ||
| 400 | spin_lock_irqsave(&inring_info->ring_lock, flags); | ||
| 401 | 402 | ||
| 402 | *buffer_actual_len = 0; | 403 | *buffer_actual_len = 0; |
| 403 | *requestid = 0; | 404 | *requestid = 0; |
| @@ -412,7 +413,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
| 412 | * No error is set when there is even no header, drivers are | 413 | * No error is set when there is even no header, drivers are |
| 413 | * supposed to analyze buffer_actual_len. | 414 | * supposed to analyze buffer_actual_len. |
| 414 | */ | 415 | */ |
| 415 | goto out_unlock; | 416 | return ret; |
| 416 | } | 417 | } |
| 417 | 418 | ||
| 418 | next_read_location = hv_get_next_read_location(inring_info); | 419 | next_read_location = hv_get_next_read_location(inring_info); |
| @@ -425,15 +426,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
| 425 | *buffer_actual_len = packetlen; | 426 | *buffer_actual_len = packetlen; |
| 426 | *requestid = desc.trans_id; | 427 | *requestid = desc.trans_id; |
| 427 | 428 | ||
| 428 | if (bytes_avail_toread < packetlen + offset) { | 429 | if (bytes_avail_toread < packetlen + offset) |
| 429 | ret = -EAGAIN; | 430 | return -EAGAIN; |
| 430 | goto out_unlock; | ||
| 431 | } | ||
| 432 | 431 | ||
| 433 | if (packetlen > buflen) { | 432 | if (packetlen > buflen) |
| 434 | ret = -ENOBUFS; | 433 | return -ENOBUFS; |
| 435 | goto out_unlock; | ||
| 436 | } | ||
| 437 | 434 | ||
| 438 | next_read_location = | 435 | next_read_location = |
| 439 | hv_get_next_readlocation_withoffset(inring_info, offset); | 436 | hv_get_next_readlocation_withoffset(inring_info, offset); |
| @@ -460,7 +457,5 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, | |||
| 460 | 457 | ||
| 461 | *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); | 458 | *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info); |
| 462 | 459 | ||
| 463 | out_unlock: | ||
| 464 | spin_unlock_irqrestore(&inring_info->ring_lock, flags); | ||
| 465 | return ret; | 460 | return ret; |
| 466 | } | 461 | } |
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 328e4c3808e0..063e5f53ca78 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
| @@ -477,6 +477,24 @@ static ssize_t channel_vp_mapping_show(struct device *dev, | |||
| 477 | } | 477 | } |
| 478 | static DEVICE_ATTR_RO(channel_vp_mapping); | 478 | static DEVICE_ATTR_RO(channel_vp_mapping); |
| 479 | 479 | ||
| 480 | static ssize_t vendor_show(struct device *dev, | ||
| 481 | struct device_attribute *dev_attr, | ||
| 482 | char *buf) | ||
| 483 | { | ||
| 484 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
| 485 | return sprintf(buf, "0x%x\n", hv_dev->vendor_id); | ||
| 486 | } | ||
| 487 | static DEVICE_ATTR_RO(vendor); | ||
| 488 | |||
| 489 | static ssize_t device_show(struct device *dev, | ||
| 490 | struct device_attribute *dev_attr, | ||
| 491 | char *buf) | ||
| 492 | { | ||
| 493 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
| 494 | return sprintf(buf, "0x%x\n", hv_dev->device_id); | ||
| 495 | } | ||
| 496 | static DEVICE_ATTR_RO(device); | ||
| 497 | |||
| 480 | /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */ | 498 | /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */ |
| 481 | static struct attribute *vmbus_attrs[] = { | 499 | static struct attribute *vmbus_attrs[] = { |
| 482 | &dev_attr_id.attr, | 500 | &dev_attr_id.attr, |
| @@ -502,6 +520,8 @@ static struct attribute *vmbus_attrs[] = { | |||
| 502 | &dev_attr_in_read_bytes_avail.attr, | 520 | &dev_attr_in_read_bytes_avail.attr, |
| 503 | &dev_attr_in_write_bytes_avail.attr, | 521 | &dev_attr_in_write_bytes_avail.attr, |
| 504 | &dev_attr_channel_vp_mapping.attr, | 522 | &dev_attr_channel_vp_mapping.attr, |
| 523 | &dev_attr_vendor.attr, | ||
| 524 | &dev_attr_device.attr, | ||
| 505 | NULL, | 525 | NULL, |
| 506 | }; | 526 | }; |
| 507 | ATTRIBUTE_GROUPS(vmbus); | 527 | ATTRIBUTE_GROUPS(vmbus); |
| @@ -562,6 +582,10 @@ static int vmbus_match(struct device *device, struct device_driver *driver) | |||
| 562 | struct hv_driver *drv = drv_to_hv_drv(driver); | 582 | struct hv_driver *drv = drv_to_hv_drv(driver); |
| 563 | struct hv_device *hv_dev = device_to_hv_device(device); | 583 | struct hv_device *hv_dev = device_to_hv_device(device); |
| 564 | 584 | ||
| 585 | /* The hv_sock driver handles all hv_sock offers. */ | ||
| 586 | if (is_hvsock_channel(hv_dev->channel)) | ||
| 587 | return drv->hvsock; | ||
| 588 | |||
| 565 | if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type)) | 589 | if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type)) |
| 566 | return 1; | 590 | return 1; |
| 567 | 591 | ||
| @@ -957,6 +981,7 @@ struct hv_device *vmbus_device_create(const uuid_le *type, | |||
| 957 | memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le)); | 981 | memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le)); |
| 958 | memcpy(&child_device_obj->dev_instance, instance, | 982 | memcpy(&child_device_obj->dev_instance, instance, |
| 959 | sizeof(uuid_le)); | 983 | sizeof(uuid_le)); |
| 984 | child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */ | ||
| 960 | 985 | ||
| 961 | 986 | ||
| 962 | return child_device_obj; | 987 | return child_device_obj; |
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index c85935f3525a..db0541031c72 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | menuconfig CORESIGHT | 4 | menuconfig CORESIGHT |
| 5 | bool "CoreSight Tracing Support" | 5 | bool "CoreSight Tracing Support" |
| 6 | select ARM_AMBA | 6 | select ARM_AMBA |
| 7 | select PERF_EVENTS | ||
| 7 | help | 8 | help |
| 8 | This framework provides a kernel interface for the CoreSight debug | 9 | This framework provides a kernel interface for the CoreSight debug |
| 9 | and trace drivers to register themselves with. It's intended to build | 10 | and trace drivers to register themselves with. It's intended to build |
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index 99f8e5f6256e..cf8c6d689747 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile | |||
| @@ -8,6 +8,8 @@ obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o | |||
| 8 | obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o | 8 | obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o |
| 9 | obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ | 9 | obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ |
| 10 | coresight-replicator.o | 10 | coresight-replicator.o |
| 11 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o | 11 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \ |
| 12 | coresight-etm3x-sysfs.o \ | ||
| 13 | coresight-etm-perf.o | ||
| 12 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o | 14 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o |
| 13 | obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o | 15 | obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o |
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 77d0f9c1118d..acbce79934d6 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
| 2 | * | 2 | * |
| 3 | * Description: CoreSight Embedded Trace Buffer driver | ||
| 4 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
| @@ -10,8 +12,8 @@ | |||
| 10 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
| 11 | */ | 13 | */ |
| 12 | 14 | ||
| 15 | #include <asm/local.h> | ||
| 13 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 16 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 17 | #include <linux/device.h> | 19 | #include <linux/device.h> |
| @@ -27,6 +29,11 @@ | |||
| 27 | #include <linux/coresight.h> | 29 | #include <linux/coresight.h> |
| 28 | #include <linux/amba/bus.h> | 30 | #include <linux/amba/bus.h> |
| 29 | #include <linux/clk.h> | 31 | #include <linux/clk.h> |
| 32 | #include <linux/circ_buf.h> | ||
| 33 | #include <linux/mm.h> | ||
| 34 | #include <linux/perf_event.h> | ||
| 35 | |||
| 36 | #include <asm/local.h> | ||
| 30 | 37 | ||
| 31 | #include "coresight-priv.h" | 38 | #include "coresight-priv.h" |
| 32 | 39 | ||
| @@ -64,6 +71,26 @@ | |||
| 64 | #define ETB_FRAME_SIZE_WORDS 4 | 71 | #define ETB_FRAME_SIZE_WORDS 4 |
| 65 | 72 | ||
| 66 | /** | 73 | /** |
| 74 | * struct cs_buffer - keep track of a recording session' specifics | ||
| 75 | * @cur: index of the current buffer | ||
| 76 | * @nr_pages: max number of pages granted to us | ||
| 77 | * @offset: offset within the current buffer | ||
| 78 | * @data_size: how much we collected in this run | ||
| 79 | * @lost: other than zero if we had a HW buffer wrap around | ||
| 80 | * @snapshot: is this run in snapshot mode | ||
| 81 | * @data_pages: a handle the ring buffer | ||
| 82 | */ | ||
| 83 | struct cs_buffers { | ||
| 84 | unsigned int cur; | ||
| 85 | unsigned int nr_pages; | ||
| 86 | unsigned long offset; | ||
| 87 | local_t data_size; | ||
| 88 | local_t lost; | ||
| 89 | bool snapshot; | ||
| 90 | void **data_pages; | ||
| 91 | }; | ||
| 92 | |||
| 93 | /** | ||
| 67 | * struct etb_drvdata - specifics associated to an ETB component | 94 | * struct etb_drvdata - specifics associated to an ETB component |
| 68 | * @base: memory mapped base address for this component. | 95 | * @base: memory mapped base address for this component. |
| 69 | * @dev: the device entity associated to this component. | 96 | * @dev: the device entity associated to this component. |
| @@ -71,10 +98,10 @@ | |||
| 71 | * @csdev: component vitals needed by the framework. | 98 | * @csdev: component vitals needed by the framework. |
| 72 | * @miscdev: specifics to handle "/dev/xyz.etb" entry. | 99 | * @miscdev: specifics to handle "/dev/xyz.etb" entry. |
| 73 | * @spinlock: only one at a time pls. | 100 | * @spinlock: only one at a time pls. |
| 74 | * @in_use: synchronise user space access to etb buffer. | 101 | * @reading: synchronise user space access to etb buffer. |
| 102 | * @mode: this ETB is being used. | ||
| 75 | * @buf: area of memory where ETB buffer content gets sent. | 103 | * @buf: area of memory where ETB buffer content gets sent. |
| 76 | * @buffer_depth: size of @buf. | 104 | * @buffer_depth: size of @buf. |
| 77 | * @enable: this ETB is being used. | ||
| 78 | * @trigger_cntr: amount of words to store after a trigger. | 105 | * @trigger_cntr: amount of words to store after a trigger. |
| 79 | */ | 106 | */ |
| 80 | struct etb_drvdata { | 107 | struct etb_drvdata { |
| @@ -84,10 +111,10 @@ struct etb_drvdata { | |||
| 84 | struct coresight_device *csdev; | 111 | struct coresight_device *csdev; |
| 85 | struct miscdevice miscdev; | 112 | struct miscdevice miscdev; |
| 86 | spinlock_t spinlock; | 113 | spinlock_t spinlock; |
| 87 | atomic_t in_use; | 114 | local_t reading; |
| 115 | local_t mode; | ||
| 88 | u8 *buf; | 116 | u8 *buf; |
| 89 | u32 buffer_depth; | 117 | u32 buffer_depth; |
| 90 | bool enable; | ||
| 91 | u32 trigger_cntr; | 118 | u32 trigger_cntr; |
| 92 | }; | 119 | }; |
| 93 | 120 | ||
| @@ -132,18 +159,31 @@ static void etb_enable_hw(struct etb_drvdata *drvdata) | |||
| 132 | CS_LOCK(drvdata->base); | 159 | CS_LOCK(drvdata->base); |
| 133 | } | 160 | } |
| 134 | 161 | ||
| 135 | static int etb_enable(struct coresight_device *csdev) | 162 | static int etb_enable(struct coresight_device *csdev, u32 mode) |
| 136 | { | 163 | { |
| 137 | struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 164 | u32 val; |
| 138 | unsigned long flags; | 165 | unsigned long flags; |
| 166 | struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 139 | 167 | ||
| 140 | pm_runtime_get_sync(drvdata->dev); | 168 | val = local_cmpxchg(&drvdata->mode, |
| 169 | CS_MODE_DISABLED, mode); | ||
| 170 | /* | ||
| 171 | * When accessing from Perf, a HW buffer can be handled | ||
| 172 | * by a single trace entity. In sysFS mode many tracers | ||
| 173 | * can be logging to the same HW buffer. | ||
| 174 | */ | ||
| 175 | if (val == CS_MODE_PERF) | ||
| 176 | return -EBUSY; | ||
| 177 | |||
| 178 | /* Nothing to do, the tracer is already enabled. */ | ||
| 179 | if (val == CS_MODE_SYSFS) | ||
| 180 | goto out; | ||
| 141 | 181 | ||
| 142 | spin_lock_irqsave(&drvdata->spinlock, flags); | 182 | spin_lock_irqsave(&drvdata->spinlock, flags); |
| 143 | etb_enable_hw(drvdata); | 183 | etb_enable_hw(drvdata); |
| 144 | drvdata->enable = true; | ||
| 145 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 184 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
| 146 | 185 | ||
| 186 | out: | ||
| 147 | dev_info(drvdata->dev, "ETB enabled\n"); | 187 | dev_info(drvdata->dev, "ETB enabled\n"); |
| 148 | return 0; | 188 | return 0; |
| 149 | } | 189 | } |
| @@ -244,17 +284,225 @@ static void etb_disable(struct coresight_device *csdev) | |||
| 244 | spin_lock_irqsave(&drvdata->spinlock, flags); | 284 | spin_lock_irqsave(&drvdata->spinlock, flags); |
| 245 | etb_disable_hw(drvdata); | 285 | etb_disable_hw(drvdata); |
| 246 | etb_dump_hw(drvdata); | 286 | etb_dump_hw(drvdata); |
| 247 | drvdata->enable = false; | ||
| 248 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 287 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
| 249 | 288 | ||
| 250 | pm_runtime_put(drvdata->dev); | 289 | local_set(&drvdata->mode, CS_MODE_DISABLED); |
| 251 | 290 | ||
| 252 | dev_info(drvdata->dev, "ETB disabled\n"); | 291 | dev_info(drvdata->dev, "ETB disabled\n"); |
| 253 | } | 292 | } |
| 254 | 293 | ||
| 294 | static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu, | ||
| 295 | void **pages, int nr_pages, bool overwrite) | ||
| 296 | { | ||
| 297 | int node; | ||
| 298 | struct cs_buffers *buf; | ||
| 299 | |||
| 300 | if (cpu == -1) | ||
| 301 | cpu = smp_processor_id(); | ||
| 302 | node = cpu_to_node(cpu); | ||
| 303 | |||
| 304 | buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); | ||
| 305 | if (!buf) | ||
| 306 | return NULL; | ||
| 307 | |||
| 308 | buf->snapshot = overwrite; | ||
| 309 | buf->nr_pages = nr_pages; | ||
| 310 | buf->data_pages = pages; | ||
| 311 | |||
| 312 | return buf; | ||
| 313 | } | ||
| 314 | |||
| 315 | static void etb_free_buffer(void *config) | ||
| 316 | { | ||
| 317 | struct cs_buffers *buf = config; | ||
| 318 | |||
| 319 | kfree(buf); | ||
| 320 | } | ||
| 321 | |||
| 322 | static int etb_set_buffer(struct coresight_device *csdev, | ||
| 323 | struct perf_output_handle *handle, | ||
| 324 | void *sink_config) | ||
| 325 | { | ||
| 326 | int ret = 0; | ||
| 327 | unsigned long head; | ||
| 328 | struct cs_buffers *buf = sink_config; | ||
| 329 | |||
| 330 | /* wrap head around to the amount of space we have */ | ||
| 331 | head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); | ||
| 332 | |||
| 333 | /* find the page to write to */ | ||
| 334 | buf->cur = head / PAGE_SIZE; | ||
| 335 | |||
| 336 | /* and offset within that page */ | ||
| 337 | buf->offset = head % PAGE_SIZE; | ||
| 338 | |||
| 339 | local_set(&buf->data_size, 0); | ||
| 340 | |||
| 341 | return ret; | ||
| 342 | } | ||
| 343 | |||
| 344 | static unsigned long etb_reset_buffer(struct coresight_device *csdev, | ||
| 345 | struct perf_output_handle *handle, | ||
| 346 | void *sink_config, bool *lost) | ||
| 347 | { | ||
| 348 | unsigned long size = 0; | ||
| 349 | struct cs_buffers *buf = sink_config; | ||
| 350 | |||
| 351 | if (buf) { | ||
| 352 | /* | ||
| 353 | * In snapshot mode ->data_size holds the new address of the | ||
| 354 | * ring buffer's head. The size itself is the whole address | ||
| 355 | * range since we want the latest information. | ||
| 356 | */ | ||
| 357 | if (buf->snapshot) | ||
| 358 | handle->head = local_xchg(&buf->data_size, | ||
| 359 | buf->nr_pages << PAGE_SHIFT); | ||
| 360 | |||
| 361 | /* | ||
| 362 | * Tell the tracer PMU how much we got in this run and if | ||
| 363 | * something went wrong along the way. Nobody else can use | ||
| 364 | * this cs_buffers instance until we are done. As such | ||
| 365 | * resetting parameters here and squaring off with the ring | ||
| 366 | * buffer API in the tracer PMU is fine. | ||
| 367 | */ | ||
| 368 | *lost = !!local_xchg(&buf->lost, 0); | ||
| 369 | size = local_xchg(&buf->data_size, 0); | ||
| 370 | } | ||
| 371 | |||
| 372 | return size; | ||
| 373 | } | ||
| 374 | |||
| 375 | static void etb_update_buffer(struct coresight_device *csdev, | ||
| 376 | struct perf_output_handle *handle, | ||
| 377 | void *sink_config) | ||
| 378 | { | ||
| 379 | int i, cur; | ||
| 380 | u8 *buf_ptr; | ||
| 381 | u32 read_ptr, write_ptr, capacity; | ||
| 382 | u32 status, read_data, to_read; | ||
| 383 | unsigned long offset; | ||
| 384 | struct cs_buffers *buf = sink_config; | ||
| 385 | struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 386 | |||
| 387 | if (!buf) | ||
| 388 | return; | ||
| 389 | |||
| 390 | capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS; | ||
| 391 | |||
| 392 | CS_UNLOCK(drvdata->base); | ||
| 393 | etb_disable_hw(drvdata); | ||
| 394 | |||
| 395 | /* unit is in words, not bytes */ | ||
| 396 | read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); | ||
| 397 | write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); | ||
| 398 | |||
| 399 | /* | ||
| 400 | * Entries should be aligned to the frame size. If they are not | ||
| 401 | * go back to the last alignement point to give decoding tools a | ||
| 402 | * chance to fix things. | ||
| 403 | */ | ||
| 404 | if (write_ptr % ETB_FRAME_SIZE_WORDS) { | ||
| 405 | dev_err(drvdata->dev, | ||
| 406 | "write_ptr: %lu not aligned to formatter frame size\n", | ||
| 407 | (unsigned long)write_ptr); | ||
| 408 | |||
| 409 | write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1); | ||
| 410 | local_inc(&buf->lost); | ||
| 411 | } | ||
| 412 | |||
| 413 | /* | ||
| 414 | * Get a hold of the status register and see if a wrap around | ||
| 415 | * has occurred. If so adjust things accordingly. Otherwise | ||
| 416 | * start at the beginning and go until the write pointer has | ||
| 417 | * been reached. | ||
| 418 | */ | ||
| 419 | status = readl_relaxed(drvdata->base + ETB_STATUS_REG); | ||
| 420 | if (status & ETB_STATUS_RAM_FULL) { | ||
| 421 | local_inc(&buf->lost); | ||
| 422 | to_read = capacity; | ||
| 423 | read_ptr = write_ptr; | ||
| 424 | } else { | ||
| 425 | to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth); | ||
| 426 | to_read *= ETB_FRAME_SIZE_WORDS; | ||
| 427 | } | ||
| 428 | |||
| 429 | /* | ||
| 430 | * Make sure we don't overwrite data that hasn't been consumed yet. | ||
| 431 | * It is entirely possible that the HW buffer has more data than the | ||
| 432 | * ring buffer can currently handle. If so adjust the start address | ||
| 433 | * to take only the last traces. | ||
| 434 | * | ||
| 435 | * In snapshot mode we are looking to get the latest traces only and as | ||
| 436 | * such, we don't care about not overwriting data that hasn't been | ||
| 437 | * processed by user space. | ||
| 438 | */ | ||
| 439 | if (!buf->snapshot && to_read > handle->size) { | ||
| 440 | u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1); | ||
| 441 | |||
| 442 | /* The new read pointer must be frame size aligned */ | ||
| 443 | to_read -= handle->size & mask; | ||
| 444 | /* | ||
| 445 | * Move the RAM read pointer up, keeping in mind that | ||
| 446 | * everything is in frame size units. | ||
| 447 | */ | ||
| 448 | read_ptr = (write_ptr + drvdata->buffer_depth) - | ||
| 449 | to_read / ETB_FRAME_SIZE_WORDS; | ||
| 450 | /* Wrap around if need be*/ | ||
| 451 | read_ptr &= ~(drvdata->buffer_depth - 1); | ||
| 452 | /* let the decoder know we've skipped ahead */ | ||
| 453 | local_inc(&buf->lost); | ||
| 454 | } | ||
| 455 | |||
| 456 | /* finally tell HW where we want to start reading from */ | ||
| 457 | writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER); | ||
| 458 | |||
| 459 | cur = buf->cur; | ||
| 460 | offset = buf->offset; | ||
| 461 | for (i = 0; i < to_read; i += 4) { | ||
| 462 | buf_ptr = buf->data_pages[cur] + offset; | ||
| 463 | read_data = readl_relaxed(drvdata->base + | ||
| 464 | ETB_RAM_READ_DATA_REG); | ||
| 465 | *buf_ptr++ = read_data >> 0; | ||
| 466 | *buf_ptr++ = read_data >> 8; | ||
| 467 | *buf_ptr++ = read_data >> 16; | ||
| 468 | *buf_ptr++ = read_data >> 24; | ||
| 469 | |||
| 470 | offset += 4; | ||
| 471 | if (offset >= PAGE_SIZE) { | ||
| 472 | offset = 0; | ||
| 473 | cur++; | ||
| 474 | /* wrap around at the end of the buffer */ | ||
| 475 | cur &= buf->nr_pages - 1; | ||
| 476 | } | ||
| 477 | } | ||
| 478 | |||
| 479 | /* reset ETB buffer for next run */ | ||
| 480 | writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER); | ||
| 481 | writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER); | ||
| 482 | |||
| 483 | /* | ||
| 484 | * In snapshot mode all we have to do is communicate to | ||
| 485 | * perf_aux_output_end() the address of the current head. In full | ||
| 486 | * trace mode the same function expects a size to move rb->aux_head | ||
| 487 | * forward. | ||
| 488 | */ | ||
| 489 | if (buf->snapshot) | ||
| 490 | local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); | ||
| 491 | else | ||
| 492 | local_add(to_read, &buf->data_size); | ||
| 493 | |||
| 494 | etb_enable_hw(drvdata); | ||
| 495 | CS_LOCK(drvdata->base); | ||
| 496 | } | ||
| 497 | |||
| 255 | static const struct coresight_ops_sink etb_sink_ops = { | 498 | static const struct coresight_ops_sink etb_sink_ops = { |
| 256 | .enable = etb_enable, | 499 | .enable = etb_enable, |
| 257 | .disable = etb_disable, | 500 | .disable = etb_disable, |
| 501 | .alloc_buffer = etb_alloc_buffer, | ||
| 502 | .free_buffer = etb_free_buffer, | ||
| 503 | .set_buffer = etb_set_buffer, | ||
| 504 | .reset_buffer = etb_reset_buffer, | ||
| 505 | .update_buffer = etb_update_buffer, | ||
| 258 | }; | 506 | }; |
| 259 | 507 | ||
| 260 | static const struct coresight_ops etb_cs_ops = { | 508 | static const struct coresight_ops etb_cs_ops = { |
| @@ -266,7 +514,7 @@ static void etb_dump(struct etb_drvdata *drvdata) | |||
| 266 | unsigned long flags; | 514 | unsigned long flags; |
| 267 | 515 | ||
| 268 | spin_lock_irqsave(&drvdata->spinlock, flags); | 516 | spin_lock_irqsave(&drvdata->spinlock, flags); |
| 269 | if (drvdata->enable) { | 517 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { |
| 270 | etb_disable_hw(drvdata); | 518 | etb_disable_hw(drvdata); |
| 271 | etb_dump_hw(drvdata); | 519 | etb_dump_hw(drvdata); |
| 272 | etb_enable_hw(drvdata); | 520 | etb_enable_hw(drvdata); |
| @@ -281,7 +529,7 @@ static int etb_open(struct inode *inode, struct file *file) | |||
| 281 | struct etb_drvdata *drvdata = container_of(file->private_data, | 529 | struct etb_drvdata *drvdata = container_of(file->private_data, |
| 282 | struct etb_drvdata, miscdev); | 530 | struct etb_drvdata, miscdev); |
| 283 | 531 | ||
| 284 | if (atomic_cmpxchg(&drvdata->in_use, 0, 1)) | 532 | if (local_cmpxchg(&drvdata->reading, 0, 1)) |
| 285 | return -EBUSY; | 533 | return -EBUSY; |
| 286 | 534 | ||
| 287 | dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); | 535 | dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); |
| @@ -317,7 +565,7 @@ static int etb_release(struct inode *inode, struct file *file) | |||
| 317 | { | 565 | { |
| 318 | struct etb_drvdata *drvdata = container_of(file->private_data, | 566 | struct etb_drvdata *drvdata = container_of(file->private_data, |
| 319 | struct etb_drvdata, miscdev); | 567 | struct etb_drvdata, miscdev); |
| 320 | atomic_set(&drvdata->in_use, 0); | 568 | local_set(&drvdata->reading, 0); |
| 321 | 569 | ||
| 322 | dev_dbg(drvdata->dev, "%s: released\n", __func__); | 570 | dev_dbg(drvdata->dev, "%s: released\n", __func__); |
| 323 | return 0; | 571 | return 0; |
| @@ -489,15 +737,6 @@ err_misc_register: | |||
| 489 | return ret; | 737 | return ret; |
| 490 | } | 738 | } |
| 491 | 739 | ||
| 492 | static int etb_remove(struct amba_device *adev) | ||
| 493 | { | ||
| 494 | struct etb_drvdata *drvdata = amba_get_drvdata(adev); | ||
| 495 | |||
| 496 | misc_deregister(&drvdata->miscdev); | ||
| 497 | coresight_unregister(drvdata->csdev); | ||
| 498 | return 0; | ||
| 499 | } | ||
| 500 | |||
| 501 | #ifdef CONFIG_PM | 740 | #ifdef CONFIG_PM |
| 502 | static int etb_runtime_suspend(struct device *dev) | 741 | static int etb_runtime_suspend(struct device *dev) |
| 503 | { | 742 | { |
| @@ -537,14 +776,10 @@ static struct amba_driver etb_driver = { | |||
| 537 | .name = "coresight-etb10", | 776 | .name = "coresight-etb10", |
| 538 | .owner = THIS_MODULE, | 777 | .owner = THIS_MODULE, |
| 539 | .pm = &etb_dev_pm_ops, | 778 | .pm = &etb_dev_pm_ops, |
| 779 | .suppress_bind_attrs = true, | ||
| 540 | 780 | ||
| 541 | }, | 781 | }, |
| 542 | .probe = etb_probe, | 782 | .probe = etb_probe, |
| 543 | .remove = etb_remove, | ||
| 544 | .id_table = etb_ids, | 783 | .id_table = etb_ids, |
| 545 | }; | 784 | }; |
| 546 | 785 | builtin_amba_driver(etb_driver); | |
| 547 | module_amba_driver(etb_driver); | ||
| 548 | |||
| 549 | MODULE_LICENSE("GPL v2"); | ||
| 550 | MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c new file mode 100644 index 000000000000..36153a77e982 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c | |||
| @@ -0,0 +1,393 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
| 3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/coresight.h> | ||
| 19 | #include <linux/coresight-pmu.h> | ||
| 20 | #include <linux/cpumask.h> | ||
| 21 | #include <linux/device.h> | ||
| 22 | #include <linux/list.h> | ||
| 23 | #include <linux/mm.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/perf_event.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | #include <linux/types.h> | ||
| 28 | #include <linux/workqueue.h> | ||
| 29 | |||
| 30 | #include "coresight-priv.h" | ||
| 31 | |||
| 32 | static struct pmu etm_pmu; | ||
| 33 | static bool etm_perf_up; | ||
| 34 | |||
| 35 | /** | ||
| 36 | * struct etm_event_data - Coresight specifics associated to an event | ||
| 37 | * @work: Handle to free allocated memory outside IRQ context. | ||
| 38 | * @mask: Hold the CPU(s) this event was set for. | ||
| 39 | * @snk_config: The sink configuration. | ||
| 40 | * @path: An array of path, each slot for one CPU. | ||
| 41 | */ | ||
| 42 | struct etm_event_data { | ||
| 43 | struct work_struct work; | ||
| 44 | cpumask_t mask; | ||
| 45 | void *snk_config; | ||
| 46 | struct list_head **path; | ||
| 47 | }; | ||
| 48 | |||
| 49 | static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle); | ||
| 50 | static DEFINE_PER_CPU(struct coresight_device *, csdev_src); | ||
| 51 | |||
| 52 | /* ETMv3.5/PTM's ETMCR is 'config' */ | ||
| 53 | PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC)); | ||
| 54 | PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS)); | ||
| 55 | |||
| 56 | static struct attribute *etm_config_formats_attr[] = { | ||
| 57 | &format_attr_cycacc.attr, | ||
| 58 | &format_attr_timestamp.attr, | ||
| 59 | NULL, | ||
| 60 | }; | ||
| 61 | |||
| 62 | static struct attribute_group etm_pmu_format_group = { | ||
| 63 | .name = "format", | ||
| 64 | .attrs = etm_config_formats_attr, | ||
| 65 | }; | ||
| 66 | |||
| 67 | static const struct attribute_group *etm_pmu_attr_groups[] = { | ||
| 68 | &etm_pmu_format_group, | ||
| 69 | NULL, | ||
| 70 | }; | ||
| 71 | |||
| 72 | static void etm_event_read(struct perf_event *event) {} | ||
| 73 | |||
| 74 | static int etm_event_init(struct perf_event *event) | ||
| 75 | { | ||
| 76 | if (event->attr.type != etm_pmu.type) | ||
| 77 | return -ENOENT; | ||
| 78 | |||
| 79 | return 0; | ||
| 80 | } | ||
| 81 | |||
| 82 | static void free_event_data(struct work_struct *work) | ||
| 83 | { | ||
| 84 | int cpu; | ||
| 85 | cpumask_t *mask; | ||
| 86 | struct etm_event_data *event_data; | ||
| 87 | struct coresight_device *sink; | ||
| 88 | |||
| 89 | event_data = container_of(work, struct etm_event_data, work); | ||
| 90 | mask = &event_data->mask; | ||
| 91 | /* | ||
| 92 | * First deal with the sink configuration. See comment in | ||
| 93 | * etm_setup_aux() about why we take the first available path. | ||
| 94 | */ | ||
| 95 | if (event_data->snk_config) { | ||
| 96 | cpu = cpumask_first(mask); | ||
| 97 | sink = coresight_get_sink(event_data->path[cpu]); | ||
| 98 | if (sink_ops(sink)->free_buffer) | ||
| 99 | sink_ops(sink)->free_buffer(event_data->snk_config); | ||
| 100 | } | ||
| 101 | |||
| 102 | for_each_cpu(cpu, mask) { | ||
| 103 | if (event_data->path[cpu]) | ||
| 104 | coresight_release_path(event_data->path[cpu]); | ||
| 105 | } | ||
| 106 | |||
| 107 | kfree(event_data->path); | ||
| 108 | kfree(event_data); | ||
| 109 | } | ||
| 110 | |||
| 111 | static void *alloc_event_data(int cpu) | ||
| 112 | { | ||
| 113 | int size; | ||
| 114 | cpumask_t *mask; | ||
| 115 | struct etm_event_data *event_data; | ||
| 116 | |||
| 117 | /* First get memory for the session's data */ | ||
| 118 | event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL); | ||
| 119 | if (!event_data) | ||
| 120 | return NULL; | ||
| 121 | |||
| 122 | /* Make sure nothing disappears under us */ | ||
| 123 | get_online_cpus(); | ||
| 124 | size = num_online_cpus(); | ||
| 125 | |||
| 126 | mask = &event_data->mask; | ||
| 127 | if (cpu != -1) | ||
| 128 | cpumask_set_cpu(cpu, mask); | ||
| 129 | else | ||
| 130 | cpumask_copy(mask, cpu_online_mask); | ||
| 131 | put_online_cpus(); | ||
| 132 | |||
| 133 | /* | ||
| 134 | * Each CPU has a single path between source and destination. As such | ||
| 135 | * allocate an array using CPU numbers as indexes. That way a path | ||
| 136 | * for any CPU can easily be accessed at any given time. We proceed | ||
| 137 | * the same way for sessions involving a single CPU. The cost of | ||
| 138 | * unused memory when dealing with single CPU trace scenarios is small | ||
| 139 | * compared to the cost of searching through an optimized array. | ||
| 140 | */ | ||
| 141 | event_data->path = kcalloc(size, | ||
| 142 | sizeof(struct list_head *), GFP_KERNEL); | ||
| 143 | if (!event_data->path) { | ||
| 144 | kfree(event_data); | ||
| 145 | return NULL; | ||
| 146 | } | ||
| 147 | |||
| 148 | return event_data; | ||
| 149 | } | ||
| 150 | |||
| 151 | static void etm_free_aux(void *data) | ||
| 152 | { | ||
| 153 | struct etm_event_data *event_data = data; | ||
| 154 | |||
| 155 | schedule_work(&event_data->work); | ||
| 156 | } | ||
| 157 | |||
| 158 | static void *etm_setup_aux(int event_cpu, void **pages, | ||
| 159 | int nr_pages, bool overwrite) | ||
| 160 | { | ||
| 161 | int cpu; | ||
| 162 | cpumask_t *mask; | ||
| 163 | struct coresight_device *sink; | ||
| 164 | struct etm_event_data *event_data = NULL; | ||
| 165 | |||
| 166 | event_data = alloc_event_data(event_cpu); | ||
| 167 | if (!event_data) | ||
| 168 | return NULL; | ||
| 169 | |||
| 170 | INIT_WORK(&event_data->work, free_event_data); | ||
| 171 | |||
| 172 | mask = &event_data->mask; | ||
| 173 | |||
| 174 | /* Setup the path for each CPU in a trace session */ | ||
| 175 | for_each_cpu(cpu, mask) { | ||
| 176 | struct coresight_device *csdev; | ||
| 177 | |||
| 178 | csdev = per_cpu(csdev_src, cpu); | ||
| 179 | if (!csdev) | ||
| 180 | goto err; | ||
| 181 | |||
| 182 | /* | ||
| 183 | * Building a path doesn't enable it, it simply builds a | ||
| 184 | * list of devices from source to sink that can be | ||
| 185 | * referenced later when the path is actually needed. | ||
| 186 | */ | ||
| 187 | event_data->path[cpu] = coresight_build_path(csdev); | ||
| 188 | if (!event_data->path[cpu]) | ||
| 189 | goto err; | ||
| 190 | } | ||
| 191 | |||
| 192 | /* | ||
| 193 | * In theory nothing prevent tracers in a trace session from being | ||
| 194 | * associated with different sinks, nor having a sink per tracer. But | ||
| 195 | * until we have HW with this kind of topology and a way to convey | ||
| 196 | * sink assignement from the perf cmd line we need to assume tracers | ||
| 197 | * in a trace session are using the same sink. Therefore pick the sink | ||
| 198 | * found at the end of the first available path. | ||
| 199 | */ | ||
| 200 | cpu = cpumask_first(mask); | ||
| 201 | /* Grab the sink at the end of the path */ | ||
| 202 | sink = coresight_get_sink(event_data->path[cpu]); | ||
| 203 | if (!sink) | ||
| 204 | goto err; | ||
| 205 | |||
| 206 | if (!sink_ops(sink)->alloc_buffer) | ||
| 207 | goto err; | ||
| 208 | |||
| 209 | /* Get the AUX specific data from the sink buffer */ | ||
| 210 | event_data->snk_config = | ||
| 211 | sink_ops(sink)->alloc_buffer(sink, cpu, pages, | ||
| 212 | nr_pages, overwrite); | ||
| 213 | if (!event_data->snk_config) | ||
| 214 | goto err; | ||
| 215 | |||
| 216 | out: | ||
| 217 | return event_data; | ||
| 218 | |||
| 219 | err: | ||
| 220 | etm_free_aux(event_data); | ||
| 221 | event_data = NULL; | ||
| 222 | goto out; | ||
| 223 | } | ||
| 224 | |||
| 225 | static void etm_event_start(struct perf_event *event, int flags) | ||
| 226 | { | ||
| 227 | int cpu = smp_processor_id(); | ||
| 228 | struct etm_event_data *event_data; | ||
| 229 | struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); | ||
| 230 | struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); | ||
| 231 | |||
| 232 | if (!csdev) | ||
| 233 | goto fail; | ||
| 234 | |||
| 235 | /* | ||
| 236 | * Deal with the ring buffer API and get a handle on the | ||
| 237 | * session's information. | ||
| 238 | */ | ||
| 239 | event_data = perf_aux_output_begin(handle, event); | ||
| 240 | if (!event_data) | ||
| 241 | goto fail; | ||
| 242 | |||
| 243 | /* We need a sink, no need to continue without one */ | ||
| 244 | sink = coresight_get_sink(event_data->path[cpu]); | ||
| 245 | if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer)) | ||
| 246 | goto fail_end_stop; | ||
| 247 | |||
| 248 | /* Configure the sink */ | ||
| 249 | if (sink_ops(sink)->set_buffer(sink, handle, | ||
| 250 | event_data->snk_config)) | ||
| 251 | goto fail_end_stop; | ||
| 252 | |||
| 253 | /* Nothing will happen without a path */ | ||
| 254 | if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF)) | ||
| 255 | goto fail_end_stop; | ||
| 256 | |||
| 257 | /* Tell the perf core the event is alive */ | ||
| 258 | event->hw.state = 0; | ||
| 259 | |||
| 260 | /* Finally enable the tracer */ | ||
| 261 | if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF)) | ||
| 262 | goto fail_end_stop; | ||
| 263 | |||
| 264 | out: | ||
| 265 | return; | ||
| 266 | |||
| 267 | fail_end_stop: | ||
| 268 | perf_aux_output_end(handle, 0, true); | ||
| 269 | fail: | ||
| 270 | event->hw.state = PERF_HES_STOPPED; | ||
| 271 | goto out; | ||
| 272 | } | ||
| 273 | |||
| 274 | static void etm_event_stop(struct perf_event *event, int mode) | ||
| 275 | { | ||
| 276 | bool lost; | ||
| 277 | int cpu = smp_processor_id(); | ||
| 278 | unsigned long size; | ||
| 279 | struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); | ||
| 280 | struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle); | ||
| 281 | struct etm_event_data *event_data = perf_get_aux(handle); | ||
| 282 | |||
| 283 | if (event->hw.state == PERF_HES_STOPPED) | ||
| 284 | return; | ||
| 285 | |||
| 286 | if (!csdev) | ||
| 287 | return; | ||
| 288 | |||
| 289 | sink = coresight_get_sink(event_data->path[cpu]); | ||
| 290 | if (!sink) | ||
| 291 | return; | ||
| 292 | |||
| 293 | /* stop tracer */ | ||
| 294 | source_ops(csdev)->disable(csdev); | ||
| 295 | |||
| 296 | /* tell the core */ | ||
| 297 | event->hw.state = PERF_HES_STOPPED; | ||
| 298 | |||
| 299 | if (mode & PERF_EF_UPDATE) { | ||
| 300 | if (WARN_ON_ONCE(handle->event != event)) | ||
| 301 | return; | ||
| 302 | |||
| 303 | /* update trace information */ | ||
| 304 | if (!sink_ops(sink)->update_buffer) | ||
| 305 | return; | ||
| 306 | |||
| 307 | sink_ops(sink)->update_buffer(sink, handle, | ||
| 308 | event_data->snk_config); | ||
| 309 | |||
| 310 | if (!sink_ops(sink)->reset_buffer) | ||
| 311 | return; | ||
| 312 | |||
| 313 | size = sink_ops(sink)->reset_buffer(sink, handle, | ||
| 314 | event_data->snk_config, | ||
| 315 | &lost); | ||
| 316 | |||
| 317 | perf_aux_output_end(handle, size, lost); | ||
| 318 | } | ||
| 319 | |||
| 320 | /* Disabling the path make its elements available to other sessions */ | ||
| 321 | coresight_disable_path(event_data->path[cpu]); | ||
| 322 | } | ||
| 323 | |||
| 324 | static int etm_event_add(struct perf_event *event, int mode) | ||
| 325 | { | ||
| 326 | int ret = 0; | ||
| 327 | struct hw_perf_event *hwc = &event->hw; | ||
| 328 | |||
| 329 | if (mode & PERF_EF_START) { | ||
| 330 | etm_event_start(event, 0); | ||
| 331 | if (hwc->state & PERF_HES_STOPPED) | ||
| 332 | ret = -EINVAL; | ||
| 333 | } else { | ||
| 334 | hwc->state = PERF_HES_STOPPED; | ||
| 335 | } | ||
| 336 | |||
| 337 | return ret; | ||
| 338 | } | ||
| 339 | |||
| 340 | static void etm_event_del(struct perf_event *event, int mode) | ||
| 341 | { | ||
| 342 | etm_event_stop(event, PERF_EF_UPDATE); | ||
| 343 | } | ||
| 344 | |||
| 345 | int etm_perf_symlink(struct coresight_device *csdev, bool link) | ||
| 346 | { | ||
| 347 | char entry[sizeof("cpu9999999")]; | ||
| 348 | int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev); | ||
| 349 | struct device *pmu_dev = etm_pmu.dev; | ||
| 350 | struct device *cs_dev = &csdev->dev; | ||
| 351 | |||
| 352 | sprintf(entry, "cpu%d", cpu); | ||
| 353 | |||
| 354 | if (!etm_perf_up) | ||
| 355 | return -EPROBE_DEFER; | ||
| 356 | |||
| 357 | if (link) { | ||
| 358 | ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry); | ||
| 359 | if (ret) | ||
| 360 | return ret; | ||
| 361 | per_cpu(csdev_src, cpu) = csdev; | ||
| 362 | } else { | ||
| 363 | sysfs_remove_link(&pmu_dev->kobj, entry); | ||
| 364 | per_cpu(csdev_src, cpu) = NULL; | ||
| 365 | } | ||
| 366 | |||
| 367 | return 0; | ||
| 368 | } | ||
| 369 | |||
| 370 | static int __init etm_perf_init(void) | ||
| 371 | { | ||
| 372 | int ret; | ||
| 373 | |||
| 374 | etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE; | ||
| 375 | |||
| 376 | etm_pmu.attr_groups = etm_pmu_attr_groups; | ||
| 377 | etm_pmu.task_ctx_nr = perf_sw_context; | ||
| 378 | etm_pmu.read = etm_event_read; | ||
| 379 | etm_pmu.event_init = etm_event_init; | ||
| 380 | etm_pmu.setup_aux = etm_setup_aux; | ||
| 381 | etm_pmu.free_aux = etm_free_aux; | ||
| 382 | etm_pmu.start = etm_event_start; | ||
| 383 | etm_pmu.stop = etm_event_stop; | ||
| 384 | etm_pmu.add = etm_event_add; | ||
| 385 | etm_pmu.del = etm_event_del; | ||
| 386 | |||
| 387 | ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1); | ||
| 388 | if (ret == 0) | ||
| 389 | etm_perf_up = true; | ||
| 390 | |||
| 391 | return ret; | ||
| 392 | } | ||
| 393 | module_init(etm_perf_init); | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h new file mode 100644 index 000000000000..87f5a134eb6f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm-perf.h | |||
| @@ -0,0 +1,32 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
| 3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef _CORESIGHT_ETM_PERF_H | ||
| 19 | #define _CORESIGHT_ETM_PERF_H | ||
| 20 | |||
| 21 | struct coresight_device; | ||
| 22 | |||
| 23 | #ifdef CONFIG_CORESIGHT | ||
| 24 | int etm_perf_symlink(struct coresight_device *csdev, bool link); | ||
| 25 | |||
| 26 | #else | ||
| 27 | static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) | ||
| 28 | { return -EINVAL; } | ||
| 29 | |||
| 30 | #endif /* CONFIG_CORESIGHT */ | ||
| 31 | |||
| 32 | #endif | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h index b4481eb29304..51597cb2c08a 100644 --- a/drivers/hwtracing/coresight/coresight-etm.h +++ b/drivers/hwtracing/coresight/coresight-etm.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #ifndef _CORESIGHT_CORESIGHT_ETM_H | 13 | #ifndef _CORESIGHT_CORESIGHT_ETM_H |
| 14 | #define _CORESIGHT_CORESIGHT_ETM_H | 14 | #define _CORESIGHT_CORESIGHT_ETM_H |
| 15 | 15 | ||
| 16 | #include <asm/local.h> | ||
| 16 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
| 17 | #include "coresight-priv.h" | 18 | #include "coresight-priv.h" |
| 18 | 19 | ||
| @@ -109,7 +110,10 @@ | |||
| 109 | #define ETM_MODE_STALL BIT(2) | 110 | #define ETM_MODE_STALL BIT(2) |
| 110 | #define ETM_MODE_TIMESTAMP BIT(3) | 111 | #define ETM_MODE_TIMESTAMP BIT(3) |
| 111 | #define ETM_MODE_CTXID BIT(4) | 112 | #define ETM_MODE_CTXID BIT(4) |
| 112 | #define ETM_MODE_ALL 0x1f | 113 | #define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \ |
| 114 | ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \ | ||
| 115 | ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \ | ||
| 116 | ETM_MODE_EXCL_USER) | ||
| 113 | 117 | ||
| 114 | #define ETM_SQR_MASK 0x3 | 118 | #define ETM_SQR_MASK 0x3 |
| 115 | #define ETM_TRACEID_MASK 0x3f | 119 | #define ETM_TRACEID_MASK 0x3f |
| @@ -136,35 +140,16 @@ | |||
| 136 | #define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \ | 140 | #define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \ |
| 137 | ETM_ADD_COMP_0 | \ | 141 | ETM_ADD_COMP_0 | \ |
| 138 | ETM_EVENT_NOT_A) | 142 | ETM_EVENT_NOT_A) |
| 143 | |||
| 139 | /** | 144 | /** |
| 140 | * struct etm_drvdata - specifics associated to an ETM component | 145 | * struct etm_config - configuration information related to an ETM |
| 141 | * @base: memory mapped base address for this component. | ||
| 142 | * @dev: the device entity associated to this component. | ||
| 143 | * @atclk: optional clock for the core parts of the ETM. | ||
| 144 | * @csdev: component vitals needed by the framework. | ||
| 145 | * @spinlock: only one at a time pls. | ||
| 146 | * @cpu: the cpu this component is affined to. | ||
| 147 | * @port_size: port size as reported by ETMCR bit 4-6 and 21. | ||
| 148 | * @arch: ETM/PTM version number. | ||
| 149 | * @use_cpu14: true if management registers need to be accessed via CP14. | ||
| 150 | * @enable: is this ETM/PTM currently tracing. | ||
| 151 | * @sticky_enable: true if ETM base configuration has been done. | ||
| 152 | * @boot_enable:true if we should start tracing at boot time. | ||
| 153 | * @os_unlock: true if access to management registers is allowed. | ||
| 154 | * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR. | ||
| 155 | * @nr_cntr: Number of counters as found in ETMCCR bit 13-15. | ||
| 156 | * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19. | ||
| 157 | * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22. | ||
| 158 | * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25. | ||
| 159 | * @etmccr: value of register ETMCCR. | ||
| 160 | * @etmccer: value of register ETMCCER. | ||
| 161 | * @traceid: value of the current ID for this component. | ||
| 162 | * @mode: controls various modes supported by this ETM/PTM. | 146 | * @mode: controls various modes supported by this ETM/PTM. |
| 163 | * @ctrl: used in conjunction with @mode. | 147 | * @ctrl: used in conjunction with @mode. |
| 164 | * @trigger_event: setting for register ETMTRIGGER. | 148 | * @trigger_event: setting for register ETMTRIGGER. |
| 165 | * @startstop_ctrl: setting for register ETMTSSCR. | 149 | * @startstop_ctrl: setting for register ETMTSSCR. |
| 166 | * @enable_event: setting for register ETMTEEVR. | 150 | * @enable_event: setting for register ETMTEEVR. |
| 167 | * @enable_ctrl1: setting for register ETMTECR1. | 151 | * @enable_ctrl1: setting for register ETMTECR1. |
| 152 | * @enable_ctrl2: setting for register ETMTECR2. | ||
| 168 | * @fifofull_level: setting for register ETMFFLR. | 153 | * @fifofull_level: setting for register ETMFFLR. |
| 169 | * @addr_idx: index for the address comparator selection. | 154 | * @addr_idx: index for the address comparator selection. |
| 170 | * @addr_val: value for address comparator register. | 155 | * @addr_val: value for address comparator register. |
| @@ -189,36 +174,16 @@ | |||
| 189 | * @ctxid_mask: mask applicable to all the context IDs. | 174 | * @ctxid_mask: mask applicable to all the context IDs. |
| 190 | * @sync_freq: Synchronisation frequency. | 175 | * @sync_freq: Synchronisation frequency. |
| 191 | * @timestamp_event: Defines an event that requests the insertion | 176 | * @timestamp_event: Defines an event that requests the insertion |
| 192 | of a timestamp into the trace stream. | 177 | * of a timestamp into the trace stream. |
| 193 | */ | 178 | */ |
| 194 | struct etm_drvdata { | 179 | struct etm_config { |
| 195 | void __iomem *base; | ||
| 196 | struct device *dev; | ||
| 197 | struct clk *atclk; | ||
| 198 | struct coresight_device *csdev; | ||
| 199 | spinlock_t spinlock; | ||
| 200 | int cpu; | ||
| 201 | int port_size; | ||
| 202 | u8 arch; | ||
| 203 | bool use_cp14; | ||
| 204 | bool enable; | ||
| 205 | bool sticky_enable; | ||
| 206 | bool boot_enable; | ||
| 207 | bool os_unlock; | ||
| 208 | u8 nr_addr_cmp; | ||
| 209 | u8 nr_cntr; | ||
| 210 | u8 nr_ext_inp; | ||
| 211 | u8 nr_ext_out; | ||
| 212 | u8 nr_ctxid_cmp; | ||
| 213 | u32 etmccr; | ||
| 214 | u32 etmccer; | ||
| 215 | u32 traceid; | ||
| 216 | u32 mode; | 180 | u32 mode; |
| 217 | u32 ctrl; | 181 | u32 ctrl; |
| 218 | u32 trigger_event; | 182 | u32 trigger_event; |
| 219 | u32 startstop_ctrl; | 183 | u32 startstop_ctrl; |
| 220 | u32 enable_event; | 184 | u32 enable_event; |
| 221 | u32 enable_ctrl1; | 185 | u32 enable_ctrl1; |
| 186 | u32 enable_ctrl2; | ||
| 222 | u32 fifofull_level; | 187 | u32 fifofull_level; |
| 223 | u8 addr_idx; | 188 | u8 addr_idx; |
| 224 | u32 addr_val[ETM_MAX_ADDR_CMP]; | 189 | u32 addr_val[ETM_MAX_ADDR_CMP]; |
| @@ -244,6 +209,56 @@ struct etm_drvdata { | |||
| 244 | u32 timestamp_event; | 209 | u32 timestamp_event; |
| 245 | }; | 210 | }; |
| 246 | 211 | ||
| 212 | /** | ||
| 213 | * struct etm_drvdata - specifics associated to an ETM component | ||
| 214 | * @base: memory mapped base address for this component. | ||
| 215 | * @dev: the device entity associated to this component. | ||
| 216 | * @atclk: optional clock for the core parts of the ETM. | ||
| 217 | * @csdev: component vitals needed by the framework. | ||
| 218 | * @spinlock: only one at a time pls. | ||
| 219 | * @cpu: the cpu this component is affined to. | ||
| 220 | * @port_size: port size as reported by ETMCR bit 4-6 and 21. | ||
| 221 | * @arch: ETM/PTM version number. | ||
| 222 | * @use_cpu14: true if management registers need to be accessed via CP14. | ||
| 223 | * @mode: this tracer's mode, i.e sysFS, Perf or disabled. | ||
| 224 | * @sticky_enable: true if ETM base configuration has been done. | ||
| 225 | * @boot_enable:true if we should start tracing at boot time. | ||
| 226 | * @os_unlock: true if access to management registers is allowed. | ||
| 227 | * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR. | ||
| 228 | * @nr_cntr: Number of counters as found in ETMCCR bit 13-15. | ||
| 229 | * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19. | ||
| 230 | * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22. | ||
| 231 | * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25. | ||
| 232 | * @etmccr: value of register ETMCCR. | ||
| 233 | * @etmccer: value of register ETMCCER. | ||
| 234 | * @traceid: value of the current ID for this component. | ||
| 235 | * @config: structure holding configuration parameters. | ||
| 236 | */ | ||
| 237 | struct etm_drvdata { | ||
| 238 | void __iomem *base; | ||
| 239 | struct device *dev; | ||
| 240 | struct clk *atclk; | ||
| 241 | struct coresight_device *csdev; | ||
| 242 | spinlock_t spinlock; | ||
| 243 | int cpu; | ||
| 244 | int port_size; | ||
| 245 | u8 arch; | ||
| 246 | bool use_cp14; | ||
| 247 | local_t mode; | ||
| 248 | bool sticky_enable; | ||
| 249 | bool boot_enable; | ||
| 250 | bool os_unlock; | ||
| 251 | u8 nr_addr_cmp; | ||
| 252 | u8 nr_cntr; | ||
| 253 | u8 nr_ext_inp; | ||
| 254 | u8 nr_ext_out; | ||
| 255 | u8 nr_ctxid_cmp; | ||
| 256 | u32 etmccr; | ||
| 257 | u32 etmccer; | ||
| 258 | u32 traceid; | ||
| 259 | struct etm_config config; | ||
| 260 | }; | ||
| 261 | |||
| 247 | enum etm_addr_type { | 262 | enum etm_addr_type { |
| 248 | ETM_ADDR_TYPE_NONE, | 263 | ETM_ADDR_TYPE_NONE, |
| 249 | ETM_ADDR_TYPE_SINGLE, | 264 | ETM_ADDR_TYPE_SINGLE, |
| @@ -251,4 +266,39 @@ enum etm_addr_type { | |||
| 251 | ETM_ADDR_TYPE_START, | 266 | ETM_ADDR_TYPE_START, |
| 252 | ETM_ADDR_TYPE_STOP, | 267 | ETM_ADDR_TYPE_STOP, |
| 253 | }; | 268 | }; |
| 269 | |||
| 270 | static inline void etm_writel(struct etm_drvdata *drvdata, | ||
| 271 | u32 val, u32 off) | ||
| 272 | { | ||
| 273 | if (drvdata->use_cp14) { | ||
| 274 | if (etm_writel_cp14(off, val)) { | ||
| 275 | dev_err(drvdata->dev, | ||
| 276 | "invalid CP14 access to ETM reg: %#x", off); | ||
| 277 | } | ||
| 278 | } else { | ||
| 279 | writel_relaxed(val, drvdata->base + off); | ||
| 280 | } | ||
| 281 | } | ||
| 282 | |||
| 283 | static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off) | ||
| 284 | { | ||
| 285 | u32 val; | ||
| 286 | |||
| 287 | if (drvdata->use_cp14) { | ||
| 288 | if (etm_readl_cp14(off, &val)) { | ||
| 289 | dev_err(drvdata->dev, | ||
| 290 | "invalid CP14 access to ETM reg: %#x", off); | ||
| 291 | } | ||
| 292 | } else { | ||
| 293 | val = readl_relaxed(drvdata->base + off); | ||
| 294 | } | ||
| 295 | |||
| 296 | return val; | ||
| 297 | } | ||
| 298 | |||
| 299 | extern const struct attribute_group *coresight_etm_groups[]; | ||
| 300 | int etm_get_trace_id(struct etm_drvdata *drvdata); | ||
| 301 | void etm_set_default(struct etm_config *config); | ||
| 302 | void etm_config_trace_mode(struct etm_config *config); | ||
| 303 | struct etm_config *get_etm_config(struct etm_drvdata *drvdata); | ||
| 254 | #endif | 304 | #endif |
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c new file mode 100644 index 000000000000..cbb4046c1070 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c | |||
| @@ -0,0 +1,1272 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
| 3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/pm_runtime.h> | ||
| 19 | #include <linux/sysfs.h> | ||
| 20 | #include "coresight-etm.h" | ||
| 21 | |||
| 22 | static ssize_t nr_addr_cmp_show(struct device *dev, | ||
| 23 | struct device_attribute *attr, char *buf) | ||
| 24 | { | ||
| 25 | unsigned long val; | ||
| 26 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 27 | |||
| 28 | val = drvdata->nr_addr_cmp; | ||
| 29 | return sprintf(buf, "%#lx\n", val); | ||
| 30 | } | ||
| 31 | static DEVICE_ATTR_RO(nr_addr_cmp); | ||
| 32 | |||
| 33 | static ssize_t nr_cntr_show(struct device *dev, | ||
| 34 | struct device_attribute *attr, char *buf) | ||
| 35 | { unsigned long val; | ||
| 36 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 37 | |||
| 38 | val = drvdata->nr_cntr; | ||
| 39 | return sprintf(buf, "%#lx\n", val); | ||
| 40 | } | ||
| 41 | static DEVICE_ATTR_RO(nr_cntr); | ||
| 42 | |||
| 43 | static ssize_t nr_ctxid_cmp_show(struct device *dev, | ||
| 44 | struct device_attribute *attr, char *buf) | ||
| 45 | { | ||
| 46 | unsigned long val; | ||
| 47 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 48 | |||
| 49 | val = drvdata->nr_ctxid_cmp; | ||
| 50 | return sprintf(buf, "%#lx\n", val); | ||
| 51 | } | ||
| 52 | static DEVICE_ATTR_RO(nr_ctxid_cmp); | ||
| 53 | |||
| 54 | static ssize_t etmsr_show(struct device *dev, | ||
| 55 | struct device_attribute *attr, char *buf) | ||
| 56 | { | ||
| 57 | unsigned long flags, val; | ||
| 58 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 59 | |||
| 60 | pm_runtime_get_sync(drvdata->dev); | ||
| 61 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 62 | CS_UNLOCK(drvdata->base); | ||
| 63 | |||
| 64 | val = etm_readl(drvdata, ETMSR); | ||
| 65 | |||
| 66 | CS_LOCK(drvdata->base); | ||
| 67 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 68 | pm_runtime_put(drvdata->dev); | ||
| 69 | |||
| 70 | return sprintf(buf, "%#lx\n", val); | ||
| 71 | } | ||
| 72 | static DEVICE_ATTR_RO(etmsr); | ||
| 73 | |||
| 74 | static ssize_t reset_store(struct device *dev, | ||
| 75 | struct device_attribute *attr, | ||
| 76 | const char *buf, size_t size) | ||
| 77 | { | ||
| 78 | int i, ret; | ||
| 79 | unsigned long val; | ||
| 80 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 81 | struct etm_config *config = &drvdata->config; | ||
| 82 | |||
| 83 | ret = kstrtoul(buf, 16, &val); | ||
| 84 | if (ret) | ||
| 85 | return ret; | ||
| 86 | |||
| 87 | if (val) { | ||
| 88 | spin_lock(&drvdata->spinlock); | ||
| 89 | memset(config, 0, sizeof(struct etm_config)); | ||
| 90 | config->mode = ETM_MODE_EXCLUDE; | ||
| 91 | config->trigger_event = ETM_DEFAULT_EVENT_VAL; | ||
| 92 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { | ||
| 93 | config->addr_type[i] = ETM_ADDR_TYPE_NONE; | ||
| 94 | } | ||
| 95 | |||
| 96 | etm_set_default(config); | ||
| 97 | spin_unlock(&drvdata->spinlock); | ||
| 98 | } | ||
| 99 | |||
| 100 | return size; | ||
| 101 | } | ||
| 102 | static DEVICE_ATTR_WO(reset); | ||
| 103 | |||
| 104 | static ssize_t mode_show(struct device *dev, | ||
| 105 | struct device_attribute *attr, char *buf) | ||
| 106 | { | ||
| 107 | unsigned long val; | ||
| 108 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 109 | struct etm_config *config = &drvdata->config; | ||
| 110 | |||
| 111 | val = config->mode; | ||
| 112 | return sprintf(buf, "%#lx\n", val); | ||
| 113 | } | ||
| 114 | |||
| 115 | static ssize_t mode_store(struct device *dev, | ||
| 116 | struct device_attribute *attr, | ||
| 117 | const char *buf, size_t size) | ||
| 118 | { | ||
| 119 | int ret; | ||
| 120 | unsigned long val; | ||
| 121 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 122 | struct etm_config *config = &drvdata->config; | ||
| 123 | |||
| 124 | ret = kstrtoul(buf, 16, &val); | ||
| 125 | if (ret) | ||
| 126 | return ret; | ||
| 127 | |||
| 128 | spin_lock(&drvdata->spinlock); | ||
| 129 | config->mode = val & ETM_MODE_ALL; | ||
| 130 | |||
| 131 | if (config->mode & ETM_MODE_EXCLUDE) | ||
| 132 | config->enable_ctrl1 |= ETMTECR1_INC_EXC; | ||
| 133 | else | ||
| 134 | config->enable_ctrl1 &= ~ETMTECR1_INC_EXC; | ||
| 135 | |||
| 136 | if (config->mode & ETM_MODE_CYCACC) | ||
| 137 | config->ctrl |= ETMCR_CYC_ACC; | ||
| 138 | else | ||
| 139 | config->ctrl &= ~ETMCR_CYC_ACC; | ||
| 140 | |||
| 141 | if (config->mode & ETM_MODE_STALL) { | ||
| 142 | if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { | ||
| 143 | dev_warn(drvdata->dev, "stall mode not supported\n"); | ||
| 144 | ret = -EINVAL; | ||
| 145 | goto err_unlock; | ||
| 146 | } | ||
| 147 | config->ctrl |= ETMCR_STALL_MODE; | ||
| 148 | } else | ||
| 149 | config->ctrl &= ~ETMCR_STALL_MODE; | ||
| 150 | |||
| 151 | if (config->mode & ETM_MODE_TIMESTAMP) { | ||
| 152 | if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { | ||
| 153 | dev_warn(drvdata->dev, "timestamp not supported\n"); | ||
| 154 | ret = -EINVAL; | ||
| 155 | goto err_unlock; | ||
| 156 | } | ||
| 157 | config->ctrl |= ETMCR_TIMESTAMP_EN; | ||
| 158 | } else | ||
| 159 | config->ctrl &= ~ETMCR_TIMESTAMP_EN; | ||
| 160 | |||
| 161 | if (config->mode & ETM_MODE_CTXID) | ||
| 162 | config->ctrl |= ETMCR_CTXID_SIZE; | ||
| 163 | else | ||
| 164 | config->ctrl &= ~ETMCR_CTXID_SIZE; | ||
| 165 | |||
| 166 | if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) | ||
| 167 | etm_config_trace_mode(config); | ||
| 168 | |||
| 169 | spin_unlock(&drvdata->spinlock); | ||
| 170 | |||
| 171 | return size; | ||
| 172 | |||
| 173 | err_unlock: | ||
| 174 | spin_unlock(&drvdata->spinlock); | ||
| 175 | return ret; | ||
| 176 | } | ||
| 177 | static DEVICE_ATTR_RW(mode); | ||
| 178 | |||
| 179 | static ssize_t trigger_event_show(struct device *dev, | ||
| 180 | struct device_attribute *attr, char *buf) | ||
| 181 | { | ||
| 182 | unsigned long val; | ||
| 183 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 184 | struct etm_config *config = &drvdata->config; | ||
| 185 | |||
| 186 | val = config->trigger_event; | ||
| 187 | return sprintf(buf, "%#lx\n", val); | ||
| 188 | } | ||
| 189 | |||
| 190 | static ssize_t trigger_event_store(struct device *dev, | ||
| 191 | struct device_attribute *attr, | ||
| 192 | const char *buf, size_t size) | ||
| 193 | { | ||
| 194 | int ret; | ||
| 195 | unsigned long val; | ||
| 196 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 197 | struct etm_config *config = &drvdata->config; | ||
| 198 | |||
| 199 | ret = kstrtoul(buf, 16, &val); | ||
| 200 | if (ret) | ||
| 201 | return ret; | ||
| 202 | |||
| 203 | config->trigger_event = val & ETM_EVENT_MASK; | ||
| 204 | |||
| 205 | return size; | ||
| 206 | } | ||
| 207 | static DEVICE_ATTR_RW(trigger_event); | ||
| 208 | |||
| 209 | static ssize_t enable_event_show(struct device *dev, | ||
| 210 | struct device_attribute *attr, char *buf) | ||
| 211 | { | ||
| 212 | unsigned long val; | ||
| 213 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 214 | struct etm_config *config = &drvdata->config; | ||
| 215 | |||
| 216 | val = config->enable_event; | ||
| 217 | return sprintf(buf, "%#lx\n", val); | ||
| 218 | } | ||
| 219 | |||
| 220 | static ssize_t enable_event_store(struct device *dev, | ||
| 221 | struct device_attribute *attr, | ||
| 222 | const char *buf, size_t size) | ||
| 223 | { | ||
| 224 | int ret; | ||
| 225 | unsigned long val; | ||
| 226 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 227 | struct etm_config *config = &drvdata->config; | ||
| 228 | |||
| 229 | ret = kstrtoul(buf, 16, &val); | ||
| 230 | if (ret) | ||
| 231 | return ret; | ||
| 232 | |||
| 233 | config->enable_event = val & ETM_EVENT_MASK; | ||
| 234 | |||
| 235 | return size; | ||
| 236 | } | ||
| 237 | static DEVICE_ATTR_RW(enable_event); | ||
| 238 | |||
| 239 | static ssize_t fifofull_level_show(struct device *dev, | ||
| 240 | struct device_attribute *attr, char *buf) | ||
| 241 | { | ||
| 242 | unsigned long val; | ||
| 243 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 244 | struct etm_config *config = &drvdata->config; | ||
| 245 | |||
| 246 | val = config->fifofull_level; | ||
| 247 | return sprintf(buf, "%#lx\n", val); | ||
| 248 | } | ||
| 249 | |||
| 250 | static ssize_t fifofull_level_store(struct device *dev, | ||
| 251 | struct device_attribute *attr, | ||
| 252 | const char *buf, size_t size) | ||
| 253 | { | ||
| 254 | int ret; | ||
| 255 | unsigned long val; | ||
| 256 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 257 | struct etm_config *config = &drvdata->config; | ||
| 258 | |||
| 259 | ret = kstrtoul(buf, 16, &val); | ||
| 260 | if (ret) | ||
| 261 | return ret; | ||
| 262 | |||
| 263 | config->fifofull_level = val; | ||
| 264 | |||
| 265 | return size; | ||
| 266 | } | ||
| 267 | static DEVICE_ATTR_RW(fifofull_level); | ||
| 268 | |||
| 269 | static ssize_t addr_idx_show(struct device *dev, | ||
| 270 | struct device_attribute *attr, char *buf) | ||
| 271 | { | ||
| 272 | unsigned long val; | ||
| 273 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 274 | struct etm_config *config = &drvdata->config; | ||
| 275 | |||
| 276 | val = config->addr_idx; | ||
| 277 | return sprintf(buf, "%#lx\n", val); | ||
| 278 | } | ||
| 279 | |||
| 280 | static ssize_t addr_idx_store(struct device *dev, | ||
| 281 | struct device_attribute *attr, | ||
| 282 | const char *buf, size_t size) | ||
| 283 | { | ||
| 284 | int ret; | ||
| 285 | unsigned long val; | ||
| 286 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 287 | struct etm_config *config = &drvdata->config; | ||
| 288 | |||
| 289 | ret = kstrtoul(buf, 16, &val); | ||
| 290 | if (ret) | ||
| 291 | return ret; | ||
| 292 | |||
| 293 | if (val >= drvdata->nr_addr_cmp) | ||
| 294 | return -EINVAL; | ||
| 295 | |||
| 296 | /* | ||
| 297 | * Use spinlock to ensure index doesn't change while it gets | ||
| 298 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 299 | */ | ||
| 300 | spin_lock(&drvdata->spinlock); | ||
| 301 | config->addr_idx = val; | ||
| 302 | spin_unlock(&drvdata->spinlock); | ||
| 303 | |||
| 304 | return size; | ||
| 305 | } | ||
| 306 | static DEVICE_ATTR_RW(addr_idx); | ||
| 307 | |||
| 308 | static ssize_t addr_single_show(struct device *dev, | ||
| 309 | struct device_attribute *attr, char *buf) | ||
| 310 | { | ||
| 311 | u8 idx; | ||
| 312 | unsigned long val; | ||
| 313 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 314 | struct etm_config *config = &drvdata->config; | ||
| 315 | |||
| 316 | spin_lock(&drvdata->spinlock); | ||
| 317 | idx = config->addr_idx; | ||
| 318 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 319 | config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
| 320 | spin_unlock(&drvdata->spinlock); | ||
| 321 | return -EINVAL; | ||
| 322 | } | ||
| 323 | |||
| 324 | val = config->addr_val[idx]; | ||
| 325 | spin_unlock(&drvdata->spinlock); | ||
| 326 | |||
| 327 | return sprintf(buf, "%#lx\n", val); | ||
| 328 | } | ||
| 329 | |||
| 330 | static ssize_t addr_single_store(struct device *dev, | ||
| 331 | struct device_attribute *attr, | ||
| 332 | const char *buf, size_t size) | ||
| 333 | { | ||
| 334 | u8 idx; | ||
| 335 | int ret; | ||
| 336 | unsigned long val; | ||
| 337 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 338 | struct etm_config *config = &drvdata->config; | ||
| 339 | |||
| 340 | ret = kstrtoul(buf, 16, &val); | ||
| 341 | if (ret) | ||
| 342 | return ret; | ||
| 343 | |||
| 344 | spin_lock(&drvdata->spinlock); | ||
| 345 | idx = config->addr_idx; | ||
| 346 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 347 | config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
| 348 | spin_unlock(&drvdata->spinlock); | ||
| 349 | return -EINVAL; | ||
| 350 | } | ||
| 351 | |||
| 352 | config->addr_val[idx] = val; | ||
| 353 | config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; | ||
| 354 | spin_unlock(&drvdata->spinlock); | ||
| 355 | |||
| 356 | return size; | ||
| 357 | } | ||
| 358 | static DEVICE_ATTR_RW(addr_single); | ||
| 359 | |||
| 360 | static ssize_t addr_range_show(struct device *dev, | ||
| 361 | struct device_attribute *attr, char *buf) | ||
| 362 | { | ||
| 363 | u8 idx; | ||
| 364 | unsigned long val1, val2; | ||
| 365 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 366 | struct etm_config *config = &drvdata->config; | ||
| 367 | |||
| 368 | spin_lock(&drvdata->spinlock); | ||
| 369 | idx = config->addr_idx; | ||
| 370 | if (idx % 2 != 0) { | ||
| 371 | spin_unlock(&drvdata->spinlock); | ||
| 372 | return -EPERM; | ||
| 373 | } | ||
| 374 | if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
| 375 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
| 376 | (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
| 377 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
| 378 | spin_unlock(&drvdata->spinlock); | ||
| 379 | return -EPERM; | ||
| 380 | } | ||
| 381 | |||
| 382 | val1 = config->addr_val[idx]; | ||
| 383 | val2 = config->addr_val[idx + 1]; | ||
| 384 | spin_unlock(&drvdata->spinlock); | ||
| 385 | |||
| 386 | return sprintf(buf, "%#lx %#lx\n", val1, val2); | ||
| 387 | } | ||
| 388 | |||
| 389 | static ssize_t addr_range_store(struct device *dev, | ||
| 390 | struct device_attribute *attr, | ||
| 391 | const char *buf, size_t size) | ||
| 392 | { | ||
| 393 | u8 idx; | ||
| 394 | unsigned long val1, val2; | ||
| 395 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 396 | struct etm_config *config = &drvdata->config; | ||
| 397 | |||
| 398 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
| 399 | return -EINVAL; | ||
| 400 | /* Lower address comparator cannot have a higher address value */ | ||
| 401 | if (val1 > val2) | ||
| 402 | return -EINVAL; | ||
| 403 | |||
| 404 | spin_lock(&drvdata->spinlock); | ||
| 405 | idx = config->addr_idx; | ||
| 406 | if (idx % 2 != 0) { | ||
| 407 | spin_unlock(&drvdata->spinlock); | ||
| 408 | return -EPERM; | ||
| 409 | } | ||
| 410 | if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
| 411 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
| 412 | (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
| 413 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
| 414 | spin_unlock(&drvdata->spinlock); | ||
| 415 | return -EPERM; | ||
| 416 | } | ||
| 417 | |||
| 418 | config->addr_val[idx] = val1; | ||
| 419 | config->addr_type[idx] = ETM_ADDR_TYPE_RANGE; | ||
| 420 | config->addr_val[idx + 1] = val2; | ||
| 421 | config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; | ||
| 422 | config->enable_ctrl1 |= (1 << (idx/2)); | ||
| 423 | spin_unlock(&drvdata->spinlock); | ||
| 424 | |||
| 425 | return size; | ||
| 426 | } | ||
| 427 | static DEVICE_ATTR_RW(addr_range); | ||
| 428 | |||
| 429 | static ssize_t addr_start_show(struct device *dev, | ||
| 430 | struct device_attribute *attr, char *buf) | ||
| 431 | { | ||
| 432 | u8 idx; | ||
| 433 | unsigned long val; | ||
| 434 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 435 | struct etm_config *config = &drvdata->config; | ||
| 436 | |||
| 437 | spin_lock(&drvdata->spinlock); | ||
| 438 | idx = config->addr_idx; | ||
| 439 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 440 | config->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
| 441 | spin_unlock(&drvdata->spinlock); | ||
| 442 | return -EPERM; | ||
| 443 | } | ||
| 444 | |||
| 445 | val = config->addr_val[idx]; | ||
| 446 | spin_unlock(&drvdata->spinlock); | ||
| 447 | |||
| 448 | return sprintf(buf, "%#lx\n", val); | ||
| 449 | } | ||
| 450 | |||
| 451 | static ssize_t addr_start_store(struct device *dev, | ||
| 452 | struct device_attribute *attr, | ||
| 453 | const char *buf, size_t size) | ||
| 454 | { | ||
| 455 | u8 idx; | ||
| 456 | int ret; | ||
| 457 | unsigned long val; | ||
| 458 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 459 | struct etm_config *config = &drvdata->config; | ||
| 460 | |||
| 461 | ret = kstrtoul(buf, 16, &val); | ||
| 462 | if (ret) | ||
| 463 | return ret; | ||
| 464 | |||
| 465 | spin_lock(&drvdata->spinlock); | ||
| 466 | idx = config->addr_idx; | ||
| 467 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 468 | config->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
| 469 | spin_unlock(&drvdata->spinlock); | ||
| 470 | return -EPERM; | ||
| 471 | } | ||
| 472 | |||
| 473 | config->addr_val[idx] = val; | ||
| 474 | config->addr_type[idx] = ETM_ADDR_TYPE_START; | ||
| 475 | config->startstop_ctrl |= (1 << idx); | ||
| 476 | config->enable_ctrl1 |= BIT(25); | ||
| 477 | spin_unlock(&drvdata->spinlock); | ||
| 478 | |||
| 479 | return size; | ||
| 480 | } | ||
| 481 | static DEVICE_ATTR_RW(addr_start); | ||
| 482 | |||
| 483 | static ssize_t addr_stop_show(struct device *dev, | ||
| 484 | struct device_attribute *attr, char *buf) | ||
| 485 | { | ||
| 486 | u8 idx; | ||
| 487 | unsigned long val; | ||
| 488 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 489 | struct etm_config *config = &drvdata->config; | ||
| 490 | |||
| 491 | spin_lock(&drvdata->spinlock); | ||
| 492 | idx = config->addr_idx; | ||
| 493 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 494 | config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
| 495 | spin_unlock(&drvdata->spinlock); | ||
| 496 | return -EPERM; | ||
| 497 | } | ||
| 498 | |||
| 499 | val = config->addr_val[idx]; | ||
| 500 | spin_unlock(&drvdata->spinlock); | ||
| 501 | |||
| 502 | return sprintf(buf, "%#lx\n", val); | ||
| 503 | } | ||
| 504 | |||
| 505 | static ssize_t addr_stop_store(struct device *dev, | ||
| 506 | struct device_attribute *attr, | ||
| 507 | const char *buf, size_t size) | ||
| 508 | { | ||
| 509 | u8 idx; | ||
| 510 | int ret; | ||
| 511 | unsigned long val; | ||
| 512 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 513 | struct etm_config *config = &drvdata->config; | ||
| 514 | |||
| 515 | ret = kstrtoul(buf, 16, &val); | ||
| 516 | if (ret) | ||
| 517 | return ret; | ||
| 518 | |||
| 519 | spin_lock(&drvdata->spinlock); | ||
| 520 | idx = config->addr_idx; | ||
| 521 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 522 | config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
| 523 | spin_unlock(&drvdata->spinlock); | ||
| 524 | return -EPERM; | ||
| 525 | } | ||
| 526 | |||
| 527 | config->addr_val[idx] = val; | ||
| 528 | config->addr_type[idx] = ETM_ADDR_TYPE_STOP; | ||
| 529 | config->startstop_ctrl |= (1 << (idx + 16)); | ||
| 530 | config->enable_ctrl1 |= ETMTECR1_START_STOP; | ||
| 531 | spin_unlock(&drvdata->spinlock); | ||
| 532 | |||
| 533 | return size; | ||
| 534 | } | ||
| 535 | static DEVICE_ATTR_RW(addr_stop); | ||
| 536 | |||
| 537 | static ssize_t addr_acctype_show(struct device *dev, | ||
| 538 | struct device_attribute *attr, char *buf) | ||
| 539 | { | ||
| 540 | unsigned long val; | ||
| 541 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 542 | struct etm_config *config = &drvdata->config; | ||
| 543 | |||
| 544 | spin_lock(&drvdata->spinlock); | ||
| 545 | val = config->addr_acctype[config->addr_idx]; | ||
| 546 | spin_unlock(&drvdata->spinlock); | ||
| 547 | |||
| 548 | return sprintf(buf, "%#lx\n", val); | ||
| 549 | } | ||
| 550 | |||
| 551 | static ssize_t addr_acctype_store(struct device *dev, | ||
| 552 | struct device_attribute *attr, | ||
| 553 | const char *buf, size_t size) | ||
| 554 | { | ||
| 555 | int ret; | ||
| 556 | unsigned long val; | ||
| 557 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 558 | struct etm_config *config = &drvdata->config; | ||
| 559 | |||
| 560 | ret = kstrtoul(buf, 16, &val); | ||
| 561 | if (ret) | ||
| 562 | return ret; | ||
| 563 | |||
| 564 | spin_lock(&drvdata->spinlock); | ||
| 565 | config->addr_acctype[config->addr_idx] = val; | ||
| 566 | spin_unlock(&drvdata->spinlock); | ||
| 567 | |||
| 568 | return size; | ||
| 569 | } | ||
| 570 | static DEVICE_ATTR_RW(addr_acctype); | ||
| 571 | |||
/* Report which of the hardware counters the cntr_* attributes target. */
static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/* Single word read of the cached index; no locking needed here. */
	val = config->cntr_idx;
	return sprintf(buf, "%#lx\n", val);
}

/*
 * Select which counter the other cntr_* attributes operate on.
 * Rejects indices beyond the number of counters this ETM implements.
 */
static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	/* Bounds check against the probed counter count. */
	if (val >= drvdata->nr_cntr)
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_idx);
| 609 | |||
/* Show the cached reload value of the currently selected counter. */
static ssize_t cntr_rld_val_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/* Lock so cntr_idx cannot move between read of idx and value. */
	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_val[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

/* Cache a new reload value for the currently selected counter. */
static ssize_t cntr_rld_val_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);
| 644 | |||
/* Show the event resource cached for the currently selected counter. */
static ssize_t cntr_event_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

/*
 * Cache the event resource driving the selected counter.  The user
 * value is clipped with ETM_EVENT_MASK before being stored.
 */
static ssize_t cntr_event_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_event);
| 679 | |||
/* Show the reload event cached for the currently selected counter. */
static ssize_t cntr_rld_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

/*
 * Cache the reload event for the selected counter, clipped with
 * ETM_EVENT_MASK like the other event-typed attributes.
 */
static ssize_t cntr_rld_event_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);
| 714 | |||
| 715 | static ssize_t cntr_val_show(struct device *dev, | ||
| 716 | struct device_attribute *attr, char *buf) | ||
| 717 | { | ||
| 718 | int i, ret = 0; | ||
| 719 | u32 val; | ||
| 720 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 721 | struct etm_config *config = &drvdata->config; | ||
| 722 | |||
| 723 | if (!local_read(&drvdata->mode)) { | ||
| 724 | spin_lock(&drvdata->spinlock); | ||
| 725 | for (i = 0; i < drvdata->nr_cntr; i++) | ||
| 726 | ret += sprintf(buf, "counter %d: %x\n", | ||
| 727 | i, config->cntr_val[i]); | ||
| 728 | spin_unlock(&drvdata->spinlock); | ||
| 729 | return ret; | ||
| 730 | } | ||
| 731 | |||
| 732 | for (i = 0; i < drvdata->nr_cntr; i++) { | ||
| 733 | val = etm_readl(drvdata, ETMCNTVRn(i)); | ||
| 734 | ret += sprintf(buf, "counter %d: %x\n", i, val); | ||
| 735 | } | ||
| 736 | |||
| 737 | return ret; | ||
| 738 | } | ||
| 739 | |||
| 740 | static ssize_t cntr_val_store(struct device *dev, | ||
| 741 | struct device_attribute *attr, | ||
| 742 | const char *buf, size_t size) | ||
| 743 | { | ||
| 744 | int ret; | ||
| 745 | unsigned long val; | ||
| 746 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 747 | struct etm_config *config = &drvdata->config; | ||
| 748 | |||
| 749 | ret = kstrtoul(buf, 16, &val); | ||
| 750 | if (ret) | ||
| 751 | return ret; | ||
| 752 | |||
| 753 | spin_lock(&drvdata->spinlock); | ||
| 754 | config->cntr_val[config->cntr_idx] = val; | ||
| 755 | spin_unlock(&drvdata->spinlock); | ||
| 756 | |||
| 757 | return size; | ||
| 758 | } | ||
| 759 | static DEVICE_ATTR_RW(cntr_val); | ||
| 760 | |||
/*
 * sysfs accessors for the six ETM sequencer transition events
 * (state 1->2, 2->1, 2->3, 3->1, 3->2 and 1->3).  Each show/store
 * pair mirrors a single cached word in struct etm_config; the store
 * side clips the user value with ETM_EVENT_MASK before caching it.
 * No locking: each accessor touches exactly one word of cached
 * configuration.
 */
static ssize_t seq_12_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_12_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_12_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_12_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_12_event);

static ssize_t seq_21_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_21_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_21_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_21_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_21_event);

static ssize_t seq_23_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_23_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_23_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_23_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_23_event);

static ssize_t seq_31_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_31_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_31_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_31_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_31_event);

static ssize_t seq_32_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_32_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_32_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_32_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_32_event);

static ssize_t seq_13_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_13_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_13_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_13_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_13_event);
| 934 | |||
/*
 * seq_curr_state_show - report the sequencer's current state.
 *
 * If tracing is not active the cached value is returned.  Otherwise
 * the live ETMSQR register is read: the device is powered up via
 * pm_runtime, interrupts are disabled while the spinlock is held, and
 * the CoreSight lock is opened (CS_UNLOCK) around the register access
 * — the exact ordering of these steps matters, hence code untouched.
 */
static ssize_t seq_curr_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		val = config->seq_curr_state;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}
| 959 | |||
| 960 | static ssize_t seq_curr_state_store(struct device *dev, | ||
| 961 | struct device_attribute *attr, | ||
| 962 | const char *buf, size_t size) | ||
| 963 | { | ||
| 964 | int ret; | ||
| 965 | unsigned long val; | ||
| 966 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 967 | struct etm_config *config = &drvdata->config; | ||
| 968 | |||
| 969 | ret = kstrtoul(buf, 16, &val); | ||
| 970 | if (ret) | ||
| 971 | return ret; | ||
| 972 | |||
| 973 | if (val > ETM_SEQ_STATE_MAX_VAL) | ||
| 974 | return -EINVAL; | ||
| 975 | |||
| 976 | config->seq_curr_state = val; | ||
| 977 | |||
| 978 | return size; | ||
| 979 | } | ||
| 980 | static DEVICE_ATTR_RW(seq_curr_state); | ||
| 981 | |||
/* Report which context-ID comparator the ctxid_* attributes target. */
static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return sprintf(buf, "%#lx\n", val);
}

/*
 * Select the context-ID comparator the other ctxid_* attributes
 * operate on; rejects indices beyond the implemented comparator count.
 */
static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_ctxid_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);
| 1020 | |||
/*
 * Show the pid programmed into the selected context-ID comparator,
 * in the form it was written by the user (the vpid copy).
 */
static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ctxid_vpid[config->ctxid_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

/*
 * Store a pid in the selected context-ID comparator.  Both the value
 * as supplied (vpid) and its translation via coresight_vpid_to_pid()
 * are cached — presumably the translation maps the pid as seen in the
 * writer's namespace to the kernel's view; confirm against the
 * coresight_vpid_to_pid() definition.
 */
static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long vpid, pid;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &vpid);
	if (ret)
		return ret;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	config->ctxid_pid[config->ctxid_idx] = pid;
	config->ctxid_vpid[config->ctxid_idx] = vpid;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
| 1058 | |||
| 1059 | static ssize_t ctxid_mask_show(struct device *dev, | ||
| 1060 | struct device_attribute *attr, char *buf) | ||
| 1061 | { | ||
| 1062 | unsigned long val; | ||
| 1063 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1064 | struct etm_config *config = &drvdata->config; | ||
| 1065 | |||
| 1066 | val = config->ctxid_mask; | ||
| 1067 | return sprintf(buf, "%#lx\n", val); | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | static ssize_t ctxid_mask_store(struct device *dev, | ||
| 1071 | struct device_attribute *attr, | ||
| 1072 | const char *buf, size_t size) | ||
| 1073 | { | ||
| 1074 | int ret; | ||
| 1075 | unsigned long val; | ||
| 1076 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1077 | struct etm_config *config = &drvdata->config; | ||
| 1078 | |||
| 1079 | ret = kstrtoul(buf, 16, &val); | ||
| 1080 | if (ret) | ||
| 1081 | return ret; | ||
| 1082 | |||
| 1083 | config->ctxid_mask = val; | ||
| 1084 | return size; | ||
| 1085 | } | ||
| 1086 | static DEVICE_ATTR_RW(ctxid_mask); | ||
| 1087 | |||
/* Show the cached synchronisation packet frequency. */
static ssize_t sync_freq_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->sync_freq;
	return sprintf(buf, "%#lx\n", val);
}

/*
 * Cache a new synchronisation frequency, clipped with ETM_SYNC_MASK.
 * Single cached word; no locking required.
 */
static ssize_t sync_freq_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->sync_freq = val & ETM_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(sync_freq);
| 1116 | |||
/* Show the cached event that triggers timestamp packet insertion. */
static ssize_t timestamp_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->timestamp_event;
	return sprintf(buf, "%#lx\n", val);
}

/* Cache the timestamp trigger event, clipped with ETM_EVENT_MASK. */
static ssize_t timestamp_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->timestamp_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(timestamp_event);
| 1145 | |||
| 1146 | static ssize_t cpu_show(struct device *dev, | ||
| 1147 | struct device_attribute *attr, char *buf) | ||
| 1148 | { | ||
| 1149 | int val; | ||
| 1150 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1151 | |||
| 1152 | val = drvdata->cpu; | ||
| 1153 | return scnprintf(buf, PAGE_SIZE, "%d\n", val); | ||
| 1154 | |||
| 1155 | } | ||
| 1156 | static DEVICE_ATTR_RO(cpu); | ||
| 1157 | |||
| 1158 | static ssize_t traceid_show(struct device *dev, | ||
| 1159 | struct device_attribute *attr, char *buf) | ||
| 1160 | { | ||
| 1161 | unsigned long val; | ||
| 1162 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1163 | |||
| 1164 | val = etm_get_trace_id(drvdata); | ||
| 1165 | |||
| 1166 | return sprintf(buf, "%#lx\n", val); | ||
| 1167 | } | ||
| 1168 | |||
| 1169 | static ssize_t traceid_store(struct device *dev, | ||
| 1170 | struct device_attribute *attr, | ||
| 1171 | const char *buf, size_t size) | ||
| 1172 | { | ||
| 1173 | int ret; | ||
| 1174 | unsigned long val; | ||
| 1175 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1176 | |||
| 1177 | ret = kstrtoul(buf, 16, &val); | ||
| 1178 | if (ret) | ||
| 1179 | return ret; | ||
| 1180 | |||
| 1181 | drvdata->traceid = val & ETM_TRACEID_MASK; | ||
| 1182 | return size; | ||
| 1183 | } | ||
| 1184 | static DEVICE_ATTR_RW(traceid); | ||
| 1185 | |||
/*
 * All user-visible configuration attributes, exported under the
 * device's sysfs directory.  Order here is the order sysfs lists them.
 */
static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_traceid.attr,
	&dev_attr_cpu.attr,
	NULL,
};
| 1223 | |||
/*
 * coresight_simple_func - generate a read-only sysfs attribute that
 * dumps a single memory-mapped ETM register at @offset as hex.
 */
#define coresight_simple_func(name, offset)                             \
static ssize_t name##_show(struct device *_dev,                         \
			   struct device_attribute *attr, char *buf)    \
{                                                                       \
	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);    \
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",                      \
			 readl_relaxed(drvdata->base + offset));        \
}                                                                       \
DEVICE_ATTR_RO(name)

coresight_simple_func(etmccr, ETMCCR);
coresight_simple_func(etmccer, ETMCCER);
coresight_simple_func(etmscr, ETMSCR);
coresight_simple_func(etmidr, ETMIDR);
coresight_simple_func(etmcr, ETMCR);
coresight_simple_func(etmtraceidr, ETMTRACEIDR);
coresight_simple_func(etmteevr, ETMTEEVR);
/*
 * NOTE(review): attribute is named "etmtssvr" but reads the ETMTSSCR
 * offset — looks like a naming slip, but the sysfs name is user-visible
 * ABI, so confirm before renaming.
 */
coresight_simple_func(etmtssvr, ETMTSSCR);
coresight_simple_func(etmtecr1, ETMTECR1);
coresight_simple_func(etmtecr2, ETMTECR2);
| 1244 | |||
/* Raw register dumps, grouped under the "mgmt" sysfs subdirectory. */
static struct attribute *coresight_etm_mgmt_attrs[] = {
	&dev_attr_etmccr.attr,
	&dev_attr_etmccer.attr,
	&dev_attr_etmscr.attr,
	&dev_attr_etmidr.attr,
	&dev_attr_etmcr.attr,
	&dev_attr_etmtraceidr.attr,
	&dev_attr_etmteevr.attr,
	&dev_attr_etmtssvr.attr,
	&dev_attr_etmtecr1.attr,
	&dev_attr_etmtecr2.attr,
	NULL,
};
| 1258 | |||
static const struct attribute_group coresight_etm_group = {
	.attrs = coresight_etm_attrs,
};

/* "mgmt" subdirectory holding the raw register dumps. */
static const struct attribute_group coresight_etm_mgmt_group = {
	.attrs = coresight_etm_mgmt_attrs,
	.name = "mgmt",
};

/* Exported (non-static) so the probe code can hand it to the core. */
const struct attribute_group *coresight_etm_groups[] = {
	&coresight_etm_group,
	&coresight_etm_mgmt_group,
	NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index d630b7ece735..d83ab82672e4 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
| 2 | * | 2 | * |
| 3 | * Description: CoreSight Program Flow Trace driver | ||
| 4 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
| @@ -11,7 +13,7 @@ | |||
| 11 | */ | 13 | */ |
| 12 | 14 | ||
| 13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | 16 | #include <linux/moduleparam.h> |
| 15 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 16 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 17 | #include <linux/device.h> | 19 | #include <linux/device.h> |
| @@ -27,14 +29,21 @@ | |||
| 27 | #include <linux/cpu.h> | 29 | #include <linux/cpu.h> |
| 28 | #include <linux/of.h> | 30 | #include <linux/of.h> |
| 29 | #include <linux/coresight.h> | 31 | #include <linux/coresight.h> |
| 32 | #include <linux/coresight-pmu.h> | ||
| 30 | #include <linux/amba/bus.h> | 33 | #include <linux/amba/bus.h> |
| 31 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
| 32 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
| 33 | #include <linux/clk.h> | 36 | #include <linux/clk.h> |
| 37 | #include <linux/perf_event.h> | ||
| 34 | #include <asm/sections.h> | 38 | #include <asm/sections.h> |
| 35 | 39 | ||
| 36 | #include "coresight-etm.h" | 40 | #include "coresight-etm.h" |
| 41 | #include "coresight-etm-perf.h" | ||
| 37 | 42 | ||
| 43 | /* | ||
| 44 | * Not really modular but using module_param is the easiest way to | ||
| 45 | * remain consistent with existing use cases for now. | ||
| 46 | */ | ||
| 38 | static int boot_enable; | 47 | static int boot_enable; |
| 39 | module_param_named(boot_enable, boot_enable, int, S_IRUGO); | 48 | module_param_named(boot_enable, boot_enable, int, S_IRUGO); |
| 40 | 49 | ||
| @@ -42,45 +51,16 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); | |||
| 42 | static int etm_count; | 51 | static int etm_count; |
| 43 | static struct etm_drvdata *etmdrvdata[NR_CPUS]; | 52 | static struct etm_drvdata *etmdrvdata[NR_CPUS]; |
| 44 | 53 | ||
| 45 | static inline void etm_writel(struct etm_drvdata *drvdata, | ||
| 46 | u32 val, u32 off) | ||
| 47 | { | ||
| 48 | if (drvdata->use_cp14) { | ||
| 49 | if (etm_writel_cp14(off, val)) { | ||
| 50 | dev_err(drvdata->dev, | ||
| 51 | "invalid CP14 access to ETM reg: %#x", off); | ||
| 52 | } | ||
| 53 | } else { | ||
| 54 | writel_relaxed(val, drvdata->base + off); | ||
| 55 | } | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off) | ||
| 59 | { | ||
| 60 | u32 val; | ||
| 61 | |||
| 62 | if (drvdata->use_cp14) { | ||
| 63 | if (etm_readl_cp14(off, &val)) { | ||
| 64 | dev_err(drvdata->dev, | ||
| 65 | "invalid CP14 access to ETM reg: %#x", off); | ||
| 66 | } | ||
| 67 | } else { | ||
| 68 | val = readl_relaxed(drvdata->base + off); | ||
| 69 | } | ||
| 70 | |||
| 71 | return val; | ||
| 72 | } | ||
| 73 | |||
| 74 | /* | 54 | /* |
| 75 | * Memory mapped writes to clear os lock are not supported on some processors | 55 | * Memory mapped writes to clear os lock are not supported on some processors |
| 76 | * and OS lock must be unlocked before any memory mapped access on such | 56 | * and OS lock must be unlocked before any memory mapped access on such |
| 77 | * processors, otherwise memory mapped reads/writes will be invalid. | 57 | * processors, otherwise memory mapped reads/writes will be invalid. |
| 78 | */ | 58 | */ |
| 79 | static void etm_os_unlock(void *info) | 59 | static void etm_os_unlock(struct etm_drvdata *drvdata) |
| 80 | { | 60 | { |
| 81 | struct etm_drvdata *drvdata = (struct etm_drvdata *)info; | ||
| 82 | /* Writing any value to ETMOSLAR unlocks the trace registers */ | 61 | /* Writing any value to ETMOSLAR unlocks the trace registers */ |
| 83 | etm_writel(drvdata, 0x0, ETMOSLAR); | 62 | etm_writel(drvdata, 0x0, ETMOSLAR); |
| 63 | drvdata->os_unlock = true; | ||
| 84 | isb(); | 64 | isb(); |
| 85 | } | 65 | } |
| 86 | 66 | ||
| @@ -215,36 +195,156 @@ static void etm_clr_prog(struct etm_drvdata *drvdata) | |||
| 215 | } | 195 | } |
| 216 | } | 196 | } |
| 217 | 197 | ||
| 218 | static void etm_set_default(struct etm_drvdata *drvdata) | 198 | void etm_set_default(struct etm_config *config) |
| 219 | { | 199 | { |
| 220 | int i; | 200 | int i; |
| 221 | 201 | ||
| 222 | drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; | 202 | if (WARN_ON_ONCE(!config)) |
| 223 | drvdata->enable_event = ETM_HARD_WIRE_RES_A; | 203 | return; |
| 224 | 204 | ||
| 225 | drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL; | 205 | /* |
| 226 | drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL; | 206 | * Taken verbatim from the TRM: |
| 227 | drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL; | 207 | * |
| 228 | drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL; | 208 | * To trace all memory: |
| 229 | drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL; | 209 | * set bit [24] in register 0x009, the ETMTECR1, to 1 |
| 230 | drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL; | 210 | * set all other bits in register 0x009, the ETMTECR1, to 0 |
| 231 | drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL; | 211 | * set all bits in register 0x007, the ETMTECR2, to 0 |
| 212 | * set register 0x008, the ETMTEEVR, to 0x6F (TRUE). | ||
| 213 | */ | ||
| 214 | config->enable_ctrl1 = BIT(24); | ||
| 215 | config->enable_ctrl2 = 0x0; | ||
| 216 | config->enable_event = ETM_HARD_WIRE_RES_A; | ||
| 232 | 217 | ||
| 233 | for (i = 0; i < drvdata->nr_cntr; i++) { | 218 | config->trigger_event = ETM_DEFAULT_EVENT_VAL; |
| 234 | drvdata->cntr_rld_val[i] = 0x0; | 219 | config->enable_event = ETM_HARD_WIRE_RES_A; |
| 235 | drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; | 220 | |
| 236 | drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; | 221 | config->seq_12_event = ETM_DEFAULT_EVENT_VAL; |
| 237 | drvdata->cntr_val[i] = 0x0; | 222 | config->seq_21_event = ETM_DEFAULT_EVENT_VAL; |
| 223 | config->seq_23_event = ETM_DEFAULT_EVENT_VAL; | ||
| 224 | config->seq_31_event = ETM_DEFAULT_EVENT_VAL; | ||
| 225 | config->seq_32_event = ETM_DEFAULT_EVENT_VAL; | ||
| 226 | config->seq_13_event = ETM_DEFAULT_EVENT_VAL; | ||
| 227 | config->timestamp_event = ETM_DEFAULT_EVENT_VAL; | ||
| 228 | |||
| 229 | for (i = 0; i < ETM_MAX_CNTR; i++) { | ||
| 230 | config->cntr_rld_val[i] = 0x0; | ||
| 231 | config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; | ||
| 232 | config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; | ||
| 233 | config->cntr_val[i] = 0x0; | ||
| 238 | } | 234 | } |
| 239 | 235 | ||
| 240 | drvdata->seq_curr_state = 0x0; | 236 | config->seq_curr_state = 0x0; |
| 241 | drvdata->ctxid_idx = 0x0; | 237 | config->ctxid_idx = 0x0; |
| 242 | for (i = 0; i < drvdata->nr_ctxid_cmp; i++) { | 238 | for (i = 0; i < ETM_MAX_CTXID_CMP; i++) { |
| 243 | drvdata->ctxid_pid[i] = 0x0; | 239 | config->ctxid_pid[i] = 0x0; |
| 244 | drvdata->ctxid_vpid[i] = 0x0; | 240 | config->ctxid_vpid[i] = 0x0; |
| 245 | } | 241 | } |
| 246 | 242 | ||
| 247 | drvdata->ctxid_mask = 0x0; | 243 | config->ctxid_mask = 0x0; |
| 244 | } | ||
| 245 | |||
| 246 | void etm_config_trace_mode(struct etm_config *config) | ||
| 247 | { | ||
| 248 | u32 flags, mode; | ||
| 249 | |||
| 250 | mode = config->mode; | ||
| 251 | |||
| 252 | mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER); | ||
| 253 | |||
| 254 | /* excluding kernel AND user space doesn't make sense */ | ||
| 255 | if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) | ||
| 256 | return; | ||
| 257 | |||
| 258 | /* nothing to do if neither flags are set */ | ||
| 259 | if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER)) | ||
| 260 | return; | ||
| 261 | |||
| 262 | flags = (1 << 0 | /* instruction execute */ | ||
| 263 | 3 << 3 | /* ARM instruction */ | ||
| 264 | 0 << 5 | /* No data value comparison */ | ||
| 265 | 0 << 7 | /* No exact match */ | ||
| 266 | 0 << 8); /* Ignore context ID */ | ||
| 267 | |||
| 268 | /* No need to worry about single address comparators. */ | ||
| 269 | config->enable_ctrl2 = 0x0; | ||
| 270 | |||
| 271 | /* Bit 0 is address range comparator 1 */ | ||
| 272 | config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1; | ||
| 273 | |||
| 274 | /* | ||
| 275 | * On ETMv3.5: | ||
| 276 | * ETMACTRn[13,11] == Non-secure state comparison control | ||
| 277 | * ETMACTRn[12,10] == Secure state comparison control | ||
| 278 | * | ||
| 279 | * b00 == Match in all modes in this state | ||
| 280 | * b01 == Do not match in any mode in this state | ||
| 281 | * b10 == Match in all modes except user mode in this state | ||
| 282 | * b11 == Match only in user mode in this state | ||
| 283 | */ | ||
| 284 | |||
| 285 | /* Tracing in secure mode is not supported at this time */ | ||
| 286 | flags |= (0 << 12 | 1 << 10); | ||
| 287 | |||
| 288 | if (mode & ETM_MODE_EXCL_USER) { | ||
| 289 | /* exclude user, match all modes except user mode */ | ||
| 290 | flags |= (1 << 13 | 0 << 11); | ||
| 291 | } else { | ||
| 292 | /* exclude kernel, match only in user mode */ | ||
| 293 | flags |= (1 << 13 | 1 << 11); | ||
| 294 | } | ||
| 295 | |||
| 296 | /* | ||
| 297 | * The ETMEEVR register is already set to "hard wire A". As such | ||
| 298 | * all there is to do is setup an address comparator that spans | ||
| 299 | * the entire address range and configure the state and mode bits. | ||
| 300 | */ | ||
| 301 | config->addr_val[0] = (u32) 0x0; | ||
| 302 | config->addr_val[1] = (u32) ~0x0; | ||
| 303 | config->addr_acctype[0] = flags; | ||
| 304 | config->addr_acctype[1] = flags; | ||
| 305 | config->addr_type[0] = ETM_ADDR_TYPE_RANGE; | ||
| 306 | config->addr_type[1] = ETM_ADDR_TYPE_RANGE; | ||
| 307 | } | ||
| 308 | |||
| 309 | #define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN) | ||
| 310 | |||
| 311 | static int etm_parse_event_config(struct etm_drvdata *drvdata, | ||
| 312 | struct perf_event_attr *attr) | ||
| 313 | { | ||
| 314 | struct etm_config *config = &drvdata->config; | ||
| 315 | |||
| 316 | if (!attr) | ||
| 317 | return -EINVAL; | ||
| 318 | |||
| 319 | /* Clear configuration from previous run */ | ||
| 320 | memset(config, 0, sizeof(struct etm_config)); | ||
| 321 | |||
| 322 | if (attr->exclude_kernel) | ||
| 323 | config->mode = ETM_MODE_EXCL_KERN; | ||
| 324 | |||
| 325 | if (attr->exclude_user) | ||
| 326 | config->mode = ETM_MODE_EXCL_USER; | ||
| 327 | |||
| 328 | /* Always start from the default config */ | ||
| 329 | etm_set_default(config); | ||
| 330 | |||
| 331 | /* | ||
| 332 | * By default the tracers are configured to trace the whole address | ||
| 333 | * range. Narrow the field only if requested by user space. | ||
| 334 | */ | ||
| 335 | if (config->mode) | ||
| 336 | etm_config_trace_mode(config); | ||
| 337 | |||
| 338 | /* | ||
| 339 | * At this time only cycle accurate and timestamp options are | ||
| 340 | * available. | ||
| 341 | */ | ||
| 342 | if (attr->config & ~ETM3X_SUPPORTED_OPTIONS) | ||
| 343 | return -EINVAL; | ||
| 344 | |||
| 345 | config->ctrl = attr->config; | ||
| 346 | |||
| 347 | return 0; | ||
| 248 | } | 348 | } |
| 249 | 349 | ||
| 250 | static void etm_enable_hw(void *info) | 350 | static void etm_enable_hw(void *info) |
| @@ -252,6 +352,7 @@ static void etm_enable_hw(void *info) | |||
| 252 | int i; | 352 | int i; |
| 253 | u32 etmcr; | 353 | u32 etmcr; |
| 254 | struct etm_drvdata *drvdata = info; | 354 | struct etm_drvdata *drvdata = info; |
| 355 | struct etm_config *config = &drvdata->config; | ||
| 255 | 356 | ||
| 256 | CS_UNLOCK(drvdata->base); | 357 | CS_UNLOCK(drvdata->base); |
| 257 | 358 | ||
| @@ -265,65 +366,74 @@ static void etm_enable_hw(void *info) | |||
| 265 | etm_set_prog(drvdata); | 366 | etm_set_prog(drvdata); |
| 266 | 367 | ||
| 267 | etmcr = etm_readl(drvdata, ETMCR); | 368 | etmcr = etm_readl(drvdata, ETMCR); |
| 268 | etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG); | 369 | /* Clear setting from a previous run if need be */ |
| 370 | etmcr &= ~ETM3X_SUPPORTED_OPTIONS; | ||
| 269 | etmcr |= drvdata->port_size; | 371 | etmcr |= drvdata->port_size; |
| 270 | etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR); | 372 | etmcr |= ETMCR_ETM_EN; |
| 271 | etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER); | 373 | etm_writel(drvdata, config->ctrl | etmcr, ETMCR); |
| 272 | etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR); | 374 | etm_writel(drvdata, config->trigger_event, ETMTRIGGER); |
| 273 | etm_writel(drvdata, drvdata->enable_event, ETMTEEVR); | 375 | etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR); |
| 274 | etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1); | 376 | etm_writel(drvdata, config->enable_event, ETMTEEVR); |
| 275 | etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR); | 377 | etm_writel(drvdata, config->enable_ctrl1, ETMTECR1); |
| 378 | etm_writel(drvdata, config->fifofull_level, ETMFFLR); | ||
| 276 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { | 379 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { |
| 277 | etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i)); | 380 | etm_writel(drvdata, config->addr_val[i], ETMACVRn(i)); |
| 278 | etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i)); | 381 | etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i)); |
| 279 | } | 382 | } |
| 280 | for (i = 0; i < drvdata->nr_cntr; i++) { | 383 | for (i = 0; i < drvdata->nr_cntr; i++) { |
| 281 | etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i)); | 384 | etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i)); |
| 282 | etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i)); | 385 | etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i)); |
| 283 | etm_writel(drvdata, drvdata->cntr_rld_event[i], | 386 | etm_writel(drvdata, config->cntr_rld_event[i], |
| 284 | ETMCNTRLDEVRn(i)); | 387 | ETMCNTRLDEVRn(i)); |
| 285 | etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i)); | 388 | etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i)); |
| 286 | } | 389 | } |
| 287 | etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR); | 390 | etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR); |
| 288 | etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR); | 391 | etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR); |
| 289 | etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR); | 392 | etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR); |
| 290 | etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR); | 393 | etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR); |
| 291 | etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR); | 394 | etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR); |
| 292 | etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR); | 395 | etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR); |
| 293 | etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR); | 396 | etm_writel(drvdata, config->seq_curr_state, ETMSQR); |
| 294 | for (i = 0; i < drvdata->nr_ext_out; i++) | 397 | for (i = 0; i < drvdata->nr_ext_out; i++) |
| 295 | etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i)); | 398 | etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i)); |
| 296 | for (i = 0; i < drvdata->nr_ctxid_cmp; i++) | 399 | for (i = 0; i < drvdata->nr_ctxid_cmp; i++) |
| 297 | etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i)); | 400 | etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i)); |
| 298 | etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR); | 401 | etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR); |
| 299 | etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR); | 402 | etm_writel(drvdata, config->sync_freq, ETMSYNCFR); |
| 300 | /* No external input selected */ | 403 | /* No external input selected */ |
| 301 | etm_writel(drvdata, 0x0, ETMEXTINSELR); | 404 | etm_writel(drvdata, 0x0, ETMEXTINSELR); |
| 302 | etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR); | 405 | etm_writel(drvdata, config->timestamp_event, ETMTSEVR); |
| 303 | /* No auxiliary control selected */ | 406 | /* No auxiliary control selected */ |
| 304 | etm_writel(drvdata, 0x0, ETMAUXCR); | 407 | etm_writel(drvdata, 0x0, ETMAUXCR); |
| 305 | etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR); | 408 | etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR); |
| 306 | /* No VMID comparator value selected */ | 409 | /* No VMID comparator value selected */ |
| 307 | etm_writel(drvdata, 0x0, ETMVMIDCVR); | 410 | etm_writel(drvdata, 0x0, ETMVMIDCVR); |
| 308 | 411 | ||
| 309 | /* Ensures trace output is enabled from this ETM */ | ||
| 310 | etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR); | ||
| 311 | |||
| 312 | etm_clr_prog(drvdata); | 412 | etm_clr_prog(drvdata); |
| 313 | CS_LOCK(drvdata->base); | 413 | CS_LOCK(drvdata->base); |
| 314 | 414 | ||
| 315 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); | 415 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); |
| 316 | } | 416 | } |
| 317 | 417 | ||
| 318 | static int etm_trace_id(struct coresight_device *csdev) | 418 | static int etm_cpu_id(struct coresight_device *csdev) |
| 319 | { | 419 | { |
| 320 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 420 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 421 | |||
| 422 | return drvdata->cpu; | ||
| 423 | } | ||
| 424 | |||
| 425 | int etm_get_trace_id(struct etm_drvdata *drvdata) | ||
| 426 | { | ||
| 321 | unsigned long flags; | 427 | unsigned long flags; |
| 322 | int trace_id = -1; | 428 | int trace_id = -1; |
| 323 | 429 | ||
| 324 | if (!drvdata->enable) | 430 | if (!drvdata) |
| 431 | goto out; | ||
| 432 | |||
| 433 | if (!local_read(&drvdata->mode)) | ||
| 325 | return drvdata->traceid; | 434 | return drvdata->traceid; |
| 326 | pm_runtime_get_sync(csdev->dev.parent); | 435 | |
| 436 | pm_runtime_get_sync(drvdata->dev); | ||
| 327 | 437 | ||
| 328 | spin_lock_irqsave(&drvdata->spinlock, flags); | 438 | spin_lock_irqsave(&drvdata->spinlock, flags); |
| 329 | 439 | ||
| @@ -332,17 +442,41 @@ static int etm_trace_id(struct coresight_device *csdev) | |||
| 332 | CS_LOCK(drvdata->base); | 442 | CS_LOCK(drvdata->base); |
| 333 | 443 | ||
| 334 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 444 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
| 335 | pm_runtime_put(csdev->dev.parent); | 445 | pm_runtime_put(drvdata->dev); |
| 336 | 446 | ||
| 447 | out: | ||
| 337 | return trace_id; | 448 | return trace_id; |
| 449 | |||
| 450 | } | ||
| 451 | |||
| 452 | static int etm_trace_id(struct coresight_device *csdev) | ||
| 453 | { | ||
| 454 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 455 | |||
| 456 | return etm_get_trace_id(drvdata); | ||
| 338 | } | 457 | } |
| 339 | 458 | ||
| 340 | static int etm_enable(struct coresight_device *csdev) | 459 | static int etm_enable_perf(struct coresight_device *csdev, |
| 460 | struct perf_event_attr *attr) | ||
| 461 | { | ||
| 462 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 463 | |||
| 464 | if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) | ||
| 465 | return -EINVAL; | ||
| 466 | |||
| 467 | /* Configure the tracer based on the session's specifics */ | ||
| 468 | etm_parse_event_config(drvdata, attr); | ||
| 469 | /* And enable it */ | ||
| 470 | etm_enable_hw(drvdata); | ||
| 471 | |||
| 472 | return 0; | ||
| 473 | } | ||
| 474 | |||
| 475 | static int etm_enable_sysfs(struct coresight_device *csdev) | ||
| 341 | { | 476 | { |
| 342 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 477 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 343 | int ret; | 478 | int ret; |
| 344 | 479 | ||
| 345 | pm_runtime_get_sync(csdev->dev.parent); | ||
| 346 | spin_lock(&drvdata->spinlock); | 480 | spin_lock(&drvdata->spinlock); |
| 347 | 481 | ||
| 348 | /* | 482 | /* |
| @@ -357,16 +491,45 @@ static int etm_enable(struct coresight_device *csdev) | |||
| 357 | goto err; | 491 | goto err; |
| 358 | } | 492 | } |
| 359 | 493 | ||
| 360 | drvdata->enable = true; | ||
| 361 | drvdata->sticky_enable = true; | 494 | drvdata->sticky_enable = true; |
| 362 | |||
| 363 | spin_unlock(&drvdata->spinlock); | 495 | spin_unlock(&drvdata->spinlock); |
| 364 | 496 | ||
| 365 | dev_info(drvdata->dev, "ETM tracing enabled\n"); | 497 | dev_info(drvdata->dev, "ETM tracing enabled\n"); |
| 366 | return 0; | 498 | return 0; |
| 499 | |||
| 367 | err: | 500 | err: |
| 368 | spin_unlock(&drvdata->spinlock); | 501 | spin_unlock(&drvdata->spinlock); |
| 369 | pm_runtime_put(csdev->dev.parent); | 502 | return ret; |
| 503 | } | ||
| 504 | |||
| 505 | static int etm_enable(struct coresight_device *csdev, | ||
| 506 | struct perf_event_attr *attr, u32 mode) | ||
| 507 | { | ||
| 508 | int ret; | ||
| 509 | u32 val; | ||
| 510 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 511 | |||
| 512 | val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); | ||
| 513 | |||
| 514 | /* Someone is already using the tracer */ | ||
| 515 | if (val) | ||
| 516 | return -EBUSY; | ||
| 517 | |||
| 518 | switch (mode) { | ||
| 519 | case CS_MODE_SYSFS: | ||
| 520 | ret = etm_enable_sysfs(csdev); | ||
| 521 | break; | ||
| 522 | case CS_MODE_PERF: | ||
| 523 | ret = etm_enable_perf(csdev, attr); | ||
| 524 | break; | ||
| 525 | default: | ||
| 526 | ret = -EINVAL; | ||
| 527 | } | ||
| 528 | |||
| 529 | /* The tracer didn't start */ | ||
| 530 | if (ret) | ||
| 531 | local_set(&drvdata->mode, CS_MODE_DISABLED); | ||
| 532 | |||
| 370 | return ret; | 533 | return ret; |
| 371 | } | 534 | } |
| 372 | 535 | ||
| @@ -374,18 +537,16 @@ static void etm_disable_hw(void *info) | |||
| 374 | { | 537 | { |
| 375 | int i; | 538 | int i; |
| 376 | struct etm_drvdata *drvdata = info; | 539 | struct etm_drvdata *drvdata = info; |
| 540 | struct etm_config *config = &drvdata->config; | ||
| 377 | 541 | ||
| 378 | CS_UNLOCK(drvdata->base); | 542 | CS_UNLOCK(drvdata->base); |
| 379 | etm_set_prog(drvdata); | 543 | etm_set_prog(drvdata); |
| 380 | 544 | ||
| 381 | /* Program trace enable to low by using always false event */ | ||
| 382 | etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR); | ||
| 383 | |||
| 384 | /* Read back sequencer and counters for post trace analysis */ | 545 | /* Read back sequencer and counters for post trace analysis */ |
| 385 | drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); | 546 | config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); |
| 386 | 547 | ||
| 387 | for (i = 0; i < drvdata->nr_cntr; i++) | 548 | for (i = 0; i < drvdata->nr_cntr; i++) |
| 388 | drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); | 549 | config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i)); |
| 389 | 550 | ||
| 390 | etm_set_pwrdwn(drvdata); | 551 | etm_set_pwrdwn(drvdata); |
| 391 | CS_LOCK(drvdata->base); | 552 | CS_LOCK(drvdata->base); |
| @@ -393,7 +554,28 @@ static void etm_disable_hw(void *info) | |||
| 393 | dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); | 554 | dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); |
| 394 | } | 555 | } |
| 395 | 556 | ||
| 396 | static void etm_disable(struct coresight_device *csdev) | 557 | static void etm_disable_perf(struct coresight_device *csdev) |
| 558 | { | ||
| 559 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 560 | |||
| 561 | if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) | ||
| 562 | return; | ||
| 563 | |||
| 564 | CS_UNLOCK(drvdata->base); | ||
| 565 | |||
| 566 | /* Setting the prog bit disables tracing immediately */ | ||
| 567 | etm_set_prog(drvdata); | ||
| 568 | |||
| 569 | /* | ||
| 570 | * There is no way to know when the tracer will be used again so | ||
| 571 | * power down the tracer. | ||
| 572 | */ | ||
| 573 | etm_set_pwrdwn(drvdata); | ||
| 574 | |||
| 575 | CS_LOCK(drvdata->base); | ||
| 576 | } | ||
| 577 | |||
| 578 | static void etm_disable_sysfs(struct coresight_device *csdev) | ||
| 397 | { | 579 | { |
| 398 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 580 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 399 | 581 | ||
| @@ -411,1235 +593,52 @@ static void etm_disable(struct coresight_device *csdev) | |||
| 411 | * ensures that register writes occur when cpu is powered. | 593 | * ensures that register writes occur when cpu is powered. |
| 412 | */ | 594 | */ |
| 413 | smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1); | 595 | smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1); |
| 414 | drvdata->enable = false; | ||
| 415 | 596 | ||
| 416 | spin_unlock(&drvdata->spinlock); | 597 | spin_unlock(&drvdata->spinlock); |
| 417 | put_online_cpus(); | 598 | put_online_cpus(); |
| 418 | pm_runtime_put(csdev->dev.parent); | ||
| 419 | 599 | ||
| 420 | dev_info(drvdata->dev, "ETM tracing disabled\n"); | 600 | dev_info(drvdata->dev, "ETM tracing disabled\n"); |
| 421 | } | 601 | } |
| 422 | 602 | ||
| 423 | static const struct coresight_ops_source etm_source_ops = { | 603 | static void etm_disable(struct coresight_device *csdev) |
| 424 | .trace_id = etm_trace_id, | ||
| 425 | .enable = etm_enable, | ||
| 426 | .disable = etm_disable, | ||
| 427 | }; | ||
| 428 | |||
| 429 | static const struct coresight_ops etm_cs_ops = { | ||
| 430 | .source_ops = &etm_source_ops, | ||
| 431 | }; | ||
| 432 | |||
| 433 | static ssize_t nr_addr_cmp_show(struct device *dev, | ||
| 434 | struct device_attribute *attr, char *buf) | ||
| 435 | { | ||
| 436 | unsigned long val; | ||
| 437 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 438 | |||
| 439 | val = drvdata->nr_addr_cmp; | ||
| 440 | return sprintf(buf, "%#lx\n", val); | ||
| 441 | } | ||
| 442 | static DEVICE_ATTR_RO(nr_addr_cmp); | ||
| 443 | |||
| 444 | static ssize_t nr_cntr_show(struct device *dev, | ||
| 445 | struct device_attribute *attr, char *buf) | ||
| 446 | { unsigned long val; | ||
| 447 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 448 | |||
| 449 | val = drvdata->nr_cntr; | ||
| 450 | return sprintf(buf, "%#lx\n", val); | ||
| 451 | } | ||
| 452 | static DEVICE_ATTR_RO(nr_cntr); | ||
| 453 | |||
| 454 | static ssize_t nr_ctxid_cmp_show(struct device *dev, | ||
| 455 | struct device_attribute *attr, char *buf) | ||
| 456 | { | ||
| 457 | unsigned long val; | ||
| 458 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 459 | |||
| 460 | val = drvdata->nr_ctxid_cmp; | ||
| 461 | return sprintf(buf, "%#lx\n", val); | ||
| 462 | } | ||
| 463 | static DEVICE_ATTR_RO(nr_ctxid_cmp); | ||
| 464 | |||
| 465 | static ssize_t etmsr_show(struct device *dev, | ||
| 466 | struct device_attribute *attr, char *buf) | ||
| 467 | { | ||
| 468 | unsigned long flags, val; | ||
| 469 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 470 | |||
| 471 | pm_runtime_get_sync(drvdata->dev); | ||
| 472 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 473 | CS_UNLOCK(drvdata->base); | ||
| 474 | |||
| 475 | val = etm_readl(drvdata, ETMSR); | ||
| 476 | |||
| 477 | CS_LOCK(drvdata->base); | ||
| 478 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 479 | pm_runtime_put(drvdata->dev); | ||
| 480 | |||
| 481 | return sprintf(buf, "%#lx\n", val); | ||
| 482 | } | ||
| 483 | static DEVICE_ATTR_RO(etmsr); | ||
| 484 | |||
| 485 | static ssize_t reset_store(struct device *dev, | ||
| 486 | struct device_attribute *attr, | ||
| 487 | const char *buf, size_t size) | ||
| 488 | { | ||
| 489 | int i, ret; | ||
| 490 | unsigned long val; | ||
| 491 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 492 | |||
| 493 | ret = kstrtoul(buf, 16, &val); | ||
| 494 | if (ret) | ||
| 495 | return ret; | ||
| 496 | |||
| 497 | if (val) { | ||
| 498 | spin_lock(&drvdata->spinlock); | ||
| 499 | drvdata->mode = ETM_MODE_EXCLUDE; | ||
| 500 | drvdata->ctrl = 0x0; | ||
| 501 | drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; | ||
| 502 | drvdata->startstop_ctrl = 0x0; | ||
| 503 | drvdata->addr_idx = 0x0; | ||
| 504 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { | ||
| 505 | drvdata->addr_val[i] = 0x0; | ||
| 506 | drvdata->addr_acctype[i] = 0x0; | ||
| 507 | drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE; | ||
| 508 | } | ||
| 509 | drvdata->cntr_idx = 0x0; | ||
| 510 | |||
| 511 | etm_set_default(drvdata); | ||
| 512 | spin_unlock(&drvdata->spinlock); | ||
| 513 | } | ||
| 514 | |||
| 515 | return size; | ||
| 516 | } | ||
| 517 | static DEVICE_ATTR_WO(reset); | ||
| 518 | |||
| 519 | static ssize_t mode_show(struct device *dev, | ||
| 520 | struct device_attribute *attr, char *buf) | ||
| 521 | { | ||
| 522 | unsigned long val; | ||
| 523 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 524 | |||
| 525 | val = drvdata->mode; | ||
| 526 | return sprintf(buf, "%#lx\n", val); | ||
| 527 | } | ||
| 528 | |||
| 529 | static ssize_t mode_store(struct device *dev, | ||
| 530 | struct device_attribute *attr, | ||
| 531 | const char *buf, size_t size) | ||
| 532 | { | ||
| 533 | int ret; | ||
| 534 | unsigned long val; | ||
| 535 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 536 | |||
| 537 | ret = kstrtoul(buf, 16, &val); | ||
| 538 | if (ret) | ||
| 539 | return ret; | ||
| 540 | |||
| 541 | spin_lock(&drvdata->spinlock); | ||
| 542 | drvdata->mode = val & ETM_MODE_ALL; | ||
| 543 | |||
| 544 | if (drvdata->mode & ETM_MODE_EXCLUDE) | ||
| 545 | drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC; | ||
| 546 | else | ||
| 547 | drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC; | ||
| 548 | |||
| 549 | if (drvdata->mode & ETM_MODE_CYCACC) | ||
| 550 | drvdata->ctrl |= ETMCR_CYC_ACC; | ||
| 551 | else | ||
| 552 | drvdata->ctrl &= ~ETMCR_CYC_ACC; | ||
| 553 | |||
| 554 | if (drvdata->mode & ETM_MODE_STALL) { | ||
| 555 | if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { | ||
| 556 | dev_warn(drvdata->dev, "stall mode not supported\n"); | ||
| 557 | ret = -EINVAL; | ||
| 558 | goto err_unlock; | ||
| 559 | } | ||
| 560 | drvdata->ctrl |= ETMCR_STALL_MODE; | ||
| 561 | } else | ||
| 562 | drvdata->ctrl &= ~ETMCR_STALL_MODE; | ||
| 563 | |||
| 564 | if (drvdata->mode & ETM_MODE_TIMESTAMP) { | ||
| 565 | if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { | ||
| 566 | dev_warn(drvdata->dev, "timestamp not supported\n"); | ||
| 567 | ret = -EINVAL; | ||
| 568 | goto err_unlock; | ||
| 569 | } | ||
| 570 | drvdata->ctrl |= ETMCR_TIMESTAMP_EN; | ||
| 571 | } else | ||
| 572 | drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN; | ||
| 573 | |||
| 574 | if (drvdata->mode & ETM_MODE_CTXID) | ||
| 575 | drvdata->ctrl |= ETMCR_CTXID_SIZE; | ||
| 576 | else | ||
| 577 | drvdata->ctrl &= ~ETMCR_CTXID_SIZE; | ||
| 578 | spin_unlock(&drvdata->spinlock); | ||
| 579 | |||
| 580 | return size; | ||
| 581 | |||
| 582 | err_unlock: | ||
| 583 | spin_unlock(&drvdata->spinlock); | ||
| 584 | return ret; | ||
| 585 | } | ||
| 586 | static DEVICE_ATTR_RW(mode); | ||
| 587 | |||
| 588 | static ssize_t trigger_event_show(struct device *dev, | ||
| 589 | struct device_attribute *attr, char *buf) | ||
| 590 | { | ||
| 591 | unsigned long val; | ||
| 592 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 593 | |||
| 594 | val = drvdata->trigger_event; | ||
| 595 | return sprintf(buf, "%#lx\n", val); | ||
| 596 | } | ||
| 597 | |||
| 598 | static ssize_t trigger_event_store(struct device *dev, | ||
| 599 | struct device_attribute *attr, | ||
| 600 | const char *buf, size_t size) | ||
| 601 | { | ||
| 602 | int ret; | ||
| 603 | unsigned long val; | ||
| 604 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 605 | |||
| 606 | ret = kstrtoul(buf, 16, &val); | ||
| 607 | if (ret) | ||
| 608 | return ret; | ||
| 609 | |||
| 610 | drvdata->trigger_event = val & ETM_EVENT_MASK; | ||
| 611 | |||
| 612 | return size; | ||
| 613 | } | ||
| 614 | static DEVICE_ATTR_RW(trigger_event); | ||
| 615 | |||
| 616 | static ssize_t enable_event_show(struct device *dev, | ||
| 617 | struct device_attribute *attr, char *buf) | ||
| 618 | { | ||
| 619 | unsigned long val; | ||
| 620 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 621 | |||
| 622 | val = drvdata->enable_event; | ||
| 623 | return sprintf(buf, "%#lx\n", val); | ||
| 624 | } | ||
| 625 | |||
| 626 | static ssize_t enable_event_store(struct device *dev, | ||
| 627 | struct device_attribute *attr, | ||
| 628 | const char *buf, size_t size) | ||
| 629 | { | ||
| 630 | int ret; | ||
| 631 | unsigned long val; | ||
| 632 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 633 | |||
| 634 | ret = kstrtoul(buf, 16, &val); | ||
| 635 | if (ret) | ||
| 636 | return ret; | ||
| 637 | |||
| 638 | drvdata->enable_event = val & ETM_EVENT_MASK; | ||
| 639 | |||
| 640 | return size; | ||
| 641 | } | ||
| 642 | static DEVICE_ATTR_RW(enable_event); | ||
| 643 | |||
| 644 | static ssize_t fifofull_level_show(struct device *dev, | ||
| 645 | struct device_attribute *attr, char *buf) | ||
| 646 | { | ||
| 647 | unsigned long val; | ||
| 648 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 649 | |||
| 650 | val = drvdata->fifofull_level; | ||
| 651 | return sprintf(buf, "%#lx\n", val); | ||
| 652 | } | ||
| 653 | |||
| 654 | static ssize_t fifofull_level_store(struct device *dev, | ||
| 655 | struct device_attribute *attr, | ||
| 656 | const char *buf, size_t size) | ||
| 657 | { | ||
| 658 | int ret; | ||
| 659 | unsigned long val; | ||
| 660 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 661 | |||
| 662 | ret = kstrtoul(buf, 16, &val); | ||
| 663 | if (ret) | ||
| 664 | return ret; | ||
| 665 | |||
| 666 | drvdata->fifofull_level = val; | ||
| 667 | |||
| 668 | return size; | ||
| 669 | } | ||
| 670 | static DEVICE_ATTR_RW(fifofull_level); | ||
| 671 | |||
| 672 | static ssize_t addr_idx_show(struct device *dev, | ||
| 673 | struct device_attribute *attr, char *buf) | ||
| 674 | { | ||
| 675 | unsigned long val; | ||
| 676 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 677 | |||
| 678 | val = drvdata->addr_idx; | ||
| 679 | return sprintf(buf, "%#lx\n", val); | ||
| 680 | } | ||
| 681 | |||
| 682 | static ssize_t addr_idx_store(struct device *dev, | ||
| 683 | struct device_attribute *attr, | ||
| 684 | const char *buf, size_t size) | ||
| 685 | { | ||
| 686 | int ret; | ||
| 687 | unsigned long val; | ||
| 688 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 689 | |||
| 690 | ret = kstrtoul(buf, 16, &val); | ||
| 691 | if (ret) | ||
| 692 | return ret; | ||
| 693 | |||
| 694 | if (val >= drvdata->nr_addr_cmp) | ||
| 695 | return -EINVAL; | ||
| 696 | |||
| 697 | /* | ||
| 698 | * Use spinlock to ensure index doesn't change while it gets | ||
| 699 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 700 | */ | ||
| 701 | spin_lock(&drvdata->spinlock); | ||
| 702 | drvdata->addr_idx = val; | ||
| 703 | spin_unlock(&drvdata->spinlock); | ||
| 704 | |||
| 705 | return size; | ||
| 706 | } | ||
| 707 | static DEVICE_ATTR_RW(addr_idx); | ||
| 708 | |||
| 709 | static ssize_t addr_single_show(struct device *dev, | ||
| 710 | struct device_attribute *attr, char *buf) | ||
| 711 | { | ||
| 712 | u8 idx; | ||
| 713 | unsigned long val; | ||
| 714 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 715 | |||
| 716 | spin_lock(&drvdata->spinlock); | ||
| 717 | idx = drvdata->addr_idx; | ||
| 718 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 719 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
| 720 | spin_unlock(&drvdata->spinlock); | ||
| 721 | return -EINVAL; | ||
| 722 | } | ||
| 723 | |||
| 724 | val = drvdata->addr_val[idx]; | ||
| 725 | spin_unlock(&drvdata->spinlock); | ||
| 726 | |||
| 727 | return sprintf(buf, "%#lx\n", val); | ||
| 728 | } | ||
| 729 | |||
| 730 | static ssize_t addr_single_store(struct device *dev, | ||
| 731 | struct device_attribute *attr, | ||
| 732 | const char *buf, size_t size) | ||
| 733 | { | ||
| 734 | u8 idx; | ||
| 735 | int ret; | ||
| 736 | unsigned long val; | ||
| 737 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 738 | |||
| 739 | ret = kstrtoul(buf, 16, &val); | ||
| 740 | if (ret) | ||
| 741 | return ret; | ||
| 742 | |||
| 743 | spin_lock(&drvdata->spinlock); | ||
| 744 | idx = drvdata->addr_idx; | ||
| 745 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 746 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
| 747 | spin_unlock(&drvdata->spinlock); | ||
| 748 | return -EINVAL; | ||
| 749 | } | ||
| 750 | |||
| 751 | drvdata->addr_val[idx] = val; | ||
| 752 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; | ||
| 753 | spin_unlock(&drvdata->spinlock); | ||
| 754 | |||
| 755 | return size; | ||
| 756 | } | ||
| 757 | static DEVICE_ATTR_RW(addr_single); | ||
| 758 | |||
| 759 | static ssize_t addr_range_show(struct device *dev, | ||
| 760 | struct device_attribute *attr, char *buf) | ||
| 761 | { | ||
| 762 | u8 idx; | ||
| 763 | unsigned long val1, val2; | ||
| 764 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 765 | |||
| 766 | spin_lock(&drvdata->spinlock); | ||
| 767 | idx = drvdata->addr_idx; | ||
| 768 | if (idx % 2 != 0) { | ||
| 769 | spin_unlock(&drvdata->spinlock); | ||
| 770 | return -EPERM; | ||
| 771 | } | ||
| 772 | if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
| 773 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
| 774 | (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
| 775 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
| 776 | spin_unlock(&drvdata->spinlock); | ||
| 777 | return -EPERM; | ||
| 778 | } | ||
| 779 | |||
| 780 | val1 = drvdata->addr_val[idx]; | ||
| 781 | val2 = drvdata->addr_val[idx + 1]; | ||
| 782 | spin_unlock(&drvdata->spinlock); | ||
| 783 | |||
| 784 | return sprintf(buf, "%#lx %#lx\n", val1, val2); | ||
| 785 | } | ||
| 786 | |||
| 787 | static ssize_t addr_range_store(struct device *dev, | ||
| 788 | struct device_attribute *attr, | ||
| 789 | const char *buf, size_t size) | ||
| 790 | { | ||
| 791 | u8 idx; | ||
| 792 | unsigned long val1, val2; | ||
| 793 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 794 | |||
| 795 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
| 796 | return -EINVAL; | ||
| 797 | /* Lower address comparator cannot have a higher address value */ | ||
| 798 | if (val1 > val2) | ||
| 799 | return -EINVAL; | ||
| 800 | |||
| 801 | spin_lock(&drvdata->spinlock); | ||
| 802 | idx = drvdata->addr_idx; | ||
| 803 | if (idx % 2 != 0) { | ||
| 804 | spin_unlock(&drvdata->spinlock); | ||
| 805 | return -EPERM; | ||
| 806 | } | ||
| 807 | if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
| 808 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
| 809 | (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
| 810 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
| 811 | spin_unlock(&drvdata->spinlock); | ||
| 812 | return -EPERM; | ||
| 813 | } | ||
| 814 | |||
| 815 | drvdata->addr_val[idx] = val1; | ||
| 816 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE; | ||
| 817 | drvdata->addr_val[idx + 1] = val2; | ||
| 818 | drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; | ||
| 819 | drvdata->enable_ctrl1 |= (1 << (idx/2)); | ||
| 820 | spin_unlock(&drvdata->spinlock); | ||
| 821 | |||
| 822 | return size; | ||
| 823 | } | ||
| 824 | static DEVICE_ATTR_RW(addr_range); | ||
| 825 | |||
| 826 | static ssize_t addr_start_show(struct device *dev, | ||
| 827 | struct device_attribute *attr, char *buf) | ||
| 828 | { | ||
| 829 | u8 idx; | ||
| 830 | unsigned long val; | ||
| 831 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 832 | |||
| 833 | spin_lock(&drvdata->spinlock); | ||
| 834 | idx = drvdata->addr_idx; | ||
| 835 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 836 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
| 837 | spin_unlock(&drvdata->spinlock); | ||
| 838 | return -EPERM; | ||
| 839 | } | ||
| 840 | |||
| 841 | val = drvdata->addr_val[idx]; | ||
| 842 | spin_unlock(&drvdata->spinlock); | ||
| 843 | |||
| 844 | return sprintf(buf, "%#lx\n", val); | ||
| 845 | } | ||
| 846 | |||
| 847 | static ssize_t addr_start_store(struct device *dev, | ||
| 848 | struct device_attribute *attr, | ||
| 849 | const char *buf, size_t size) | ||
| 850 | { | ||
| 851 | u8 idx; | ||
| 852 | int ret; | ||
| 853 | unsigned long val; | ||
| 854 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 855 | |||
| 856 | ret = kstrtoul(buf, 16, &val); | ||
| 857 | if (ret) | ||
| 858 | return ret; | ||
| 859 | |||
| 860 | spin_lock(&drvdata->spinlock); | ||
| 861 | idx = drvdata->addr_idx; | ||
| 862 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 863 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
| 864 | spin_unlock(&drvdata->spinlock); | ||
| 865 | return -EPERM; | ||
| 866 | } | ||
| 867 | |||
| 868 | drvdata->addr_val[idx] = val; | ||
| 869 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_START; | ||
| 870 | drvdata->startstop_ctrl |= (1 << idx); | ||
| 871 | drvdata->enable_ctrl1 |= BIT(25); | ||
| 872 | spin_unlock(&drvdata->spinlock); | ||
| 873 | |||
| 874 | return size; | ||
| 875 | } | ||
| 876 | static DEVICE_ATTR_RW(addr_start); | ||
| 877 | |||
| 878 | static ssize_t addr_stop_show(struct device *dev, | ||
| 879 | struct device_attribute *attr, char *buf) | ||
| 880 | { | ||
| 881 | u8 idx; | ||
| 882 | unsigned long val; | ||
| 883 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 884 | |||
| 885 | spin_lock(&drvdata->spinlock); | ||
| 886 | idx = drvdata->addr_idx; | ||
| 887 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 888 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
| 889 | spin_unlock(&drvdata->spinlock); | ||
| 890 | return -EPERM; | ||
| 891 | } | ||
| 892 | |||
| 893 | val = drvdata->addr_val[idx]; | ||
| 894 | spin_unlock(&drvdata->spinlock); | ||
| 895 | |||
| 896 | return sprintf(buf, "%#lx\n", val); | ||
| 897 | } | ||
| 898 | |||
| 899 | static ssize_t addr_stop_store(struct device *dev, | ||
| 900 | struct device_attribute *attr, | ||
| 901 | const char *buf, size_t size) | ||
| 902 | { | ||
| 903 | u8 idx; | ||
| 904 | int ret; | ||
| 905 | unsigned long val; | ||
| 906 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 907 | |||
| 908 | ret = kstrtoul(buf, 16, &val); | ||
| 909 | if (ret) | ||
| 910 | return ret; | ||
| 911 | |||
| 912 | spin_lock(&drvdata->spinlock); | ||
| 913 | idx = drvdata->addr_idx; | ||
| 914 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 915 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
| 916 | spin_unlock(&drvdata->spinlock); | ||
| 917 | return -EPERM; | ||
| 918 | } | ||
| 919 | |||
| 920 | drvdata->addr_val[idx] = val; | ||
| 921 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP; | ||
| 922 | drvdata->startstop_ctrl |= (1 << (idx + 16)); | ||
| 923 | drvdata->enable_ctrl1 |= ETMTECR1_START_STOP; | ||
| 924 | spin_unlock(&drvdata->spinlock); | ||
| 925 | |||
| 926 | return size; | ||
| 927 | } | ||
| 928 | static DEVICE_ATTR_RW(addr_stop); | ||
| 929 | |||
| 930 | static ssize_t addr_acctype_show(struct device *dev, | ||
| 931 | struct device_attribute *attr, char *buf) | ||
| 932 | { | ||
| 933 | unsigned long val; | ||
| 934 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 935 | |||
| 936 | spin_lock(&drvdata->spinlock); | ||
| 937 | val = drvdata->addr_acctype[drvdata->addr_idx]; | ||
| 938 | spin_unlock(&drvdata->spinlock); | ||
| 939 | |||
| 940 | return sprintf(buf, "%#lx\n", val); | ||
| 941 | } | ||
| 942 | |||
| 943 | static ssize_t addr_acctype_store(struct device *dev, | ||
| 944 | struct device_attribute *attr, | ||
| 945 | const char *buf, size_t size) | ||
| 946 | { | ||
| 947 | int ret; | ||
| 948 | unsigned long val; | ||
| 949 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 950 | |||
| 951 | ret = kstrtoul(buf, 16, &val); | ||
| 952 | if (ret) | ||
| 953 | return ret; | ||
| 954 | |||
| 955 | spin_lock(&drvdata->spinlock); | ||
| 956 | drvdata->addr_acctype[drvdata->addr_idx] = val; | ||
| 957 | spin_unlock(&drvdata->spinlock); | ||
| 958 | |||
| 959 | return size; | ||
| 960 | } | ||
| 961 | static DEVICE_ATTR_RW(addr_acctype); | ||
| 962 | |||
| 963 | static ssize_t cntr_idx_show(struct device *dev, | ||
| 964 | struct device_attribute *attr, char *buf) | ||
| 965 | { | ||
| 966 | unsigned long val; | ||
| 967 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 968 | |||
| 969 | val = drvdata->cntr_idx; | ||
| 970 | return sprintf(buf, "%#lx\n", val); | ||
| 971 | } | ||
| 972 | |||
| 973 | static ssize_t cntr_idx_store(struct device *dev, | ||
| 974 | struct device_attribute *attr, | ||
| 975 | const char *buf, size_t size) | ||
| 976 | { | ||
| 977 | int ret; | ||
| 978 | unsigned long val; | ||
| 979 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 980 | |||
| 981 | ret = kstrtoul(buf, 16, &val); | ||
| 982 | if (ret) | ||
| 983 | return ret; | ||
| 984 | |||
| 985 | if (val >= drvdata->nr_cntr) | ||
| 986 | return -EINVAL; | ||
| 987 | /* | ||
| 988 | * Use spinlock to ensure index doesn't change while it gets | ||
| 989 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 990 | */ | ||
| 991 | spin_lock(&drvdata->spinlock); | ||
| 992 | drvdata->cntr_idx = val; | ||
| 993 | spin_unlock(&drvdata->spinlock); | ||
| 994 | |||
| 995 | return size; | ||
| 996 | } | ||
| 997 | static DEVICE_ATTR_RW(cntr_idx); | ||
| 998 | |||
| 999 | static ssize_t cntr_rld_val_show(struct device *dev, | ||
| 1000 | struct device_attribute *attr, char *buf) | ||
| 1001 | { | ||
| 1002 | unsigned long val; | ||
| 1003 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1004 | |||
| 1005 | spin_lock(&drvdata->spinlock); | ||
| 1006 | val = drvdata->cntr_rld_val[drvdata->cntr_idx]; | ||
| 1007 | spin_unlock(&drvdata->spinlock); | ||
| 1008 | |||
| 1009 | return sprintf(buf, "%#lx\n", val); | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | static ssize_t cntr_rld_val_store(struct device *dev, | ||
| 1013 | struct device_attribute *attr, | ||
| 1014 | const char *buf, size_t size) | ||
| 1015 | { | ||
| 1016 | int ret; | ||
| 1017 | unsigned long val; | ||
| 1018 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1019 | |||
| 1020 | ret = kstrtoul(buf, 16, &val); | ||
| 1021 | if (ret) | ||
| 1022 | return ret; | ||
| 1023 | |||
| 1024 | spin_lock(&drvdata->spinlock); | ||
| 1025 | drvdata->cntr_rld_val[drvdata->cntr_idx] = val; | ||
| 1026 | spin_unlock(&drvdata->spinlock); | ||
| 1027 | |||
| 1028 | return size; | ||
| 1029 | } | ||
| 1030 | static DEVICE_ATTR_RW(cntr_rld_val); | ||
| 1031 | |||
| 1032 | static ssize_t cntr_event_show(struct device *dev, | ||
| 1033 | struct device_attribute *attr, char *buf) | ||
| 1034 | { | ||
| 1035 | unsigned long val; | ||
| 1036 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1037 | |||
| 1038 | spin_lock(&drvdata->spinlock); | ||
| 1039 | val = drvdata->cntr_event[drvdata->cntr_idx]; | ||
| 1040 | spin_unlock(&drvdata->spinlock); | ||
| 1041 | |||
| 1042 | return sprintf(buf, "%#lx\n", val); | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | static ssize_t cntr_event_store(struct device *dev, | ||
| 1046 | struct device_attribute *attr, | ||
| 1047 | const char *buf, size_t size) | ||
| 1048 | { | ||
| 1049 | int ret; | ||
| 1050 | unsigned long val; | ||
| 1051 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1052 | |||
| 1053 | ret = kstrtoul(buf, 16, &val); | ||
| 1054 | if (ret) | ||
| 1055 | return ret; | ||
| 1056 | |||
| 1057 | spin_lock(&drvdata->spinlock); | ||
| 1058 | drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; | ||
| 1059 | spin_unlock(&drvdata->spinlock); | ||
| 1060 | |||
| 1061 | return size; | ||
| 1062 | } | ||
| 1063 | static DEVICE_ATTR_RW(cntr_event); | ||
| 1064 | |||
| 1065 | static ssize_t cntr_rld_event_show(struct device *dev, | ||
| 1066 | struct device_attribute *attr, char *buf) | ||
| 1067 | { | ||
| 1068 | unsigned long val; | ||
| 1069 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1070 | |||
| 1071 | spin_lock(&drvdata->spinlock); | ||
| 1072 | val = drvdata->cntr_rld_event[drvdata->cntr_idx]; | ||
| 1073 | spin_unlock(&drvdata->spinlock); | ||
| 1074 | |||
| 1075 | return sprintf(buf, "%#lx\n", val); | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | static ssize_t cntr_rld_event_store(struct device *dev, | ||
| 1079 | struct device_attribute *attr, | ||
| 1080 | const char *buf, size_t size) | ||
| 1081 | { | ||
| 1082 | int ret; | ||
| 1083 | unsigned long val; | ||
| 1084 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1085 | |||
| 1086 | ret = kstrtoul(buf, 16, &val); | ||
| 1087 | if (ret) | ||
| 1088 | return ret; | ||
| 1089 | |||
| 1090 | spin_lock(&drvdata->spinlock); | ||
| 1091 | drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; | ||
| 1092 | spin_unlock(&drvdata->spinlock); | ||
| 1093 | |||
| 1094 | return size; | ||
| 1095 | } | ||
| 1096 | static DEVICE_ATTR_RW(cntr_rld_event); | ||
| 1097 | |||
| 1098 | static ssize_t cntr_val_show(struct device *dev, | ||
| 1099 | struct device_attribute *attr, char *buf) | ||
| 1100 | { | ||
| 1101 | int i, ret = 0; | ||
| 1102 | u32 val; | ||
| 1103 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1104 | |||
| 1105 | if (!drvdata->enable) { | ||
| 1106 | spin_lock(&drvdata->spinlock); | ||
| 1107 | for (i = 0; i < drvdata->nr_cntr; i++) | ||
| 1108 | ret += sprintf(buf, "counter %d: %x\n", | ||
| 1109 | i, drvdata->cntr_val[i]); | ||
| 1110 | spin_unlock(&drvdata->spinlock); | ||
| 1111 | return ret; | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | for (i = 0; i < drvdata->nr_cntr; i++) { | ||
| 1115 | val = etm_readl(drvdata, ETMCNTVRn(i)); | ||
| 1116 | ret += sprintf(buf, "counter %d: %x\n", i, val); | ||
| 1117 | } | ||
| 1118 | |||
| 1119 | return ret; | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | static ssize_t cntr_val_store(struct device *dev, | ||
| 1123 | struct device_attribute *attr, | ||
| 1124 | const char *buf, size_t size) | ||
| 1125 | { | ||
| 1126 | int ret; | ||
| 1127 | unsigned long val; | ||
| 1128 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1129 | |||
| 1130 | ret = kstrtoul(buf, 16, &val); | ||
| 1131 | if (ret) | ||
| 1132 | return ret; | ||
| 1133 | |||
| 1134 | spin_lock(&drvdata->spinlock); | ||
| 1135 | drvdata->cntr_val[drvdata->cntr_idx] = val; | ||
| 1136 | spin_unlock(&drvdata->spinlock); | ||
| 1137 | |||
| 1138 | return size; | ||
| 1139 | } | ||
| 1140 | static DEVICE_ATTR_RW(cntr_val); | ||
| 1141 | |||
| 1142 | static ssize_t seq_12_event_show(struct device *dev, | ||
| 1143 | struct device_attribute *attr, char *buf) | ||
| 1144 | { | ||
| 1145 | unsigned long val; | ||
| 1146 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1147 | |||
| 1148 | val = drvdata->seq_12_event; | ||
| 1149 | return sprintf(buf, "%#lx\n", val); | ||
| 1150 | } | ||
| 1151 | |||
| 1152 | static ssize_t seq_12_event_store(struct device *dev, | ||
| 1153 | struct device_attribute *attr, | ||
| 1154 | const char *buf, size_t size) | ||
| 1155 | { | ||
| 1156 | int ret; | ||
| 1157 | unsigned long val; | ||
| 1158 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1159 | |||
| 1160 | ret = kstrtoul(buf, 16, &val); | ||
| 1161 | if (ret) | ||
| 1162 | return ret; | ||
| 1163 | |||
| 1164 | drvdata->seq_12_event = val & ETM_EVENT_MASK; | ||
| 1165 | return size; | ||
| 1166 | } | ||
| 1167 | static DEVICE_ATTR_RW(seq_12_event); | ||
| 1168 | |||
| 1169 | static ssize_t seq_21_event_show(struct device *dev, | ||
| 1170 | struct device_attribute *attr, char *buf) | ||
| 1171 | { | ||
| 1172 | unsigned long val; | ||
| 1173 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1174 | |||
| 1175 | val = drvdata->seq_21_event; | ||
| 1176 | return sprintf(buf, "%#lx\n", val); | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | static ssize_t seq_21_event_store(struct device *dev, | ||
| 1180 | struct device_attribute *attr, | ||
| 1181 | const char *buf, size_t size) | ||
| 1182 | { | ||
| 1183 | int ret; | ||
| 1184 | unsigned long val; | ||
| 1185 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1186 | |||
| 1187 | ret = kstrtoul(buf, 16, &val); | ||
| 1188 | if (ret) | ||
| 1189 | return ret; | ||
| 1190 | |||
| 1191 | drvdata->seq_21_event = val & ETM_EVENT_MASK; | ||
| 1192 | return size; | ||
| 1193 | } | ||
| 1194 | static DEVICE_ATTR_RW(seq_21_event); | ||
| 1195 | |||
| 1196 | static ssize_t seq_23_event_show(struct device *dev, | ||
| 1197 | struct device_attribute *attr, char *buf) | ||
| 1198 | { | ||
| 1199 | unsigned long val; | ||
| 1200 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1201 | |||
| 1202 | val = drvdata->seq_23_event; | ||
| 1203 | return sprintf(buf, "%#lx\n", val); | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | static ssize_t seq_23_event_store(struct device *dev, | ||
| 1207 | struct device_attribute *attr, | ||
| 1208 | const char *buf, size_t size) | ||
| 1209 | { | ||
| 1210 | int ret; | ||
| 1211 | unsigned long val; | ||
| 1212 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1213 | |||
| 1214 | ret = kstrtoul(buf, 16, &val); | ||
| 1215 | if (ret) | ||
| 1216 | return ret; | ||
| 1217 | |||
| 1218 | drvdata->seq_23_event = val & ETM_EVENT_MASK; | ||
| 1219 | return size; | ||
| 1220 | } | ||
| 1221 | static DEVICE_ATTR_RW(seq_23_event); | ||
| 1222 | |||
| 1223 | static ssize_t seq_31_event_show(struct device *dev, | ||
| 1224 | struct device_attribute *attr, char *buf) | ||
| 1225 | { | ||
| 1226 | unsigned long val; | ||
| 1227 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1228 | |||
| 1229 | val = drvdata->seq_31_event; | ||
| 1230 | return sprintf(buf, "%#lx\n", val); | ||
| 1231 | } | ||
| 1232 | |||
| 1233 | static ssize_t seq_31_event_store(struct device *dev, | ||
| 1234 | struct device_attribute *attr, | ||
| 1235 | const char *buf, size_t size) | ||
| 1236 | { | ||
| 1237 | int ret; | ||
| 1238 | unsigned long val; | ||
| 1239 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1240 | |||
| 1241 | ret = kstrtoul(buf, 16, &val); | ||
| 1242 | if (ret) | ||
| 1243 | return ret; | ||
| 1244 | |||
| 1245 | drvdata->seq_31_event = val & ETM_EVENT_MASK; | ||
| 1246 | return size; | ||
| 1247 | } | ||
| 1248 | static DEVICE_ATTR_RW(seq_31_event); | ||
| 1249 | |||
| 1250 | static ssize_t seq_32_event_show(struct device *dev, | ||
| 1251 | struct device_attribute *attr, char *buf) | ||
| 1252 | { | ||
| 1253 | unsigned long val; | ||
| 1254 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1255 | |||
| 1256 | val = drvdata->seq_32_event; | ||
| 1257 | return sprintf(buf, "%#lx\n", val); | ||
| 1258 | } | ||
| 1259 | |||
| 1260 | static ssize_t seq_32_event_store(struct device *dev, | ||
| 1261 | struct device_attribute *attr, | ||
| 1262 | const char *buf, size_t size) | ||
| 1263 | { | ||
| 1264 | int ret; | ||
| 1265 | unsigned long val; | ||
| 1266 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1267 | |||
| 1268 | ret = kstrtoul(buf, 16, &val); | ||
| 1269 | if (ret) | ||
| 1270 | return ret; | ||
| 1271 | |||
| 1272 | drvdata->seq_32_event = val & ETM_EVENT_MASK; | ||
| 1273 | return size; | ||
| 1274 | } | ||
| 1275 | static DEVICE_ATTR_RW(seq_32_event); | ||
| 1276 | |||
| 1277 | static ssize_t seq_13_event_show(struct device *dev, | ||
| 1278 | struct device_attribute *attr, char *buf) | ||
| 1279 | { | ||
| 1280 | unsigned long val; | ||
| 1281 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1282 | |||
| 1283 | val = drvdata->seq_13_event; | ||
| 1284 | return sprintf(buf, "%#lx\n", val); | ||
| 1285 | } | ||
| 1286 | |||
| 1287 | static ssize_t seq_13_event_store(struct device *dev, | ||
| 1288 | struct device_attribute *attr, | ||
| 1289 | const char *buf, size_t size) | ||
| 1290 | { | ||
| 1291 | int ret; | ||
| 1292 | unsigned long val; | ||
| 1293 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1294 | |||
| 1295 | ret = kstrtoul(buf, 16, &val); | ||
| 1296 | if (ret) | ||
| 1297 | return ret; | ||
| 1298 | |||
| 1299 | drvdata->seq_13_event = val & ETM_EVENT_MASK; | ||
| 1300 | return size; | ||
| 1301 | } | ||
| 1302 | static DEVICE_ATTR_RW(seq_13_event); | ||
| 1303 | |||
| 1304 | static ssize_t seq_curr_state_show(struct device *dev, | ||
| 1305 | struct device_attribute *attr, char *buf) | ||
| 1306 | { | ||
| 1307 | unsigned long val, flags; | ||
| 1308 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1309 | |||
| 1310 | if (!drvdata->enable) { | ||
| 1311 | val = drvdata->seq_curr_state; | ||
| 1312 | goto out; | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | pm_runtime_get_sync(drvdata->dev); | ||
| 1316 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 1317 | |||
| 1318 | CS_UNLOCK(drvdata->base); | ||
| 1319 | val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK); | ||
| 1320 | CS_LOCK(drvdata->base); | ||
| 1321 | |||
| 1322 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 1323 | pm_runtime_put(drvdata->dev); | ||
| 1324 | out: | ||
| 1325 | return sprintf(buf, "%#lx\n", val); | ||
| 1326 | } | ||
| 1327 | |||
| 1328 | static ssize_t seq_curr_state_store(struct device *dev, | ||
| 1329 | struct device_attribute *attr, | ||
| 1330 | const char *buf, size_t size) | ||
| 1331 | { | ||
| 1332 | int ret; | ||
| 1333 | unsigned long val; | ||
| 1334 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1335 | |||
| 1336 | ret = kstrtoul(buf, 16, &val); | ||
| 1337 | if (ret) | ||
| 1338 | return ret; | ||
| 1339 | |||
| 1340 | if (val > ETM_SEQ_STATE_MAX_VAL) | ||
| 1341 | return -EINVAL; | ||
| 1342 | |||
| 1343 | drvdata->seq_curr_state = val; | ||
| 1344 | |||
| 1345 | return size; | ||
| 1346 | } | ||
| 1347 | static DEVICE_ATTR_RW(seq_curr_state); | ||
| 1348 | |||
| 1349 | static ssize_t ctxid_idx_show(struct device *dev, | ||
| 1350 | struct device_attribute *attr, char *buf) | ||
| 1351 | { | ||
| 1352 | unsigned long val; | ||
| 1353 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1354 | |||
| 1355 | val = drvdata->ctxid_idx; | ||
| 1356 | return sprintf(buf, "%#lx\n", val); | ||
| 1357 | } | ||
| 1358 | |||
| 1359 | static ssize_t ctxid_idx_store(struct device *dev, | ||
| 1360 | struct device_attribute *attr, | ||
| 1361 | const char *buf, size_t size) | ||
| 1362 | { | 604 | { |
| 1363 | int ret; | 605 | u32 mode; |
| 1364 | unsigned long val; | 606 | struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 1365 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1366 | |||
| 1367 | ret = kstrtoul(buf, 16, &val); | ||
| 1368 | if (ret) | ||
| 1369 | return ret; | ||
| 1370 | |||
| 1371 | if (val >= drvdata->nr_ctxid_cmp) | ||
| 1372 | return -EINVAL; | ||
| 1373 | 607 | ||
| 1374 | /* | 608 | /* |
| 1375 | * Use spinlock to ensure index doesn't change while it gets | 609 | * For as long as the tracer isn't disabled another entity can't |
| 1376 | * dereferenced multiple times within a spinlock block elsewhere. | 610 | * change its status. As such we can read the status here without |
| 611 | * fearing it will change under us. | ||
| 1377 | */ | 612 | */ |
| 1378 | spin_lock(&drvdata->spinlock); | 613 | mode = local_read(&drvdata->mode); |
| 1379 | drvdata->ctxid_idx = val; | ||
| 1380 | spin_unlock(&drvdata->spinlock); | ||
| 1381 | |||
| 1382 | return size; | ||
| 1383 | } | ||
| 1384 | static DEVICE_ATTR_RW(ctxid_idx); | ||
| 1385 | |||
| 1386 | static ssize_t ctxid_pid_show(struct device *dev, | ||
| 1387 | struct device_attribute *attr, char *buf) | ||
| 1388 | { | ||
| 1389 | unsigned long val; | ||
| 1390 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1391 | |||
| 1392 | spin_lock(&drvdata->spinlock); | ||
| 1393 | val = drvdata->ctxid_vpid[drvdata->ctxid_idx]; | ||
| 1394 | spin_unlock(&drvdata->spinlock); | ||
| 1395 | 614 | ||
| 1396 | return sprintf(buf, "%#lx\n", val); | 615 | switch (mode) { |
| 1397 | } | 616 | case CS_MODE_DISABLED: |
| 1398 | 617 | break; | |
| 1399 | static ssize_t ctxid_pid_store(struct device *dev, | 618 | case CS_MODE_SYSFS: |
| 1400 | struct device_attribute *attr, | 619 | etm_disable_sysfs(csdev); |
| 1401 | const char *buf, size_t size) | 620 | break; |
| 1402 | { | 621 | case CS_MODE_PERF: |
| 1403 | int ret; | 622 | etm_disable_perf(csdev); |
| 1404 | unsigned long vpid, pid; | 623 | break; |
| 1405 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | 624 | default: |
| 1406 | 625 | WARN_ON_ONCE(mode); | |
| 1407 | ret = kstrtoul(buf, 16, &vpid); | 626 | return; |
| 1408 | if (ret) | ||
| 1409 | return ret; | ||
| 1410 | |||
| 1411 | pid = coresight_vpid_to_pid(vpid); | ||
| 1412 | |||
| 1413 | spin_lock(&drvdata->spinlock); | ||
| 1414 | drvdata->ctxid_pid[drvdata->ctxid_idx] = pid; | ||
| 1415 | drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid; | ||
| 1416 | spin_unlock(&drvdata->spinlock); | ||
| 1417 | |||
| 1418 | return size; | ||
| 1419 | } | ||
| 1420 | static DEVICE_ATTR_RW(ctxid_pid); | ||
| 1421 | |||
| 1422 | static ssize_t ctxid_mask_show(struct device *dev, | ||
| 1423 | struct device_attribute *attr, char *buf) | ||
| 1424 | { | ||
| 1425 | unsigned long val; | ||
| 1426 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1427 | |||
| 1428 | val = drvdata->ctxid_mask; | ||
| 1429 | return sprintf(buf, "%#lx\n", val); | ||
| 1430 | } | ||
| 1431 | |||
| 1432 | static ssize_t ctxid_mask_store(struct device *dev, | ||
| 1433 | struct device_attribute *attr, | ||
| 1434 | const char *buf, size_t size) | ||
| 1435 | { | ||
| 1436 | int ret; | ||
| 1437 | unsigned long val; | ||
| 1438 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1439 | |||
| 1440 | ret = kstrtoul(buf, 16, &val); | ||
| 1441 | if (ret) | ||
| 1442 | return ret; | ||
| 1443 | |||
| 1444 | drvdata->ctxid_mask = val; | ||
| 1445 | return size; | ||
| 1446 | } | ||
| 1447 | static DEVICE_ATTR_RW(ctxid_mask); | ||
| 1448 | |||
| 1449 | static ssize_t sync_freq_show(struct device *dev, | ||
| 1450 | struct device_attribute *attr, char *buf) | ||
| 1451 | { | ||
| 1452 | unsigned long val; | ||
| 1453 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1454 | |||
| 1455 | val = drvdata->sync_freq; | ||
| 1456 | return sprintf(buf, "%#lx\n", val); | ||
| 1457 | } | ||
| 1458 | |||
| 1459 | static ssize_t sync_freq_store(struct device *dev, | ||
| 1460 | struct device_attribute *attr, | ||
| 1461 | const char *buf, size_t size) | ||
| 1462 | { | ||
| 1463 | int ret; | ||
| 1464 | unsigned long val; | ||
| 1465 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1466 | |||
| 1467 | ret = kstrtoul(buf, 16, &val); | ||
| 1468 | if (ret) | ||
| 1469 | return ret; | ||
| 1470 | |||
| 1471 | drvdata->sync_freq = val & ETM_SYNC_MASK; | ||
| 1472 | return size; | ||
| 1473 | } | ||
| 1474 | static DEVICE_ATTR_RW(sync_freq); | ||
| 1475 | |||
| 1476 | static ssize_t timestamp_event_show(struct device *dev, | ||
| 1477 | struct device_attribute *attr, char *buf) | ||
| 1478 | { | ||
| 1479 | unsigned long val; | ||
| 1480 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1481 | |||
| 1482 | val = drvdata->timestamp_event; | ||
| 1483 | return sprintf(buf, "%#lx\n", val); | ||
| 1484 | } | ||
| 1485 | |||
| 1486 | static ssize_t timestamp_event_store(struct device *dev, | ||
| 1487 | struct device_attribute *attr, | ||
| 1488 | const char *buf, size_t size) | ||
| 1489 | { | ||
| 1490 | int ret; | ||
| 1491 | unsigned long val; | ||
| 1492 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1493 | |||
| 1494 | ret = kstrtoul(buf, 16, &val); | ||
| 1495 | if (ret) | ||
| 1496 | return ret; | ||
| 1497 | |||
| 1498 | drvdata->timestamp_event = val & ETM_EVENT_MASK; | ||
| 1499 | return size; | ||
| 1500 | } | ||
| 1501 | static DEVICE_ATTR_RW(timestamp_event); | ||
| 1502 | |||
| 1503 | static ssize_t cpu_show(struct device *dev, | ||
| 1504 | struct device_attribute *attr, char *buf) | ||
| 1505 | { | ||
| 1506 | int val; | ||
| 1507 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1508 | |||
| 1509 | val = drvdata->cpu; | ||
| 1510 | return scnprintf(buf, PAGE_SIZE, "%d\n", val); | ||
| 1511 | |||
| 1512 | } | ||
| 1513 | static DEVICE_ATTR_RO(cpu); | ||
| 1514 | |||
| 1515 | static ssize_t traceid_show(struct device *dev, | ||
| 1516 | struct device_attribute *attr, char *buf) | ||
| 1517 | { | ||
| 1518 | unsigned long val, flags; | ||
| 1519 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1520 | |||
| 1521 | if (!drvdata->enable) { | ||
| 1522 | val = drvdata->traceid; | ||
| 1523 | goto out; | ||
| 1524 | } | 627 | } |
| 1525 | 628 | ||
| 1526 | pm_runtime_get_sync(drvdata->dev); | 629 | if (mode) |
| 1527 | spin_lock_irqsave(&drvdata->spinlock, flags); | 630 | local_set(&drvdata->mode, CS_MODE_DISABLED); |
| 1528 | CS_UNLOCK(drvdata->base); | ||
| 1529 | |||
| 1530 | val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); | ||
| 1531 | |||
| 1532 | CS_LOCK(drvdata->base); | ||
| 1533 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 1534 | pm_runtime_put(drvdata->dev); | ||
| 1535 | out: | ||
| 1536 | return sprintf(buf, "%#lx\n", val); | ||
| 1537 | } | ||
| 1538 | |||
| 1539 | static ssize_t traceid_store(struct device *dev, | ||
| 1540 | struct device_attribute *attr, | ||
| 1541 | const char *buf, size_t size) | ||
| 1542 | { | ||
| 1543 | int ret; | ||
| 1544 | unsigned long val; | ||
| 1545 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1546 | |||
| 1547 | ret = kstrtoul(buf, 16, &val); | ||
| 1548 | if (ret) | ||
| 1549 | return ret; | ||
| 1550 | |||
| 1551 | drvdata->traceid = val & ETM_TRACEID_MASK; | ||
| 1552 | return size; | ||
| 1553 | } | 631 | } |
| 1554 | static DEVICE_ATTR_RW(traceid); | ||
| 1555 | |||
| 1556 | static struct attribute *coresight_etm_attrs[] = { | ||
| 1557 | &dev_attr_nr_addr_cmp.attr, | ||
| 1558 | &dev_attr_nr_cntr.attr, | ||
| 1559 | &dev_attr_nr_ctxid_cmp.attr, | ||
| 1560 | &dev_attr_etmsr.attr, | ||
| 1561 | &dev_attr_reset.attr, | ||
| 1562 | &dev_attr_mode.attr, | ||
| 1563 | &dev_attr_trigger_event.attr, | ||
| 1564 | &dev_attr_enable_event.attr, | ||
| 1565 | &dev_attr_fifofull_level.attr, | ||
| 1566 | &dev_attr_addr_idx.attr, | ||
| 1567 | &dev_attr_addr_single.attr, | ||
| 1568 | &dev_attr_addr_range.attr, | ||
| 1569 | &dev_attr_addr_start.attr, | ||
| 1570 | &dev_attr_addr_stop.attr, | ||
| 1571 | &dev_attr_addr_acctype.attr, | ||
| 1572 | &dev_attr_cntr_idx.attr, | ||
| 1573 | &dev_attr_cntr_rld_val.attr, | ||
| 1574 | &dev_attr_cntr_event.attr, | ||
| 1575 | &dev_attr_cntr_rld_event.attr, | ||
| 1576 | &dev_attr_cntr_val.attr, | ||
| 1577 | &dev_attr_seq_12_event.attr, | ||
| 1578 | &dev_attr_seq_21_event.attr, | ||
| 1579 | &dev_attr_seq_23_event.attr, | ||
| 1580 | &dev_attr_seq_31_event.attr, | ||
| 1581 | &dev_attr_seq_32_event.attr, | ||
| 1582 | &dev_attr_seq_13_event.attr, | ||
| 1583 | &dev_attr_seq_curr_state.attr, | ||
| 1584 | &dev_attr_ctxid_idx.attr, | ||
| 1585 | &dev_attr_ctxid_pid.attr, | ||
| 1586 | &dev_attr_ctxid_mask.attr, | ||
| 1587 | &dev_attr_sync_freq.attr, | ||
| 1588 | &dev_attr_timestamp_event.attr, | ||
| 1589 | &dev_attr_traceid.attr, | ||
| 1590 | &dev_attr_cpu.attr, | ||
| 1591 | NULL, | ||
| 1592 | }; | ||
| 1593 | |||
| 1594 | #define coresight_simple_func(name, offset) \ | ||
| 1595 | static ssize_t name##_show(struct device *_dev, \ | ||
| 1596 | struct device_attribute *attr, char *buf) \ | ||
| 1597 | { \ | ||
| 1598 | struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ | ||
| 1599 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ | ||
| 1600 | readl_relaxed(drvdata->base + offset)); \ | ||
| 1601 | } \ | ||
| 1602 | DEVICE_ATTR_RO(name) | ||
| 1603 | |||
| 1604 | coresight_simple_func(etmccr, ETMCCR); | ||
| 1605 | coresight_simple_func(etmccer, ETMCCER); | ||
| 1606 | coresight_simple_func(etmscr, ETMSCR); | ||
| 1607 | coresight_simple_func(etmidr, ETMIDR); | ||
| 1608 | coresight_simple_func(etmcr, ETMCR); | ||
| 1609 | coresight_simple_func(etmtraceidr, ETMTRACEIDR); | ||
| 1610 | coresight_simple_func(etmteevr, ETMTEEVR); | ||
| 1611 | coresight_simple_func(etmtssvr, ETMTSSCR); | ||
| 1612 | coresight_simple_func(etmtecr1, ETMTECR1); | ||
| 1613 | coresight_simple_func(etmtecr2, ETMTECR2); | ||
| 1614 | |||
| 1615 | static struct attribute *coresight_etm_mgmt_attrs[] = { | ||
| 1616 | &dev_attr_etmccr.attr, | ||
| 1617 | &dev_attr_etmccer.attr, | ||
| 1618 | &dev_attr_etmscr.attr, | ||
| 1619 | &dev_attr_etmidr.attr, | ||
| 1620 | &dev_attr_etmcr.attr, | ||
| 1621 | &dev_attr_etmtraceidr.attr, | ||
| 1622 | &dev_attr_etmteevr.attr, | ||
| 1623 | &dev_attr_etmtssvr.attr, | ||
| 1624 | &dev_attr_etmtecr1.attr, | ||
| 1625 | &dev_attr_etmtecr2.attr, | ||
| 1626 | NULL, | ||
| 1627 | }; | ||
| 1628 | 632 | ||
| 1629 | static const struct attribute_group coresight_etm_group = { | 633 | static const struct coresight_ops_source etm_source_ops = { |
| 1630 | .attrs = coresight_etm_attrs, | 634 | .cpu_id = etm_cpu_id, |
| 1631 | }; | 635 | .trace_id = etm_trace_id, |
| 1632 | 636 | .enable = etm_enable, | |
| 1633 | 637 | .disable = etm_disable, | |
| 1634 | static const struct attribute_group coresight_etm_mgmt_group = { | ||
| 1635 | .attrs = coresight_etm_mgmt_attrs, | ||
| 1636 | .name = "mgmt", | ||
| 1637 | }; | 638 | }; |
| 1638 | 639 | ||
| 1639 | static const struct attribute_group *coresight_etm_groups[] = { | 640 | static const struct coresight_ops etm_cs_ops = { |
| 1640 | &coresight_etm_group, | 641 | .source_ops = &etm_source_ops, |
| 1641 | &coresight_etm_mgmt_group, | ||
| 1642 | NULL, | ||
| 1643 | }; | 642 | }; |
| 1644 | 643 | ||
| 1645 | static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, | 644 | static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, |
| @@ -1658,7 +657,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, | |||
| 1658 | etmdrvdata[cpu]->os_unlock = true; | 657 | etmdrvdata[cpu]->os_unlock = true; |
| 1659 | } | 658 | } |
| 1660 | 659 | ||
| 1661 | if (etmdrvdata[cpu]->enable) | 660 | if (local_read(&etmdrvdata[cpu]->mode)) |
| 1662 | etm_enable_hw(etmdrvdata[cpu]); | 661 | etm_enable_hw(etmdrvdata[cpu]); |
| 1663 | spin_unlock(&etmdrvdata[cpu]->spinlock); | 662 | spin_unlock(&etmdrvdata[cpu]->spinlock); |
| 1664 | break; | 663 | break; |
| @@ -1671,7 +670,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, | |||
| 1671 | 670 | ||
| 1672 | case CPU_DYING: | 671 | case CPU_DYING: |
| 1673 | spin_lock(&etmdrvdata[cpu]->spinlock); | 672 | spin_lock(&etmdrvdata[cpu]->spinlock); |
| 1674 | if (etmdrvdata[cpu]->enable) | 673 | if (local_read(&etmdrvdata[cpu]->mode)) |
| 1675 | etm_disable_hw(etmdrvdata[cpu]); | 674 | etm_disable_hw(etmdrvdata[cpu]); |
| 1676 | spin_unlock(&etmdrvdata[cpu]->spinlock); | 675 | spin_unlock(&etmdrvdata[cpu]->spinlock); |
| 1677 | break; | 676 | break; |
| @@ -1707,6 +706,9 @@ static void etm_init_arch_data(void *info) | |||
| 1707 | u32 etmccr; | 706 | u32 etmccr; |
| 1708 | struct etm_drvdata *drvdata = info; | 707 | struct etm_drvdata *drvdata = info; |
| 1709 | 708 | ||
| 709 | /* Make sure all registers are accessible */ | ||
| 710 | etm_os_unlock(drvdata); | ||
| 711 | |||
| 1710 | CS_UNLOCK(drvdata->base); | 712 | CS_UNLOCK(drvdata->base); |
| 1711 | 713 | ||
| 1712 | /* First dummy read */ | 714 | /* First dummy read */ |
| @@ -1743,40 +745,9 @@ static void etm_init_arch_data(void *info) | |||
| 1743 | CS_LOCK(drvdata->base); | 745 | CS_LOCK(drvdata->base); |
| 1744 | } | 746 | } |
| 1745 | 747 | ||
| 1746 | static void etm_init_default_data(struct etm_drvdata *drvdata) | 748 | static void etm_init_trace_id(struct etm_drvdata *drvdata) |
| 1747 | { | 749 | { |
| 1748 | /* | 750 | drvdata->traceid = coresight_get_trace_id(drvdata->cpu); |
| 1749 | * A trace ID of value 0 is invalid, so let's start at some | ||
| 1750 | * random value that fits in 7 bits and will be just as good. | ||
| 1751 | */ | ||
| 1752 | static int etm3x_traceid = 0x10; | ||
| 1753 | |||
| 1754 | u32 flags = (1 << 0 | /* instruction execute*/ | ||
| 1755 | 3 << 3 | /* ARM instruction */ | ||
| 1756 | 0 << 5 | /* No data value comparison */ | ||
| 1757 | 0 << 7 | /* No exact mach */ | ||
| 1758 | 0 << 8 | /* Ignore context ID */ | ||
| 1759 | 0 << 10); /* Security ignored */ | ||
| 1760 | |||
| 1761 | /* | ||
| 1762 | * Initial configuration only - guarantees sources handled by | ||
| 1763 | * this driver have a unique ID at startup time but not between | ||
| 1764 | * all other types of sources. For that we lean on the core | ||
| 1765 | * framework. | ||
| 1766 | */ | ||
| 1767 | drvdata->traceid = etm3x_traceid++; | ||
| 1768 | drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN); | ||
| 1769 | drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1; | ||
| 1770 | if (drvdata->nr_addr_cmp >= 2) { | ||
| 1771 | drvdata->addr_val[0] = (u32) _stext; | ||
| 1772 | drvdata->addr_val[1] = (u32) _etext; | ||
| 1773 | drvdata->addr_acctype[0] = flags; | ||
| 1774 | drvdata->addr_acctype[1] = flags; | ||
| 1775 | drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE; | ||
| 1776 | drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; | ||
| 1777 | } | ||
| 1778 | |||
| 1779 | etm_set_default(drvdata); | ||
| 1780 | } | 751 | } |
| 1781 | 752 | ||
| 1782 | static int etm_probe(struct amba_device *adev, const struct amba_id *id) | 753 | static int etm_probe(struct amba_device *adev, const struct amba_id *id) |
| @@ -1831,9 +802,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 1831 | get_online_cpus(); | 802 | get_online_cpus(); |
| 1832 | etmdrvdata[drvdata->cpu] = drvdata; | 803 | etmdrvdata[drvdata->cpu] = drvdata; |
| 1833 | 804 | ||
| 1834 | if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1)) | ||
| 1835 | drvdata->os_unlock = true; | ||
| 1836 | |||
| 1837 | if (smp_call_function_single(drvdata->cpu, | 805 | if (smp_call_function_single(drvdata->cpu, |
| 1838 | etm_init_arch_data, drvdata, 1)) | 806 | etm_init_arch_data, drvdata, 1)) |
| 1839 | dev_err(dev, "ETM arch init failed\n"); | 807 | dev_err(dev, "ETM arch init failed\n"); |
| @@ -1847,7 +815,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 1847 | ret = -EINVAL; | 815 | ret = -EINVAL; |
| 1848 | goto err_arch_supported; | 816 | goto err_arch_supported; |
| 1849 | } | 817 | } |
| 1850 | etm_init_default_data(drvdata); | 818 | |
| 819 | etm_init_trace_id(drvdata); | ||
| 820 | etm_set_default(&drvdata->config); | ||
| 1851 | 821 | ||
| 1852 | desc->type = CORESIGHT_DEV_TYPE_SOURCE; | 822 | desc->type = CORESIGHT_DEV_TYPE_SOURCE; |
| 1853 | desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; | 823 | desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; |
| @@ -1861,6 +831,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 1861 | goto err_arch_supported; | 831 | goto err_arch_supported; |
| 1862 | } | 832 | } |
| 1863 | 833 | ||
| 834 | ret = etm_perf_symlink(drvdata->csdev, true); | ||
| 835 | if (ret) { | ||
| 836 | coresight_unregister(drvdata->csdev); | ||
| 837 | goto err_arch_supported; | ||
| 838 | } | ||
| 839 | |||
| 1864 | pm_runtime_put(&adev->dev); | 840 | pm_runtime_put(&adev->dev); |
| 1865 | dev_info(dev, "%s initialized\n", (char *)id->data); | 841 | dev_info(dev, "%s initialized\n", (char *)id->data); |
| 1866 | 842 | ||
| @@ -1877,17 +853,6 @@ err_arch_supported: | |||
| 1877 | return ret; | 853 | return ret; |
| 1878 | } | 854 | } |
| 1879 | 855 | ||
| 1880 | static int etm_remove(struct amba_device *adev) | ||
| 1881 | { | ||
| 1882 | struct etm_drvdata *drvdata = amba_get_drvdata(adev); | ||
| 1883 | |||
| 1884 | coresight_unregister(drvdata->csdev); | ||
| 1885 | if (--etm_count == 0) | ||
| 1886 | unregister_hotcpu_notifier(&etm_cpu_notifier); | ||
| 1887 | |||
| 1888 | return 0; | ||
| 1889 | } | ||
| 1890 | |||
| 1891 | #ifdef CONFIG_PM | 856 | #ifdef CONFIG_PM |
| 1892 | static int etm_runtime_suspend(struct device *dev) | 857 | static int etm_runtime_suspend(struct device *dev) |
| 1893 | { | 858 | { |
| @@ -1948,13 +913,9 @@ static struct amba_driver etm_driver = { | |||
| 1948 | .name = "coresight-etm3x", | 913 | .name = "coresight-etm3x", |
| 1949 | .owner = THIS_MODULE, | 914 | .owner = THIS_MODULE, |
| 1950 | .pm = &etm_dev_pm_ops, | 915 | .pm = &etm_dev_pm_ops, |
| 916 | .suppress_bind_attrs = true, | ||
| 1951 | }, | 917 | }, |
| 1952 | .probe = etm_probe, | 918 | .probe = etm_probe, |
| 1953 | .remove = etm_remove, | ||
| 1954 | .id_table = etm_ids, | 919 | .id_table = etm_ids, |
| 1955 | }; | 920 | }; |
| 1956 | 921 | builtin_amba_driver(etm_driver); | |
| 1957 | module_amba_driver(etm_driver); | ||
| 1958 | |||
| 1959 | MODULE_LICENSE("GPL v2"); | ||
| 1960 | MODULE_DESCRIPTION("CoreSight Program Flow Trace driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index a6707642bb23..1c59bd36834c 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
| 16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/io.h> | 18 | #include <linux/io.h> |
| 20 | #include <linux/err.h> | 19 | #include <linux/err.h> |
| 21 | #include <linux/fs.h> | 20 | #include <linux/fs.h> |
| @@ -32,6 +31,7 @@ | |||
| 32 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
| 33 | #include <linux/uaccess.h> | 32 | #include <linux/uaccess.h> |
| 34 | #include <linux/pm_runtime.h> | 33 | #include <linux/pm_runtime.h> |
| 34 | #include <linux/perf_event.h> | ||
| 35 | #include <asm/sections.h> | 35 | #include <asm/sections.h> |
| 36 | 36 | ||
| 37 | #include "coresight-etm4x.h" | 37 | #include "coresight-etm4x.h" |
| @@ -63,6 +63,13 @@ static bool etm4_arch_supported(u8 arch) | |||
| 63 | return true; | 63 | return true; |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | static int etm4_cpu_id(struct coresight_device *csdev) | ||
| 67 | { | ||
| 68 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 69 | |||
| 70 | return drvdata->cpu; | ||
| 71 | } | ||
| 72 | |||
| 66 | static int etm4_trace_id(struct coresight_device *csdev) | 73 | static int etm4_trace_id(struct coresight_device *csdev) |
| 67 | { | 74 | { |
| 68 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 75 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| @@ -72,7 +79,6 @@ static int etm4_trace_id(struct coresight_device *csdev) | |||
| 72 | if (!drvdata->enable) | 79 | if (!drvdata->enable) |
| 73 | return drvdata->trcid; | 80 | return drvdata->trcid; |
| 74 | 81 | ||
| 75 | pm_runtime_get_sync(drvdata->dev); | ||
| 76 | spin_lock_irqsave(&drvdata->spinlock, flags); | 82 | spin_lock_irqsave(&drvdata->spinlock, flags); |
| 77 | 83 | ||
| 78 | CS_UNLOCK(drvdata->base); | 84 | CS_UNLOCK(drvdata->base); |
| @@ -81,7 +87,6 @@ static int etm4_trace_id(struct coresight_device *csdev) | |||
| 81 | CS_LOCK(drvdata->base); | 87 | CS_LOCK(drvdata->base); |
| 82 | 88 | ||
| 83 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 89 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
| 84 | pm_runtime_put(drvdata->dev); | ||
| 85 | 90 | ||
| 86 | return trace_id; | 91 | return trace_id; |
| 87 | } | 92 | } |
| @@ -182,12 +187,12 @@ static void etm4_enable_hw(void *info) | |||
| 182 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); | 187 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); |
| 183 | } | 188 | } |
| 184 | 189 | ||
| 185 | static int etm4_enable(struct coresight_device *csdev) | 190 | static int etm4_enable(struct coresight_device *csdev, |
| 191 | struct perf_event_attr *attr, u32 mode) | ||
| 186 | { | 192 | { |
| 187 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 193 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 188 | int ret; | 194 | int ret; |
| 189 | 195 | ||
| 190 | pm_runtime_get_sync(drvdata->dev); | ||
| 191 | spin_lock(&drvdata->spinlock); | 196 | spin_lock(&drvdata->spinlock); |
| 192 | 197 | ||
| 193 | /* | 198 | /* |
| @@ -207,7 +212,6 @@ static int etm4_enable(struct coresight_device *csdev) | |||
| 207 | return 0; | 212 | return 0; |
| 208 | err: | 213 | err: |
| 209 | spin_unlock(&drvdata->spinlock); | 214 | spin_unlock(&drvdata->spinlock); |
| 210 | pm_runtime_put(drvdata->dev); | ||
| 211 | return ret; | 215 | return ret; |
| 212 | } | 216 | } |
| 213 | 217 | ||
| @@ -256,12 +260,11 @@ static void etm4_disable(struct coresight_device *csdev) | |||
| 256 | spin_unlock(&drvdata->spinlock); | 260 | spin_unlock(&drvdata->spinlock); |
| 257 | put_online_cpus(); | 261 | put_online_cpus(); |
| 258 | 262 | ||
| 259 | pm_runtime_put(drvdata->dev); | ||
| 260 | |||
| 261 | dev_info(drvdata->dev, "ETM tracing disabled\n"); | 263 | dev_info(drvdata->dev, "ETM tracing disabled\n"); |
| 262 | } | 264 | } |
| 263 | 265 | ||
| 264 | static const struct coresight_ops_source etm4_source_ops = { | 266 | static const struct coresight_ops_source etm4_source_ops = { |
| 267 | .cpu_id = etm4_cpu_id, | ||
| 265 | .trace_id = etm4_trace_id, | 268 | .trace_id = etm4_trace_id, |
| 266 | .enable = etm4_enable, | 269 | .enable = etm4_enable, |
| 267 | .disable = etm4_disable, | 270 | .disable = etm4_disable, |
| @@ -2219,7 +2222,7 @@ static ssize_t name##_show(struct device *_dev, \ | |||
| 2219 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ | 2222 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ |
| 2220 | readl_relaxed(drvdata->base + offset)); \ | 2223 | readl_relaxed(drvdata->base + offset)); \ |
| 2221 | } \ | 2224 | } \ |
| 2222 | DEVICE_ATTR_RO(name) | 2225 | static DEVICE_ATTR_RO(name) |
| 2223 | 2226 | ||
| 2224 | coresight_simple_func(trcoslsr, TRCOSLSR); | 2227 | coresight_simple_func(trcoslsr, TRCOSLSR); |
| 2225 | coresight_simple_func(trcpdcr, TRCPDCR); | 2228 | coresight_simple_func(trcpdcr, TRCPDCR); |
| @@ -2684,17 +2687,6 @@ err_coresight_register: | |||
| 2684 | return ret; | 2687 | return ret; |
| 2685 | } | 2688 | } |
| 2686 | 2689 | ||
| 2687 | static int etm4_remove(struct amba_device *adev) | ||
| 2688 | { | ||
| 2689 | struct etmv4_drvdata *drvdata = amba_get_drvdata(adev); | ||
| 2690 | |||
| 2691 | coresight_unregister(drvdata->csdev); | ||
| 2692 | if (--etm4_count == 0) | ||
| 2693 | unregister_hotcpu_notifier(&etm4_cpu_notifier); | ||
| 2694 | |||
| 2695 | return 0; | ||
| 2696 | } | ||
| 2697 | |||
| 2698 | static struct amba_id etm4_ids[] = { | 2690 | static struct amba_id etm4_ids[] = { |
| 2699 | { /* ETM 4.0 - Qualcomm */ | 2691 | { /* ETM 4.0 - Qualcomm */ |
| 2700 | .id = 0x0003b95d, | 2692 | .id = 0x0003b95d, |
| @@ -2712,10 +2704,9 @@ static struct amba_id etm4_ids[] = { | |||
| 2712 | static struct amba_driver etm4x_driver = { | 2704 | static struct amba_driver etm4x_driver = { |
| 2713 | .drv = { | 2705 | .drv = { |
| 2714 | .name = "coresight-etm4x", | 2706 | .name = "coresight-etm4x", |
| 2707 | .suppress_bind_attrs = true, | ||
| 2715 | }, | 2708 | }, |
| 2716 | .probe = etm4_probe, | 2709 | .probe = etm4_probe, |
| 2717 | .remove = etm4_remove, | ||
| 2718 | .id_table = etm4_ids, | 2710 | .id_table = etm4_ids, |
| 2719 | }; | 2711 | }; |
| 2720 | 2712 | builtin_amba_driver(etm4x_driver); | |
| 2721 | module_amba_driver(etm4x_driver); | ||
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 2e36bde7fcb4..0600ca30649d 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
| 2 | * | 2 | * |
| 3 | * Description: CoreSight Funnel driver | ||
| 4 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
| @@ -11,7 +13,6 @@ | |||
| 11 | */ | 13 | */ |
| 12 | 14 | ||
| 13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 16 | #include <linux/types.h> | 17 | #include <linux/types.h> |
| 17 | #include <linux/device.h> | 18 | #include <linux/device.h> |
| @@ -69,7 +70,6 @@ static int funnel_enable(struct coresight_device *csdev, int inport, | |||
| 69 | { | 70 | { |
| 70 | struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 71 | struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 71 | 72 | ||
| 72 | pm_runtime_get_sync(drvdata->dev); | ||
| 73 | funnel_enable_hw(drvdata, inport); | 73 | funnel_enable_hw(drvdata, inport); |
| 74 | 74 | ||
| 75 | dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); | 75 | dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); |
| @@ -95,7 +95,6 @@ static void funnel_disable(struct coresight_device *csdev, int inport, | |||
| 95 | struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 95 | struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 96 | 96 | ||
| 97 | funnel_disable_hw(drvdata, inport); | 97 | funnel_disable_hw(drvdata, inport); |
| 98 | pm_runtime_put(drvdata->dev); | ||
| 99 | 98 | ||
| 100 | dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); | 99 | dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); |
| 101 | } | 100 | } |
| @@ -226,14 +225,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 226 | return 0; | 225 | return 0; |
| 227 | } | 226 | } |
| 228 | 227 | ||
| 229 | static int funnel_remove(struct amba_device *adev) | ||
| 230 | { | ||
| 231 | struct funnel_drvdata *drvdata = amba_get_drvdata(adev); | ||
| 232 | |||
| 233 | coresight_unregister(drvdata->csdev); | ||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | |||
| 237 | #ifdef CONFIG_PM | 228 | #ifdef CONFIG_PM |
| 238 | static int funnel_runtime_suspend(struct device *dev) | 229 | static int funnel_runtime_suspend(struct device *dev) |
| 239 | { | 230 | { |
| @@ -273,13 +264,9 @@ static struct amba_driver funnel_driver = { | |||
| 273 | .name = "coresight-funnel", | 264 | .name = "coresight-funnel", |
| 274 | .owner = THIS_MODULE, | 265 | .owner = THIS_MODULE, |
| 275 | .pm = &funnel_dev_pm_ops, | 266 | .pm = &funnel_dev_pm_ops, |
| 267 | .suppress_bind_attrs = true, | ||
| 276 | }, | 268 | }, |
| 277 | .probe = funnel_probe, | 269 | .probe = funnel_probe, |
| 278 | .remove = funnel_remove, | ||
| 279 | .id_table = funnel_ids, | 270 | .id_table = funnel_ids, |
| 280 | }; | 271 | }; |
| 281 | 272 | builtin_amba_driver(funnel_driver); | |
| 282 | module_amba_driver(funnel_driver); | ||
| 283 | |||
| 284 | MODULE_LICENSE("GPL v2"); | ||
| 285 | MODULE_DESCRIPTION("CoreSight Funnel driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 62fcd98cc7cf..333eddaed339 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h | |||
| @@ -34,6 +34,15 @@ | |||
| 34 | #define TIMEOUT_US 100 | 34 | #define TIMEOUT_US 100 |
| 35 | #define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb) | 35 | #define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb) |
| 36 | 36 | ||
| 37 | #define ETM_MODE_EXCL_KERN BIT(30) | ||
| 38 | #define ETM_MODE_EXCL_USER BIT(31) | ||
| 39 | |||
| 40 | enum cs_mode { | ||
| 41 | CS_MODE_DISABLED, | ||
| 42 | CS_MODE_SYSFS, | ||
| 43 | CS_MODE_PERF, | ||
| 44 | }; | ||
| 45 | |||
| 37 | static inline void CS_LOCK(void __iomem *addr) | 46 | static inline void CS_LOCK(void __iomem *addr) |
| 38 | { | 47 | { |
| 39 | do { | 48 | do { |
| @@ -52,6 +61,12 @@ static inline void CS_UNLOCK(void __iomem *addr) | |||
| 52 | } while (0); | 61 | } while (0); |
| 53 | } | 62 | } |
| 54 | 63 | ||
| 64 | void coresight_disable_path(struct list_head *path); | ||
| 65 | int coresight_enable_path(struct list_head *path, u32 mode); | ||
| 66 | struct coresight_device *coresight_get_sink(struct list_head *path); | ||
| 67 | struct list_head *coresight_build_path(struct coresight_device *csdev); | ||
| 68 | void coresight_release_path(struct list_head *path); | ||
| 69 | |||
| 55 | #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X | 70 | #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X |
| 56 | extern int etm_readl_cp14(u32 off, unsigned int *val); | 71 | extern int etm_readl_cp14(u32 off, unsigned int *val); |
| 57 | extern int etm_writel_cp14(u32 off, u32 val); | 72 | extern int etm_writel_cp14(u32 off, u32 val); |
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c index 584059e9e866..700f710e4bfa 100644 --- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c +++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
| 16 | #include <linux/coresight.h> | 16 | #include <linux/coresight.h> |
| 17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
| 21 | #include <linux/io.h> | 20 | #include <linux/io.h> |
| @@ -48,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport, | |||
| 48 | { | 47 | { |
| 49 | struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent); | 48 | struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 50 | 49 | ||
| 51 | pm_runtime_get_sync(drvdata->dev); | ||
| 52 | |||
| 53 | CS_UNLOCK(drvdata->base); | 50 | CS_UNLOCK(drvdata->base); |
| 54 | 51 | ||
| 55 | /* | 52 | /* |
| @@ -86,8 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport, | |||
| 86 | 83 | ||
| 87 | CS_LOCK(drvdata->base); | 84 | CS_LOCK(drvdata->base); |
| 88 | 85 | ||
| 89 | pm_runtime_put(drvdata->dev); | ||
| 90 | |||
| 91 | dev_info(drvdata->dev, "REPLICATOR disabled\n"); | 86 | dev_info(drvdata->dev, "REPLICATOR disabled\n"); |
| 92 | } | 87 | } |
| 93 | 88 | ||
| @@ -156,15 +151,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 156 | return 0; | 151 | return 0; |
| 157 | } | 152 | } |
| 158 | 153 | ||
| 159 | static int replicator_remove(struct amba_device *adev) | ||
| 160 | { | ||
| 161 | struct replicator_state *drvdata = amba_get_drvdata(adev); | ||
| 162 | |||
| 163 | pm_runtime_disable(&adev->dev); | ||
| 164 | coresight_unregister(drvdata->csdev); | ||
| 165 | return 0; | ||
| 166 | } | ||
| 167 | |||
| 168 | #ifdef CONFIG_PM | 154 | #ifdef CONFIG_PM |
| 169 | static int replicator_runtime_suspend(struct device *dev) | 155 | static int replicator_runtime_suspend(struct device *dev) |
| 170 | { | 156 | { |
| @@ -206,10 +192,9 @@ static struct amba_driver replicator_driver = { | |||
| 206 | .drv = { | 192 | .drv = { |
| 207 | .name = "coresight-replicator-qcom", | 193 | .name = "coresight-replicator-qcom", |
| 208 | .pm = &replicator_dev_pm_ops, | 194 | .pm = &replicator_dev_pm_ops, |
| 195 | .suppress_bind_attrs = true, | ||
| 209 | }, | 196 | }, |
| 210 | .probe = replicator_probe, | 197 | .probe = replicator_probe, |
| 211 | .remove = replicator_remove, | ||
| 212 | .id_table = replicator_ids, | 198 | .id_table = replicator_ids, |
| 213 | }; | 199 | }; |
| 214 | 200 | builtin_amba_driver(replicator_driver); | |
| 215 | module_amba_driver(replicator_driver); | ||
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 963ac197c253..4299c0569340 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
| 2 | * | 2 | * |
| 3 | * Description: CoreSight Replicator driver | ||
| 4 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
| @@ -11,7 +13,6 @@ | |||
| 11 | */ | 13 | */ |
| 12 | 14 | ||
| 13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/device.h> | 16 | #include <linux/device.h> |
| 16 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
| 17 | #include <linux/io.h> | 18 | #include <linux/io.h> |
| @@ -41,7 +42,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport, | |||
| 41 | { | 42 | { |
| 42 | struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 43 | struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 43 | 44 | ||
| 44 | pm_runtime_get_sync(drvdata->dev); | ||
| 45 | dev_info(drvdata->dev, "REPLICATOR enabled\n"); | 45 | dev_info(drvdata->dev, "REPLICATOR enabled\n"); |
| 46 | return 0; | 46 | return 0; |
| 47 | } | 47 | } |
| @@ -51,7 +51,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport, | |||
| 51 | { | 51 | { |
| 52 | struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 52 | struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 53 | 53 | ||
| 54 | pm_runtime_put(drvdata->dev); | ||
| 55 | dev_info(drvdata->dev, "REPLICATOR disabled\n"); | 54 | dev_info(drvdata->dev, "REPLICATOR disabled\n"); |
| 56 | } | 55 | } |
| 57 | 56 | ||
| @@ -127,20 +126,6 @@ out_disable_pm: | |||
| 127 | return ret; | 126 | return ret; |
| 128 | } | 127 | } |
| 129 | 128 | ||
| 130 | static int replicator_remove(struct platform_device *pdev) | ||
| 131 | { | ||
| 132 | struct replicator_drvdata *drvdata = platform_get_drvdata(pdev); | ||
| 133 | |||
| 134 | coresight_unregister(drvdata->csdev); | ||
| 135 | pm_runtime_get_sync(&pdev->dev); | ||
| 136 | if (!IS_ERR(drvdata->atclk)) | ||
| 137 | clk_disable_unprepare(drvdata->atclk); | ||
| 138 | pm_runtime_put_noidle(&pdev->dev); | ||
| 139 | pm_runtime_disable(&pdev->dev); | ||
| 140 | |||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
| 144 | #ifdef CONFIG_PM | 129 | #ifdef CONFIG_PM |
| 145 | static int replicator_runtime_suspend(struct device *dev) | 130 | static int replicator_runtime_suspend(struct device *dev) |
| 146 | { | 131 | { |
| @@ -175,15 +160,11 @@ static const struct of_device_id replicator_match[] = { | |||
| 175 | 160 | ||
| 176 | static struct platform_driver replicator_driver = { | 161 | static struct platform_driver replicator_driver = { |
| 177 | .probe = replicator_probe, | 162 | .probe = replicator_probe, |
| 178 | .remove = replicator_remove, | ||
| 179 | .driver = { | 163 | .driver = { |
| 180 | .name = "coresight-replicator", | 164 | .name = "coresight-replicator", |
| 181 | .of_match_table = replicator_match, | 165 | .of_match_table = replicator_match, |
| 182 | .pm = &replicator_dev_pm_ops, | 166 | .pm = &replicator_dev_pm_ops, |
| 167 | .suppress_bind_attrs = true, | ||
| 183 | }, | 168 | }, |
| 184 | }; | 169 | }; |
| 185 | |||
| 186 | builtin_platform_driver(replicator_driver); | 170 | builtin_platform_driver(replicator_driver); |
| 187 | |||
| 188 | MODULE_LICENSE("GPL v2"); | ||
| 189 | MODULE_DESCRIPTION("CoreSight Replicator driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index a57c7ec1661f..1be191f5d39c 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved. |
| 2 | * | 2 | * |
| 3 | * Description: CoreSight Trace Memory Controller driver | ||
| 4 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
| @@ -11,7 +13,6 @@ | |||
| 11 | */ | 13 | */ |
| 12 | 14 | ||
| 13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 16 | #include <linux/types.h> | 17 | #include <linux/types.h> |
| 17 | #include <linux/device.h> | 18 | #include <linux/device.h> |
| @@ -124,7 +125,7 @@ struct tmc_drvdata { | |||
| 124 | bool reading; | 125 | bool reading; |
| 125 | char *buf; | 126 | char *buf; |
| 126 | dma_addr_t paddr; | 127 | dma_addr_t paddr; |
| 127 | void __iomem *vaddr; | 128 | void *vaddr; |
| 128 | u32 size; | 129 | u32 size; |
| 129 | bool enable; | 130 | bool enable; |
| 130 | enum tmc_config_type config_type; | 131 | enum tmc_config_type config_type; |
| @@ -242,12 +243,9 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) | |||
| 242 | { | 243 | { |
| 243 | unsigned long flags; | 244 | unsigned long flags; |
| 244 | 245 | ||
| 245 | pm_runtime_get_sync(drvdata->dev); | ||
| 246 | |||
| 247 | spin_lock_irqsave(&drvdata->spinlock, flags); | 246 | spin_lock_irqsave(&drvdata->spinlock, flags); |
| 248 | if (drvdata->reading) { | 247 | if (drvdata->reading) { |
| 249 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 248 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
| 250 | pm_runtime_put(drvdata->dev); | ||
| 251 | return -EBUSY; | 249 | return -EBUSY; |
| 252 | } | 250 | } |
| 253 | 251 | ||
| @@ -268,7 +266,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) | |||
| 268 | return 0; | 266 | return 0; |
| 269 | } | 267 | } |
| 270 | 268 | ||
| 271 | static int tmc_enable_sink(struct coresight_device *csdev) | 269 | static int tmc_enable_sink(struct coresight_device *csdev, u32 mode) |
| 272 | { | 270 | { |
| 273 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 271 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 274 | 272 | ||
| @@ -381,8 +379,6 @@ out: | |||
| 381 | drvdata->enable = false; | 379 | drvdata->enable = false; |
| 382 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 380 | spin_unlock_irqrestore(&drvdata->spinlock, flags); |
| 383 | 381 | ||
| 384 | pm_runtime_put(drvdata->dev); | ||
| 385 | |||
| 386 | dev_info(drvdata->dev, "TMC disabled\n"); | 382 | dev_info(drvdata->dev, "TMC disabled\n"); |
| 387 | } | 383 | } |
| 388 | 384 | ||
| @@ -766,23 +762,10 @@ err_misc_register: | |||
| 766 | err_devm_kzalloc: | 762 | err_devm_kzalloc: |
| 767 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) | 763 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) |
| 768 | dma_free_coherent(dev, drvdata->size, | 764 | dma_free_coherent(dev, drvdata->size, |
| 769 | &drvdata->paddr, GFP_KERNEL); | 765 | drvdata->vaddr, drvdata->paddr); |
| 770 | return ret; | 766 | return ret; |
| 771 | } | 767 | } |
| 772 | 768 | ||
| 773 | static int tmc_remove(struct amba_device *adev) | ||
| 774 | { | ||
| 775 | struct tmc_drvdata *drvdata = amba_get_drvdata(adev); | ||
| 776 | |||
| 777 | misc_deregister(&drvdata->miscdev); | ||
| 778 | coresight_unregister(drvdata->csdev); | ||
| 779 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) | ||
| 780 | dma_free_coherent(drvdata->dev, drvdata->size, | ||
| 781 | &drvdata->paddr, GFP_KERNEL); | ||
| 782 | |||
| 783 | return 0; | ||
| 784 | } | ||
| 785 | |||
| 786 | static struct amba_id tmc_ids[] = { | 769 | static struct amba_id tmc_ids[] = { |
| 787 | { | 770 | { |
| 788 | .id = 0x0003b961, | 771 | .id = 0x0003b961, |
| @@ -795,13 +778,9 @@ static struct amba_driver tmc_driver = { | |||
| 795 | .drv = { | 778 | .drv = { |
| 796 | .name = "coresight-tmc", | 779 | .name = "coresight-tmc", |
| 797 | .owner = THIS_MODULE, | 780 | .owner = THIS_MODULE, |
| 781 | .suppress_bind_attrs = true, | ||
| 798 | }, | 782 | }, |
| 799 | .probe = tmc_probe, | 783 | .probe = tmc_probe, |
| 800 | .remove = tmc_remove, | ||
| 801 | .id_table = tmc_ids, | 784 | .id_table = tmc_ids, |
| 802 | }; | 785 | }; |
| 803 | 786 | builtin_amba_driver(tmc_driver); | |
| 804 | module_amba_driver(tmc_driver); | ||
| 805 | |||
| 806 | MODULE_LICENSE("GPL v2"); | ||
| 807 | MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 7214efd10db5..8fb09d9237ab 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. | 1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
| 2 | * | 2 | * |
| 3 | * Description: CoreSight Trace Port Interface Unit driver | ||
| 4 | * | ||
| 3 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and | 6 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. | 7 | * only version 2 as published by the Free Software Foundation. |
| @@ -11,7 +13,6 @@ | |||
| 11 | */ | 13 | */ |
| 12 | 14 | ||
| 13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 16 | #include <linux/device.h> | 17 | #include <linux/device.h> |
| 17 | #include <linux/io.h> | 18 | #include <linux/io.h> |
| @@ -70,11 +71,10 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata) | |||
| 70 | CS_LOCK(drvdata->base); | 71 | CS_LOCK(drvdata->base); |
| 71 | } | 72 | } |
| 72 | 73 | ||
| 73 | static int tpiu_enable(struct coresight_device *csdev) | 74 | static int tpiu_enable(struct coresight_device *csdev, u32 mode) |
| 74 | { | 75 | { |
| 75 | struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 76 | struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 76 | 77 | ||
| 77 | pm_runtime_get_sync(csdev->dev.parent); | ||
| 78 | tpiu_enable_hw(drvdata); | 78 | tpiu_enable_hw(drvdata); |
| 79 | 79 | ||
| 80 | dev_info(drvdata->dev, "TPIU enabled\n"); | 80 | dev_info(drvdata->dev, "TPIU enabled\n"); |
| @@ -98,7 +98,6 @@ static void tpiu_disable(struct coresight_device *csdev) | |||
| 98 | struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 98 | struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 99 | 99 | ||
| 100 | tpiu_disable_hw(drvdata); | 100 | tpiu_disable_hw(drvdata); |
| 101 | pm_runtime_put(csdev->dev.parent); | ||
| 102 | 101 | ||
| 103 | dev_info(drvdata->dev, "TPIU disabled\n"); | 102 | dev_info(drvdata->dev, "TPIU disabled\n"); |
| 104 | } | 103 | } |
| @@ -172,14 +171,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 172 | return 0; | 171 | return 0; |
| 173 | } | 172 | } |
| 174 | 173 | ||
| 175 | static int tpiu_remove(struct amba_device *adev) | ||
| 176 | { | ||
| 177 | struct tpiu_drvdata *drvdata = amba_get_drvdata(adev); | ||
| 178 | |||
| 179 | coresight_unregister(drvdata->csdev); | ||
| 180 | return 0; | ||
| 181 | } | ||
| 182 | |||
| 183 | #ifdef CONFIG_PM | 174 | #ifdef CONFIG_PM |
| 184 | static int tpiu_runtime_suspend(struct device *dev) | 175 | static int tpiu_runtime_suspend(struct device *dev) |
| 185 | { | 176 | { |
| @@ -223,13 +214,9 @@ static struct amba_driver tpiu_driver = { | |||
| 223 | .name = "coresight-tpiu", | 214 | .name = "coresight-tpiu", |
| 224 | .owner = THIS_MODULE, | 215 | .owner = THIS_MODULE, |
| 225 | .pm = &tpiu_dev_pm_ops, | 216 | .pm = &tpiu_dev_pm_ops, |
| 217 | .suppress_bind_attrs = true, | ||
| 226 | }, | 218 | }, |
| 227 | .probe = tpiu_probe, | 219 | .probe = tpiu_probe, |
| 228 | .remove = tpiu_remove, | ||
| 229 | .id_table = tpiu_ids, | 220 | .id_table = tpiu_ids, |
| 230 | }; | 221 | }; |
| 231 | 222 | builtin_amba_driver(tpiu_driver); | |
| 232 | module_amba_driver(tpiu_driver); | ||
| 233 | |||
| 234 | MODULE_LICENSE("GPL v2"); | ||
| 235 | MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver"); | ||
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 93738dfbf631..2ea5961092c1 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 16 | #include <linux/types.h> | 15 | #include <linux/types.h> |
| 17 | #include <linux/device.h> | 16 | #include <linux/device.h> |
| @@ -24,11 +23,28 @@ | |||
| 24 | #include <linux/coresight.h> | 23 | #include <linux/coresight.h> |
| 25 | #include <linux/of_platform.h> | 24 | #include <linux/of_platform.h> |
| 26 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
| 26 | #include <linux/pm_runtime.h> | ||
| 27 | 27 | ||
| 28 | #include "coresight-priv.h" | 28 | #include "coresight-priv.h" |
| 29 | 29 | ||
| 30 | static DEFINE_MUTEX(coresight_mutex); | 30 | static DEFINE_MUTEX(coresight_mutex); |
| 31 | 31 | ||
| 32 | /** | ||
| 33 | * struct coresight_node - elements of a path, from source to sink | ||
| 34 | * @csdev: Address of an element. | ||
| 35 | * @link: hook to the list. | ||
| 36 | */ | ||
| 37 | struct coresight_node { | ||
| 38 | struct coresight_device *csdev; | ||
| 39 | struct list_head link; | ||
| 40 | }; | ||
| 41 | |||
| 42 | /* | ||
| 43 | * When operating Coresight drivers from the sysFS interface, only a single | ||
| 44 | * path can exist from a tracer (associated to a CPU) to a sink. | ||
| 45 | */ | ||
| 46 | static DEFINE_PER_CPU(struct list_head *, sysfs_path); | ||
| 47 | |||
| 32 | static int coresight_id_match(struct device *dev, void *data) | 48 | static int coresight_id_match(struct device *dev, void *data) |
| 33 | { | 49 | { |
| 34 | int trace_id, i_trace_id; | 50 | int trace_id, i_trace_id; |
| @@ -68,15 +84,12 @@ static int coresight_source_is_unique(struct coresight_device *csdev) | |||
| 68 | csdev, coresight_id_match); | 84 | csdev, coresight_id_match); |
| 69 | } | 85 | } |
| 70 | 86 | ||
| 71 | static int coresight_find_link_inport(struct coresight_device *csdev) | 87 | static int coresight_find_link_inport(struct coresight_device *csdev, |
| 88 | struct coresight_device *parent) | ||
| 72 | { | 89 | { |
| 73 | int i; | 90 | int i; |
| 74 | struct coresight_device *parent; | ||
| 75 | struct coresight_connection *conn; | 91 | struct coresight_connection *conn; |
| 76 | 92 | ||
| 77 | parent = container_of(csdev->path_link.next, | ||
| 78 | struct coresight_device, path_link); | ||
| 79 | |||
| 80 | for (i = 0; i < parent->nr_outport; i++) { | 93 | for (i = 0; i < parent->nr_outport; i++) { |
| 81 | conn = &parent->conns[i]; | 94 | conn = &parent->conns[i]; |
| 82 | if (conn->child_dev == csdev) | 95 | if (conn->child_dev == csdev) |
| @@ -89,15 +102,12 @@ static int coresight_find_link_inport(struct coresight_device *csdev) | |||
| 89 | return 0; | 102 | return 0; |
| 90 | } | 103 | } |
| 91 | 104 | ||
| 92 | static int coresight_find_link_outport(struct coresight_device *csdev) | 105 | static int coresight_find_link_outport(struct coresight_device *csdev, |
| 106 | struct coresight_device *child) | ||
| 93 | { | 107 | { |
| 94 | int i; | 108 | int i; |
| 95 | struct coresight_device *child; | ||
| 96 | struct coresight_connection *conn; | 109 | struct coresight_connection *conn; |
| 97 | 110 | ||
| 98 | child = container_of(csdev->path_link.prev, | ||
| 99 | struct coresight_device, path_link); | ||
| 100 | |||
| 101 | for (i = 0; i < csdev->nr_outport; i++) { | 111 | for (i = 0; i < csdev->nr_outport; i++) { |
| 102 | conn = &csdev->conns[i]; | 112 | conn = &csdev->conns[i]; |
| 103 | if (conn->child_dev == child) | 113 | if (conn->child_dev == child) |
| @@ -110,13 +120,13 @@ static int coresight_find_link_outport(struct coresight_device *csdev) | |||
| 110 | return 0; | 120 | return 0; |
| 111 | } | 121 | } |
| 112 | 122 | ||
| 113 | static int coresight_enable_sink(struct coresight_device *csdev) | 123 | static int coresight_enable_sink(struct coresight_device *csdev, u32 mode) |
| 114 | { | 124 | { |
| 115 | int ret; | 125 | int ret; |
| 116 | 126 | ||
| 117 | if (!csdev->enable) { | 127 | if (!csdev->enable) { |
| 118 | if (sink_ops(csdev)->enable) { | 128 | if (sink_ops(csdev)->enable) { |
| 119 | ret = sink_ops(csdev)->enable(csdev); | 129 | ret = sink_ops(csdev)->enable(csdev, mode); |
| 120 | if (ret) | 130 | if (ret) |
| 121 | return ret; | 131 | return ret; |
| 122 | } | 132 | } |
| @@ -138,14 +148,19 @@ static void coresight_disable_sink(struct coresight_device *csdev) | |||
| 138 | } | 148 | } |
| 139 | } | 149 | } |
| 140 | 150 | ||
| 141 | static int coresight_enable_link(struct coresight_device *csdev) | 151 | static int coresight_enable_link(struct coresight_device *csdev, |
| 152 | struct coresight_device *parent, | ||
| 153 | struct coresight_device *child) | ||
| 142 | { | 154 | { |
| 143 | int ret; | 155 | int ret; |
| 144 | int link_subtype; | 156 | int link_subtype; |
| 145 | int refport, inport, outport; | 157 | int refport, inport, outport; |
| 146 | 158 | ||
| 147 | inport = coresight_find_link_inport(csdev); | 159 | if (!parent || !child) |
| 148 | outport = coresight_find_link_outport(csdev); | 160 | return -EINVAL; |
| 161 | |||
| 162 | inport = coresight_find_link_inport(csdev, parent); | ||
| 163 | outport = coresight_find_link_outport(csdev, child); | ||
| 149 | link_subtype = csdev->subtype.link_subtype; | 164 | link_subtype = csdev->subtype.link_subtype; |
| 150 | 165 | ||
| 151 | if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) | 166 | if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) |
| @@ -168,14 +183,19 @@ static int coresight_enable_link(struct coresight_device *csdev) | |||
| 168 | return 0; | 183 | return 0; |
| 169 | } | 184 | } |
| 170 | 185 | ||
| 171 | static void coresight_disable_link(struct coresight_device *csdev) | 186 | static void coresight_disable_link(struct coresight_device *csdev, |
| 187 | struct coresight_device *parent, | ||
| 188 | struct coresight_device *child) | ||
| 172 | { | 189 | { |
| 173 | int i, nr_conns; | 190 | int i, nr_conns; |
| 174 | int link_subtype; | 191 | int link_subtype; |
| 175 | int refport, inport, outport; | 192 | int refport, inport, outport; |
| 176 | 193 | ||
| 177 | inport = coresight_find_link_inport(csdev); | 194 | if (!parent || !child) |
| 178 | outport = coresight_find_link_outport(csdev); | 195 | return; |
| 196 | |||
| 197 | inport = coresight_find_link_inport(csdev, parent); | ||
| 198 | outport = coresight_find_link_outport(csdev, child); | ||
| 179 | link_subtype = csdev->subtype.link_subtype; | 199 | link_subtype = csdev->subtype.link_subtype; |
| 180 | 200 | ||
| 181 | if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) { | 201 | if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) { |
| @@ -201,7 +221,7 @@ static void coresight_disable_link(struct coresight_device *csdev) | |||
| 201 | csdev->enable = false; | 221 | csdev->enable = false; |
| 202 | } | 222 | } |
| 203 | 223 | ||
| 204 | static int coresight_enable_source(struct coresight_device *csdev) | 224 | static int coresight_enable_source(struct coresight_device *csdev, u32 mode) |
| 205 | { | 225 | { |
| 206 | int ret; | 226 | int ret; |
| 207 | 227 | ||
| @@ -213,7 +233,7 @@ static int coresight_enable_source(struct coresight_device *csdev) | |||
| 213 | 233 | ||
| 214 | if (!csdev->enable) { | 234 | if (!csdev->enable) { |
| 215 | if (source_ops(csdev)->enable) { | 235 | if (source_ops(csdev)->enable) { |
| 216 | ret = source_ops(csdev)->enable(csdev); | 236 | ret = source_ops(csdev)->enable(csdev, NULL, mode); |
| 217 | if (ret) | 237 | if (ret) |
| 218 | return ret; | 238 | return ret; |
| 219 | } | 239 | } |
| @@ -235,109 +255,188 @@ static void coresight_disable_source(struct coresight_device *csdev) | |||
| 235 | } | 255 | } |
| 236 | } | 256 | } |
| 237 | 257 | ||
| 238 | static int coresight_enable_path(struct list_head *path) | 258 | void coresight_disable_path(struct list_head *path) |
| 239 | { | 259 | { |
| 240 | int ret = 0; | 260 | struct coresight_node *nd; |
| 241 | struct coresight_device *cd; | 261 | struct coresight_device *csdev, *parent, *child; |
| 242 | 262 | ||
| 243 | /* | 263 | list_for_each_entry(nd, path, link) { |
| 244 | * At this point we have a full @path, from source to sink. The | 264 | csdev = nd->csdev; |
| 245 | * sink is the first entry and the source the last one. Go through | 265 | |
| 246 | * all the components and enable them one by one. | 266 | switch (csdev->type) { |
| 247 | */ | 267 | case CORESIGHT_DEV_TYPE_SINK: |
| 248 | list_for_each_entry(cd, path, path_link) { | 268 | case CORESIGHT_DEV_TYPE_LINKSINK: |
| 249 | if (cd == list_first_entry(path, struct coresight_device, | 269 | coresight_disable_sink(csdev); |
| 250 | path_link)) { | 270 | break; |
| 251 | ret = coresight_enable_sink(cd); | 271 | case CORESIGHT_DEV_TYPE_SOURCE: |
| 252 | } else if (list_is_last(&cd->path_link, path)) { | 272 | /* sources are disabled from either sysFS or Perf */ |
| 253 | /* | 273 | break; |
| 254 | * Don't enable the source just yet - this needs to | 274 | case CORESIGHT_DEV_TYPE_LINK: |
| 255 | * happen at the very end when all links and sink | 275 | parent = list_prev_entry(nd, link)->csdev; |
| 256 | * along the path have been configured properly. | 276 | child = list_next_entry(nd, link)->csdev; |
| 257 | */ | 277 | coresight_disable_link(csdev, parent, child); |
| 258 | ; | 278 | break; |
| 259 | } else { | 279 | default: |
| 260 | ret = coresight_enable_link(cd); | 280 | break; |
| 261 | } | 281 | } |
| 262 | if (ret) | ||
| 263 | goto err; | ||
| 264 | } | 282 | } |
| 283 | } | ||
| 265 | 284 | ||
| 266 | return 0; | 285 | int coresight_enable_path(struct list_head *path, u32 mode) |
| 267 | err: | 286 | { |
| 268 | list_for_each_entry_continue_reverse(cd, path, path_link) { | 287 | |
| 269 | if (cd == list_first_entry(path, struct coresight_device, | 288 | int ret = 0; |
| 270 | path_link)) { | 289 | struct coresight_node *nd; |
| 271 | coresight_disable_sink(cd); | 290 | struct coresight_device *csdev, *parent, *child; |
| 272 | } else if (list_is_last(&cd->path_link, path)) { | 291 | |
| 273 | ; | 292 | list_for_each_entry_reverse(nd, path, link) { |
| 274 | } else { | 293 | csdev = nd->csdev; |
| 275 | coresight_disable_link(cd); | 294 | |
| 295 | switch (csdev->type) { | ||
| 296 | case CORESIGHT_DEV_TYPE_SINK: | ||
| 297 | case CORESIGHT_DEV_TYPE_LINKSINK: | ||
| 298 | ret = coresight_enable_sink(csdev, mode); | ||
| 299 | if (ret) | ||
| 300 | goto err; | ||
| 301 | break; | ||
| 302 | case CORESIGHT_DEV_TYPE_SOURCE: | ||
| 303 | /* sources are enabled from either sysFS or Perf */ | ||
| 304 | break; | ||
| 305 | case CORESIGHT_DEV_TYPE_LINK: | ||
| 306 | parent = list_prev_entry(nd, link)->csdev; | ||
| 307 | child = list_next_entry(nd, link)->csdev; | ||
| 308 | ret = coresight_enable_link(csdev, parent, child); | ||
| 309 | if (ret) | ||
| 310 | goto err; | ||
| 311 | break; | ||
| 312 | default: | ||
| 313 | goto err; | ||
| 276 | } | 314 | } |
| 277 | } | 315 | } |
| 278 | 316 | ||
| 317 | out: | ||
| 279 | return ret; | 318 | return ret; |
| 319 | err: | ||
| 320 | coresight_disable_path(path); | ||
| 321 | goto out; | ||
| 280 | } | 322 | } |
| 281 | 323 | ||
| 282 | static int coresight_disable_path(struct list_head *path) | 324 | struct coresight_device *coresight_get_sink(struct list_head *path) |
| 283 | { | 325 | { |
| 284 | struct coresight_device *cd; | 326 | struct coresight_device *csdev; |
| 285 | 327 | ||
| 286 | list_for_each_entry_reverse(cd, path, path_link) { | 328 | if (!path) |
| 287 | if (cd == list_first_entry(path, struct coresight_device, | 329 | return NULL; |
| 288 | path_link)) { | 330 | |
| 289 | coresight_disable_sink(cd); | 331 | csdev = list_last_entry(path, struct coresight_node, link)->csdev; |
| 290 | } else if (list_is_last(&cd->path_link, path)) { | 332 | if (csdev->type != CORESIGHT_DEV_TYPE_SINK && |
| 291 | /* | 333 | csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) |
| 292 | * The source has already been stopped, no need | 334 | return NULL; |
| 293 | * to do it again here. | 335 | |
| 294 | */ | 336 | return csdev; |
| 295 | ; | 337 | } |
| 296 | } else { | 338 | |
| 297 | coresight_disable_link(cd); | 339 | /** |
| 340 | * _coresight_build_path - recursively build a path from a @csdev to a sink. | ||
| 341 | * @csdev: The device to start from. | ||
| 342 | * @path: The list to add devices to. | ||
| 343 | * | ||
| 344 | * The tree of Coresight device is traversed until an activated sink is | ||
| 345 | * found. From there the sink is added to the list along with all the | ||
| 346 | * devices that led to that point - the end result is a list from source | ||
| 347 | * to sink. In that list the source is the first device and the sink the | ||
| 348 | * last one. | ||
| 349 | */ | ||
| 350 | static int _coresight_build_path(struct coresight_device *csdev, | ||
| 351 | struct list_head *path) | ||
| 352 | { | ||
| 353 | int i; | ||
| 354 | bool found = false; | ||
| 355 | struct coresight_node *node; | ||
| 356 | struct coresight_connection *conn; | ||
| 357 | |||
| 358 | /* An activated sink has been found. Enqueue the element */ | ||
| 359 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || | ||
| 360 | csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated) | ||
| 361 | goto out; | ||
| 362 | |||
| 363 | /* Not a sink - recursively explore each port found on this element */ | ||
| 364 | for (i = 0; i < csdev->nr_outport; i++) { | ||
| 365 | conn = &csdev->conns[i]; | ||
| 366 | if (_coresight_build_path(conn->child_dev, path) == 0) { | ||
| 367 | found = true; | ||
| 368 | break; | ||
| 298 | } | 369 | } |
| 299 | } | 370 | } |
| 300 | 371 | ||
| 372 | if (!found) | ||
| 373 | return -ENODEV; | ||
| 374 | |||
| 375 | out: | ||
| 376 | /* | ||
| 377 | * A path from this element to a sink has been found. The elements | ||
| 378 | * leading to the sink are already enqueued, all that is left to do | ||
| 379 | * is tell the PM runtime core we need this element and add a node | ||
| 380 | * for it. | ||
| 381 | */ | ||
| 382 | node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL); | ||
| 383 | if (!node) | ||
| 384 | return -ENOMEM; | ||
| 385 | |||
| 386 | node->csdev = csdev; | ||
| 387 | list_add(&node->link, path); | ||
| 388 | pm_runtime_get_sync(csdev->dev.parent); | ||
| 389 | |||
| 301 | return 0; | 390 | return 0; |
| 302 | } | 391 | } |
| 303 | 392 | ||
| 304 | static int coresight_build_paths(struct coresight_device *csdev, | 393 | struct list_head *coresight_build_path(struct coresight_device *csdev) |
| 305 | struct list_head *path, | ||
| 306 | bool enable) | ||
| 307 | { | 394 | { |
| 308 | int i, ret = -EINVAL; | 395 | struct list_head *path; |
| 309 | struct coresight_connection *conn; | ||
| 310 | 396 | ||
| 311 | list_add(&csdev->path_link, path); | 397 | path = kzalloc(sizeof(struct list_head), GFP_KERNEL); |
| 398 | if (!path) | ||
| 399 | return NULL; | ||
| 312 | 400 | ||
| 313 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || | 401 | INIT_LIST_HEAD(path); |
| 314 | csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && | 402 | |
| 315 | csdev->activated) { | 403 | if (_coresight_build_path(csdev, path)) { |
| 316 | if (enable) | 404 | kfree(path); |
| 317 | ret = coresight_enable_path(path); | 405 | path = NULL; |
| 318 | else | ||
| 319 | ret = coresight_disable_path(path); | ||
| 320 | } else { | ||
| 321 | for (i = 0; i < csdev->nr_outport; i++) { | ||
| 322 | conn = &csdev->conns[i]; | ||
| 323 | if (coresight_build_paths(conn->child_dev, | ||
| 324 | path, enable) == 0) | ||
| 325 | ret = 0; | ||
| 326 | } | ||
| 327 | } | 406 | } |
| 328 | 407 | ||
| 329 | if (list_first_entry(path, struct coresight_device, path_link) != csdev) | 408 | return path; |
| 330 | dev_err(&csdev->dev, "wrong device in %s\n", __func__); | 409 | } |
| 331 | 410 | ||
| 332 | list_del(&csdev->path_link); | 411 | /** |
| 412 | * coresight_release_path - release a previously built path. | ||
| 413 | * @path: the path to release. | ||
| 414 | * | ||
| 415 | * Go through all the elements of a path and 1) removed it from the list and | ||
| 416 | * 2) free the memory allocated for each node. | ||
| 417 | */ | ||
| 418 | void coresight_release_path(struct list_head *path) | ||
| 419 | { | ||
| 420 | struct coresight_device *csdev; | ||
| 421 | struct coresight_node *nd, *next; | ||
| 333 | 422 | ||
| 334 | return ret; | 423 | list_for_each_entry_safe(nd, next, path, link) { |
| 424 | csdev = nd->csdev; | ||
| 425 | |||
| 426 | pm_runtime_put_sync(csdev->dev.parent); | ||
| 427 | list_del(&nd->link); | ||
| 428 | kfree(nd); | ||
| 429 | } | ||
| 430 | |||
| 431 | kfree(path); | ||
| 432 | path = NULL; | ||
| 335 | } | 433 | } |
| 336 | 434 | ||
| 337 | int coresight_enable(struct coresight_device *csdev) | 435 | int coresight_enable(struct coresight_device *csdev) |
| 338 | { | 436 | { |
| 339 | int ret = 0; | 437 | int ret = 0; |
| 340 | LIST_HEAD(path); | 438 | int cpu; |
| 439 | struct list_head *path; | ||
| 341 | 440 | ||
| 342 | mutex_lock(&coresight_mutex); | 441 | mutex_lock(&coresight_mutex); |
| 343 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { | 442 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { |
| @@ -348,22 +447,47 @@ int coresight_enable(struct coresight_device *csdev) | |||
| 348 | if (csdev->enable) | 447 | if (csdev->enable) |
| 349 | goto out; | 448 | goto out; |
| 350 | 449 | ||
| 351 | if (coresight_build_paths(csdev, &path, true)) { | 450 | path = coresight_build_path(csdev); |
| 352 | dev_err(&csdev->dev, "building path(s) failed\n"); | 451 | if (!path) { |
| 452 | pr_err("building path(s) failed\n"); | ||
| 353 | goto out; | 453 | goto out; |
| 354 | } | 454 | } |
| 355 | 455 | ||
| 356 | if (coresight_enable_source(csdev)) | 456 | ret = coresight_enable_path(path, CS_MODE_SYSFS); |
| 357 | dev_err(&csdev->dev, "source enable failed\n"); | 457 | if (ret) |
| 458 | goto err_path; | ||
| 459 | |||
| 460 | ret = coresight_enable_source(csdev, CS_MODE_SYSFS); | ||
| 461 | if (ret) | ||
| 462 | goto err_source; | ||
| 463 | |||
| 464 | /* | ||
| 465 | * When working from sysFS it is important to keep track | ||
| 466 | * of the paths that were created so that they can be | ||
| 467 | * undone in 'coresight_disable()'. Since there can only | ||
| 468 | * be a single session per tracer (when working from sysFS) | ||
| 469 | * a per-cpu variable will do just fine. | ||
| 470 | */ | ||
| 471 | cpu = source_ops(csdev)->cpu_id(csdev); | ||
| 472 | per_cpu(sysfs_path, cpu) = path; | ||
| 473 | |||
| 358 | out: | 474 | out: |
| 359 | mutex_unlock(&coresight_mutex); | 475 | mutex_unlock(&coresight_mutex); |
| 360 | return ret; | 476 | return ret; |
| 477 | |||
| 478 | err_source: | ||
| 479 | coresight_disable_path(path); | ||
| 480 | |||
| 481 | err_path: | ||
| 482 | coresight_release_path(path); | ||
| 483 | goto out; | ||
| 361 | } | 484 | } |
| 362 | EXPORT_SYMBOL_GPL(coresight_enable); | 485 | EXPORT_SYMBOL_GPL(coresight_enable); |
| 363 | 486 | ||
| 364 | void coresight_disable(struct coresight_device *csdev) | 487 | void coresight_disable(struct coresight_device *csdev) |
| 365 | { | 488 | { |
| 366 | LIST_HEAD(path); | 489 | int cpu; |
| 490 | struct list_head *path; | ||
| 367 | 491 | ||
| 368 | mutex_lock(&coresight_mutex); | 492 | mutex_lock(&coresight_mutex); |
| 369 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { | 493 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { |
| @@ -373,9 +497,12 @@ void coresight_disable(struct coresight_device *csdev) | |||
| 373 | if (!csdev->enable) | 497 | if (!csdev->enable) |
| 374 | goto out; | 498 | goto out; |
| 375 | 499 | ||
| 500 | cpu = source_ops(csdev)->cpu_id(csdev); | ||
| 501 | path = per_cpu(sysfs_path, cpu); | ||
| 376 | coresight_disable_source(csdev); | 502 | coresight_disable_source(csdev); |
| 377 | if (coresight_build_paths(csdev, &path, false)) | 503 | coresight_disable_path(path); |
| 378 | dev_err(&csdev->dev, "releasing path(s) failed\n"); | 504 | coresight_release_path(path); |
| 505 | per_cpu(sysfs_path, cpu) = NULL; | ||
| 379 | 506 | ||
| 380 | out: | 507 | out: |
| 381 | mutex_unlock(&coresight_mutex); | 508 | mutex_unlock(&coresight_mutex); |
| @@ -481,6 +608,8 @@ static void coresight_device_release(struct device *dev) | |||
| 481 | { | 608 | { |
| 482 | struct coresight_device *csdev = to_coresight_device(dev); | 609 | struct coresight_device *csdev = to_coresight_device(dev); |
| 483 | 610 | ||
| 611 | kfree(csdev->conns); | ||
| 612 | kfree(csdev->refcnt); | ||
| 484 | kfree(csdev); | 613 | kfree(csdev); |
| 485 | } | 614 | } |
| 486 | 615 | ||
| @@ -536,7 +665,7 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev) | |||
| 536 | * are hooked-up with each newly added component. | 665 | * are hooked-up with each newly added component. |
| 537 | */ | 666 | */ |
| 538 | bus_for_each_dev(&coresight_bustype, NULL, | 667 | bus_for_each_dev(&coresight_bustype, NULL, |
| 539 | csdev, coresight_orphan_match); | 668 | csdev, coresight_orphan_match); |
| 540 | } | 669 | } |
| 541 | 670 | ||
| 542 | 671 | ||
| @@ -568,6 +697,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev) | |||
| 568 | 697 | ||
| 569 | if (dev) { | 698 | if (dev) { |
| 570 | conn->child_dev = to_coresight_device(dev); | 699 | conn->child_dev = to_coresight_device(dev); |
| 700 | /* and put reference from 'bus_find_device()' */ | ||
| 701 | put_device(dev); | ||
| 571 | } else { | 702 | } else { |
| 572 | csdev->orphan = true; | 703 | csdev->orphan = true; |
| 573 | conn->child_dev = NULL; | 704 | conn->child_dev = NULL; |
| @@ -575,6 +706,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev) | |||
| 575 | } | 706 | } |
| 576 | } | 707 | } |
| 577 | 708 | ||
| 709 | static int coresight_remove_match(struct device *dev, void *data) | ||
| 710 | { | ||
| 711 | int i; | ||
| 712 | struct coresight_device *csdev, *iterator; | ||
| 713 | struct coresight_connection *conn; | ||
| 714 | |||
| 715 | csdev = data; | ||
| 716 | iterator = to_coresight_device(dev); | ||
| 717 | |||
| 718 | /* No need to check oneself */ | ||
| 719 | if (csdev == iterator) | ||
| 720 | return 0; | ||
| 721 | |||
| 722 | /* | ||
| 723 | * Circle throuch all the connection of that component. If we find | ||
| 724 | * a connection whose name matches @csdev, remove it. | ||
| 725 | */ | ||
| 726 | for (i = 0; i < iterator->nr_outport; i++) { | ||
| 727 | conn = &iterator->conns[i]; | ||
| 728 | |||
| 729 | if (conn->child_dev == NULL) | ||
| 730 | continue; | ||
| 731 | |||
| 732 | if (!strcmp(dev_name(&csdev->dev), conn->child_name)) { | ||
| 733 | iterator->orphan = true; | ||
| 734 | conn->child_dev = NULL; | ||
| 735 | /* No need to continue */ | ||
| 736 | break; | ||
| 737 | } | ||
| 738 | } | ||
| 739 | |||
| 740 | /* | ||
| 741 | * Returning '0' ensures that all known component on the | ||
| 742 | * bus will be checked. | ||
| 743 | */ | ||
| 744 | return 0; | ||
| 745 | } | ||
| 746 | |||
| 747 | static void coresight_remove_conns(struct coresight_device *csdev) | ||
| 748 | { | ||
| 749 | bus_for_each_dev(&coresight_bustype, NULL, | ||
| 750 | csdev, coresight_remove_match); | ||
| 751 | } | ||
| 752 | |||
| 578 | /** | 753 | /** |
| 579 | * coresight_timeout - loop until a bit has changed to a specific state. | 754 | * coresight_timeout - loop until a bit has changed to a specific state. |
| 580 | * @addr: base address of the area of interest. | 755 | * @addr: base address of the area of interest. |
| @@ -713,13 +888,8 @@ EXPORT_SYMBOL_GPL(coresight_register); | |||
| 713 | 888 | ||
| 714 | void coresight_unregister(struct coresight_device *csdev) | 889 | void coresight_unregister(struct coresight_device *csdev) |
| 715 | { | 890 | { |
| 716 | mutex_lock(&coresight_mutex); | 891 | /* Remove references of that device in the topology */ |
| 717 | 892 | coresight_remove_conns(csdev); | |
| 718 | kfree(csdev->conns); | ||
| 719 | device_unregister(&csdev->dev); | 893 | device_unregister(&csdev->dev); |
| 720 | |||
| 721 | mutex_unlock(&coresight_mutex); | ||
| 722 | } | 894 | } |
| 723 | EXPORT_SYMBOL_GPL(coresight_unregister); | 895 | EXPORT_SYMBOL_GPL(coresight_unregister); |
| 724 | |||
| 725 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c index b0973617826f..b68da1888fd5 100644 --- a/drivers/hwtracing/coresight/of_coresight.c +++ b/drivers/hwtracing/coresight/of_coresight.c | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | * GNU General Public License for more details. | 10 | * GNU General Public License for more details. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/module.h> | ||
| 14 | #include <linux/types.h> | 13 | #include <linux/types.h> |
| 15 | #include <linux/err.h> | 14 | #include <linux/err.h> |
| 16 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| @@ -86,7 +85,7 @@ static int of_coresight_alloc_memory(struct device *dev, | |||
| 86 | return -ENOMEM; | 85 | return -ENOMEM; |
| 87 | 86 | ||
| 88 | /* Children connected to this component via @outports */ | 87 | /* Children connected to this component via @outports */ |
| 89 | pdata->child_names = devm_kzalloc(dev, pdata->nr_outport * | 88 | pdata->child_names = devm_kzalloc(dev, pdata->nr_outport * |
| 90 | sizeof(*pdata->child_names), | 89 | sizeof(*pdata->child_names), |
| 91 | GFP_KERNEL); | 90 | GFP_KERNEL); |
| 92 | if (!pdata->child_names) | 91 | if (!pdata->child_names) |
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig index b7a9073d968b..1b412f8a56b5 100644 --- a/drivers/hwtracing/intel_th/Kconfig +++ b/drivers/hwtracing/intel_th/Kconfig | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | config INTEL_TH | 1 | config INTEL_TH |
| 2 | tristate "Intel(R) Trace Hub controller" | 2 | tristate "Intel(R) Trace Hub controller" |
| 3 | depends on HAS_DMA && HAS_IOMEM | ||
| 3 | help | 4 | help |
| 4 | Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that | 5 | Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that |
| 5 | produce, switch and output trace data from multiple hardware and | 6 | produce, switch and output trace data from multiple hardware and |
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 165d3001c301..4272f2ce5f6e 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c | |||
| @@ -124,17 +124,34 @@ static struct device_type intel_th_source_device_type = { | |||
| 124 | .release = intel_th_device_release, | 124 | .release = intel_th_device_release, |
| 125 | }; | 125 | }; |
| 126 | 126 | ||
| 127 | static struct intel_th *to_intel_th(struct intel_th_device *thdev) | ||
| 128 | { | ||
| 129 | /* | ||
| 130 | * subdevice tree is flat: if this one is not a switch, its | ||
| 131 | * parent must be | ||
| 132 | */ | ||
| 133 | if (thdev->type != INTEL_TH_SWITCH) | ||
| 134 | thdev = to_intel_th_hub(thdev); | ||
| 135 | |||
| 136 | if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH)) | ||
| 137 | return NULL; | ||
| 138 | |||
| 139 | return dev_get_drvdata(thdev->dev.parent); | ||
| 140 | } | ||
| 141 | |||
| 127 | static char *intel_th_output_devnode(struct device *dev, umode_t *mode, | 142 | static char *intel_th_output_devnode(struct device *dev, umode_t *mode, |
| 128 | kuid_t *uid, kgid_t *gid) | 143 | kuid_t *uid, kgid_t *gid) |
| 129 | { | 144 | { |
| 130 | struct intel_th_device *thdev = to_intel_th_device(dev); | 145 | struct intel_th_device *thdev = to_intel_th_device(dev); |
| 146 | struct intel_th *th = to_intel_th(thdev); | ||
| 131 | char *node; | 147 | char *node; |
| 132 | 148 | ||
| 133 | if (thdev->id >= 0) | 149 | if (thdev->id >= 0) |
| 134 | node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", 0, thdev->name, | 150 | node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", th->id, |
| 135 | thdev->id); | 151 | thdev->name, thdev->id); |
| 136 | else | 152 | else |
| 137 | node = kasprintf(GFP_KERNEL, "intel_th%d/%s", 0, thdev->name); | 153 | node = kasprintf(GFP_KERNEL, "intel_th%d/%s", th->id, |
| 154 | thdev->name); | ||
| 138 | 155 | ||
| 139 | return node; | 156 | return node; |
| 140 | } | 157 | } |
| @@ -319,6 +336,7 @@ static struct intel_th_subdevice { | |||
| 319 | unsigned nres; | 336 | unsigned nres; |
| 320 | unsigned type; | 337 | unsigned type; |
| 321 | unsigned otype; | 338 | unsigned otype; |
| 339 | unsigned scrpd; | ||
| 322 | int id; | 340 | int id; |
| 323 | } intel_th_subdevices[TH_SUBDEVICE_MAX] = { | 341 | } intel_th_subdevices[TH_SUBDEVICE_MAX] = { |
| 324 | { | 342 | { |
| @@ -352,6 +370,7 @@ static struct intel_th_subdevice { | |||
| 352 | .id = 0, | 370 | .id = 0, |
| 353 | .type = INTEL_TH_OUTPUT, | 371 | .type = INTEL_TH_OUTPUT, |
| 354 | .otype = GTH_MSU, | 372 | .otype = GTH_MSU, |
| 373 | .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED, | ||
| 355 | }, | 374 | }, |
| 356 | { | 375 | { |
| 357 | .nres = 2, | 376 | .nres = 2, |
| @@ -371,6 +390,7 @@ static struct intel_th_subdevice { | |||
| 371 | .id = 1, | 390 | .id = 1, |
| 372 | .type = INTEL_TH_OUTPUT, | 391 | .type = INTEL_TH_OUTPUT, |
| 373 | .otype = GTH_MSU, | 392 | .otype = GTH_MSU, |
| 393 | .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED, | ||
| 374 | }, | 394 | }, |
| 375 | { | 395 | { |
| 376 | .nres = 2, | 396 | .nres = 2, |
| @@ -403,6 +423,7 @@ static struct intel_th_subdevice { | |||
| 403 | .name = "pti", | 423 | .name = "pti", |
| 404 | .type = INTEL_TH_OUTPUT, | 424 | .type = INTEL_TH_OUTPUT, |
| 405 | .otype = GTH_PTI, | 425 | .otype = GTH_PTI, |
| 426 | .scrpd = SCRPD_PTI_IS_PRIM_DEST, | ||
| 406 | }, | 427 | }, |
| 407 | { | 428 | { |
| 408 | .nres = 1, | 429 | .nres = 1, |
| @@ -477,6 +498,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres, | |||
| 477 | thdev->dev.devt = MKDEV(th->major, i); | 498 | thdev->dev.devt = MKDEV(th->major, i); |
| 478 | thdev->output.type = subdev->otype; | 499 | thdev->output.type = subdev->otype; |
| 479 | thdev->output.port = -1; | 500 | thdev->output.port = -1; |
| 501 | thdev->output.scratchpad = subdev->scrpd; | ||
| 480 | } | 502 | } |
| 481 | 503 | ||
| 482 | err = device_add(&thdev->dev); | 504 | err = device_add(&thdev->dev); |
| @@ -579,6 +601,8 @@ intel_th_alloc(struct device *dev, struct resource *devres, | |||
| 579 | } | 601 | } |
| 580 | th->dev = dev; | 602 | th->dev = dev; |
| 581 | 603 | ||
| 604 | dev_set_drvdata(dev, th); | ||
| 605 | |||
| 582 | err = intel_th_populate(th, devres, ndevres, irq); | 606 | err = intel_th_populate(th, devres, ndevres, irq); |
| 583 | if (err) | 607 | if (err) |
| 584 | goto err_chrdev; | 608 | goto err_chrdev; |
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index 2dc5378ccd3a..9beea0b54231 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c | |||
| @@ -146,24 +146,6 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port) | |||
| 146 | iowrite32(val, gth->base + reg); | 146 | iowrite32(val, gth->base + reg); |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | /*static int gth_master_get(struct gth_device *gth, unsigned int master) | ||
| 150 | { | ||
| 151 | unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u); | ||
| 152 | unsigned int shift = (master & 0x7) * 4; | ||
| 153 | u32 val; | ||
| 154 | |||
| 155 | if (master >= 256) { | ||
| 156 | reg = REG_GTH_GSWTDEST; | ||
| 157 | shift = 0; | ||
| 158 | } | ||
| 159 | |||
| 160 | val = ioread32(gth->base + reg); | ||
| 161 | val &= (0xf << shift); | ||
| 162 | val >>= shift; | ||
| 163 | |||
| 164 | return val ? val & 0x7 : -1; | ||
| 165 | }*/ | ||
| 166 | |||
| 167 | static ssize_t master_attr_show(struct device *dev, | 149 | static ssize_t master_attr_show(struct device *dev, |
| 168 | struct device_attribute *attr, | 150 | struct device_attribute *attr, |
| 169 | char *buf) | 151 | char *buf) |
| @@ -304,6 +286,10 @@ static int intel_th_gth_reset(struct gth_device *gth) | |||
| 304 | if (scratchpad & SCRPD_DEBUGGER_IN_USE) | 286 | if (scratchpad & SCRPD_DEBUGGER_IN_USE) |
| 305 | return -EBUSY; | 287 | return -EBUSY; |
| 306 | 288 | ||
| 289 | /* Always save/restore STH and TU registers in S0ix entry/exit */ | ||
| 290 | scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED; | ||
| 291 | iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0); | ||
| 292 | |||
| 307 | /* output ports */ | 293 | /* output ports */ |
| 308 | for (port = 0; port < 8; port++) { | 294 | for (port = 0; port < 8; port++) { |
| 309 | if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) == | 295 | if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) == |
| @@ -506,6 +492,10 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, | |||
| 506 | if (!count) | 492 | if (!count) |
| 507 | dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n", | 493 | dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n", |
| 508 | output->port); | 494 | output->port); |
| 495 | |||
| 496 | reg = ioread32(gth->base + REG_GTH_SCRPD0); | ||
| 497 | reg &= ~output->scratchpad; | ||
| 498 | iowrite32(reg, gth->base + REG_GTH_SCRPD0); | ||
| 509 | } | 499 | } |
| 510 | 500 | ||
| 511 | /** | 501 | /** |
| @@ -520,7 +510,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, | |||
| 520 | struct intel_th_output *output) | 510 | struct intel_th_output *output) |
| 521 | { | 511 | { |
| 522 | struct gth_device *gth = dev_get_drvdata(&thdev->dev); | 512 | struct gth_device *gth = dev_get_drvdata(&thdev->dev); |
| 523 | u32 scr = 0xfc0000; | 513 | u32 scr = 0xfc0000, scrpd; |
| 524 | int master; | 514 | int master; |
| 525 | 515 | ||
| 526 | spin_lock(>h->gth_lock); | 516 | spin_lock(>h->gth_lock); |
| @@ -535,6 +525,10 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, | |||
| 535 | output->active = true; | 525 | output->active = true; |
| 536 | spin_unlock(>h->gth_lock); | 526 | spin_unlock(>h->gth_lock); |
| 537 | 527 | ||
| 528 | scrpd = ioread32(gth->base + REG_GTH_SCRPD0); | ||
| 529 | scrpd |= output->scratchpad; | ||
| 530 | iowrite32(scrpd, gth->base + REG_GTH_SCRPD0); | ||
| 531 | |||
| 538 | iowrite32(scr, gth->base + REG_GTH_SCR); | 532 | iowrite32(scr, gth->base + REG_GTH_SCR); |
| 539 | iowrite32(0, gth->base + REG_GTH_SCR2); | 533 | iowrite32(0, gth->base + REG_GTH_SCR2); |
| 540 | } | 534 | } |
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h index 3b714b7a61db..56f0d2620577 100644 --- a/drivers/hwtracing/intel_th/gth.h +++ b/drivers/hwtracing/intel_th/gth.h | |||
| @@ -57,9 +57,6 @@ enum { | |||
| 57 | REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ | 57 | REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ |
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | /* Externall debugger is using Intel TH */ | ||
| 61 | #define SCRPD_DEBUGGER_IN_USE BIT(24) | ||
| 62 | |||
| 63 | /* waiting for Pipeline Empty bit(s) to assert for GTH */ | 60 | /* waiting for Pipeline Empty bit(s) to assert for GTH */ |
| 64 | #define GTH_PLE_WAITLOOP_DEPTH 10000 | 61 | #define GTH_PLE_WAITLOOP_DEPTH 10000 |
| 65 | 62 | ||
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 57fd72b20fae..eedd09332db6 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h | |||
| @@ -30,6 +30,7 @@ enum { | |||
| 30 | * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices | 30 | * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices |
| 31 | * @port: output port number, assigned by the switch | 31 | * @port: output port number, assigned by the switch |
| 32 | * @type: GTH_{MSU,CTP,PTI} | 32 | * @type: GTH_{MSU,CTP,PTI} |
| 33 | * @scratchpad: scratchpad bits to flag when this output is enabled | ||
| 33 | * @multiblock: true for multiblock output configuration | 34 | * @multiblock: true for multiblock output configuration |
| 34 | * @active: true when this output is enabled | 35 | * @active: true when this output is enabled |
| 35 | * | 36 | * |
| @@ -41,6 +42,7 @@ enum { | |||
| 41 | struct intel_th_output { | 42 | struct intel_th_output { |
| 42 | int port; | 43 | int port; |
| 43 | unsigned int type; | 44 | unsigned int type; |
| 45 | unsigned int scratchpad; | ||
| 44 | bool multiblock; | 46 | bool multiblock; |
| 45 | bool active; | 47 | bool active; |
| 46 | }; | 48 | }; |
| @@ -241,4 +243,43 @@ enum { | |||
| 241 | GTH_PTI = 4, /* MIPI-PTI */ | 243 | GTH_PTI = 4, /* MIPI-PTI */ |
| 242 | }; | 244 | }; |
| 243 | 245 | ||
| 246 | /* | ||
| 247 | * Scratchpad bits: tell firmware and external debuggers | ||
| 248 | * what we are up to. | ||
| 249 | */ | ||
| 250 | enum { | ||
| 251 | /* Memory is the primary destination */ | ||
| 252 | SCRPD_MEM_IS_PRIM_DEST = BIT(0), | ||
| 253 | /* XHCI DbC is the primary destination */ | ||
| 254 | SCRPD_DBC_IS_PRIM_DEST = BIT(1), | ||
| 255 | /* PTI is the primary destination */ | ||
| 256 | SCRPD_PTI_IS_PRIM_DEST = BIT(2), | ||
| 257 | /* BSSB is the primary destination */ | ||
| 258 | SCRPD_BSSB_IS_PRIM_DEST = BIT(3), | ||
| 259 | /* PTI is the alternate destination */ | ||
| 260 | SCRPD_PTI_IS_ALT_DEST = BIT(4), | ||
| 261 | /* BSSB is the alternate destination */ | ||
| 262 | SCRPD_BSSB_IS_ALT_DEST = BIT(5), | ||
| 263 | /* DeepSx exit occurred */ | ||
| 264 | SCRPD_DEEPSX_EXIT = BIT(6), | ||
| 265 | /* S4 exit occurred */ | ||
| 266 | SCRPD_S4_EXIT = BIT(7), | ||
| 267 | /* S5 exit occurred */ | ||
| 268 | SCRPD_S5_EXIT = BIT(8), | ||
| 269 | /* MSU controller 0/1 is enabled */ | ||
| 270 | SCRPD_MSC0_IS_ENABLED = BIT(9), | ||
| 271 | SCRPD_MSC1_IS_ENABLED = BIT(10), | ||
| 272 | /* Sx exit occurred */ | ||
| 273 | SCRPD_SX_EXIT = BIT(11), | ||
| 274 | /* Trigger Unit is enabled */ | ||
| 275 | SCRPD_TRIGGER_IS_ENABLED = BIT(12), | ||
| 276 | SCRPD_ODLA_IS_ENABLED = BIT(13), | ||
| 277 | SCRPD_SOCHAP_IS_ENABLED = BIT(14), | ||
| 278 | SCRPD_STH_IS_ENABLED = BIT(15), | ||
| 279 | SCRPD_DCIH_IS_ENABLED = BIT(16), | ||
| 280 | SCRPD_VER_IS_ENABLED = BIT(17), | ||
| 281 | /* External debugger is using Intel TH */ | ||
| 282 | SCRPD_DEBUGGER_IN_USE = BIT(24), | ||
| 283 | }; | ||
| 284 | |||
| 244 | #endif | 285 | #endif |
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 70ca27e45602..d9d6022c5aca 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c | |||
| @@ -408,7 +408,7 @@ msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data, | |||
| 408 | * Second time (wrap_count==1), it's just like any other block, | 408 | * Second time (wrap_count==1), it's just like any other block, |
| 409 | * containing data in the range of [MSC_BDESC..data_bytes]. | 409 | * containing data in the range of [MSC_BDESC..data_bytes]. |
| 410 | */ | 410 | */ |
| 411 | if (iter->block == iter->start_block && iter->wrap_count) { | 411 | if (iter->block == iter->start_block && iter->wrap_count == 2) { |
| 412 | tocopy = DATA_IN_PAGE - data_bytes; | 412 | tocopy = DATA_IN_PAGE - data_bytes; |
| 413 | src += data_bytes; | 413 | src += data_bytes; |
| 414 | } | 414 | } |
| @@ -1112,12 +1112,11 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf, | |||
| 1112 | size = msc->nr_pages << PAGE_SHIFT; | 1112 | size = msc->nr_pages << PAGE_SHIFT; |
| 1113 | 1113 | ||
| 1114 | if (!size) | 1114 | if (!size) |
| 1115 | return 0; | 1115 | goto put_count; |
| 1116 | 1116 | ||
| 1117 | if (off >= size) { | 1117 | if (off >= size) |
| 1118 | len = 0; | ||
| 1119 | goto put_count; | 1118 | goto put_count; |
| 1120 | } | 1119 | |
| 1121 | if (off + len >= size) | 1120 | if (off + len >= size) |
| 1122 | len = size - off; | 1121 | len = size - off; |
| 1123 | 1122 | ||
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 641e87936064..bca7a2ac00d6 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c | |||
| @@ -46,8 +46,6 @@ static int intel_th_pci_probe(struct pci_dev *pdev, | |||
| 46 | if (IS_ERR(th)) | 46 | if (IS_ERR(th)) |
| 47 | return PTR_ERR(th); | 47 | return PTR_ERR(th); |
| 48 | 48 | ||
| 49 | pci_set_drvdata(pdev, th); | ||
| 50 | |||
| 51 | return 0; | 49 | return 0; |
| 52 | } | 50 | } |
| 53 | 51 | ||
| @@ -67,6 +65,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
| 67 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126), | 65 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126), |
| 68 | .driver_data = (kernel_ulong_t)0, | 66 | .driver_data = (kernel_ulong_t)0, |
| 69 | }, | 67 | }, |
| 68 | { | ||
| 69 | /* Apollo Lake */ | ||
| 70 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a8e), | ||
| 71 | .driver_data = (kernel_ulong_t)0, | ||
| 72 | }, | ||
| 73 | { | ||
| 74 | /* Broxton */ | ||
| 75 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80), | ||
| 76 | .driver_data = (kernel_ulong_t)0, | ||
| 77 | }, | ||
| 70 | { 0 }, | 78 | { 0 }, |
| 71 | }; | 79 | }; |
| 72 | 80 | ||
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index 56101c33e10f..e1aee61dd7b3 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c | |||
| @@ -94,10 +94,13 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master, | |||
| 94 | case STP_PACKET_TRIG: | 94 | case STP_PACKET_TRIG: |
| 95 | if (flags & STP_PACKET_TIMESTAMPED) | 95 | if (flags & STP_PACKET_TIMESTAMPED) |
| 96 | reg += 4; | 96 | reg += 4; |
| 97 | iowrite8(*payload, sth->base + reg); | 97 | writeb_relaxed(*payload, sth->base + reg); |
| 98 | break; | 98 | break; |
| 99 | 99 | ||
| 100 | case STP_PACKET_MERR: | 100 | case STP_PACKET_MERR: |
| 101 | if (size > 4) | ||
| 102 | size = 4; | ||
| 103 | |||
| 101 | sth_iowrite(&out->MERR, payload, size); | 104 | sth_iowrite(&out->MERR, payload, size); |
| 102 | break; | 105 | break; |
| 103 | 106 | ||
| @@ -107,8 +110,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master, | |||
| 107 | else | 110 | else |
| 108 | outp = (u64 __iomem *)&out->FLAG; | 111 | outp = (u64 __iomem *)&out->FLAG; |
| 109 | 112 | ||
| 110 | size = 1; | 113 | size = 0; |
| 111 | sth_iowrite(outp, payload, size); | 114 | writeb_relaxed(0, outp); |
| 112 | break; | 115 | break; |
| 113 | 116 | ||
| 114 | case STP_PACKET_USER: | 117 | case STP_PACKET_USER: |
| @@ -129,6 +132,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master, | |||
| 129 | 132 | ||
| 130 | sth_iowrite(outp, payload, size); | 133 | sth_iowrite(outp, payload, size); |
| 131 | break; | 134 | break; |
| 135 | default: | ||
| 136 | return -ENOTSUPP; | ||
| 132 | } | 137 | } |
| 133 | 138 | ||
| 134 | return size; | 139 | return size; |
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig index 83e9f591a54b..847a39b35307 100644 --- a/drivers/hwtracing/stm/Kconfig +++ b/drivers/hwtracing/stm/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config STM | 1 | config STM |
| 2 | tristate "System Trace Module devices" | 2 | tristate "System Trace Module devices" |
| 3 | select CONFIGFS_FS | 3 | select CONFIGFS_FS |
| 4 | select SRCU | ||
| 4 | help | 5 | help |
| 5 | A System Trace Module (STM) is a device exporting data in System | 6 | A System Trace Module (STM) is a device exporting data in System |
| 6 | Trace Protocol (STP) format as defined by MIPI STP standards. | 7 | Trace Protocol (STP) format as defined by MIPI STP standards. |
| @@ -8,6 +9,8 @@ config STM | |||
| 8 | 9 | ||
| 9 | Say Y here to enable System Trace Module device support. | 10 | Say Y here to enable System Trace Module device support. |
| 10 | 11 | ||
| 12 | if STM | ||
| 13 | |||
| 11 | config STM_DUMMY | 14 | config STM_DUMMY |
| 12 | tristate "Dummy STM driver" | 15 | tristate "Dummy STM driver" |
| 13 | help | 16 | help |
| @@ -24,3 +27,16 @@ config STM_SOURCE_CONSOLE | |||
| 24 | 27 | ||
| 25 | If you want to send kernel console messages over STM devices, | 28 | If you want to send kernel console messages over STM devices, |
| 26 | say Y. | 29 | say Y. |
| 30 | |||
| 31 | config STM_SOURCE_HEARTBEAT | ||
| 32 | tristate "Heartbeat over STM devices" | ||
| 33 | help | ||
| 34 | This is a kernel space trace source that sends periodic | ||
| 35 | heartbeat messages to trace hosts over STM devices. It is | ||
| 36 | also useful for testing stm class drivers and the stm class | ||
| 37 | framework itself. | ||
| 38 | |||
| 39 | If you want to send heartbeat messages over STM devices, | ||
| 40 | say Y. | ||
| 41 | |||
| 42 | endif | ||
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile index f9312c38dd7a..a9ce3d487e57 100644 --- a/drivers/hwtracing/stm/Makefile +++ b/drivers/hwtracing/stm/Makefile | |||
| @@ -5,5 +5,7 @@ stm_core-y := core.o policy.o | |||
| 5 | obj-$(CONFIG_STM_DUMMY) += dummy_stm.o | 5 | obj-$(CONFIG_STM_DUMMY) += dummy_stm.o |
| 6 | 6 | ||
| 7 | obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o | 7 | obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o |
| 8 | obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o | ||
| 8 | 9 | ||
| 9 | stm_console-y := console.o | 10 | stm_console-y := console.o |
| 11 | stm_heartbeat-y := heartbeat.o | ||
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index b6445d9e5453..de80d45d8df9 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c | |||
| @@ -113,6 +113,7 @@ struct stm_device *stm_find_device(const char *buf) | |||
| 113 | 113 | ||
| 114 | stm = to_stm_device(dev); | 114 | stm = to_stm_device(dev); |
| 115 | if (!try_module_get(stm->owner)) { | 115 | if (!try_module_get(stm->owner)) { |
| 116 | /* matches class_find_device() above */ | ||
| 116 | put_device(dev); | 117 | put_device(dev); |
| 117 | return NULL; | 118 | return NULL; |
| 118 | } | 119 | } |
| @@ -125,7 +126,7 @@ struct stm_device *stm_find_device(const char *buf) | |||
| 125 | * @stm: stm device, previously acquired by stm_find_device() | 126 | * @stm: stm device, previously acquired by stm_find_device() |
| 126 | * | 127 | * |
| 127 | * This drops the module reference and device reference taken by | 128 | * This drops the module reference and device reference taken by |
| 128 | * stm_find_device(). | 129 | * stm_find_device() or stm_char_open(). |
| 129 | */ | 130 | */ |
| 130 | void stm_put_device(struct stm_device *stm) | 131 | void stm_put_device(struct stm_device *stm) |
| 131 | { | 132 | { |
| @@ -185,6 +186,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output) | |||
| 185 | { | 186 | { |
| 186 | struct stp_master *master = stm_master(stm, output->master); | 187 | struct stp_master *master = stm_master(stm, output->master); |
| 187 | 188 | ||
| 189 | lockdep_assert_held(&stm->mc_lock); | ||
| 190 | lockdep_assert_held(&output->lock); | ||
| 191 | |||
| 188 | if (WARN_ON_ONCE(master->nr_free < output->nr_chans)) | 192 | if (WARN_ON_ONCE(master->nr_free < output->nr_chans)) |
| 189 | return; | 193 | return; |
| 190 | 194 | ||
| @@ -199,6 +203,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output) | |||
| 199 | { | 203 | { |
| 200 | struct stp_master *master = stm_master(stm, output->master); | 204 | struct stp_master *master = stm_master(stm, output->master); |
| 201 | 205 | ||
| 206 | lockdep_assert_held(&stm->mc_lock); | ||
| 207 | lockdep_assert_held(&output->lock); | ||
| 208 | |||
| 202 | bitmap_release_region(&master->chan_map[0], output->channel, | 209 | bitmap_release_region(&master->chan_map[0], output->channel, |
| 203 | ilog2(output->nr_chans)); | 210 | ilog2(output->nr_chans)); |
| 204 | 211 | ||
| @@ -233,7 +240,7 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start, | |||
| 233 | return -1; | 240 | return -1; |
| 234 | } | 241 | } |
| 235 | 242 | ||
| 236 | static unsigned int | 243 | static int |
| 237 | stm_find_master_chan(struct stm_device *stm, unsigned int width, | 244 | stm_find_master_chan(struct stm_device *stm, unsigned int width, |
| 238 | unsigned int *mstart, unsigned int mend, | 245 | unsigned int *mstart, unsigned int mend, |
| 239 | unsigned int *cstart, unsigned int cend) | 246 | unsigned int *cstart, unsigned int cend) |
| @@ -288,12 +295,13 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, | |||
| 288 | } | 295 | } |
| 289 | 296 | ||
| 290 | spin_lock(&stm->mc_lock); | 297 | spin_lock(&stm->mc_lock); |
| 298 | spin_lock(&output->lock); | ||
| 291 | /* output is already assigned -- shouldn't happen */ | 299 | /* output is already assigned -- shouldn't happen */ |
| 292 | if (WARN_ON_ONCE(output->nr_chans)) | 300 | if (WARN_ON_ONCE(output->nr_chans)) |
| 293 | goto unlock; | 301 | goto unlock; |
| 294 | 302 | ||
| 295 | ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend); | 303 | ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend); |
| 296 | if (ret) | 304 | if (ret < 0) |
| 297 | goto unlock; | 305 | goto unlock; |
| 298 | 306 | ||
| 299 | output->master = midx; | 307 | output->master = midx; |
| @@ -304,6 +312,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width, | |||
| 304 | 312 | ||
| 305 | ret = 0; | 313 | ret = 0; |
| 306 | unlock: | 314 | unlock: |
| 315 | spin_unlock(&output->lock); | ||
| 307 | spin_unlock(&stm->mc_lock); | 316 | spin_unlock(&stm->mc_lock); |
| 308 | 317 | ||
| 309 | return ret; | 318 | return ret; |
| @@ -312,11 +321,18 @@ unlock: | |||
| 312 | static void stm_output_free(struct stm_device *stm, struct stm_output *output) | 321 | static void stm_output_free(struct stm_device *stm, struct stm_output *output) |
| 313 | { | 322 | { |
| 314 | spin_lock(&stm->mc_lock); | 323 | spin_lock(&stm->mc_lock); |
| 324 | spin_lock(&output->lock); | ||
| 315 | if (output->nr_chans) | 325 | if (output->nr_chans) |
| 316 | stm_output_disclaim(stm, output); | 326 | stm_output_disclaim(stm, output); |
| 327 | spin_unlock(&output->lock); | ||
| 317 | spin_unlock(&stm->mc_lock); | 328 | spin_unlock(&stm->mc_lock); |
| 318 | } | 329 | } |
| 319 | 330 | ||
| 331 | static void stm_output_init(struct stm_output *output) | ||
| 332 | { | ||
| 333 | spin_lock_init(&output->lock); | ||
| 334 | } | ||
| 335 | |||
| 320 | static int major_match(struct device *dev, const void *data) | 336 | static int major_match(struct device *dev, const void *data) |
| 321 | { | 337 | { |
| 322 | unsigned int major = *(unsigned int *)data; | 338 | unsigned int major = *(unsigned int *)data; |
| @@ -339,6 +355,7 @@ static int stm_char_open(struct inode *inode, struct file *file) | |||
| 339 | if (!stmf) | 355 | if (!stmf) |
| 340 | return -ENOMEM; | 356 | return -ENOMEM; |
| 341 | 357 | ||
| 358 | stm_output_init(&stmf->output); | ||
| 342 | stmf->stm = to_stm_device(dev); | 359 | stmf->stm = to_stm_device(dev); |
| 343 | 360 | ||
| 344 | if (!try_module_get(stmf->stm->owner)) | 361 | if (!try_module_get(stmf->stm->owner)) |
| @@ -349,6 +366,8 @@ static int stm_char_open(struct inode *inode, struct file *file) | |||
| 349 | return nonseekable_open(inode, file); | 366 | return nonseekable_open(inode, file); |
| 350 | 367 | ||
| 351 | err_free: | 368 | err_free: |
| 369 | /* matches class_find_device() above */ | ||
| 370 | put_device(dev); | ||
| 352 | kfree(stmf); | 371 | kfree(stmf); |
| 353 | 372 | ||
| 354 | return err; | 373 | return err; |
| @@ -357,9 +376,19 @@ err_free: | |||
| 357 | static int stm_char_release(struct inode *inode, struct file *file) | 376 | static int stm_char_release(struct inode *inode, struct file *file) |
| 358 | { | 377 | { |
| 359 | struct stm_file *stmf = file->private_data; | 378 | struct stm_file *stmf = file->private_data; |
| 379 | struct stm_device *stm = stmf->stm; | ||
| 380 | |||
| 381 | if (stm->data->unlink) | ||
| 382 | stm->data->unlink(stm->data, stmf->output.master, | ||
| 383 | stmf->output.channel); | ||
| 360 | 384 | ||
| 361 | stm_output_free(stmf->stm, &stmf->output); | 385 | stm_output_free(stm, &stmf->output); |
| 362 | stm_put_device(stmf->stm); | 386 | |
| 387 | /* | ||
| 388 | * matches the stm_char_open()'s | ||
| 389 | * class_find_device() + try_module_get() | ||
| 390 | */ | ||
| 391 | stm_put_device(stm); | ||
| 363 | kfree(stmf); | 392 | kfree(stmf); |
| 364 | 393 | ||
| 365 | return 0; | 394 | return 0; |
| @@ -380,8 +409,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width) | |||
| 380 | return ret; | 409 | return ret; |
| 381 | } | 410 | } |
| 382 | 411 | ||
| 383 | static void stm_write(struct stm_data *data, unsigned int master, | 412 | static ssize_t stm_write(struct stm_data *data, unsigned int master, |
| 384 | unsigned int channel, const char *buf, size_t count) | 413 | unsigned int channel, const char *buf, size_t count) |
| 385 | { | 414 | { |
| 386 | unsigned int flags = STP_PACKET_TIMESTAMPED; | 415 | unsigned int flags = STP_PACKET_TIMESTAMPED; |
| 387 | const unsigned char *p = buf, nil = 0; | 416 | const unsigned char *p = buf, nil = 0; |
| @@ -393,9 +422,14 @@ static void stm_write(struct stm_data *data, unsigned int master, | |||
| 393 | sz = data->packet(data, master, channel, STP_PACKET_DATA, flags, | 422 | sz = data->packet(data, master, channel, STP_PACKET_DATA, flags, |
| 394 | sz, p); | 423 | sz, p); |
| 395 | flags = 0; | 424 | flags = 0; |
| 425 | |||
| 426 | if (sz < 0) | ||
| 427 | break; | ||
| 396 | } | 428 | } |
| 397 | 429 | ||
| 398 | data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); | 430 | data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); |
| 431 | |||
| 432 | return pos; | ||
| 399 | } | 433 | } |
| 400 | 434 | ||
| 401 | static ssize_t stm_char_write(struct file *file, const char __user *buf, | 435 | static ssize_t stm_char_write(struct file *file, const char __user *buf, |
| @@ -406,6 +440,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, | |||
| 406 | char *kbuf; | 440 | char *kbuf; |
| 407 | int err; | 441 | int err; |
| 408 | 442 | ||
| 443 | if (count + 1 > PAGE_SIZE) | ||
| 444 | count = PAGE_SIZE - 1; | ||
| 445 | |||
| 409 | /* | 446 | /* |
| 410 | * if no m/c have been assigned to this writer up to this | 447 | * if no m/c have been assigned to this writer up to this |
| 411 | * point, use "default" policy entry | 448 | * point, use "default" policy entry |
| @@ -430,8 +467,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, | |||
| 430 | return -EFAULT; | 467 | return -EFAULT; |
| 431 | } | 468 | } |
| 432 | 469 | ||
| 433 | stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf, | 470 | count = stm_write(stm->data, stmf->output.master, stmf->output.channel, |
| 434 | count); | 471 | kbuf, count); |
| 435 | 472 | ||
| 436 | kfree(kbuf); | 473 | kfree(kbuf); |
| 437 | 474 | ||
| @@ -515,10 +552,8 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) | |||
| 515 | ret = stm->data->link(stm->data, stmf->output.master, | 552 | ret = stm->data->link(stm->data, stmf->output.master, |
| 516 | stmf->output.channel); | 553 | stmf->output.channel); |
| 517 | 554 | ||
| 518 | if (ret) { | 555 | if (ret) |
| 519 | stm_output_free(stmf->stm, &stmf->output); | 556 | stm_output_free(stmf->stm, &stmf->output); |
| 520 | stm_put_device(stmf->stm); | ||
| 521 | } | ||
| 522 | 557 | ||
| 523 | err_free: | 558 | err_free: |
| 524 | kfree(id); | 559 | kfree(id); |
| @@ -618,7 +653,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
| 618 | if (!stm_data->packet || !stm_data->sw_nchannels) | 653 | if (!stm_data->packet || !stm_data->sw_nchannels) |
| 619 | return -EINVAL; | 654 | return -EINVAL; |
| 620 | 655 | ||
| 621 | nmasters = stm_data->sw_end - stm_data->sw_start; | 656 | nmasters = stm_data->sw_end - stm_data->sw_start + 1; |
| 622 | stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); | 657 | stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); |
| 623 | if (!stm) | 658 | if (!stm) |
| 624 | return -ENOMEM; | 659 | return -ENOMEM; |
| @@ -641,6 +676,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
| 641 | if (err) | 676 | if (err) |
| 642 | goto err_device; | 677 | goto err_device; |
| 643 | 678 | ||
| 679 | mutex_init(&stm->link_mutex); | ||
| 644 | spin_lock_init(&stm->link_lock); | 680 | spin_lock_init(&stm->link_lock); |
| 645 | INIT_LIST_HEAD(&stm->link_list); | 681 | INIT_LIST_HEAD(&stm->link_list); |
| 646 | 682 | ||
| @@ -654,6 +690,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
| 654 | return 0; | 690 | return 0; |
| 655 | 691 | ||
| 656 | err_device: | 692 | err_device: |
| 693 | /* matches device_initialize() above */ | ||
| 657 | put_device(&stm->dev); | 694 | put_device(&stm->dev); |
| 658 | err_free: | 695 | err_free: |
| 659 | kfree(stm); | 696 | kfree(stm); |
| @@ -662,20 +699,28 @@ err_free: | |||
| 662 | } | 699 | } |
| 663 | EXPORT_SYMBOL_GPL(stm_register_device); | 700 | EXPORT_SYMBOL_GPL(stm_register_device); |
| 664 | 701 | ||
| 665 | static void __stm_source_link_drop(struct stm_source_device *src, | 702 | static int __stm_source_link_drop(struct stm_source_device *src, |
| 666 | struct stm_device *stm); | 703 | struct stm_device *stm); |
| 667 | 704 | ||
| 668 | void stm_unregister_device(struct stm_data *stm_data) | 705 | void stm_unregister_device(struct stm_data *stm_data) |
| 669 | { | 706 | { |
| 670 | struct stm_device *stm = stm_data->stm; | 707 | struct stm_device *stm = stm_data->stm; |
| 671 | struct stm_source_device *src, *iter; | 708 | struct stm_source_device *src, *iter; |
| 672 | int i; | 709 | int i, ret; |
| 673 | 710 | ||
| 674 | spin_lock(&stm->link_lock); | 711 | mutex_lock(&stm->link_mutex); |
| 675 | list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) { | 712 | list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) { |
| 676 | __stm_source_link_drop(src, stm); | 713 | ret = __stm_source_link_drop(src, stm); |
| 714 | /* | ||
| 715 | * src <-> stm link must not change under the same | ||
| 716 | * stm::link_mutex, so complain loudly if it has; | ||
| 717 | * also in this situation ret!=0 means this src is | ||
| 718 | * not connected to this stm and it should be otherwise | ||
| 719 | * safe to proceed with the tear-down of stm. | ||
| 720 | */ | ||
| 721 | WARN_ON_ONCE(ret); | ||
| 677 | } | 722 | } |
| 678 | spin_unlock(&stm->link_lock); | 723 | mutex_unlock(&stm->link_mutex); |
| 679 | 724 | ||
| 680 | synchronize_srcu(&stm_source_srcu); | 725 | synchronize_srcu(&stm_source_srcu); |
| 681 | 726 | ||
| @@ -686,7 +731,7 @@ void stm_unregister_device(struct stm_data *stm_data) | |||
| 686 | stp_policy_unbind(stm->policy); | 731 | stp_policy_unbind(stm->policy); |
| 687 | mutex_unlock(&stm->policy_mutex); | 732 | mutex_unlock(&stm->policy_mutex); |
| 688 | 733 | ||
| 689 | for (i = 0; i < stm->sw_nmasters; i++) | 734 | for (i = stm->data->sw_start; i <= stm->data->sw_end; i++) |
| 690 | stp_master_free(stm, i); | 735 | stp_master_free(stm, i); |
| 691 | 736 | ||
| 692 | device_unregister(&stm->dev); | 737 | device_unregister(&stm->dev); |
| @@ -694,6 +739,17 @@ void stm_unregister_device(struct stm_data *stm_data) | |||
| 694 | } | 739 | } |
| 695 | EXPORT_SYMBOL_GPL(stm_unregister_device); | 740 | EXPORT_SYMBOL_GPL(stm_unregister_device); |
| 696 | 741 | ||
| 742 | /* | ||
| 743 | * stm::link_list access serialization uses a spinlock and a mutex; holding | ||
| 744 | * either of them guarantees that the list is stable; modification requires | ||
| 745 | * holding both of them. | ||
| 746 | * | ||
| 747 | * Lock ordering is as follows: | ||
| 748 | * stm::link_mutex | ||
| 749 | * stm::link_lock | ||
| 750 | * src::link_lock | ||
| 751 | */ | ||
| 752 | |||
| 697 | /** | 753 | /** |
| 698 | * stm_source_link_add() - connect an stm_source device to an stm device | 754 | * stm_source_link_add() - connect an stm_source device to an stm device |
| 699 | * @src: stm_source device | 755 | * @src: stm_source device |
| @@ -710,6 +766,7 @@ static int stm_source_link_add(struct stm_source_device *src, | |||
| 710 | char *id; | 766 | char *id; |
| 711 | int err; | 767 | int err; |
| 712 | 768 | ||
| 769 | mutex_lock(&stm->link_mutex); | ||
| 713 | spin_lock(&stm->link_lock); | 770 | spin_lock(&stm->link_lock); |
| 714 | spin_lock(&src->link_lock); | 771 | spin_lock(&src->link_lock); |
| 715 | 772 | ||
| @@ -719,6 +776,7 @@ static int stm_source_link_add(struct stm_source_device *src, | |||
| 719 | 776 | ||
| 720 | spin_unlock(&src->link_lock); | 777 | spin_unlock(&src->link_lock); |
| 721 | spin_unlock(&stm->link_lock); | 778 | spin_unlock(&stm->link_lock); |
| 779 | mutex_unlock(&stm->link_mutex); | ||
| 722 | 780 | ||
| 723 | id = kstrdup(src->data->name, GFP_KERNEL); | 781 | id = kstrdup(src->data->name, GFP_KERNEL); |
| 724 | if (id) { | 782 | if (id) { |
| @@ -753,9 +811,9 @@ static int stm_source_link_add(struct stm_source_device *src, | |||
| 753 | 811 | ||
| 754 | fail_free_output: | 812 | fail_free_output: |
| 755 | stm_output_free(stm, &src->output); | 813 | stm_output_free(stm, &src->output); |
| 756 | stm_put_device(stm); | ||
| 757 | 814 | ||
| 758 | fail_detach: | 815 | fail_detach: |
| 816 | mutex_lock(&stm->link_mutex); | ||
| 759 | spin_lock(&stm->link_lock); | 817 | spin_lock(&stm->link_lock); |
| 760 | spin_lock(&src->link_lock); | 818 | spin_lock(&src->link_lock); |
| 761 | 819 | ||
| @@ -764,6 +822,7 @@ fail_detach: | |||
| 764 | 822 | ||
| 765 | spin_unlock(&src->link_lock); | 823 | spin_unlock(&src->link_lock); |
| 766 | spin_unlock(&stm->link_lock); | 824 | spin_unlock(&stm->link_lock); |
| 825 | mutex_unlock(&stm->link_mutex); | ||
| 767 | 826 | ||
| 768 | return err; | 827 | return err; |
| 769 | } | 828 | } |
| @@ -776,28 +835,55 @@ fail_detach: | |||
| 776 | * If @stm is @src::link, disconnect them from one another and put the | 835 | * If @stm is @src::link, disconnect them from one another and put the |
| 777 | * reference on the @stm device. | 836 | * reference on the @stm device. |
| 778 | * | 837 | * |
| 779 | * Caller must hold stm::link_lock. | 838 | * Caller must hold stm::link_mutex. |
| 780 | */ | 839 | */ |
| 781 | static void __stm_source_link_drop(struct stm_source_device *src, | 840 | static int __stm_source_link_drop(struct stm_source_device *src, |
| 782 | struct stm_device *stm) | 841 | struct stm_device *stm) |
| 783 | { | 842 | { |
| 784 | struct stm_device *link; | 843 | struct stm_device *link; |
| 844 | int ret = 0; | ||
| 845 | |||
| 846 | lockdep_assert_held(&stm->link_mutex); | ||
| 785 | 847 | ||
| 848 | /* for stm::link_list modification, we hold both mutex and spinlock */ | ||
| 849 | spin_lock(&stm->link_lock); | ||
| 786 | spin_lock(&src->link_lock); | 850 | spin_lock(&src->link_lock); |
| 787 | link = srcu_dereference_check(src->link, &stm_source_srcu, 1); | 851 | link = srcu_dereference_check(src->link, &stm_source_srcu, 1); |
| 788 | if (WARN_ON_ONCE(link != stm)) { | 852 | |
| 789 | spin_unlock(&src->link_lock); | 853 | /* |
| 790 | return; | 854 | * The linked device may have changed since we last looked, because |
| 855 | * we weren't holding the src::link_lock back then; if this is the | ||
| 856 | * case, tell the caller to retry. | ||
| 857 | */ | ||
| 858 | if (link != stm) { | ||
| 859 | ret = -EAGAIN; | ||
| 860 | goto unlock; | ||
| 791 | } | 861 | } |
| 792 | 862 | ||
| 793 | stm_output_free(link, &src->output); | 863 | stm_output_free(link, &src->output); |
| 794 | /* caller must hold stm::link_lock */ | ||
| 795 | list_del_init(&src->link_entry); | 864 | list_del_init(&src->link_entry); |
| 796 | /* matches stm_find_device() from stm_source_link_store() */ | 865 | /* matches stm_find_device() from stm_source_link_store() */ |
| 797 | stm_put_device(link); | 866 | stm_put_device(link); |
| 798 | rcu_assign_pointer(src->link, NULL); | 867 | rcu_assign_pointer(src->link, NULL); |
| 799 | 868 | ||
| 869 | unlock: | ||
| 800 | spin_unlock(&src->link_lock); | 870 | spin_unlock(&src->link_lock); |
| 871 | spin_unlock(&stm->link_lock); | ||
| 872 | |||
| 873 | /* | ||
| 874 | * Call the unlink callbacks for both source and stm, when we know | ||
| 875 | * that we have actually performed the unlinking. | ||
| 876 | */ | ||
| 877 | if (!ret) { | ||
| 878 | if (src->data->unlink) | ||
| 879 | src->data->unlink(src->data); | ||
| 880 | |||
| 881 | if (stm->data->unlink) | ||
| 882 | stm->data->unlink(stm->data, src->output.master, | ||
| 883 | src->output.channel); | ||
| 884 | } | ||
| 885 | |||
| 886 | return ret; | ||
| 801 | } | 887 | } |
| 802 | 888 | ||
| 803 | /** | 889 | /** |
| @@ -813,21 +899,29 @@ static void __stm_source_link_drop(struct stm_source_device *src, | |||
| 813 | static void stm_source_link_drop(struct stm_source_device *src) | 899 | static void stm_source_link_drop(struct stm_source_device *src) |
| 814 | { | 900 | { |
| 815 | struct stm_device *stm; | 901 | struct stm_device *stm; |
| 816 | int idx; | 902 | int idx, ret; |
| 817 | 903 | ||
| 904 | retry: | ||
| 818 | idx = srcu_read_lock(&stm_source_srcu); | 905 | idx = srcu_read_lock(&stm_source_srcu); |
| 906 | /* | ||
| 907 | * The stm device will be valid for the duration of this | ||
| 908 | * read section, but the link may change before we grab | ||
| 909 | * the src::link_lock in __stm_source_link_drop(). | ||
| 910 | */ | ||
| 819 | stm = srcu_dereference(src->link, &stm_source_srcu); | 911 | stm = srcu_dereference(src->link, &stm_source_srcu); |
| 820 | 912 | ||
| 913 | ret = 0; | ||
| 821 | if (stm) { | 914 | if (stm) { |
| 822 | if (src->data->unlink) | 915 | mutex_lock(&stm->link_mutex); |
| 823 | src->data->unlink(src->data); | 916 | ret = __stm_source_link_drop(src, stm); |
| 824 | 917 | mutex_unlock(&stm->link_mutex); | |
| 825 | spin_lock(&stm->link_lock); | ||
| 826 | __stm_source_link_drop(src, stm); | ||
| 827 | spin_unlock(&stm->link_lock); | ||
| 828 | } | 918 | } |
| 829 | 919 | ||
| 830 | srcu_read_unlock(&stm_source_srcu, idx); | 920 | srcu_read_unlock(&stm_source_srcu, idx); |
| 921 | |||
| 922 | /* if it did change, retry */ | ||
| 923 | if (ret == -EAGAIN) | ||
| 924 | goto retry; | ||
| 831 | } | 925 | } |
| 832 | 926 | ||
| 833 | static ssize_t stm_source_link_show(struct device *dev, | 927 | static ssize_t stm_source_link_show(struct device *dev, |
| @@ -862,8 +956,10 @@ static ssize_t stm_source_link_store(struct device *dev, | |||
| 862 | return -EINVAL; | 956 | return -EINVAL; |
| 863 | 957 | ||
| 864 | err = stm_source_link_add(src, link); | 958 | err = stm_source_link_add(src, link); |
| 865 | if (err) | 959 | if (err) { |
| 960 | /* matches the stm_find_device() above */ | ||
| 866 | stm_put_device(link); | 961 | stm_put_device(link); |
| 962 | } | ||
| 867 | 963 | ||
| 868 | return err ? : count; | 964 | return err ? : count; |
| 869 | } | 965 | } |
| @@ -925,6 +1021,7 @@ int stm_source_register_device(struct device *parent, | |||
| 925 | if (err) | 1021 | if (err) |
| 926 | goto err; | 1022 | goto err; |
| 927 | 1023 | ||
| 1024 | stm_output_init(&src->output); | ||
| 928 | spin_lock_init(&src->link_lock); | 1025 | spin_lock_init(&src->link_lock); |
| 929 | INIT_LIST_HEAD(&src->link_entry); | 1026 | INIT_LIST_HEAD(&src->link_entry); |
| 930 | src->data = data; | 1027 | src->data = data; |
| @@ -973,9 +1070,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan, | |||
| 973 | 1070 | ||
| 974 | stm = srcu_dereference(src->link, &stm_source_srcu); | 1071 | stm = srcu_dereference(src->link, &stm_source_srcu); |
| 975 | if (stm) | 1072 | if (stm) |
| 976 | stm_write(stm->data, src->output.master, | 1073 | count = stm_write(stm->data, src->output.master, |
| 977 | src->output.channel + chan, | 1074 | src->output.channel + chan, |
| 978 | buf, count); | 1075 | buf, count); |
| 979 | else | 1076 | else |
| 980 | count = -ENODEV; | 1077 | count = -ENODEV; |
| 981 | 1078 | ||
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c index 3709bef0b21f..310adf57e7a1 100644 --- a/drivers/hwtracing/stm/dummy_stm.c +++ b/drivers/hwtracing/stm/dummy_stm.c | |||
| @@ -40,22 +40,75 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master, | |||
| 40 | return size; | 40 | return size; |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | static struct stm_data dummy_stm = { | 43 | #define DUMMY_STM_MAX 32 |
| 44 | .name = "dummy_stm", | 44 | |
| 45 | .sw_start = 0x0000, | 45 | static struct stm_data dummy_stm[DUMMY_STM_MAX]; |
| 46 | .sw_end = 0xffff, | 46 | |
| 47 | .sw_nchannels = 0xffff, | 47 | static int nr_dummies = 4; |
| 48 | .packet = dummy_stm_packet, | 48 | |
| 49 | }; | 49 | module_param(nr_dummies, int, 0600); |
| 50 | |||
| 51 | static unsigned int dummy_stm_nr; | ||
| 52 | |||
| 53 | static unsigned int fail_mode; | ||
| 54 | |||
| 55 | module_param(fail_mode, int, 0600); | ||
| 56 | |||
| 57 | static int dummy_stm_link(struct stm_data *data, unsigned int master, | ||
| 58 | unsigned int channel) | ||
| 59 | { | ||
| 60 | if (fail_mode && (channel & fail_mode)) | ||
| 61 | return -EINVAL; | ||
| 62 | |||
| 63 | return 0; | ||
| 64 | } | ||
| 50 | 65 | ||
| 51 | static int dummy_stm_init(void) | 66 | static int dummy_stm_init(void) |
| 52 | { | 67 | { |
| 53 | return stm_register_device(NULL, &dummy_stm, THIS_MODULE); | 68 | int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies); |
| 69 | |||
| 70 | if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX) | ||
| 71 | return -EINVAL; | ||
| 72 | |||
| 73 | for (i = 0; i < __nr_dummies; i++) { | ||
| 74 | dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i); | ||
| 75 | if (!dummy_stm[i].name) | ||
| 76 | goto fail_unregister; | ||
| 77 | |||
| 78 | dummy_stm[i].sw_start = 0x0000; | ||
| 79 | dummy_stm[i].sw_end = 0xffff; | ||
| 80 | dummy_stm[i].sw_nchannels = 0xffff; | ||
| 81 | dummy_stm[i].packet = dummy_stm_packet; | ||
| 82 | dummy_stm[i].link = dummy_stm_link; | ||
| 83 | |||
| 84 | ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE); | ||
| 85 | if (ret) | ||
| 86 | goto fail_free; | ||
| 87 | } | ||
| 88 | |||
| 89 | dummy_stm_nr = __nr_dummies; | ||
| 90 | |||
| 91 | return 0; | ||
| 92 | |||
| 93 | fail_unregister: | ||
| 94 | for (i--; i >= 0; i--) { | ||
| 95 | stm_unregister_device(&dummy_stm[i]); | ||
| 96 | fail_free: | ||
| 97 | kfree(dummy_stm[i].name); | ||
| 98 | } | ||
| 99 | |||
| 100 | return ret; | ||
| 101 | |||
| 54 | } | 102 | } |
| 55 | 103 | ||
| 56 | static void dummy_stm_exit(void) | 104 | static void dummy_stm_exit(void) |
| 57 | { | 105 | { |
| 58 | stm_unregister_device(&dummy_stm); | 106 | int i; |
| 107 | |||
| 108 | for (i = 0; i < dummy_stm_nr; i++) { | ||
| 109 | stm_unregister_device(&dummy_stm[i]); | ||
| 110 | kfree(dummy_stm[i].name); | ||
| 111 | } | ||
| 59 | } | 112 | } |
| 60 | 113 | ||
| 61 | module_init(dummy_stm_init); | 114 | module_init(dummy_stm_init); |
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c new file mode 100644 index 000000000000..0133571b506f --- /dev/null +++ b/drivers/hwtracing/stm/heartbeat.c | |||
| @@ -0,0 +1,130 @@ | |||
| 1 | /* | ||
| 2 | * Simple heartbeat STM source driver | ||
| 3 | * Copyright (c) 2016, Intel Corporation. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms and conditions of the GNU General Public License, | ||
| 7 | * version 2, as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * Heartbeat STM source will send repetitive messages over STM devices to a | ||
| 15 | * trace host. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/kernel.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/hrtimer.h> | ||
| 21 | #include <linux/slab.h> | ||
| 22 | #include <linux/stm.h> | ||
| 23 | |||
| 24 | #define STM_HEARTBEAT_MAX 32 | ||
| 25 | |||
| 26 | static int nr_devs = 4; | ||
| 27 | static int interval_ms = 10; | ||
| 28 | |||
| 29 | module_param(nr_devs, int, 0600); | ||
| 30 | module_param(interval_ms, int, 0600); | ||
| 31 | |||
| 32 | static struct stm_heartbeat { | ||
| 33 | struct stm_source_data data; | ||
| 34 | struct hrtimer hrtimer; | ||
| 35 | unsigned int active; | ||
| 36 | } stm_heartbeat[STM_HEARTBEAT_MAX]; | ||
| 37 | |||
| 38 | static unsigned int nr_instances; | ||
| 39 | |||
| 40 | static const char str[] = "heartbeat stm source driver is here to serve you"; | ||
| 41 | |||
| 42 | static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr) | ||
| 43 | { | ||
| 44 | struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat, | ||
| 45 | hrtimer); | ||
| 46 | |||
| 47 | stm_source_write(&heartbeat->data, 0, str, sizeof str); | ||
| 48 | if (heartbeat->active) | ||
| 49 | hrtimer_forward_now(hr, ms_to_ktime(interval_ms)); | ||
| 50 | |||
| 51 | return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART; | ||
| 52 | } | ||
| 53 | |||
| 54 | static int stm_heartbeat_link(struct stm_source_data *data) | ||
| 55 | { | ||
| 56 | struct stm_heartbeat *heartbeat = | ||
| 57 | container_of(data, struct stm_heartbeat, data); | ||
| 58 | |||
| 59 | heartbeat->active = 1; | ||
| 60 | hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms), | ||
| 61 | HRTIMER_MODE_ABS); | ||
| 62 | |||
| 63 | return 0; | ||
| 64 | } | ||
| 65 | |||
| 66 | static void stm_heartbeat_unlink(struct stm_source_data *data) | ||
| 67 | { | ||
| 68 | struct stm_heartbeat *heartbeat = | ||
| 69 | container_of(data, struct stm_heartbeat, data); | ||
| 70 | |||
| 71 | heartbeat->active = 0; | ||
| 72 | hrtimer_cancel(&heartbeat->hrtimer); | ||
| 73 | } | ||
| 74 | |||
| 75 | static int stm_heartbeat_init(void) | ||
| 76 | { | ||
| 77 | int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs); | ||
| 78 | |||
| 79 | if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX) | ||
| 80 | return -EINVAL; | ||
| 81 | |||
| 82 | for (i = 0; i < __nr_instances; i++) { | ||
| 83 | stm_heartbeat[i].data.name = | ||
| 84 | kasprintf(GFP_KERNEL, "heartbeat.%d", i); | ||
| 85 | if (!stm_heartbeat[i].data.name) | ||
| 86 | goto fail_unregister; | ||
| 87 | |||
| 88 | stm_heartbeat[i].data.nr_chans = 1; | ||
| 89 | stm_heartbeat[i].data.link = stm_heartbeat_link; | ||
| 90 | stm_heartbeat[i].data.unlink = stm_heartbeat_unlink; | ||
| 91 | hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC, | ||
| 92 | HRTIMER_MODE_ABS); | ||
| 93 | stm_heartbeat[i].hrtimer.function = | ||
| 94 | stm_heartbeat_hrtimer_handler; | ||
| 95 | |||
| 96 | ret = stm_source_register_device(NULL, &stm_heartbeat[i].data); | ||
| 97 | if (ret) | ||
| 98 | goto fail_free; | ||
| 99 | } | ||
| 100 | |||
| 101 | nr_instances = __nr_instances; | ||
| 102 | |||
| 103 | return 0; | ||
| 104 | |||
| 105 | fail_unregister: | ||
| 106 | for (i--; i >= 0; i--) { | ||
| 107 | stm_source_unregister_device(&stm_heartbeat[i].data); | ||
| 108 | fail_free: | ||
| 109 | kfree(stm_heartbeat[i].data.name); | ||
| 110 | } | ||
| 111 | |||
| 112 | return ret; | ||
| 113 | } | ||
| 114 | |||
| 115 | static void stm_heartbeat_exit(void) | ||
| 116 | { | ||
| 117 | int i; | ||
| 118 | |||
| 119 | for (i = 0; i < nr_instances; i++) { | ||
| 120 | stm_source_unregister_device(&stm_heartbeat[i].data); | ||
| 121 | kfree(stm_heartbeat[i].data.name); | ||
| 122 | } | ||
| 123 | } | ||
| 124 | |||
| 125 | module_init(stm_heartbeat_init); | ||
| 126 | module_exit(stm_heartbeat_exit); | ||
| 127 | |||
| 128 | MODULE_LICENSE("GPL v2"); | ||
| 129 | MODULE_DESCRIPTION("stm_heartbeat driver"); | ||
| 130 | MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>"); | ||
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 11ab6d01adf6..1db189657b2b 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c | |||
| @@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy) | |||
| 272 | { | 272 | { |
| 273 | struct stm_device *stm = policy->stm; | 273 | struct stm_device *stm = policy->stm; |
| 274 | 274 | ||
| 275 | /* | ||
| 276 | * stp_policy_release() will not call here if the policy is already | ||
| 277 | * unbound; other users should not either, as no link exists between | ||
| 278 | * this policy and anything else in that case | ||
| 279 | */ | ||
| 275 | if (WARN_ON_ONCE(!policy->stm)) | 280 | if (WARN_ON_ONCE(!policy->stm)) |
| 276 | return; | 281 | return; |
| 277 | 282 | ||
| 278 | mutex_lock(&stm->policy_mutex); | 283 | lockdep_assert_held(&stm->policy_mutex); |
| 279 | stm->policy = NULL; | ||
| 280 | mutex_unlock(&stm->policy_mutex); | ||
| 281 | 284 | ||
| 285 | stm->policy = NULL; | ||
| 282 | policy->stm = NULL; | 286 | policy->stm = NULL; |
| 283 | 287 | ||
| 284 | stm_put_device(stm); | 288 | stm_put_device(stm); |
| @@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy) | |||
| 287 | static void stp_policy_release(struct config_item *item) | 291 | static void stp_policy_release(struct config_item *item) |
| 288 | { | 292 | { |
| 289 | struct stp_policy *policy = to_stp_policy(item); | 293 | struct stp_policy *policy = to_stp_policy(item); |
| 294 | struct stm_device *stm = policy->stm; | ||
| 290 | 295 | ||
| 296 | /* a policy *can* be unbound and still exist in configfs tree */ | ||
| 297 | if (!stm) | ||
| 298 | return; | ||
| 299 | |||
| 300 | mutex_lock(&stm->policy_mutex); | ||
| 291 | stp_policy_unbind(policy); | 301 | stp_policy_unbind(policy); |
| 302 | mutex_unlock(&stm->policy_mutex); | ||
| 303 | |||
| 292 | kfree(policy); | 304 | kfree(policy); |
| 293 | } | 305 | } |
| 294 | 306 | ||
| @@ -320,10 +332,11 @@ stp_policies_make(struct config_group *group, const char *name) | |||
| 320 | 332 | ||
| 321 | /* | 333 | /* |
| 322 | * node must look like <device_name>.<policy_name>, where | 334 | * node must look like <device_name>.<policy_name>, where |
| 323 | * <device_name> is the name of an existing stm device and | 335 | * <device_name> is the name of an existing stm device; may |
| 324 | * <policy_name> is an arbitrary string | 336 | * contain dots; |
| 337 | * <policy_name> is an arbitrary string; may not contain dots | ||
| 325 | */ | 338 | */ |
| 326 | p = strchr(devname, '.'); | 339 | p = strrchr(devname, '.'); |
| 327 | if (!p) { | 340 | if (!p) { |
| 328 | kfree(devname); | 341 | kfree(devname); |
| 329 | return ERR_PTR(-EINVAL); | 342 | return ERR_PTR(-EINVAL); |
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h index 95ece0292c99..4e8c6926260f 100644 --- a/drivers/hwtracing/stm/stm.h +++ b/drivers/hwtracing/stm/stm.h | |||
| @@ -45,6 +45,7 @@ struct stm_device { | |||
| 45 | int major; | 45 | int major; |
| 46 | unsigned int sw_nmasters; | 46 | unsigned int sw_nmasters; |
| 47 | struct stm_data *data; | 47 | struct stm_data *data; |
| 48 | struct mutex link_mutex; | ||
| 48 | spinlock_t link_lock; | 49 | spinlock_t link_lock; |
| 49 | struct list_head link_list; | 50 | struct list_head link_list; |
| 50 | /* master allocation */ | 51 | /* master allocation */ |
| @@ -56,6 +57,7 @@ struct stm_device { | |||
| 56 | container_of((_d), struct stm_device, dev) | 57 | container_of((_d), struct stm_device, dev) |
| 57 | 58 | ||
| 58 | struct stm_output { | 59 | struct stm_output { |
| 60 | spinlock_t lock; | ||
| 59 | unsigned int master; | 61 | unsigned int master; |
| 60 | unsigned int channel; | 62 | unsigned int channel; |
| 61 | unsigned int nr_chans; | 63 | unsigned int nr_chans; |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 3de93517efe4..14606afbfaa8 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
| @@ -336,7 +336,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p, | |||
| 336 | union ib_gid gid; | 336 | union ib_gid gid; |
| 337 | struct ib_gid_attr gid_attr = {}; | 337 | struct ib_gid_attr gid_attr = {}; |
| 338 | ssize_t ret; | 338 | ssize_t ret; |
| 339 | va_list args; | ||
| 340 | 339 | ||
| 341 | ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, | 340 | ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, |
| 342 | &gid_attr); | 341 | &gid_attr); |
| @@ -348,7 +347,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p, | |||
| 348 | err: | 347 | err: |
| 349 | if (gid_attr.ndev) | 348 | if (gid_attr.ndev) |
| 350 | dev_put(gid_attr.ndev); | 349 | dev_put(gid_attr.ndev); |
| 351 | va_end(args); | ||
| 352 | return ret; | 350 | return ret; |
| 353 | } | 351 | } |
| 354 | 352 | ||
| @@ -722,12 +720,11 @@ static struct attribute_group *get_counter_table(struct ib_device *dev, | |||
| 722 | 720 | ||
| 723 | if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO, | 721 | if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO, |
| 724 | &cpi, 40, sizeof(cpi)) >= 0) { | 722 | &cpi, 40, sizeof(cpi)) >= 0) { |
| 725 | 723 | if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH) | |
| 726 | if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH) | ||
| 727 | /* We have extended counters */ | 724 | /* We have extended counters */ |
| 728 | return &pma_group_ext; | 725 | return &pma_group_ext; |
| 729 | 726 | ||
| 730 | if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF) | 727 | if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF) |
| 731 | /* But not the IETF ones */ | 728 | /* But not the IETF ones */ |
| 732 | return &pma_group_noietf; | 729 | return &pma_group_noietf; |
| 733 | } | 730 | } |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 9116bc3988a6..34cb8e87c7b8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -270,8 +270,10 @@ static int sq_overhead(enum ib_qp_type qp_type) | |||
| 270 | /* fall through */ | 270 | /* fall through */ |
| 271 | case IB_QPT_RC: | 271 | case IB_QPT_RC: |
| 272 | size += sizeof(struct mlx5_wqe_ctrl_seg) + | 272 | size += sizeof(struct mlx5_wqe_ctrl_seg) + |
| 273 | sizeof(struct mlx5_wqe_atomic_seg) + | 273 | max(sizeof(struct mlx5_wqe_atomic_seg) + |
| 274 | sizeof(struct mlx5_wqe_raddr_seg); | 274 | sizeof(struct mlx5_wqe_raddr_seg), |
| 275 | sizeof(struct mlx5_wqe_umr_ctrl_seg) + | ||
| 276 | sizeof(struct mlx5_mkey_seg)); | ||
| 275 | break; | 277 | break; |
| 276 | 278 | ||
| 277 | case IB_QPT_XRC_TGT: | 279 | case IB_QPT_XRC_TGT: |
| @@ -279,9 +281,9 @@ static int sq_overhead(enum ib_qp_type qp_type) | |||
| 279 | 281 | ||
| 280 | case IB_QPT_UC: | 282 | case IB_QPT_UC: |
| 281 | size += sizeof(struct mlx5_wqe_ctrl_seg) + | 283 | size += sizeof(struct mlx5_wqe_ctrl_seg) + |
| 282 | sizeof(struct mlx5_wqe_raddr_seg) + | 284 | max(sizeof(struct mlx5_wqe_raddr_seg), |
| 283 | sizeof(struct mlx5_wqe_umr_ctrl_seg) + | 285 | sizeof(struct mlx5_wqe_umr_ctrl_seg) + |
| 284 | sizeof(struct mlx5_mkey_seg); | 286 | sizeof(struct mlx5_mkey_seg)); |
| 285 | break; | 287 | break; |
| 286 | 288 | ||
| 287 | case IB_QPT_UD: | 289 | case IB_QPT_UD: |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 573849354cb9..f38743018cb4 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c | |||
| @@ -228,6 +228,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) | |||
| 228 | 228 | ||
| 229 | ocrdma_alloc_pd_pool(dev); | 229 | ocrdma_alloc_pd_pool(dev); |
| 230 | 230 | ||
| 231 | if (!ocrdma_alloc_stats_resources(dev)) { | ||
| 232 | pr_err("%s: stats resource allocation failed\n", __func__); | ||
| 233 | goto alloc_err; | ||
| 234 | } | ||
| 235 | |||
| 231 | spin_lock_init(&dev->av_tbl.lock); | 236 | spin_lock_init(&dev->av_tbl.lock); |
| 232 | spin_lock_init(&dev->flush_q_lock); | 237 | spin_lock_init(&dev->flush_q_lock); |
| 233 | return 0; | 238 | return 0; |
| @@ -238,6 +243,7 @@ alloc_err: | |||
| 238 | 243 | ||
| 239 | static void ocrdma_free_resources(struct ocrdma_dev *dev) | 244 | static void ocrdma_free_resources(struct ocrdma_dev *dev) |
| 240 | { | 245 | { |
| 246 | ocrdma_release_stats_resources(dev); | ||
| 241 | kfree(dev->stag_arr); | 247 | kfree(dev->stag_arr); |
| 242 | kfree(dev->qp_tbl); | 248 | kfree(dev->qp_tbl); |
| 243 | kfree(dev->cq_tbl); | 249 | kfree(dev->cq_tbl); |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 86c303a620c1..255f774080a4 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c | |||
| @@ -64,10 +64,11 @@ static int ocrdma_add_stat(char *start, char *pcur, | |||
| 64 | return cpy_len; | 64 | return cpy_len; |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) | 67 | bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev) |
| 68 | { | 68 | { |
| 69 | struct stats_mem *mem = &dev->stats_mem; | 69 | struct stats_mem *mem = &dev->stats_mem; |
| 70 | 70 | ||
| 71 | mutex_init(&dev->stats_lock); | ||
| 71 | /* Alloc mbox command mem*/ | 72 | /* Alloc mbox command mem*/ |
| 72 | mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), | 73 | mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), |
| 73 | sizeof(struct ocrdma_rdma_stats_resp)); | 74 | sizeof(struct ocrdma_rdma_stats_resp)); |
| @@ -91,13 +92,14 @@ static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) | |||
| 91 | return true; | 92 | return true; |
| 92 | } | 93 | } |
| 93 | 94 | ||
| 94 | static void ocrdma_release_stats_mem(struct ocrdma_dev *dev) | 95 | void ocrdma_release_stats_resources(struct ocrdma_dev *dev) |
| 95 | { | 96 | { |
| 96 | struct stats_mem *mem = &dev->stats_mem; | 97 | struct stats_mem *mem = &dev->stats_mem; |
| 97 | 98 | ||
| 98 | if (mem->va) | 99 | if (mem->va) |
| 99 | dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, | 100 | dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, |
| 100 | mem->va, mem->pa); | 101 | mem->va, mem->pa); |
| 102 | mem->va = NULL; | ||
| 101 | kfree(mem->debugfs_mem); | 103 | kfree(mem->debugfs_mem); |
| 102 | } | 104 | } |
| 103 | 105 | ||
| @@ -838,15 +840,9 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev) | |||
| 838 | &dev->reset_stats, &ocrdma_dbg_ops)) | 840 | &dev->reset_stats, &ocrdma_dbg_ops)) |
| 839 | goto err; | 841 | goto err; |
| 840 | 842 | ||
| 841 | /* Now create dma_mem for stats mbx command */ | ||
| 842 | if (!ocrdma_alloc_stats_mem(dev)) | ||
| 843 | goto err; | ||
| 844 | |||
| 845 | mutex_init(&dev->stats_lock); | ||
| 846 | 843 | ||
| 847 | return; | 844 | return; |
| 848 | err: | 845 | err: |
| 849 | ocrdma_release_stats_mem(dev); | ||
| 850 | debugfs_remove_recursive(dev->dir); | 846 | debugfs_remove_recursive(dev->dir); |
| 851 | dev->dir = NULL; | 847 | dev->dir = NULL; |
| 852 | } | 848 | } |
| @@ -855,9 +851,7 @@ void ocrdma_rem_port_stats(struct ocrdma_dev *dev) | |||
| 855 | { | 851 | { |
| 856 | if (!dev->dir) | 852 | if (!dev->dir) |
| 857 | return; | 853 | return; |
| 858 | debugfs_remove(dev->dir); | 854 | debugfs_remove_recursive(dev->dir); |
| 859 | mutex_destroy(&dev->stats_lock); | ||
| 860 | ocrdma_release_stats_mem(dev); | ||
| 861 | } | 855 | } |
| 862 | 856 | ||
| 863 | void ocrdma_init_debugfs(void) | 857 | void ocrdma_init_debugfs(void) |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h index c9e58d04c7b8..bba1fec4f11f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h | |||
| @@ -65,6 +65,8 @@ enum OCRDMA_STATS_TYPE { | |||
| 65 | 65 | ||
| 66 | void ocrdma_rem_debugfs(void); | 66 | void ocrdma_rem_debugfs(void); |
| 67 | void ocrdma_init_debugfs(void); | 67 | void ocrdma_init_debugfs(void); |
| 68 | bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev); | ||
| 69 | void ocrdma_release_stats_resources(struct ocrdma_dev *dev); | ||
| 68 | void ocrdma_rem_port_stats(struct ocrdma_dev *dev); | 70 | void ocrdma_rem_port_stats(struct ocrdma_dev *dev); |
| 69 | void ocrdma_add_port_stats(struct ocrdma_dev *dev); | 71 | void ocrdma_add_port_stats(struct ocrdma_dev *dev); |
| 70 | int ocrdma_pma_counters(struct ocrdma_dev *dev, | 72 | int ocrdma_pma_counters(struct ocrdma_dev *dev, |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index d4c687b548d8..37620b4baafb 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
| @@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, | |||
| 125 | IB_DEVICE_SYS_IMAGE_GUID | | 125 | IB_DEVICE_SYS_IMAGE_GUID | |
| 126 | IB_DEVICE_LOCAL_DMA_LKEY | | 126 | IB_DEVICE_LOCAL_DMA_LKEY | |
| 127 | IB_DEVICE_MEM_MGT_EXTENSIONS; | 127 | IB_DEVICE_MEM_MGT_EXTENSIONS; |
| 128 | attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); | 128 | attr->max_sge = dev->attr.max_send_sge; |
| 129 | attr->max_sge_rd = 0; | 129 | attr->max_sge_rd = attr->max_sge; |
| 130 | attr->max_cq = dev->attr.max_cq; | 130 | attr->max_cq = dev->attr.max_cq; |
| 131 | attr->max_cqe = dev->attr.max_cqe; | 131 | attr->max_cqe = dev->attr.max_cqe; |
| 132 | attr->max_mr = dev->attr.max_mr; | 132 | attr->max_mr = dev->attr.max_mr; |
| @@ -2726,8 +2726,7 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe) | |||
| 2726 | OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; | 2726 | OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; |
| 2727 | ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & | 2727 | ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & |
| 2728 | OCRDMA_CQE_SRCQP_MASK; | 2728 | OCRDMA_CQE_SRCQP_MASK; |
| 2729 | ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & | 2729 | ibwc->pkey_index = 0; |
| 2730 | OCRDMA_CQE_PKEY_MASK; | ||
| 2731 | ibwc->wc_flags = IB_WC_GRH; | 2730 | ibwc->wc_flags = IB_WC_GRH; |
| 2732 | ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> | 2731 | ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> |
| 2733 | OCRDMA_CQE_UD_XFER_LEN_SHIFT); | 2732 | OCRDMA_CQE_UD_XFER_LEN_SHIFT); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 5ea0c14070d1..fa9c42ff1fb0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 245 | skb_reset_mac_header(skb); | 245 | skb_reset_mac_header(skb); |
| 246 | skb_pull(skb, IPOIB_ENCAP_LEN); | 246 | skb_pull(skb, IPOIB_ENCAP_LEN); |
| 247 | 247 | ||
| 248 | skb->truesize = SKB_TRUESIZE(skb->len); | ||
| 249 | |||
| 250 | ++dev->stats.rx_packets; | 248 | ++dev->stats.rx_packets; |
| 251 | dev->stats.rx_bytes += skb->len; | 249 | dev->stats.rx_bytes += skb->len; |
| 252 | 250 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 050dfa175d16..25889311b1e9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -456,7 +456,10 @@ out_locked: | |||
| 456 | return status; | 456 | return status; |
| 457 | } | 457 | } |
| 458 | 458 | ||
| 459 | static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) | 459 | /* |
| 460 | * Caller must hold 'priv->lock' | ||
| 461 | */ | ||
| 462 | static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) | ||
| 460 | { | 463 | { |
| 461 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 464 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 462 | struct ib_sa_multicast *multicast; | 465 | struct ib_sa_multicast *multicast; |
| @@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) | |||
| 466 | ib_sa_comp_mask comp_mask; | 469 | ib_sa_comp_mask comp_mask; |
| 467 | int ret = 0; | 470 | int ret = 0; |
| 468 | 471 | ||
| 472 | if (!priv->broadcast || | ||
| 473 | !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) | ||
| 474 | return -EINVAL; | ||
| 475 | |||
| 469 | ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); | 476 | ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); |
| 470 | 477 | ||
| 471 | rec.mgid = mcast->mcmember.mgid; | 478 | rec.mgid = mcast->mcmember.mgid; |
| @@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) | |||
| 525 | rec.join_state = 4; | 532 | rec.join_state = 4; |
| 526 | #endif | 533 | #endif |
| 527 | } | 534 | } |
| 535 | spin_unlock_irq(&priv->lock); | ||
| 528 | 536 | ||
| 529 | multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, | 537 | multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, |
| 530 | &rec, comp_mask, GFP_KERNEL, | 538 | &rec, comp_mask, GFP_KERNEL, |
| 531 | ipoib_mcast_join_complete, mcast); | 539 | ipoib_mcast_join_complete, mcast); |
| 540 | spin_lock_irq(&priv->lock); | ||
| 532 | if (IS_ERR(multicast)) { | 541 | if (IS_ERR(multicast)) { |
| 533 | ret = PTR_ERR(multicast); | 542 | ret = PTR_ERR(multicast); |
| 534 | ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); | 543 | ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); |
| 535 | spin_lock_irq(&priv->lock); | ||
| 536 | /* Requeue this join task with a backoff delay */ | 544 | /* Requeue this join task with a backoff delay */ |
| 537 | __ipoib_mcast_schedule_join_thread(priv, mcast, 1); | 545 | __ipoib_mcast_schedule_join_thread(priv, mcast, 1); |
| 538 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | 546 | clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); |
| 539 | spin_unlock_irq(&priv->lock); | 547 | spin_unlock_irq(&priv->lock); |
| 540 | complete(&mcast->done); | 548 | complete(&mcast->done); |
| 549 | spin_lock_irq(&priv->lock); | ||
| 541 | } | 550 | } |
| 551 | return 0; | ||
| 542 | } | 552 | } |
| 543 | 553 | ||
| 544 | void ipoib_mcast_join_task(struct work_struct *work) | 554 | void ipoib_mcast_join_task(struct work_struct *work) |
| @@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
| 620 | /* Found the next unjoined group */ | 630 | /* Found the next unjoined group */ |
| 621 | init_completion(&mcast->done); | 631 | init_completion(&mcast->done); |
| 622 | set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | 632 | set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); |
| 623 | spin_unlock_irq(&priv->lock); | 633 | if (ipoib_mcast_join(dev, mcast)) { |
| 624 | ipoib_mcast_join(dev, mcast); | 634 | spin_unlock_irq(&priv->lock); |
| 625 | spin_lock_irq(&priv->lock); | 635 | return; |
| 636 | } | ||
| 626 | } else if (!delay_until || | 637 | } else if (!delay_until || |
| 627 | time_before(mcast->delay_until, delay_until)) | 638 | time_before(mcast->delay_until, delay_until)) |
| 628 | delay_until = mcast->delay_until; | 639 | delay_until = mcast->delay_until; |
| @@ -641,10 +652,9 @@ out: | |||
| 641 | if (mcast) { | 652 | if (mcast) { |
| 642 | init_completion(&mcast->done); | 653 | init_completion(&mcast->done); |
| 643 | set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); | 654 | set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); |
| 655 | ipoib_mcast_join(dev, mcast); | ||
| 644 | } | 656 | } |
| 645 | spin_unlock_irq(&priv->lock); | 657 | spin_unlock_irq(&priv->lock); |
| 646 | if (mcast) | ||
| 647 | ipoib_mcast_join(dev, mcast); | ||
| 648 | } | 658 | } |
| 649 | 659 | ||
| 650 | int ipoib_mcast_start_thread(struct net_device *dev) | 660 | int ipoib_mcast_start_thread(struct net_device *dev) |
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 6727954ab74b..e8a84d12b7ff 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
| @@ -1207,7 +1207,6 @@ static void xpad_led_disconnect(struct usb_xpad *xpad) | |||
| 1207 | #else | 1207 | #else |
| 1208 | static int xpad_led_probe(struct usb_xpad *xpad) { return 0; } | 1208 | static int xpad_led_probe(struct usb_xpad *xpad) { return 0; } |
| 1209 | static void xpad_led_disconnect(struct usb_xpad *xpad) { } | 1209 | static void xpad_led_disconnect(struct usb_xpad *xpad) { } |
| 1210 | static void xpad_identify_controller(struct usb_xpad *xpad) { } | ||
| 1211 | #endif | 1210 | #endif |
| 1212 | 1211 | ||
| 1213 | static int xpad_start_input(struct usb_xpad *xpad) | 1212 | static int xpad_start_input(struct usb_xpad *xpad) |
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c index 4d446d5085aa..c01a1d648f9f 100644 --- a/drivers/input/keyboard/adp5589-keys.c +++ b/drivers/input/keyboard/adp5589-keys.c | |||
| @@ -235,7 +235,7 @@ struct adp5589_kpad { | |||
| 235 | unsigned short gpimapsize; | 235 | unsigned short gpimapsize; |
| 236 | unsigned extend_cfg; | 236 | unsigned extend_cfg; |
| 237 | bool is_adp5585; | 237 | bool is_adp5585; |
| 238 | bool adp5585_support_row5; | 238 | bool support_row5; |
| 239 | #ifdef CONFIG_GPIOLIB | 239 | #ifdef CONFIG_GPIOLIB |
| 240 | unsigned char gpiomap[ADP5589_MAXGPIO]; | 240 | unsigned char gpiomap[ADP5589_MAXGPIO]; |
| 241 | bool export_gpio; | 241 | bool export_gpio; |
| @@ -485,7 +485,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad, | |||
| 485 | if (kpad->extend_cfg & C4_EXTEND_CFG) | 485 | if (kpad->extend_cfg & C4_EXTEND_CFG) |
| 486 | pin_used[kpad->var->c4_extend_cfg] = true; | 486 | pin_used[kpad->var->c4_extend_cfg] = true; |
| 487 | 487 | ||
| 488 | if (!kpad->adp5585_support_row5) | 488 | if (!kpad->support_row5) |
| 489 | pin_used[5] = true; | 489 | pin_used[5] = true; |
| 490 | 490 | ||
| 491 | for (i = 0; i < kpad->var->maxgpio; i++) | 491 | for (i = 0; i < kpad->var->maxgpio; i++) |
| @@ -884,12 +884,13 @@ static int adp5589_probe(struct i2c_client *client, | |||
| 884 | 884 | ||
| 885 | switch (id->driver_data) { | 885 | switch (id->driver_data) { |
| 886 | case ADP5585_02: | 886 | case ADP5585_02: |
| 887 | kpad->adp5585_support_row5 = true; | 887 | kpad->support_row5 = true; |
| 888 | case ADP5585_01: | 888 | case ADP5585_01: |
| 889 | kpad->is_adp5585 = true; | 889 | kpad->is_adp5585 = true; |
| 890 | kpad->var = &const_adp5585; | 890 | kpad->var = &const_adp5585; |
| 891 | break; | 891 | break; |
| 892 | case ADP5589: | 892 | case ADP5589: |
| 893 | kpad->support_row5 = true; | ||
| 893 | kpad->var = &const_adp5589; | 894 | kpad->var = &const_adp5589; |
| 894 | break; | 895 | break; |
| 895 | } | 896 | } |
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 378db10001df..4401be225d64 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c | |||
| @@ -304,8 +304,10 @@ static int cap11xx_init_leds(struct device *dev, | |||
| 304 | led->cdev.brightness = LED_OFF; | 304 | led->cdev.brightness = LED_OFF; |
| 305 | 305 | ||
| 306 | error = of_property_read_u32(child, "reg", ®); | 306 | error = of_property_read_u32(child, "reg", ®); |
| 307 | if (error != 0 || reg >= num_leds) | 307 | if (error != 0 || reg >= num_leds) { |
| 308 | of_node_put(child); | ||
| 308 | return -EINVAL; | 309 | return -EINVAL; |
| 310 | } | ||
| 309 | 311 | ||
| 310 | led->reg = reg; | 312 | led->reg = reg; |
| 311 | led->priv = priv; | 313 | led->priv = priv; |
| @@ -313,8 +315,10 @@ static int cap11xx_init_leds(struct device *dev, | |||
| 313 | INIT_WORK(&led->work, cap11xx_led_work); | 315 | INIT_WORK(&led->work, cap11xx_led_work); |
| 314 | 316 | ||
| 315 | error = devm_led_classdev_register(dev, &led->cdev); | 317 | error = devm_led_classdev_register(dev, &led->cdev); |
| 316 | if (error) | 318 | if (error) { |
| 319 | of_node_put(child); | ||
| 317 | return error; | 320 | return error; |
| 321 | } | ||
| 318 | 322 | ||
| 319 | priv->num_leds++; | 323 | priv->num_leds++; |
| 320 | led++; | 324 | led++; |
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index d6d16fa78281..1f2337abcf2f 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
| @@ -733,7 +733,7 @@ config INPUT_XEN_KBDDEV_FRONTEND | |||
| 733 | module will be called xen-kbdfront. | 733 | module will be called xen-kbdfront. |
| 734 | 734 | ||
| 735 | config INPUT_SIRFSOC_ONKEY | 735 | config INPUT_SIRFSOC_ONKEY |
| 736 | bool "CSR SiRFSoC power on/off/suspend key support" | 736 | tristate "CSR SiRFSoC power on/off/suspend key support" |
| 737 | depends on ARCH_SIRF && OF | 737 | depends on ARCH_SIRF && OF |
| 738 | default y | 738 | default y |
| 739 | help | 739 | help |
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c index 9d5b89befe6f..ed7237f19539 100644 --- a/drivers/input/misc/sirfsoc-onkey.c +++ b/drivers/input/misc/sirfsoc-onkey.c | |||
| @@ -101,7 +101,7 @@ static void sirfsoc_pwrc_close(struct input_dev *input) | |||
| 101 | static const struct of_device_id sirfsoc_pwrc_of_match[] = { | 101 | static const struct of_device_id sirfsoc_pwrc_of_match[] = { |
| 102 | { .compatible = "sirf,prima2-pwrc" }, | 102 | { .compatible = "sirf,prima2-pwrc" }, |
| 103 | {}, | 103 | {}, |
| 104 | } | 104 | }; |
| 105 | MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match); | 105 | MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match); |
| 106 | 106 | ||
| 107 | static int sirfsoc_pwrc_probe(struct platform_device *pdev) | 107 | static int sirfsoc_pwrc_probe(struct platform_device *pdev) |
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c index e272f06258ce..a3f0f5a47490 100644 --- a/drivers/input/mouse/vmmouse.c +++ b/drivers/input/mouse/vmmouse.c | |||
| @@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse) | |||
| 458 | priv->abs_dev = abs_dev; | 458 | priv->abs_dev = abs_dev; |
| 459 | psmouse->private = priv; | 459 | psmouse->private = priv; |
| 460 | 460 | ||
| 461 | input_set_capability(rel_dev, EV_REL, REL_WHEEL); | ||
| 462 | |||
| 463 | /* Set up and register absolute device */ | 461 | /* Set up and register absolute device */ |
| 464 | snprintf(priv->phys, sizeof(priv->phys), "%s/input1", | 462 | snprintf(priv->phys, sizeof(priv->phys), "%s/input1", |
| 465 | psmouse->ps2dev.serio->phys); | 463 | psmouse->ps2dev.serio->phys); |
| @@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse) | |||
| 475 | abs_dev->id.version = psmouse->model; | 473 | abs_dev->id.version = psmouse->model; |
| 476 | abs_dev->dev.parent = &psmouse->ps2dev.serio->dev; | 474 | abs_dev->dev.parent = &psmouse->ps2dev.serio->dev; |
| 477 | 475 | ||
| 478 | error = input_register_device(priv->abs_dev); | ||
| 479 | if (error) | ||
| 480 | goto init_fail; | ||
| 481 | |||
| 482 | /* Set absolute device capabilities */ | 476 | /* Set absolute device capabilities */ |
| 483 | input_set_capability(abs_dev, EV_KEY, BTN_LEFT); | 477 | input_set_capability(abs_dev, EV_KEY, BTN_LEFT); |
| 484 | input_set_capability(abs_dev, EV_KEY, BTN_RIGHT); | 478 | input_set_capability(abs_dev, EV_KEY, BTN_RIGHT); |
| @@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse) | |||
| 488 | input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0); | 482 | input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0); |
| 489 | input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0); | 483 | input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0); |
| 490 | 484 | ||
| 485 | error = input_register_device(priv->abs_dev); | ||
| 486 | if (error) | ||
| 487 | goto init_fail; | ||
| 488 | |||
| 489 | /* Add wheel capability to the relative device */ | ||
| 490 | input_set_capability(rel_dev, EV_REL, REL_WHEEL); | ||
| 491 | |||
| 491 | psmouse->protocol_handler = vmmouse_process_byte; | 492 | psmouse->protocol_handler = vmmouse_process_byte; |
| 492 | psmouse->disconnect = vmmouse_disconnect; | 493 | psmouse->disconnect = vmmouse_disconnect; |
| 493 | psmouse->reconnect = vmmouse_reconnect; | 494 | psmouse->reconnect = vmmouse_reconnect; |
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 8f828975ab10..1ca7f551e2da 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c | |||
| @@ -134,7 +134,7 @@ static void serio_find_driver(struct serio *serio) | |||
| 134 | int error; | 134 | int error; |
| 135 | 135 | ||
| 136 | error = device_attach(&serio->dev); | 136 | error = device_attach(&serio->dev); |
| 137 | if (error < 0) | 137 | if (error < 0 && error != -EPROBE_DEFER) |
| 138 | dev_warn(&serio->dev, | 138 | dev_warn(&serio->dev, |
| 139 | "device_attach() failed for %s (%s), error: %d\n", | 139 | "device_attach() failed for %s (%s), error: %d\n", |
| 140 | serio->phys, serio->name, error); | 140 | serio->phys, serio->name, error); |
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c index 5d4903a402cc..69828d015d45 100644 --- a/drivers/input/touchscreen/colibri-vf50-ts.c +++ b/drivers/input/touchscreen/colibri-vf50-ts.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
| 22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
| 23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
| 24 | #include <linux/of.h> | ||
| 24 | #include <linux/pinctrl/consumer.h> | 25 | #include <linux/pinctrl/consumer.h> |
| 25 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
| 26 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 0b0f8c17f3f7..23fbe382da8b 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c | |||
| @@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev, | |||
| 822 | int error; | 822 | int error; |
| 823 | 823 | ||
| 824 | error = device_property_read_u32(dev, "threshold", &val); | 824 | error = device_property_read_u32(dev, "threshold", &val); |
| 825 | if (!error) | 825 | if (!error) { |
| 826 | reg_addr->reg_threshold = val; | 826 | edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val); |
| 827 | tsdata->threshold = val; | ||
| 828 | } | ||
| 827 | 829 | ||
| 828 | error = device_property_read_u32(dev, "gain", &val); | 830 | error = device_property_read_u32(dev, "gain", &val); |
| 829 | if (!error) | 831 | if (!error) { |
| 830 | reg_addr->reg_gain = val; | 832 | edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val); |
| 833 | tsdata->gain = val; | ||
| 834 | } | ||
| 831 | 835 | ||
| 832 | error = device_property_read_u32(dev, "offset", &val); | 836 | error = device_property_read_u32(dev, "offset", &val); |
| 833 | if (!error) | 837 | if (!error) { |
| 834 | reg_addr->reg_offset = val; | 838 | edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val); |
| 839 | tsdata->offset = val; | ||
| 840 | } | ||
| 835 | } | 841 | } |
| 836 | 842 | ||
| 837 | static void | 843 | static void |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 3447549fcc93..0a73632b28d5 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -66,7 +66,10 @@ struct its_node { | |||
| 66 | unsigned long phys_base; | 66 | unsigned long phys_base; |
| 67 | struct its_cmd_block *cmd_base; | 67 | struct its_cmd_block *cmd_base; |
| 68 | struct its_cmd_block *cmd_write; | 68 | struct its_cmd_block *cmd_write; |
| 69 | void *tables[GITS_BASER_NR_REGS]; | 69 | struct { |
| 70 | void *base; | ||
| 71 | u32 order; | ||
| 72 | } tables[GITS_BASER_NR_REGS]; | ||
| 70 | struct its_collection *collections; | 73 | struct its_collection *collections; |
| 71 | struct list_head its_device_list; | 74 | struct list_head its_device_list; |
| 72 | u64 flags; | 75 | u64 flags; |
| @@ -807,9 +810,10 @@ static void its_free_tables(struct its_node *its) | |||
| 807 | int i; | 810 | int i; |
| 808 | 811 | ||
| 809 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { | 812 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
| 810 | if (its->tables[i]) { | 813 | if (its->tables[i].base) { |
| 811 | free_page((unsigned long)its->tables[i]); | 814 | free_pages((unsigned long)its->tables[i].base, |
| 812 | its->tables[i] = NULL; | 815 | its->tables[i].order); |
| 816 | its->tables[i].base = NULL; | ||
| 813 | } | 817 | } |
| 814 | } | 818 | } |
| 815 | } | 819 | } |
| @@ -890,7 +894,8 @@ retry_alloc_baser: | |||
| 890 | goto out_free; | 894 | goto out_free; |
| 891 | } | 895 | } |
| 892 | 896 | ||
| 893 | its->tables[i] = base; | 897 | its->tables[i].base = base; |
| 898 | its->tables[i].order = order; | ||
| 894 | 899 | ||
| 895 | retry_baser: | 900 | retry_baser: |
| 896 | val = (virt_to_phys(base) | | 901 | val = (virt_to_phys(base) | |
| @@ -940,7 +945,7 @@ retry_baser: | |||
| 940 | * something is horribly wrong... | 945 | * something is horribly wrong... |
| 941 | */ | 946 | */ |
| 942 | free_pages((unsigned long)base, order); | 947 | free_pages((unsigned long)base, order); |
| 943 | its->tables[i] = NULL; | 948 | its->tables[i].base = NULL; |
| 944 | 949 | ||
| 945 | switch (psz) { | 950 | switch (psz) { |
| 946 | case SZ_16K: | 951 | case SZ_16K: |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 911758c056c1..8f9ebf714e2b 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -384,9 +384,6 @@ static struct irq_chip gic_chip = { | |||
| 384 | .irq_unmask = gic_unmask_irq, | 384 | .irq_unmask = gic_unmask_irq, |
| 385 | .irq_eoi = gic_eoi_irq, | 385 | .irq_eoi = gic_eoi_irq, |
| 386 | .irq_set_type = gic_set_type, | 386 | .irq_set_type = gic_set_type, |
| 387 | #ifdef CONFIG_SMP | ||
| 388 | .irq_set_affinity = gic_set_affinity, | ||
| 389 | #endif | ||
| 390 | .irq_get_irqchip_state = gic_irq_get_irqchip_state, | 387 | .irq_get_irqchip_state = gic_irq_get_irqchip_state, |
| 391 | .irq_set_irqchip_state = gic_irq_set_irqchip_state, | 388 | .irq_set_irqchip_state = gic_irq_set_irqchip_state, |
| 392 | .flags = IRQCHIP_SET_TYPE_MASKED | | 389 | .flags = IRQCHIP_SET_TYPE_MASKED | |
| @@ -400,9 +397,6 @@ static struct irq_chip gic_eoimode1_chip = { | |||
| 400 | .irq_unmask = gic_unmask_irq, | 397 | .irq_unmask = gic_unmask_irq, |
| 401 | .irq_eoi = gic_eoimode1_eoi_irq, | 398 | .irq_eoi = gic_eoimode1_eoi_irq, |
| 402 | .irq_set_type = gic_set_type, | 399 | .irq_set_type = gic_set_type, |
| 403 | #ifdef CONFIG_SMP | ||
| 404 | .irq_set_affinity = gic_set_affinity, | ||
| 405 | #endif | ||
| 406 | .irq_get_irqchip_state = gic_irq_get_irqchip_state, | 400 | .irq_get_irqchip_state = gic_irq_get_irqchip_state, |
| 407 | .irq_set_irqchip_state = gic_irq_set_irqchip_state, | 401 | .irq_set_irqchip_state = gic_irq_set_irqchip_state, |
| 408 | .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, | 402 | .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, |
| @@ -443,7 +437,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic) | |||
| 443 | u32 bypass = 0; | 437 | u32 bypass = 0; |
| 444 | u32 mode = 0; | 438 | u32 mode = 0; |
| 445 | 439 | ||
| 446 | if (static_key_true(&supports_deactivate)) | 440 | if (gic == &gic_data[0] && static_key_true(&supports_deactivate)) |
| 447 | mode = GIC_CPU_CTRL_EOImodeNS; | 441 | mode = GIC_CPU_CTRL_EOImodeNS; |
| 448 | 442 | ||
| 449 | /* | 443 | /* |
| @@ -1039,6 +1033,11 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, | |||
| 1039 | gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr); | 1033 | gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr); |
| 1040 | } | 1034 | } |
| 1041 | 1035 | ||
| 1036 | #ifdef CONFIG_SMP | ||
| 1037 | if (gic_nr == 0) | ||
| 1038 | gic->chip.irq_set_affinity = gic_set_affinity; | ||
| 1039 | #endif | ||
| 1040 | |||
| 1042 | #ifdef CONFIG_GIC_NON_BANKED | 1041 | #ifdef CONFIG_GIC_NON_BANKED |
| 1043 | if (percpu_offset) { /* Frankein-GIC without banked registers... */ | 1042 | if (percpu_offset) { /* Frankein-GIC without banked registers... */ |
| 1044 | unsigned int cpu; | 1043 | unsigned int cpu; |
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c index 0704362f4c82..376b28074e0d 100644 --- a/drivers/irqchip/irq-sun4i.c +++ b/drivers/irqchip/irq-sun4i.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
| 23 | 23 | ||
| 24 | #include <asm/exception.h> | 24 | #include <asm/exception.h> |
| 25 | #include <asm/mach/irq.h> | ||
| 26 | 25 | ||
| 27 | #define SUN4I_IRQ_VECTOR_REG 0x00 | 26 | #define SUN4I_IRQ_VECTOR_REG 0x00 |
| 28 | #define SUN4I_IRQ_PROTECTION_REG 0x08 | 27 | #define SUN4I_IRQ_PROTECTION_REG 0x08 |
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index e6e4bacb09ee..12099b09a9a7 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c | |||
| @@ -2048,6 +2048,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high) | |||
| 2048 | 2048 | ||
| 2049 | return 0; | 2049 | return 0; |
| 2050 | } | 2050 | } |
| 2051 | EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon); | ||
| 2051 | 2052 | ||
| 2052 | static int config_hot_period(u16 val) | 2053 | static int config_hot_period(u16 val) |
| 2053 | { | 2054 | { |
| @@ -2074,11 +2075,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k) | |||
| 2074 | 2075 | ||
| 2075 | return config_hot_period(cycles32k); | 2076 | return config_hot_period(cycles32k); |
| 2076 | } | 2077 | } |
| 2078 | EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense); | ||
| 2077 | 2079 | ||
| 2078 | int db8500_prcmu_stop_temp_sense(void) | 2080 | int db8500_prcmu_stop_temp_sense(void) |
| 2079 | { | 2081 | { |
| 2080 | return config_hot_period(0xFFFF); | 2082 | return config_hot_period(0xFFFF); |
| 2081 | } | 2083 | } |
| 2084 | EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense); | ||
| 2082 | 2085 | ||
| 2083 | static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3) | 2086 | static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3) |
| 2084 | { | 2087 | { |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index f0ba78289504..a216b4667742 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
| @@ -440,7 +440,7 @@ config ARM_CHARLCD | |||
| 440 | still useful. | 440 | still useful. |
| 441 | 441 | ||
| 442 | config BMP085 | 442 | config BMP085 |
| 443 | bool | 443 | tristate |
| 444 | depends on SYSFS | 444 | depends on SYSFS |
| 445 | 445 | ||
| 446 | config BMP085_I2C | 446 | config BMP085_I2C |
| @@ -470,7 +470,7 @@ config BMP085_SPI | |||
| 470 | config PCH_PHUB | 470 | config PCH_PHUB |
| 471 | tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" | 471 | tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB" |
| 472 | select GENERIC_NET_UTILS | 472 | select GENERIC_NET_UTILS |
| 473 | depends on PCI && (X86_32 || COMPILE_TEST) | 473 | depends on PCI && (X86_32 || MIPS || COMPILE_TEST) |
| 474 | help | 474 | help |
| 475 | This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of | 475 | This driver is for PCH(Platform controller Hub) PHUB(Packet Hub) of |
| 476 | Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded | 476 | Intel Topcliff which is an IOH(Input/Output Hub) for x86 embedded |
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c index a3e789b85cc8..dfb72ecfa604 100644 --- a/drivers/misc/apds990x.c +++ b/drivers/misc/apds990x.c | |||
| @@ -1215,7 +1215,7 @@ static int apds990x_remove(struct i2c_client *client) | |||
| 1215 | #ifdef CONFIG_PM_SLEEP | 1215 | #ifdef CONFIG_PM_SLEEP |
| 1216 | static int apds990x_suspend(struct device *dev) | 1216 | static int apds990x_suspend(struct device *dev) |
| 1217 | { | 1217 | { |
| 1218 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1218 | struct i2c_client *client = to_i2c_client(dev); |
| 1219 | struct apds990x_chip *chip = i2c_get_clientdata(client); | 1219 | struct apds990x_chip *chip = i2c_get_clientdata(client); |
| 1220 | 1220 | ||
| 1221 | apds990x_chip_off(chip); | 1221 | apds990x_chip_off(chip); |
| @@ -1224,7 +1224,7 @@ static int apds990x_suspend(struct device *dev) | |||
| 1224 | 1224 | ||
| 1225 | static int apds990x_resume(struct device *dev) | 1225 | static int apds990x_resume(struct device *dev) |
| 1226 | { | 1226 | { |
| 1227 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1227 | struct i2c_client *client = to_i2c_client(dev); |
| 1228 | struct apds990x_chip *chip = i2c_get_clientdata(client); | 1228 | struct apds990x_chip *chip = i2c_get_clientdata(client); |
| 1229 | 1229 | ||
| 1230 | /* | 1230 | /* |
| @@ -1240,7 +1240,7 @@ static int apds990x_resume(struct device *dev) | |||
| 1240 | #ifdef CONFIG_PM | 1240 | #ifdef CONFIG_PM |
| 1241 | static int apds990x_runtime_suspend(struct device *dev) | 1241 | static int apds990x_runtime_suspend(struct device *dev) |
| 1242 | { | 1242 | { |
| 1243 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1243 | struct i2c_client *client = to_i2c_client(dev); |
| 1244 | struct apds990x_chip *chip = i2c_get_clientdata(client); | 1244 | struct apds990x_chip *chip = i2c_get_clientdata(client); |
| 1245 | 1245 | ||
| 1246 | apds990x_chip_off(chip); | 1246 | apds990x_chip_off(chip); |
| @@ -1249,7 +1249,7 @@ static int apds990x_runtime_suspend(struct device *dev) | |||
| 1249 | 1249 | ||
| 1250 | static int apds990x_runtime_resume(struct device *dev) | 1250 | static int apds990x_runtime_resume(struct device *dev) |
| 1251 | { | 1251 | { |
| 1252 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1252 | struct i2c_client *client = to_i2c_client(dev); |
| 1253 | struct apds990x_chip *chip = i2c_get_clientdata(client); | 1253 | struct apds990x_chip *chip = i2c_get_clientdata(client); |
| 1254 | 1254 | ||
| 1255 | apds990x_chip_on(chip); | 1255 | apds990x_chip_on(chip); |
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c index c65b5ea5d5ef..b3176ee92b90 100644 --- a/drivers/misc/arm-charlcd.c +++ b/drivers/misc/arm-charlcd.c | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | * Author: Linus Walleij <triad@df.lth.se> | 8 | * Author: Linus Walleij <triad@df.lth.se> |
| 9 | */ | 9 | */ |
| 10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
| 13 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
| 14 | #include <linux/of.h> | 13 | #include <linux/of.h> |
| @@ -328,20 +327,6 @@ out_no_resource: | |||
| 328 | return ret; | 327 | return ret; |
| 329 | } | 328 | } |
| 330 | 329 | ||
| 331 | static int __exit charlcd_remove(struct platform_device *pdev) | ||
| 332 | { | ||
| 333 | struct charlcd *lcd = platform_get_drvdata(pdev); | ||
| 334 | |||
| 335 | if (lcd) { | ||
| 336 | free_irq(lcd->irq, lcd); | ||
| 337 | iounmap(lcd->virtbase); | ||
| 338 | release_mem_region(lcd->phybase, lcd->physize); | ||
| 339 | kfree(lcd); | ||
| 340 | } | ||
| 341 | |||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | |||
| 345 | static int charlcd_suspend(struct device *dev) | 330 | static int charlcd_suspend(struct device *dev) |
| 346 | { | 331 | { |
| 347 | struct platform_device *pdev = to_platform_device(dev); | 332 | struct platform_device *pdev = to_platform_device(dev); |
| @@ -376,13 +361,8 @@ static struct platform_driver charlcd_driver = { | |||
| 376 | .driver = { | 361 | .driver = { |
| 377 | .name = DRIVERNAME, | 362 | .name = DRIVERNAME, |
| 378 | .pm = &charlcd_pm_ops, | 363 | .pm = &charlcd_pm_ops, |
| 364 | .suppress_bind_attrs = true, | ||
| 379 | .of_match_table = of_match_ptr(charlcd_match), | 365 | .of_match_table = of_match_ptr(charlcd_match), |
| 380 | }, | 366 | }, |
| 381 | .remove = __exit_p(charlcd_remove), | ||
| 382 | }; | 367 | }; |
| 383 | 368 | builtin_platform_driver_probe(charlcd_driver, charlcd_probe); | |
| 384 | module_platform_driver_probe(charlcd_driver, charlcd_probe); | ||
| 385 | |||
| 386 | MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>"); | ||
| 387 | MODULE_DESCRIPTION("ARM Character LCD Driver"); | ||
| 388 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c index 753d7ecdadaa..845466e45b95 100644 --- a/drivers/misc/bh1770glc.c +++ b/drivers/misc/bh1770glc.c | |||
| @@ -1323,7 +1323,7 @@ static int bh1770_remove(struct i2c_client *client) | |||
| 1323 | #ifdef CONFIG_PM_SLEEP | 1323 | #ifdef CONFIG_PM_SLEEP |
| 1324 | static int bh1770_suspend(struct device *dev) | 1324 | static int bh1770_suspend(struct device *dev) |
| 1325 | { | 1325 | { |
| 1326 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1326 | struct i2c_client *client = to_i2c_client(dev); |
| 1327 | struct bh1770_chip *chip = i2c_get_clientdata(client); | 1327 | struct bh1770_chip *chip = i2c_get_clientdata(client); |
| 1328 | 1328 | ||
| 1329 | bh1770_chip_off(chip); | 1329 | bh1770_chip_off(chip); |
| @@ -1333,7 +1333,7 @@ static int bh1770_suspend(struct device *dev) | |||
| 1333 | 1333 | ||
| 1334 | static int bh1770_resume(struct device *dev) | 1334 | static int bh1770_resume(struct device *dev) |
| 1335 | { | 1335 | { |
| 1336 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1336 | struct i2c_client *client = to_i2c_client(dev); |
| 1337 | struct bh1770_chip *chip = i2c_get_clientdata(client); | 1337 | struct bh1770_chip *chip = i2c_get_clientdata(client); |
| 1338 | int ret = 0; | 1338 | int ret = 0; |
| 1339 | 1339 | ||
| @@ -1361,7 +1361,7 @@ static int bh1770_resume(struct device *dev) | |||
| 1361 | #ifdef CONFIG_PM | 1361 | #ifdef CONFIG_PM |
| 1362 | static int bh1770_runtime_suspend(struct device *dev) | 1362 | static int bh1770_runtime_suspend(struct device *dev) |
| 1363 | { | 1363 | { |
| 1364 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1364 | struct i2c_client *client = to_i2c_client(dev); |
| 1365 | struct bh1770_chip *chip = i2c_get_clientdata(client); | 1365 | struct bh1770_chip *chip = i2c_get_clientdata(client); |
| 1366 | 1366 | ||
| 1367 | bh1770_chip_off(chip); | 1367 | bh1770_chip_off(chip); |
| @@ -1371,7 +1371,7 @@ static int bh1770_runtime_suspend(struct device *dev) | |||
| 1371 | 1371 | ||
| 1372 | static int bh1770_runtime_resume(struct device *dev) | 1372 | static int bh1770_runtime_resume(struct device *dev) |
| 1373 | { | 1373 | { |
| 1374 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 1374 | struct i2c_client *client = to_i2c_client(dev); |
| 1375 | struct bh1770_chip *chip = i2c_get_clientdata(client); | 1375 | struct bh1770_chip *chip = i2c_get_clientdata(client); |
| 1376 | 1376 | ||
| 1377 | bh1770_chip_on(chip); | 1377 | bh1770_chip_on(chip); |
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index cc8645b5369d..1922cb8f6b88 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c | |||
| @@ -721,9 +721,7 @@ static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj, | |||
| 721 | struct bin_attribute *attr, | 721 | struct bin_attribute *attr, |
| 722 | char *buffer, loff_t offset, size_t count) | 722 | char *buffer, loff_t offset, size_t count) |
| 723 | { | 723 | { |
| 724 | struct c2port_device *c2dev = | 724 | struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); |
| 725 | dev_get_drvdata(container_of(kobj, | ||
| 726 | struct device, kobj)); | ||
| 727 | ssize_t ret; | 725 | ssize_t ret; |
| 728 | 726 | ||
| 729 | /* Check the device and flash access status */ | 727 | /* Check the device and flash access status */ |
| @@ -838,9 +836,7 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj, | |||
| 838 | struct bin_attribute *attr, | 836 | struct bin_attribute *attr, |
| 839 | char *buffer, loff_t offset, size_t count) | 837 | char *buffer, loff_t offset, size_t count) |
| 840 | { | 838 | { |
| 841 | struct c2port_device *c2dev = | 839 | struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj)); |
| 842 | dev_get_drvdata(container_of(kobj, | ||
| 843 | struct device, kobj)); | ||
| 844 | int ret; | 840 | int ret; |
| 845 | 841 | ||
| 846 | /* Check the device access status */ | 842 | /* Check the device access status */ |
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 02006f7109a8..038af5d45145 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c | |||
| @@ -386,8 +386,7 @@ static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj, | |||
| 386 | struct bin_attribute *bin_attr, char *buf, | 386 | struct bin_attribute *bin_attr, char *buf, |
| 387 | loff_t off, size_t count) | 387 | loff_t off, size_t count) |
| 388 | { | 388 | { |
| 389 | struct cxl_afu *afu = to_cxl_afu(container_of(kobj, | 389 | struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj)); |
| 390 | struct device, kobj)); | ||
| 391 | 390 | ||
| 392 | return cxl_afu_read_err_buffer(afu, buf, off, count); | 391 | return cxl_afu_read_err_buffer(afu, buf, off, count); |
| 393 | } | 392 | } |
| @@ -467,7 +466,7 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj, | |||
| 467 | loff_t off, size_t count) | 466 | loff_t off, size_t count) |
| 468 | { | 467 | { |
| 469 | struct afu_config_record *cr = to_cr(kobj); | 468 | struct afu_config_record *cr = to_cr(kobj); |
| 470 | struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj)); | 469 | struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent)); |
| 471 | 470 | ||
| 472 | u64 i, j, val; | 471 | u64 i, j, val; |
| 473 | 472 | ||
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 5d7c0900fa1b..d105c2564400 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
| @@ -289,7 +289,7 @@ static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj, | |||
| 289 | { | 289 | { |
| 290 | struct at24_data *at24; | 290 | struct at24_data *at24; |
| 291 | 291 | ||
| 292 | at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); | 292 | at24 = dev_get_drvdata(kobj_to_dev(kobj)); |
| 293 | return at24_read(at24, buf, off, count); | 293 | return at24_read(at24, buf, off, count); |
| 294 | } | 294 | } |
| 295 | 295 | ||
| @@ -420,7 +420,7 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj, | |||
| 420 | { | 420 | { |
| 421 | struct at24_data *at24; | 421 | struct at24_data *at24; |
| 422 | 422 | ||
| 423 | at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); | 423 | at24 = dev_get_drvdata(kobj_to_dev(kobj)); |
| 424 | return at24_write(at24, buf, off, count); | 424 | return at24_write(at24, buf, off, count); |
| 425 | } | 425 | } |
| 426 | 426 | ||
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index f850ef556bcc..3e9e5a28acaa 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c | |||
| @@ -139,7 +139,7 @@ at25_bin_read(struct file *filp, struct kobject *kobj, | |||
| 139 | struct device *dev; | 139 | struct device *dev; |
| 140 | struct at25_data *at25; | 140 | struct at25_data *at25; |
| 141 | 141 | ||
| 142 | dev = container_of(kobj, struct device, kobj); | 142 | dev = kobj_to_dev(kobj); |
| 143 | at25 = dev_get_drvdata(dev); | 143 | at25 = dev_get_drvdata(dev); |
| 144 | 144 | ||
| 145 | return at25_ee_read(at25, buf, off, count); | 145 | return at25_ee_read(at25, buf, off, count); |
| @@ -273,7 +273,7 @@ at25_bin_write(struct file *filp, struct kobject *kobj, | |||
| 273 | struct device *dev; | 273 | struct device *dev; |
| 274 | struct at25_data *at25; | 274 | struct at25_data *at25; |
| 275 | 275 | ||
| 276 | dev = container_of(kobj, struct device, kobj); | 276 | dev = kobj_to_dev(kobj); |
| 277 | at25 = dev_get_drvdata(dev); | 277 | at25 = dev_get_drvdata(dev); |
| 278 | 278 | ||
| 279 | return at25_ee_write(at25, buf, off, count); | 279 | return at25_ee_write(at25, buf, off, count); |
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c index 7342fd637031..3d1d55157e5f 100644 --- a/drivers/misc/eeprom/eeprom.c +++ b/drivers/misc/eeprom/eeprom.c | |||
| @@ -84,7 +84,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, | |||
| 84 | struct bin_attribute *bin_attr, | 84 | struct bin_attribute *bin_attr, |
| 85 | char *buf, loff_t off, size_t count) | 85 | char *buf, loff_t off, size_t count) |
| 86 | { | 86 | { |
| 87 | struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj)); | 87 | struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj)); |
| 88 | struct eeprom_data *data = i2c_get_clientdata(client); | 88 | struct eeprom_data *data = i2c_get_clientdata(client); |
| 89 | u8 slice; | 89 | u8 slice; |
| 90 | 90 | ||
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index ff63f05edc76..f62ab29e293c 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c | |||
| @@ -10,9 +10,13 @@ | |||
| 10 | 10 | ||
| 11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
| 12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
| 13 | #include <linux/gpio/consumer.h> | ||
| 13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 15 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
| 17 | #include <linux/of.h> | ||
| 18 | #include <linux/of_device.h> | ||
| 19 | #include <linux/of_gpio.h> | ||
| 16 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 17 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
| 18 | #include <linux/sysfs.h> | 22 | #include <linux/sysfs.h> |
| @@ -25,6 +29,15 @@ | |||
| 25 | #define ADDR_ERAL 0x20 | 29 | #define ADDR_ERAL 0x20 |
| 26 | #define ADDR_EWEN 0x30 | 30 | #define ADDR_EWEN 0x30 |
| 27 | 31 | ||
| 32 | struct eeprom_93xx46_devtype_data { | ||
| 33 | unsigned int quirks; | ||
| 34 | }; | ||
| 35 | |||
| 36 | static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = { | ||
| 37 | .quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ | | ||
| 38 | EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH, | ||
| 39 | }; | ||
| 40 | |||
| 28 | struct eeprom_93xx46_dev { | 41 | struct eeprom_93xx46_dev { |
| 29 | struct spi_device *spi; | 42 | struct spi_device *spi; |
| 30 | struct eeprom_93xx46_platform_data *pdata; | 43 | struct eeprom_93xx46_platform_data *pdata; |
| @@ -33,6 +46,16 @@ struct eeprom_93xx46_dev { | |||
| 33 | int addrlen; | 46 | int addrlen; |
| 34 | }; | 47 | }; |
| 35 | 48 | ||
| 49 | static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev) | ||
| 50 | { | ||
| 51 | return edev->pdata->quirks & EEPROM_93XX46_QUIRK_SINGLE_WORD_READ; | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev) | ||
| 55 | { | ||
| 56 | return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH; | ||
| 57 | } | ||
| 58 | |||
| 36 | static ssize_t | 59 | static ssize_t |
| 37 | eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj, | 60 | eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj, |
| 38 | struct bin_attribute *bin_attr, | 61 | struct bin_attribute *bin_attr, |
| @@ -40,58 +63,73 @@ eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj, | |||
| 40 | { | 63 | { |
| 41 | struct eeprom_93xx46_dev *edev; | 64 | struct eeprom_93xx46_dev *edev; |
| 42 | struct device *dev; | 65 | struct device *dev; |
| 43 | struct spi_message m; | 66 | ssize_t ret = 0; |
| 44 | struct spi_transfer t[2]; | ||
| 45 | int bits, ret; | ||
| 46 | u16 cmd_addr; | ||
| 47 | 67 | ||
| 48 | dev = container_of(kobj, struct device, kobj); | 68 | dev = kobj_to_dev(kobj); |
| 49 | edev = dev_get_drvdata(dev); | 69 | edev = dev_get_drvdata(dev); |
| 50 | 70 | ||
| 51 | cmd_addr = OP_READ << edev->addrlen; | 71 | mutex_lock(&edev->lock); |
| 52 | 72 | ||
| 53 | if (edev->addrlen == 7) { | 73 | if (edev->pdata->prepare) |
| 54 | cmd_addr |= off & 0x7f; | 74 | edev->pdata->prepare(edev); |
| 55 | bits = 10; | ||
| 56 | } else { | ||
| 57 | cmd_addr |= off & 0x3f; | ||
| 58 | bits = 9; | ||
| 59 | } | ||
| 60 | 75 | ||
| 61 | dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n", | 76 | while (count) { |
| 62 | cmd_addr, edev->spi->max_speed_hz); | 77 | struct spi_message m; |
| 78 | struct spi_transfer t[2] = { { 0 } }; | ||
| 79 | u16 cmd_addr = OP_READ << edev->addrlen; | ||
| 80 | size_t nbytes = count; | ||
| 81 | int bits; | ||
| 82 | int err; | ||
| 83 | |||
| 84 | if (edev->addrlen == 7) { | ||
| 85 | cmd_addr |= off & 0x7f; | ||
| 86 | bits = 10; | ||
| 87 | if (has_quirk_single_word_read(edev)) | ||
| 88 | nbytes = 1; | ||
| 89 | } else { | ||
| 90 | cmd_addr |= (off >> 1) & 0x3f; | ||
| 91 | bits = 9; | ||
| 92 | if (has_quirk_single_word_read(edev)) | ||
| 93 | nbytes = 2; | ||
| 94 | } | ||
| 63 | 95 | ||
| 64 | spi_message_init(&m); | 96 | dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n", |
| 65 | memset(t, 0, sizeof(t)); | 97 | cmd_addr, edev->spi->max_speed_hz); |
| 66 | 98 | ||
| 67 | t[0].tx_buf = (char *)&cmd_addr; | 99 | spi_message_init(&m); |
| 68 | t[0].len = 2; | ||
| 69 | t[0].bits_per_word = bits; | ||
| 70 | spi_message_add_tail(&t[0], &m); | ||
| 71 | 100 | ||
| 72 | t[1].rx_buf = buf; | 101 | t[0].tx_buf = (char *)&cmd_addr; |
| 73 | t[1].len = count; | 102 | t[0].len = 2; |
| 74 | t[1].bits_per_word = 8; | 103 | t[0].bits_per_word = bits; |
| 75 | spi_message_add_tail(&t[1], &m); | 104 | spi_message_add_tail(&t[0], &m); |
| 76 | 105 | ||
| 77 | mutex_lock(&edev->lock); | 106 | t[1].rx_buf = buf; |
| 107 | t[1].len = count; | ||
| 108 | t[1].bits_per_word = 8; | ||
| 109 | spi_message_add_tail(&t[1], &m); | ||
| 78 | 110 | ||
| 79 | if (edev->pdata->prepare) | 111 | err = spi_sync(edev->spi, &m); |
| 80 | edev->pdata->prepare(edev); | 112 | /* have to wait at least Tcsl ns */ |
| 113 | ndelay(250); | ||
| 81 | 114 | ||
| 82 | ret = spi_sync(edev->spi, &m); | 115 | if (err) { |
| 83 | /* have to wait at least Tcsl ns */ | 116 | dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n", |
| 84 | ndelay(250); | 117 | nbytes, (int)off, err); |
| 85 | if (ret) { | 118 | ret = err; |
| 86 | dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n", | 119 | break; |
| 87 | count, (int)off, ret); | 120 | } |
| 121 | |||
| 122 | buf += nbytes; | ||
| 123 | off += nbytes; | ||
| 124 | count -= nbytes; | ||
| 125 | ret += nbytes; | ||
| 88 | } | 126 | } |
| 89 | 127 | ||
| 90 | if (edev->pdata->finish) | 128 | if (edev->pdata->finish) |
| 91 | edev->pdata->finish(edev); | 129 | edev->pdata->finish(edev); |
| 92 | 130 | ||
| 93 | mutex_unlock(&edev->lock); | 131 | mutex_unlock(&edev->lock); |
| 94 | return ret ? : count; | 132 | return ret; |
| 95 | } | 133 | } |
| 96 | 134 | ||
| 97 | static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) | 135 | static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) |
| @@ -110,7 +148,13 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) | |||
| 110 | bits = 9; | 148 | bits = 9; |
| 111 | } | 149 | } |
| 112 | 150 | ||
| 113 | dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr); | 151 | if (has_quirk_instruction_length(edev)) { |
| 152 | cmd_addr <<= 2; | ||
| 153 | bits += 2; | ||
| 154 | } | ||
| 155 | |||
| 156 | dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n", | ||
| 157 | is_on ? "en" : "ds", cmd_addr, bits); | ||
| 114 | 158 | ||
| 115 | spi_message_init(&m); | 159 | spi_message_init(&m); |
| 116 | memset(&t, 0, sizeof(t)); | 160 | memset(&t, 0, sizeof(t)); |
| @@ -155,7 +199,7 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev, | |||
| 155 | bits = 10; | 199 | bits = 10; |
| 156 | data_len = 1; | 200 | data_len = 1; |
| 157 | } else { | 201 | } else { |
| 158 | cmd_addr |= off & 0x3f; | 202 | cmd_addr |= (off >> 1) & 0x3f; |
| 159 | bits = 9; | 203 | bits = 9; |
| 160 | data_len = 2; | 204 | data_len = 2; |
| 161 | } | 205 | } |
| @@ -190,7 +234,7 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj, | |||
| 190 | struct device *dev; | 234 | struct device *dev; |
| 191 | int i, ret, step = 1; | 235 | int i, ret, step = 1; |
| 192 | 236 | ||
| 193 | dev = container_of(kobj, struct device, kobj); | 237 | dev = kobj_to_dev(kobj); |
| 194 | edev = dev_get_drvdata(dev); | 238 | edev = dev_get_drvdata(dev); |
| 195 | 239 | ||
| 196 | /* only write even number of bytes on 16-bit devices */ | 240 | /* only write even number of bytes on 16-bit devices */ |
| @@ -245,6 +289,13 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) | |||
| 245 | bits = 9; | 289 | bits = 9; |
| 246 | } | 290 | } |
| 247 | 291 | ||
| 292 | if (has_quirk_instruction_length(edev)) { | ||
| 293 | cmd_addr <<= 2; | ||
| 294 | bits += 2; | ||
| 295 | } | ||
| 296 | |||
| 297 | dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits); | ||
| 298 | |||
| 248 | spi_message_init(&m); | 299 | spi_message_init(&m); |
| 249 | memset(&t, 0, sizeof(t)); | 300 | memset(&t, 0, sizeof(t)); |
| 250 | 301 | ||
| @@ -294,12 +345,100 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev, | |||
| 294 | } | 345 | } |
| 295 | static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase); | 346 | static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase); |
| 296 | 347 | ||
| 348 | static void select_assert(void *context) | ||
| 349 | { | ||
| 350 | struct eeprom_93xx46_dev *edev = context; | ||
| 351 | |||
| 352 | gpiod_set_value_cansleep(edev->pdata->select, 1); | ||
| 353 | } | ||
| 354 | |||
| 355 | static void select_deassert(void *context) | ||
| 356 | { | ||
| 357 | struct eeprom_93xx46_dev *edev = context; | ||
| 358 | |||
| 359 | gpiod_set_value_cansleep(edev->pdata->select, 0); | ||
| 360 | } | ||
| 361 | |||
| 362 | static const struct of_device_id eeprom_93xx46_of_table[] = { | ||
| 363 | { .compatible = "eeprom-93xx46", }, | ||
| 364 | { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, }, | ||
| 365 | {} | ||
| 366 | }; | ||
| 367 | MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table); | ||
| 368 | |||
| 369 | static int eeprom_93xx46_probe_dt(struct spi_device *spi) | ||
| 370 | { | ||
| 371 | const struct of_device_id *of_id = | ||
| 372 | of_match_device(eeprom_93xx46_of_table, &spi->dev); | ||
| 373 | struct device_node *np = spi->dev.of_node; | ||
| 374 | struct eeprom_93xx46_platform_data *pd; | ||
| 375 | u32 tmp; | ||
| 376 | int gpio; | ||
| 377 | enum of_gpio_flags of_flags; | ||
| 378 | int ret; | ||
| 379 | |||
| 380 | pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL); | ||
| 381 | if (!pd) | ||
| 382 | return -ENOMEM; | ||
| 383 | |||
| 384 | ret = of_property_read_u32(np, "data-size", &tmp); | ||
| 385 | if (ret < 0) { | ||
| 386 | dev_err(&spi->dev, "data-size property not found\n"); | ||
| 387 | return ret; | ||
| 388 | } | ||
| 389 | |||
| 390 | if (tmp == 8) { | ||
| 391 | pd->flags |= EE_ADDR8; | ||
| 392 | } else if (tmp == 16) { | ||
| 393 | pd->flags |= EE_ADDR16; | ||
| 394 | } else { | ||
| 395 | dev_err(&spi->dev, "invalid data-size (%d)\n", tmp); | ||
| 396 | return -EINVAL; | ||
| 397 | } | ||
| 398 | |||
| 399 | if (of_property_read_bool(np, "read-only")) | ||
| 400 | pd->flags |= EE_READONLY; | ||
| 401 | |||
| 402 | gpio = of_get_named_gpio_flags(np, "select-gpios", 0, &of_flags); | ||
| 403 | if (gpio_is_valid(gpio)) { | ||
| 404 | unsigned long flags = | ||
| 405 | of_flags == OF_GPIO_ACTIVE_LOW ? GPIOF_ACTIVE_LOW : 0; | ||
| 406 | |||
| 407 | ret = devm_gpio_request_one(&spi->dev, gpio, flags, | ||
| 408 | "eeprom_93xx46_select"); | ||
| 409 | if (ret) | ||
| 410 | return ret; | ||
| 411 | |||
| 412 | pd->select = gpio_to_desc(gpio); | ||
| 413 | pd->prepare = select_assert; | ||
| 414 | pd->finish = select_deassert; | ||
| 415 | |||
| 416 | gpiod_direction_output(pd->select, 0); | ||
| 417 | } | ||
| 418 | |||
| 419 | if (of_id->data) { | ||
| 420 | const struct eeprom_93xx46_devtype_data *data = of_id->data; | ||
| 421 | |||
| 422 | pd->quirks = data->quirks; | ||
| 423 | } | ||
| 424 | |||
| 425 | spi->dev.platform_data = pd; | ||
| 426 | |||
| 427 | return 0; | ||
| 428 | } | ||
| 429 | |||
| 297 | static int eeprom_93xx46_probe(struct spi_device *spi) | 430 | static int eeprom_93xx46_probe(struct spi_device *spi) |
| 298 | { | 431 | { |
| 299 | struct eeprom_93xx46_platform_data *pd; | 432 | struct eeprom_93xx46_platform_data *pd; |
| 300 | struct eeprom_93xx46_dev *edev; | 433 | struct eeprom_93xx46_dev *edev; |
| 301 | int err; | 434 | int err; |
| 302 | 435 | ||
| 436 | if (spi->dev.of_node) { | ||
| 437 | err = eeprom_93xx46_probe_dt(spi); | ||
| 438 | if (err < 0) | ||
| 439 | return err; | ||
| 440 | } | ||
| 441 | |||
| 303 | pd = spi->dev.platform_data; | 442 | pd = spi->dev.platform_data; |
| 304 | if (!pd) { | 443 | if (!pd) { |
| 305 | dev_err(&spi->dev, "missing platform data\n"); | 444 | dev_err(&spi->dev, "missing platform data\n"); |
| @@ -370,6 +509,7 @@ static int eeprom_93xx46_remove(struct spi_device *spi) | |||
| 370 | static struct spi_driver eeprom_93xx46_driver = { | 509 | static struct spi_driver eeprom_93xx46_driver = { |
| 371 | .driver = { | 510 | .driver = { |
| 372 | .name = "93xx46", | 511 | .name = "93xx46", |
| 512 | .of_match_table = of_match_ptr(eeprom_93xx46_of_table), | ||
| 373 | }, | 513 | }, |
| 374 | .probe = eeprom_93xx46_probe, | 514 | .probe = eeprom_93xx46_probe, |
| 375 | .remove = eeprom_93xx46_remove, | 515 | .remove = eeprom_93xx46_remove, |
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c index 6ab31eff0536..c24c9b7c1dd3 100644 --- a/drivers/misc/genwqe/card_sysfs.c +++ b/drivers/misc/genwqe/card_sysfs.c | |||
| @@ -278,7 +278,7 @@ static umode_t genwqe_is_visible(struct kobject *kobj, | |||
| 278 | struct attribute *attr, int n) | 278 | struct attribute *attr, int n) |
| 279 | { | 279 | { |
| 280 | unsigned int j; | 280 | unsigned int j; |
| 281 | struct device *dev = container_of(kobj, struct device, kobj); | 281 | struct device *dev = kobj_to_dev(kobj); |
| 282 | struct genwqe_dev *cd = dev_get_drvdata(dev); | 282 | struct genwqe_dev *cd = dev_get_drvdata(dev); |
| 283 | umode_t mode = attr->mode; | 283 | umode_t mode = attr->mode; |
| 284 | 284 | ||
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h index 5bd127727d8e..9fea49d2e15b 100644 --- a/drivers/misc/ibmasm/ibmasm.h +++ b/drivers/misc/ibmasm/ibmasm.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <linux/kref.h> | 34 | #include <linux/kref.h> |
| 35 | #include <linux/device.h> | 35 | #include <linux/device.h> |
| 36 | #include <linux/input.h> | 36 | #include <linux/input.h> |
| 37 | #include <linux/time64.h> | ||
| 37 | 38 | ||
| 38 | /* Driver identification */ | 39 | /* Driver identification */ |
| 39 | #define DRIVER_NAME "ibmasm" | 40 | #define DRIVER_NAME "ibmasm" |
| @@ -53,9 +54,11 @@ extern int ibmasm_debug; | |||
| 53 | 54 | ||
| 54 | static inline char *get_timestamp(char *buf) | 55 | static inline char *get_timestamp(char *buf) |
| 55 | { | 56 | { |
| 56 | struct timeval now; | 57 | struct timespec64 now; |
| 57 | do_gettimeofday(&now); | 58 | |
| 58 | sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec); | 59 | ktime_get_real_ts64(&now); |
| 60 | sprintf(buf, "%llu.%.08lu", (long long)now.tv_sec, | ||
| 61 | now.tv_nsec / NSEC_PER_USEC); | ||
| 59 | return buf; | 62 | return buf; |
| 60 | } | 63 | } |
| 61 | 64 | ||
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index 0c3bb7e3ee80..14b7d539fed6 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c | |||
| @@ -209,7 +209,7 @@ static int lis3lv02d_i2c_remove(struct i2c_client *client) | |||
| 209 | #ifdef CONFIG_PM_SLEEP | 209 | #ifdef CONFIG_PM_SLEEP |
| 210 | static int lis3lv02d_i2c_suspend(struct device *dev) | 210 | static int lis3lv02d_i2c_suspend(struct device *dev) |
| 211 | { | 211 | { |
| 212 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 212 | struct i2c_client *client = to_i2c_client(dev); |
| 213 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 213 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
| 214 | 214 | ||
| 215 | if (!lis3->pdata || !lis3->pdata->wakeup_flags) | 215 | if (!lis3->pdata || !lis3->pdata->wakeup_flags) |
| @@ -219,7 +219,7 @@ static int lis3lv02d_i2c_suspend(struct device *dev) | |||
| 219 | 219 | ||
| 220 | static int lis3lv02d_i2c_resume(struct device *dev) | 220 | static int lis3lv02d_i2c_resume(struct device *dev) |
| 221 | { | 221 | { |
| 222 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 222 | struct i2c_client *client = to_i2c_client(dev); |
| 223 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 223 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
| 224 | 224 | ||
| 225 | /* | 225 | /* |
| @@ -238,7 +238,7 @@ static int lis3lv02d_i2c_resume(struct device *dev) | |||
| 238 | #ifdef CONFIG_PM | 238 | #ifdef CONFIG_PM |
| 239 | static int lis3_i2c_runtime_suspend(struct device *dev) | 239 | static int lis3_i2c_runtime_suspend(struct device *dev) |
| 240 | { | 240 | { |
| 241 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 241 | struct i2c_client *client = to_i2c_client(dev); |
| 242 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 242 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
| 243 | 243 | ||
| 244 | lis3lv02d_poweroff(lis3); | 244 | lis3lv02d_poweroff(lis3); |
| @@ -247,7 +247,7 @@ static int lis3_i2c_runtime_suspend(struct device *dev) | |||
| 247 | 247 | ||
| 248 | static int lis3_i2c_runtime_resume(struct device *dev) | 248 | static int lis3_i2c_runtime_resume(struct device *dev) |
| 249 | { | 249 | { |
| 250 | struct i2c_client *client = container_of(dev, struct i2c_client, dev); | 250 | struct i2c_client *client = to_i2c_client(dev); |
| 251 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); | 251 | struct lis3lv02d *lis3 = i2c_get_clientdata(client); |
| 252 | 252 | ||
| 253 | lis3lv02d_poweron(lis3); | 253 | lis3lv02d_poweron(lis3); |
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 11fdadc68e53..5c1351b19029 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c | |||
| @@ -335,7 +335,7 @@ static noinline void corrupt_stack(void) | |||
| 335 | memset((void *)data, 0, 64); | 335 | memset((void *)data, 0, 64); |
| 336 | } | 336 | } |
| 337 | 337 | ||
| 338 | static void execute_location(void *dst) | 338 | static void noinline execute_location(void *dst) |
| 339 | { | 339 | { |
| 340 | void (*func)(void) = dst; | 340 | void (*func)(void) = dst; |
| 341 | 341 | ||
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index d23384dde73b..c49e1d2269af 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config INTEL_MEI | 1 | config INTEL_MEI |
| 2 | tristate "Intel Management Engine Interface" | 2 | tristate "Intel Management Engine Interface" |
| 3 | depends on X86 && PCI && WATCHDOG_CORE | 3 | depends on X86 && PCI |
| 4 | help | 4 | help |
| 5 | The Intel Management Engine (Intel ME) provides Manageability, | 5 | The Intel Management Engine (Intel ME) provides Manageability, |
| 6 | Security and Media services for system containing Intel chipsets. | 6 | Security and Media services for system containing Intel chipsets. |
| @@ -12,7 +12,7 @@ config INTEL_MEI | |||
| 12 | config INTEL_MEI_ME | 12 | config INTEL_MEI_ME |
| 13 | tristate "ME Enabled Intel Chipsets" | 13 | tristate "ME Enabled Intel Chipsets" |
| 14 | select INTEL_MEI | 14 | select INTEL_MEI |
| 15 | depends on X86 && PCI && WATCHDOG_CORE | 15 | depends on X86 && PCI |
| 16 | help | 16 | help |
| 17 | MEI support for ME Enabled Intel chipsets. | 17 | MEI support for ME Enabled Intel chipsets. |
| 18 | 18 | ||
| @@ -37,7 +37,7 @@ config INTEL_MEI_ME | |||
| 37 | config INTEL_MEI_TXE | 37 | config INTEL_MEI_TXE |
| 38 | tristate "Intel Trusted Execution Environment with ME Interface" | 38 | tristate "Intel Trusted Execution Environment with ME Interface" |
| 39 | select INTEL_MEI | 39 | select INTEL_MEI |
| 40 | depends on X86 && PCI && WATCHDOG_CORE | 40 | depends on X86 && PCI |
| 41 | help | 41 | help |
| 42 | MEI Support for Trusted Execution Environment device on Intel SoCs | 42 | MEI Support for Trusted Execution Environment device on Intel SoCs |
| 43 | 43 | ||
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 01447ca21c26..59e6b0aede34 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile | |||
| @@ -9,7 +9,6 @@ mei-objs += interrupt.o | |||
| 9 | mei-objs += client.o | 9 | mei-objs += client.o |
| 10 | mei-objs += main.o | 10 | mei-objs += main.o |
| 11 | mei-objs += amthif.o | 11 | mei-objs += amthif.o |
| 12 | mei-objs += wd.o | ||
| 13 | mei-objs += bus.o | 12 | mei-objs += bus.o |
| 14 | mei-objs += bus-fixup.o | 13 | mei-objs += bus-fixup.o |
| 15 | mei-$(CONFIG_DEBUG_FS) += debugfs.o | 14 | mei-$(CONFIG_DEBUG_FS) += debugfs.o |
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index cd0403f09267..194360a5f782 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c | |||
| @@ -50,7 +50,6 @@ void mei_amthif_reset_params(struct mei_device *dev) | |||
| 50 | dev->iamthif_current_cb = NULL; | 50 | dev->iamthif_current_cb = NULL; |
| 51 | dev->iamthif_canceled = false; | 51 | dev->iamthif_canceled = false; |
| 52 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 52 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
| 53 | dev->iamthif_timer = 0; | ||
| 54 | dev->iamthif_stall_timer = 0; | 53 | dev->iamthif_stall_timer = 0; |
| 55 | dev->iamthif_open_count = 0; | 54 | dev->iamthif_open_count = 0; |
| 56 | } | 55 | } |
| @@ -68,11 +67,14 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl) | |||
| 68 | struct mei_cl *cl = &dev->iamthif_cl; | 67 | struct mei_cl *cl = &dev->iamthif_cl; |
| 69 | int ret; | 68 | int ret; |
| 70 | 69 | ||
| 70 | if (mei_cl_is_connected(cl)) | ||
| 71 | return 0; | ||
| 72 | |||
| 71 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 73 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
| 72 | 74 | ||
| 73 | mei_cl_init(cl, dev); | 75 | mei_cl_init(cl, dev); |
| 74 | 76 | ||
| 75 | ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID); | 77 | ret = mei_cl_link(cl); |
| 76 | if (ret < 0) { | 78 | if (ret < 0) { |
| 77 | dev_err(dev->dev, "amthif: failed cl_link %d\n", ret); | 79 | dev_err(dev->dev, "amthif: failed cl_link %d\n", ret); |
| 78 | return ret; | 80 | return ret; |
| @@ -80,32 +82,10 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl) | |||
| 80 | 82 | ||
| 81 | ret = mei_cl_connect(cl, me_cl, NULL); | 83 | ret = mei_cl_connect(cl, me_cl, NULL); |
| 82 | 84 | ||
| 83 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | ||
| 84 | |||
| 85 | return ret; | 85 | return ret; |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | /** | 88 | /** |
| 89 | * mei_amthif_find_read_list_entry - finds a amthilist entry for current file | ||
| 90 | * | ||
| 91 | * @dev: the device structure | ||
| 92 | * @file: pointer to file object | ||
| 93 | * | ||
| 94 | * Return: returned a list entry on success, NULL on failure. | ||
| 95 | */ | ||
| 96 | struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, | ||
| 97 | struct file *file) | ||
| 98 | { | ||
| 99 | struct mei_cl_cb *cb; | ||
| 100 | |||
| 101 | list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list) | ||
| 102 | if (cb->file_object == file) | ||
| 103 | return cb; | ||
| 104 | return NULL; | ||
| 105 | } | ||
| 106 | |||
| 107 | |||
| 108 | /** | ||
| 109 | * mei_amthif_read - read data from AMTHIF client | 89 | * mei_amthif_read - read data from AMTHIF client |
| 110 | * | 90 | * |
| 111 | * @dev: the device structure | 91 | * @dev: the device structure |
| @@ -126,18 +106,11 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
| 126 | { | 106 | { |
| 127 | struct mei_cl *cl = file->private_data; | 107 | struct mei_cl *cl = file->private_data; |
| 128 | struct mei_cl_cb *cb; | 108 | struct mei_cl_cb *cb; |
| 129 | unsigned long timeout; | ||
| 130 | int rets; | 109 | int rets; |
| 131 | int wait_ret; | 110 | int wait_ret; |
| 132 | 111 | ||
| 133 | /* Only possible if we are in timeout */ | ||
| 134 | if (!cl) { | ||
| 135 | dev_err(dev->dev, "bad file ext.\n"); | ||
| 136 | return -ETIME; | ||
| 137 | } | ||
| 138 | |||
| 139 | dev_dbg(dev->dev, "checking amthif data\n"); | 112 | dev_dbg(dev->dev, "checking amthif data\n"); |
| 140 | cb = mei_amthif_find_read_list_entry(dev, file); | 113 | cb = mei_cl_read_cb(cl, file); |
| 141 | 114 | ||
| 142 | /* Check for if we can block or not*/ | 115 | /* Check for if we can block or not*/ |
| 143 | if (cb == NULL && file->f_flags & O_NONBLOCK) | 116 | if (cb == NULL && file->f_flags & O_NONBLOCK) |
| @@ -149,8 +122,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
| 149 | /* unlock the Mutex */ | 122 | /* unlock the Mutex */ |
| 150 | mutex_unlock(&dev->device_lock); | 123 | mutex_unlock(&dev->device_lock); |
| 151 | 124 | ||
| 152 | wait_ret = wait_event_interruptible(dev->iamthif_cl.wait, | 125 | wait_ret = wait_event_interruptible(cl->rx_wait, |
| 153 | (cb = mei_amthif_find_read_list_entry(dev, file))); | 126 | !list_empty(&cl->rd_completed) || |
| 127 | !mei_cl_is_connected(cl)); | ||
| 154 | 128 | ||
| 155 | /* Locking again the Mutex */ | 129 | /* Locking again the Mutex */ |
| 156 | mutex_lock(&dev->device_lock); | 130 | mutex_lock(&dev->device_lock); |
| @@ -158,7 +132,12 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
| 158 | if (wait_ret) | 132 | if (wait_ret) |
| 159 | return -ERESTARTSYS; | 133 | return -ERESTARTSYS; |
| 160 | 134 | ||
| 161 | dev_dbg(dev->dev, "woke up from sleep\n"); | 135 | if (!mei_cl_is_connected(cl)) { |
| 136 | rets = -EBUSY; | ||
| 137 | goto out; | ||
| 138 | } | ||
| 139 | |||
| 140 | cb = mei_cl_read_cb(cl, file); | ||
| 162 | } | 141 | } |
| 163 | 142 | ||
| 164 | if (cb->status) { | 143 | if (cb->status) { |
| @@ -168,24 +147,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
| 168 | } | 147 | } |
| 169 | 148 | ||
| 170 | dev_dbg(dev->dev, "Got amthif data\n"); | 149 | dev_dbg(dev->dev, "Got amthif data\n"); |
| 171 | dev->iamthif_timer = 0; | ||
| 172 | |||
| 173 | timeout = cb->read_time + | ||
| 174 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); | ||
| 175 | dev_dbg(dev->dev, "amthif timeout = %lud\n", | ||
| 176 | timeout); | ||
| 177 | |||
| 178 | if (time_after(jiffies, timeout)) { | ||
| 179 | dev_dbg(dev->dev, "amthif Time out\n"); | ||
| 180 | /* 15 sec for the message has expired */ | ||
| 181 | list_del_init(&cb->list); | ||
| 182 | rets = -ETIME; | ||
| 183 | goto free; | ||
| 184 | } | ||
| 185 | /* if the whole message will fit remove it from the list */ | 150 | /* if the whole message will fit remove it from the list */ |
| 186 | if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset)) | 151 | if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset)) |
| 187 | list_del_init(&cb->list); | 152 | list_del_init(&cb->list); |
| 188 | else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { | 153 | else if (cb->buf_idx <= *offset) { |
| 189 | /* end of the message has been reached */ | 154 | /* end of the message has been reached */ |
| 190 | list_del_init(&cb->list); | 155 | list_del_init(&cb->list); |
| 191 | rets = 0; | 156 | rets = 0; |
| @@ -195,9 +160,8 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, | |||
| 195 | * remove message from deletion list | 160 | * remove message from deletion list |
| 196 | */ | 161 | */ |
| 197 | 162 | ||
| 198 | dev_dbg(dev->dev, "amthif cb->buf size - %d\n", | 163 | dev_dbg(dev->dev, "amthif cb->buf.size - %zu cb->buf_idx - %zu\n", |
| 199 | cb->buf.size); | 164 | cb->buf.size, cb->buf_idx); |
| 200 | dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); | ||
| 201 | 165 | ||
| 202 | /* length is being truncated to PAGE_SIZE, however, | 166 | /* length is being truncated to PAGE_SIZE, however, |
| 203 | * the buf_idx may point beyond */ | 167 | * the buf_idx may point beyond */ |
| @@ -229,7 +193,7 @@ out: | |||
| 229 | * | 193 | * |
| 230 | * Return: 0 on success, <0 on failure. | 194 | * Return: 0 on success, <0 on failure. |
| 231 | */ | 195 | */ |
| 232 | static int mei_amthif_read_start(struct mei_cl *cl, struct file *file) | 196 | static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file) |
| 233 | { | 197 | { |
| 234 | struct mei_device *dev = cl->dev; | 198 | struct mei_device *dev = cl->dev; |
| 235 | struct mei_cl_cb *cb; | 199 | struct mei_cl_cb *cb; |
| @@ -248,7 +212,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file) | |||
| 248 | list_add_tail(&cb->list, &dev->ctrl_wr_list.list); | 212 | list_add_tail(&cb->list, &dev->ctrl_wr_list.list); |
| 249 | 213 | ||
| 250 | dev->iamthif_state = MEI_IAMTHIF_READING; | 214 | dev->iamthif_state = MEI_IAMTHIF_READING; |
| 251 | dev->iamthif_file_object = cb->file_object; | 215 | dev->iamthif_fp = cb->fp; |
| 252 | dev->iamthif_current_cb = cb; | 216 | dev->iamthif_current_cb = cb; |
| 253 | 217 | ||
| 254 | return 0; | 218 | return 0; |
| @@ -277,7 +241,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb) | |||
| 277 | 241 | ||
| 278 | dev->iamthif_state = MEI_IAMTHIF_WRITING; | 242 | dev->iamthif_state = MEI_IAMTHIF_WRITING; |
| 279 | dev->iamthif_current_cb = cb; | 243 | dev->iamthif_current_cb = cb; |
| 280 | dev->iamthif_file_object = cb->file_object; | 244 | dev->iamthif_fp = cb->fp; |
| 281 | dev->iamthif_canceled = false; | 245 | dev->iamthif_canceled = false; |
| 282 | 246 | ||
| 283 | ret = mei_cl_write(cl, cb, false); | 247 | ret = mei_cl_write(cl, cb, false); |
| @@ -285,7 +249,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb) | |||
| 285 | return ret; | 249 | return ret; |
| 286 | 250 | ||
| 287 | if (cb->completed) | 251 | if (cb->completed) |
| 288 | cb->status = mei_amthif_read_start(cl, cb->file_object); | 252 | cb->status = mei_amthif_read_start(cl, cb->fp); |
| 289 | 253 | ||
| 290 | return 0; | 254 | return 0; |
| 291 | } | 255 | } |
| @@ -304,8 +268,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev) | |||
| 304 | 268 | ||
| 305 | dev->iamthif_canceled = false; | 269 | dev->iamthif_canceled = false; |
| 306 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 270 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
| 307 | dev->iamthif_timer = 0; | 271 | dev->iamthif_fp = NULL; |
| 308 | dev->iamthif_file_object = NULL; | ||
| 309 | 272 | ||
| 310 | dev_dbg(dev->dev, "complete amthif cmd_list cb.\n"); | 273 | dev_dbg(dev->dev, "complete amthif cmd_list cb.\n"); |
| 311 | 274 | ||
| @@ -329,17 +292,17 @@ int mei_amthif_run_next_cmd(struct mei_device *dev) | |||
| 329 | int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb) | 292 | int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb) |
| 330 | { | 293 | { |
| 331 | 294 | ||
| 332 | struct mei_device *dev; | 295 | struct mei_device *dev = cl->dev; |
| 333 | |||
| 334 | if (WARN_ON(!cl || !cl->dev)) | ||
| 335 | return -ENODEV; | ||
| 336 | 296 | ||
| 337 | if (WARN_ON(!cb)) | 297 | list_add_tail(&cb->list, &dev->amthif_cmd_list.list); |
| 338 | return -EINVAL; | ||
| 339 | 298 | ||
| 340 | dev = cl->dev; | 299 | /* |
| 300 | * The previous request is still in processing, queue this one. | ||
| 301 | */ | ||
| 302 | if (dev->iamthif_state > MEI_IAMTHIF_IDLE && | ||
| 303 | dev->iamthif_state < MEI_IAMTHIF_READ_COMPLETE) | ||
| 304 | return 0; | ||
| 341 | 305 | ||
| 342 | list_add_tail(&cb->list, &dev->amthif_cmd_list.list); | ||
| 343 | return mei_amthif_run_next_cmd(dev); | 306 | return mei_amthif_run_next_cmd(dev); |
| 344 | } | 307 | } |
| 345 | 308 | ||
| @@ -360,10 +323,10 @@ unsigned int mei_amthif_poll(struct mei_device *dev, | |||
| 360 | { | 323 | { |
| 361 | unsigned int mask = 0; | 324 | unsigned int mask = 0; |
| 362 | 325 | ||
| 363 | poll_wait(file, &dev->iamthif_cl.wait, wait); | 326 | poll_wait(file, &dev->iamthif_cl.rx_wait, wait); |
| 364 | 327 | ||
| 365 | if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE && | 328 | if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE && |
| 366 | dev->iamthif_file_object == file) { | 329 | dev->iamthif_fp == file) { |
| 367 | 330 | ||
| 368 | mask |= POLLIN | POLLRDNORM; | 331 | mask |= POLLIN | POLLRDNORM; |
| 369 | mei_amthif_run_next_cmd(dev); | 332 | mei_amthif_run_next_cmd(dev); |
| @@ -393,7 +356,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 393 | return ret; | 356 | return ret; |
| 394 | 357 | ||
| 395 | if (cb->completed) | 358 | if (cb->completed) |
| 396 | cb->status = mei_amthif_read_start(cl, cb->file_object); | 359 | cb->status = mei_amthif_read_start(cl, cb->fp); |
| 397 | 360 | ||
| 398 | return 0; | 361 | return 0; |
| 399 | } | 362 | } |
| @@ -437,11 +400,12 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl, | |||
| 437 | /** | 400 | /** |
| 438 | * mei_amthif_complete - complete amthif callback. | 401 | * mei_amthif_complete - complete amthif callback. |
| 439 | * | 402 | * |
| 440 | * @dev: the device structure. | 403 | * @cl: host client |
| 441 | * @cb: callback block. | 404 | * @cb: callback block. |
| 442 | */ | 405 | */ |
| 443 | void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) | 406 | void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb) |
| 444 | { | 407 | { |
| 408 | struct mei_device *dev = cl->dev; | ||
| 445 | 409 | ||
| 446 | if (cb->fop_type == MEI_FOP_WRITE) { | 410 | if (cb->fop_type == MEI_FOP_WRITE) { |
| 447 | if (!cb->status) { | 411 | if (!cb->status) { |
| @@ -453,25 +417,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb) | |||
| 453 | * in case of error enqueue the write cb to complete read list | 417 | * in case of error enqueue the write cb to complete read list |
| 454 | * so it can be propagated to the reader | 418 | * so it can be propagated to the reader |
| 455 | */ | 419 | */ |
| 456 | list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); | 420 | list_add_tail(&cb->list, &cl->rd_completed); |
| 457 | wake_up_interruptible(&dev->iamthif_cl.wait); | 421 | wake_up_interruptible(&cl->rx_wait); |
| 458 | return; | 422 | return; |
| 459 | } | 423 | } |
| 460 | 424 | ||
| 461 | if (!dev->iamthif_canceled) { | 425 | if (!dev->iamthif_canceled) { |
| 462 | dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; | 426 | dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE; |
| 463 | dev->iamthif_stall_timer = 0; | 427 | dev->iamthif_stall_timer = 0; |
| 464 | list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list); | 428 | list_add_tail(&cb->list, &cl->rd_completed); |
| 465 | dev_dbg(dev->dev, "amthif read completed\n"); | 429 | dev_dbg(dev->dev, "amthif read completed\n"); |
| 466 | dev->iamthif_timer = jiffies; | ||
| 467 | dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n", | ||
| 468 | dev->iamthif_timer); | ||
| 469 | } else { | 430 | } else { |
| 470 | mei_amthif_run_next_cmd(dev); | 431 | mei_amthif_run_next_cmd(dev); |
| 471 | } | 432 | } |
| 472 | 433 | ||
| 473 | dev_dbg(dev->dev, "completing amthif call back.\n"); | 434 | dev_dbg(dev->dev, "completing amthif call back.\n"); |
| 474 | wake_up_interruptible(&dev->iamthif_cl.wait); | 435 | wake_up_interruptible(&cl->rx_wait); |
| 475 | } | 436 | } |
| 476 | 437 | ||
| 477 | /** | 438 | /** |
| @@ -497,7 +458,7 @@ static bool mei_clear_list(struct mei_device *dev, | |||
| 497 | /* list all list member */ | 458 | /* list all list member */ |
| 498 | list_for_each_entry_safe(cb, next, mei_cb_list, list) { | 459 | list_for_each_entry_safe(cb, next, mei_cb_list, list) { |
| 499 | /* check if list member associated with a file */ | 460 | /* check if list member associated with a file */ |
| 500 | if (file == cb->file_object) { | 461 | if (file == cb->fp) { |
| 501 | /* check if cb equal to current iamthif cb */ | 462 | /* check if cb equal to current iamthif cb */ |
| 502 | if (dev->iamthif_current_cb == cb) { | 463 | if (dev->iamthif_current_cb == cb) { |
| 503 | dev->iamthif_current_cb = NULL; | 464 | dev->iamthif_current_cb = NULL; |
| @@ -523,13 +484,14 @@ static bool mei_clear_list(struct mei_device *dev, | |||
| 523 | * | 484 | * |
| 524 | * Return: true if callback removed from the list, false otherwise | 485 | * Return: true if callback removed from the list, false otherwise |
| 525 | */ | 486 | */ |
| 526 | static bool mei_clear_lists(struct mei_device *dev, struct file *file) | 487 | static bool mei_clear_lists(struct mei_device *dev, const struct file *file) |
| 527 | { | 488 | { |
| 528 | bool removed = false; | 489 | bool removed = false; |
| 490 | struct mei_cl *cl = &dev->iamthif_cl; | ||
| 529 | 491 | ||
| 530 | /* remove callbacks associated with a file */ | 492 | /* remove callbacks associated with a file */ |
| 531 | mei_clear_list(dev, file, &dev->amthif_cmd_list.list); | 493 | mei_clear_list(dev, file, &dev->amthif_cmd_list.list); |
| 532 | if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list)) | 494 | if (mei_clear_list(dev, file, &cl->rd_completed)) |
| 533 | removed = true; | 495 | removed = true; |
| 534 | 496 | ||
| 535 | mei_clear_list(dev, file, &dev->ctrl_rd_list.list); | 497 | mei_clear_list(dev, file, &dev->ctrl_rd_list.list); |
| @@ -546,7 +508,7 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file) | |||
| 546 | /* check if iamthif_current_cb not NULL */ | 508 | /* check if iamthif_current_cb not NULL */ |
| 547 | if (dev->iamthif_current_cb && !removed) { | 509 | if (dev->iamthif_current_cb && !removed) { |
| 548 | /* check file and iamthif current cb association */ | 510 | /* check file and iamthif current cb association */ |
| 549 | if (dev->iamthif_current_cb->file_object == file) { | 511 | if (dev->iamthif_current_cb->fp == file) { |
| 550 | /* remove cb */ | 512 | /* remove cb */ |
| 551 | mei_io_cb_free(dev->iamthif_current_cb); | 513 | mei_io_cb_free(dev->iamthif_current_cb); |
| 552 | dev->iamthif_current_cb = NULL; | 514 | dev->iamthif_current_cb = NULL; |
| @@ -569,7 +531,7 @@ int mei_amthif_release(struct mei_device *dev, struct file *file) | |||
| 569 | if (dev->iamthif_open_count > 0) | 531 | if (dev->iamthif_open_count > 0) |
| 570 | dev->iamthif_open_count--; | 532 | dev->iamthif_open_count--; |
| 571 | 533 | ||
| 572 | if (dev->iamthif_file_object == file && | 534 | if (dev->iamthif_fp == file && |
| 573 | dev->iamthif_state != MEI_IAMTHIF_IDLE) { | 535 | dev->iamthif_state != MEI_IAMTHIF_IDLE) { |
| 574 | 536 | ||
| 575 | dev_dbg(dev->dev, "amthif canceled iamthif state %d\n", | 537 | dev_dbg(dev->dev, "amthif canceled iamthif state %d\n", |
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 020de5919c21..e9e6ea3ab73c 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c | |||
| @@ -35,6 +35,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; | |||
| 35 | #define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \ | 35 | #define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \ |
| 36 | 0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) | 36 | 0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) |
| 37 | 37 | ||
| 38 | #define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \ | ||
| 39 | 0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB) | ||
| 40 | |||
| 38 | #define MEI_UUID_ANY NULL_UUID_LE | 41 | #define MEI_UUID_ANY NULL_UUID_LE |
| 39 | 42 | ||
| 40 | /** | 43 | /** |
| @@ -48,8 +51,7 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; | |||
| 48 | */ | 51 | */ |
| 49 | static void number_of_connections(struct mei_cl_device *cldev) | 52 | static void number_of_connections(struct mei_cl_device *cldev) |
| 50 | { | 53 | { |
| 51 | dev_dbg(&cldev->dev, "running hook %s on %pUl\n", | 54 | dev_dbg(&cldev->dev, "running hook %s\n", __func__); |
| 52 | __func__, mei_me_cl_uuid(cldev->me_cl)); | ||
| 53 | 55 | ||
| 54 | if (cldev->me_cl->props.max_number_of_connections > 1) | 56 | if (cldev->me_cl->props.max_number_of_connections > 1) |
| 55 | cldev->do_match = 0; | 57 | cldev->do_match = 0; |
| @@ -62,11 +64,36 @@ static void number_of_connections(struct mei_cl_device *cldev) | |||
| 62 | */ | 64 | */ |
| 63 | static void blacklist(struct mei_cl_device *cldev) | 65 | static void blacklist(struct mei_cl_device *cldev) |
| 64 | { | 66 | { |
| 65 | dev_dbg(&cldev->dev, "running hook %s on %pUl\n", | 67 | dev_dbg(&cldev->dev, "running hook %s\n", __func__); |
| 66 | __func__, mei_me_cl_uuid(cldev->me_cl)); | 68 | |
| 67 | cldev->do_match = 0; | 69 | cldev->do_match = 0; |
| 68 | } | 70 | } |
| 69 | 71 | ||
| 72 | /** | ||
| 73 | * mei_wd - wd client on the bus, change protocol version | ||
| 74 | * as the API has changed. | ||
| 75 | * | ||
| 76 | * @cldev: me clients device | ||
| 77 | */ | ||
| 78 | #if IS_ENABLED(CONFIG_INTEL_MEI_ME) | ||
| 79 | #include <linux/pci.h> | ||
| 80 | #include "hw-me-regs.h" | ||
| 81 | static void mei_wd(struct mei_cl_device *cldev) | ||
| 82 | { | ||
| 83 | struct pci_dev *pdev = to_pci_dev(cldev->dev.parent); | ||
| 84 | |||
| 85 | dev_dbg(&cldev->dev, "running hook %s\n", __func__); | ||
| 86 | if (pdev->device == MEI_DEV_ID_WPT_LP || | ||
| 87 | pdev->device == MEI_DEV_ID_SPT || | ||
| 88 | pdev->device == MEI_DEV_ID_SPT_H) | ||
| 89 | cldev->me_cl->props.protocol_version = 0x2; | ||
| 90 | |||
| 91 | cldev->do_match = 1; | ||
| 92 | } | ||
| 93 | #else | ||
| 94 | static inline void mei_wd(struct mei_cl_device *cldev) {} | ||
| 95 | #endif /* CONFIG_INTEL_MEI_ME */ | ||
| 96 | |||
| 70 | struct mei_nfc_cmd { | 97 | struct mei_nfc_cmd { |
| 71 | u8 command; | 98 | u8 command; |
| 72 | u8 status; | 99 | u8 status; |
| @@ -208,12 +235,11 @@ static void mei_nfc(struct mei_cl_device *cldev) | |||
| 208 | 235 | ||
| 209 | bus = cldev->bus; | 236 | bus = cldev->bus; |
| 210 | 237 | ||
| 211 | dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n", | 238 | dev_dbg(&cldev->dev, "running hook %s\n", __func__); |
| 212 | __func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match); | ||
| 213 | 239 | ||
| 214 | mutex_lock(&bus->device_lock); | 240 | mutex_lock(&bus->device_lock); |
| 215 | /* we need to connect to INFO GUID */ | 241 | /* we need to connect to INFO GUID */ |
| 216 | cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY); | 242 | cl = mei_cl_alloc_linked(bus); |
| 217 | if (IS_ERR(cl)) { | 243 | if (IS_ERR(cl)) { |
| 218 | ret = PTR_ERR(cl); | 244 | ret = PTR_ERR(cl); |
| 219 | cl = NULL; | 245 | cl = NULL; |
| @@ -282,6 +308,7 @@ static struct mei_fixup { | |||
| 282 | MEI_FIXUP(MEI_UUID_ANY, number_of_connections), | 308 | MEI_FIXUP(MEI_UUID_ANY, number_of_connections), |
| 283 | MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist), | 309 | MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist), |
| 284 | MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), | 310 | MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), |
| 311 | MEI_FIXUP(MEI_UUID_WD, mei_wd), | ||
| 285 | }; | 312 | }; |
| 286 | 313 | ||
| 287 | /** | 314 | /** |
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 0b05aa938799..5d5996e39a67 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
| @@ -44,7 +44,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
| 44 | bool blocking) | 44 | bool blocking) |
| 45 | { | 45 | { |
| 46 | struct mei_device *bus; | 46 | struct mei_device *bus; |
| 47 | struct mei_cl_cb *cb = NULL; | 47 | struct mei_cl_cb *cb; |
| 48 | ssize_t rets; | 48 | ssize_t rets; |
| 49 | 49 | ||
| 50 | if (WARN_ON(!cl || !cl->dev)) | 50 | if (WARN_ON(!cl || !cl->dev)) |
| @@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
| 53 | bus = cl->dev; | 53 | bus = cl->dev; |
| 54 | 54 | ||
| 55 | mutex_lock(&bus->device_lock); | 55 | mutex_lock(&bus->device_lock); |
| 56 | if (bus->dev_state != MEI_DEV_ENABLED) { | ||
| 57 | rets = -ENODEV; | ||
| 58 | goto out; | ||
| 59 | } | ||
| 60 | |||
| 56 | if (!mei_cl_is_connected(cl)) { | 61 | if (!mei_cl_is_connected(cl)) { |
| 57 | rets = -ENODEV; | 62 | rets = -ENODEV; |
| 58 | goto out; | 63 | goto out; |
| @@ -81,8 +86,6 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | |||
| 81 | 86 | ||
| 82 | out: | 87 | out: |
| 83 | mutex_unlock(&bus->device_lock); | 88 | mutex_unlock(&bus->device_lock); |
| 84 | if (rets < 0) | ||
| 85 | mei_io_cb_free(cb); | ||
| 86 | 89 | ||
| 87 | return rets; | 90 | return rets; |
| 88 | } | 91 | } |
| @@ -109,6 +112,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) | |||
| 109 | bus = cl->dev; | 112 | bus = cl->dev; |
| 110 | 113 | ||
| 111 | mutex_lock(&bus->device_lock); | 114 | mutex_lock(&bus->device_lock); |
| 115 | if (bus->dev_state != MEI_DEV_ENABLED) { | ||
| 116 | rets = -ENODEV; | ||
| 117 | goto out; | ||
| 118 | } | ||
| 112 | 119 | ||
| 113 | cb = mei_cl_read_cb(cl, NULL); | 120 | cb = mei_cl_read_cb(cl, NULL); |
| 114 | if (cb) | 121 | if (cb) |
| @@ -230,45 +237,55 @@ static void mei_cl_bus_event_work(struct work_struct *work) | |||
| 230 | * mei_cl_bus_notify_event - schedule notify cb on bus client | 237 | * mei_cl_bus_notify_event - schedule notify cb on bus client |
| 231 | * | 238 | * |
| 232 | * @cl: host client | 239 | * @cl: host client |
| 240 | * | ||
| 241 | * Return: true if event was scheduled | ||
| 242 | * false if the client is not waiting for event | ||
| 233 | */ | 243 | */ |
| 234 | void mei_cl_bus_notify_event(struct mei_cl *cl) | 244 | bool mei_cl_bus_notify_event(struct mei_cl *cl) |
| 235 | { | 245 | { |
| 236 | struct mei_cl_device *cldev = cl->cldev; | 246 | struct mei_cl_device *cldev = cl->cldev; |
| 237 | 247 | ||
| 238 | if (!cldev || !cldev->event_cb) | 248 | if (!cldev || !cldev->event_cb) |
| 239 | return; | 249 | return false; |
| 240 | 250 | ||
| 241 | if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF))) | 251 | if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF))) |
| 242 | return; | 252 | return false; |
| 243 | 253 | ||
| 244 | if (!cl->notify_ev) | 254 | if (!cl->notify_ev) |
| 245 | return; | 255 | return false; |
| 246 | 256 | ||
| 247 | set_bit(MEI_CL_EVENT_NOTIF, &cldev->events); | 257 | set_bit(MEI_CL_EVENT_NOTIF, &cldev->events); |
| 248 | 258 | ||
| 249 | schedule_work(&cldev->event_work); | 259 | schedule_work(&cldev->event_work); |
| 250 | 260 | ||
| 251 | cl->notify_ev = false; | 261 | cl->notify_ev = false; |
| 262 | |||
| 263 | return true; | ||
| 252 | } | 264 | } |
| 253 | 265 | ||
| 254 | /** | 266 | /** |
| 255 | * mei_cl_bus_rx_event - schedule rx evenet | 267 | * mei_cl_bus_rx_event - schedule rx event |
| 256 | * | 268 | * |
| 257 | * @cl: host client | 269 | * @cl: host client |
| 270 | * | ||
| 271 | * Return: true if event was scheduled | ||
| 272 | * false if the client is not waiting for event | ||
| 258 | */ | 273 | */ |
| 259 | void mei_cl_bus_rx_event(struct mei_cl *cl) | 274 | bool mei_cl_bus_rx_event(struct mei_cl *cl) |
| 260 | { | 275 | { |
| 261 | struct mei_cl_device *cldev = cl->cldev; | 276 | struct mei_cl_device *cldev = cl->cldev; |
| 262 | 277 | ||
| 263 | if (!cldev || !cldev->event_cb) | 278 | if (!cldev || !cldev->event_cb) |
| 264 | return; | 279 | return false; |
| 265 | 280 | ||
| 266 | if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX))) | 281 | if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX))) |
| 267 | return; | 282 | return false; |
| 268 | 283 | ||
| 269 | set_bit(MEI_CL_EVENT_RX, &cldev->events); | 284 | set_bit(MEI_CL_EVENT_RX, &cldev->events); |
| 270 | 285 | ||
| 271 | schedule_work(&cldev->event_work); | 286 | schedule_work(&cldev->event_work); |
| 287 | |||
| 288 | return true; | ||
| 272 | } | 289 | } |
| 273 | 290 | ||
| 274 | /** | 291 | /** |
| @@ -398,7 +415,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev) | |||
| 398 | 415 | ||
| 399 | if (!cl) { | 416 | if (!cl) { |
| 400 | mutex_lock(&bus->device_lock); | 417 | mutex_lock(&bus->device_lock); |
| 401 | cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY); | 418 | cl = mei_cl_alloc_linked(bus); |
| 402 | mutex_unlock(&bus->device_lock); | 419 | mutex_unlock(&bus->device_lock); |
| 403 | if (IS_ERR(cl)) | 420 | if (IS_ERR(cl)) |
| 404 | return PTR_ERR(cl); | 421 | return PTR_ERR(cl); |
| @@ -958,6 +975,22 @@ void mei_cl_bus_rescan(struct mei_device *bus) | |||
| 958 | dev_dbg(bus->dev, "rescan end"); | 975 | dev_dbg(bus->dev, "rescan end"); |
| 959 | } | 976 | } |
| 960 | 977 | ||
| 978 | void mei_cl_bus_rescan_work(struct work_struct *work) | ||
| 979 | { | ||
| 980 | struct mei_device *bus = | ||
| 981 | container_of(work, struct mei_device, bus_rescan_work); | ||
| 982 | struct mei_me_client *me_cl; | ||
| 983 | |||
| 984 | mutex_lock(&bus->device_lock); | ||
| 985 | me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid); | ||
| 986 | if (me_cl) | ||
| 987 | mei_amthif_host_init(bus, me_cl); | ||
| 988 | mei_me_cl_put(me_cl); | ||
| 989 | mutex_unlock(&bus->device_lock); | ||
| 990 | |||
| 991 | mei_cl_bus_rescan(bus); | ||
| 992 | } | ||
| 993 | |||
| 961 | int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, | 994 | int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, |
| 962 | struct module *owner) | 995 | struct module *owner) |
| 963 | { | 996 | { |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index a6c87c713193..bab17e4197b6 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
| @@ -359,7 +359,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb) | |||
| 359 | * Return: mei_cl_cb pointer or NULL; | 359 | * Return: mei_cl_cb pointer or NULL; |
| 360 | */ | 360 | */ |
| 361 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, | 361 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, |
| 362 | struct file *fp) | 362 | const struct file *fp) |
| 363 | { | 363 | { |
| 364 | struct mei_cl_cb *cb; | 364 | struct mei_cl_cb *cb; |
| 365 | 365 | ||
| @@ -368,7 +368,7 @@ struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, | |||
| 368 | return NULL; | 368 | return NULL; |
| 369 | 369 | ||
| 370 | INIT_LIST_HEAD(&cb->list); | 370 | INIT_LIST_HEAD(&cb->list); |
| 371 | cb->file_object = fp; | 371 | cb->fp = fp; |
| 372 | cb->cl = cl; | 372 | cb->cl = cl; |
| 373 | cb->buf_idx = 0; | 373 | cb->buf_idx = 0; |
| 374 | cb->fop_type = type; | 374 | cb->fop_type = type; |
| @@ -455,7 +455,8 @@ int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length) | |||
| 455 | * Return: cb on success and NULL on failure | 455 | * Return: cb on success and NULL on failure |
| 456 | */ | 456 | */ |
| 457 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, | 457 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, |
| 458 | enum mei_cb_file_ops type, struct file *fp) | 458 | enum mei_cb_file_ops type, |
| 459 | const struct file *fp) | ||
| 459 | { | 460 | { |
| 460 | struct mei_cl_cb *cb; | 461 | struct mei_cl_cb *cb; |
| 461 | 462 | ||
| @@ -485,7 +486,7 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp) | |||
| 485 | struct mei_cl_cb *cb; | 486 | struct mei_cl_cb *cb; |
| 486 | 487 | ||
| 487 | list_for_each_entry(cb, &cl->rd_completed, list) | 488 | list_for_each_entry(cb, &cl->rd_completed, list) |
| 488 | if (!fp || fp == cb->file_object) | 489 | if (!fp || fp == cb->fp) |
| 489 | return cb; | 490 | return cb; |
| 490 | 491 | ||
| 491 | return NULL; | 492 | return NULL; |
| @@ -503,12 +504,12 @@ void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp) | |||
| 503 | struct mei_cl_cb *cb, *next; | 504 | struct mei_cl_cb *cb, *next; |
| 504 | 505 | ||
| 505 | list_for_each_entry_safe(cb, next, &cl->rd_completed, list) | 506 | list_for_each_entry_safe(cb, next, &cl->rd_completed, list) |
| 506 | if (!fp || fp == cb->file_object) | 507 | if (!fp || fp == cb->fp) |
| 507 | mei_io_cb_free(cb); | 508 | mei_io_cb_free(cb); |
| 508 | 509 | ||
| 509 | 510 | ||
| 510 | list_for_each_entry_safe(cb, next, &cl->rd_pending, list) | 511 | list_for_each_entry_safe(cb, next, &cl->rd_pending, list) |
| 511 | if (!fp || fp == cb->file_object) | 512 | if (!fp || fp == cb->fp) |
| 512 | mei_io_cb_free(cb); | 513 | mei_io_cb_free(cb); |
| 513 | } | 514 | } |
| 514 | 515 | ||
| @@ -535,7 +536,6 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) | |||
| 535 | mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); | 536 | mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); |
| 536 | mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); | 537 | mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); |
| 537 | mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); | 538 | mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); |
| 538 | mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); | ||
| 539 | 539 | ||
| 540 | mei_cl_read_cb_flush(cl, fp); | 540 | mei_cl_read_cb_flush(cl, fp); |
| 541 | 541 | ||
| @@ -587,27 +587,23 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev) | |||
| 587 | * mei_cl_link - allocate host id in the host map | 587 | * mei_cl_link - allocate host id in the host map |
| 588 | * | 588 | * |
| 589 | * @cl: host client | 589 | * @cl: host client |
| 590 | * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one | ||
| 591 | * | 590 | * |
| 592 | * Return: 0 on success | 591 | * Return: 0 on success |
| 593 | * -EINVAL on incorrect values | 592 | * -EINVAL on incorrect values |
| 594 | * -EMFILE if open count exceeded. | 593 | * -EMFILE if open count exceeded. |
| 595 | */ | 594 | */ |
| 596 | int mei_cl_link(struct mei_cl *cl, int id) | 595 | int mei_cl_link(struct mei_cl *cl) |
| 597 | { | 596 | { |
| 598 | struct mei_device *dev; | 597 | struct mei_device *dev; |
| 599 | long open_handle_count; | 598 | long open_handle_count; |
| 599 | int id; | ||
| 600 | 600 | ||
| 601 | if (WARN_ON(!cl || !cl->dev)) | 601 | if (WARN_ON(!cl || !cl->dev)) |
| 602 | return -EINVAL; | 602 | return -EINVAL; |
| 603 | 603 | ||
| 604 | dev = cl->dev; | 604 | dev = cl->dev; |
| 605 | 605 | ||
| 606 | /* If Id is not assigned get one*/ | 606 | id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); |
| 607 | if (id == MEI_HOST_CLIENT_ID_ANY) | ||
| 608 | id = find_first_zero_bit(dev->host_clients_map, | ||
| 609 | MEI_CLIENTS_MAX); | ||
| 610 | |||
| 611 | if (id >= MEI_CLIENTS_MAX) { | 607 | if (id >= MEI_CLIENTS_MAX) { |
| 612 | dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); | 608 | dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); |
| 613 | return -EMFILE; | 609 | return -EMFILE; |
| @@ -648,7 +644,7 @@ int mei_cl_unlink(struct mei_cl *cl) | |||
| 648 | if (!cl) | 644 | if (!cl) |
| 649 | return 0; | 645 | return 0; |
| 650 | 646 | ||
| 651 | /* wd and amthif might not be initialized */ | 647 | /* amthif might not be initialized */ |
| 652 | if (!cl->dev) | 648 | if (!cl->dev) |
| 653 | return 0; | 649 | return 0; |
| 654 | 650 | ||
| @@ -670,31 +666,12 @@ int mei_cl_unlink(struct mei_cl *cl) | |||
| 670 | return 0; | 666 | return 0; |
| 671 | } | 667 | } |
| 672 | 668 | ||
| 673 | 669 | void mei_host_client_init(struct mei_device *dev) | |
| 674 | void mei_host_client_init(struct work_struct *work) | ||
| 675 | { | 670 | { |
| 676 | struct mei_device *dev = | ||
| 677 | container_of(work, struct mei_device, init_work); | ||
| 678 | struct mei_me_client *me_cl; | ||
| 679 | |||
| 680 | mutex_lock(&dev->device_lock); | ||
| 681 | |||
| 682 | |||
| 683 | me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid); | ||
| 684 | if (me_cl) | ||
| 685 | mei_amthif_host_init(dev, me_cl); | ||
| 686 | mei_me_cl_put(me_cl); | ||
| 687 | |||
| 688 | me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid); | ||
| 689 | if (me_cl) | ||
| 690 | mei_wd_host_init(dev, me_cl); | ||
| 691 | mei_me_cl_put(me_cl); | ||
| 692 | |||
| 693 | dev->dev_state = MEI_DEV_ENABLED; | 671 | dev->dev_state = MEI_DEV_ENABLED; |
| 694 | dev->reset_count = 0; | 672 | dev->reset_count = 0; |
| 695 | mutex_unlock(&dev->device_lock); | ||
| 696 | 673 | ||
| 697 | mei_cl_bus_rescan(dev); | 674 | schedule_work(&dev->bus_rescan_work); |
| 698 | 675 | ||
| 699 | pm_runtime_mark_last_busy(dev->dev); | 676 | pm_runtime_mark_last_busy(dev->dev); |
| 700 | dev_dbg(dev->dev, "rpm: autosuspend\n"); | 677 | dev_dbg(dev->dev, "rpm: autosuspend\n"); |
| @@ -726,6 +703,33 @@ bool mei_hbuf_acquire(struct mei_device *dev) | |||
| 726 | } | 703 | } |
| 727 | 704 | ||
| 728 | /** | 705 | /** |
| 706 | * mei_cl_wake_all - wake up readers, writers and event waiters so | ||
| 707 | * they can be interrupted | ||
| 708 | * | ||
| 709 | * @cl: host client | ||
| 710 | */ | ||
| 711 | static void mei_cl_wake_all(struct mei_cl *cl) | ||
| 712 | { | ||
| 713 | struct mei_device *dev = cl->dev; | ||
| 714 | |||
| 715 | /* synchronized under device mutex */ | ||
| 716 | if (waitqueue_active(&cl->rx_wait)) { | ||
| 717 | cl_dbg(dev, cl, "Waking up reading client!\n"); | ||
| 718 | wake_up_interruptible(&cl->rx_wait); | ||
| 719 | } | ||
| 720 | /* synchronized under device mutex */ | ||
| 721 | if (waitqueue_active(&cl->tx_wait)) { | ||
| 722 | cl_dbg(dev, cl, "Waking up writing client!\n"); | ||
| 723 | wake_up_interruptible(&cl->tx_wait); | ||
| 724 | } | ||
| 725 | /* synchronized under device mutex */ | ||
| 726 | if (waitqueue_active(&cl->ev_wait)) { | ||
| 727 | cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); | ||
| 728 | wake_up_interruptible(&cl->ev_wait); | ||
| 729 | } | ||
| 730 | } | ||
| 731 | |||
| 732 | /** | ||
| 729 | * mei_cl_set_disconnected - set disconnected state and clear | 733 | * mei_cl_set_disconnected - set disconnected state and clear |
| 730 | * associated states and resources | 734 | * associated states and resources |
| 731 | * | 735 | * |
| @@ -740,8 +744,11 @@ void mei_cl_set_disconnected(struct mei_cl *cl) | |||
| 740 | return; | 744 | return; |
| 741 | 745 | ||
| 742 | cl->state = MEI_FILE_DISCONNECTED; | 746 | cl->state = MEI_FILE_DISCONNECTED; |
| 747 | mei_io_list_free(&dev->write_list, cl); | ||
| 748 | mei_io_list_free(&dev->write_waiting_list, cl); | ||
| 743 | mei_io_list_flush(&dev->ctrl_rd_list, cl); | 749 | mei_io_list_flush(&dev->ctrl_rd_list, cl); |
| 744 | mei_io_list_flush(&dev->ctrl_wr_list, cl); | 750 | mei_io_list_flush(&dev->ctrl_wr_list, cl); |
| 751 | mei_cl_wake_all(cl); | ||
| 745 | cl->mei_flow_ctrl_creds = 0; | 752 | cl->mei_flow_ctrl_creds = 0; |
| 746 | cl->timer_count = 0; | 753 | cl->timer_count = 0; |
| 747 | 754 | ||
| @@ -1034,7 +1041,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 1034 | * Return: 0 on success, <0 on failure. | 1041 | * Return: 0 on success, <0 on failure. |
| 1035 | */ | 1042 | */ |
| 1036 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, | 1043 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, |
| 1037 | struct file *file) | 1044 | const struct file *file) |
| 1038 | { | 1045 | { |
| 1039 | struct mei_device *dev; | 1046 | struct mei_device *dev; |
| 1040 | struct mei_cl_cb *cb; | 1047 | struct mei_cl_cb *cb; |
| @@ -1119,11 +1126,10 @@ nortpm: | |||
| 1119 | * mei_cl_alloc_linked - allocate and link host client | 1126 | * mei_cl_alloc_linked - allocate and link host client |
| 1120 | * | 1127 | * |
| 1121 | * @dev: the device structure | 1128 | * @dev: the device structure |
| 1122 | * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one | ||
| 1123 | * | 1129 | * |
| 1124 | * Return: cl on success ERR_PTR on failure | 1130 | * Return: cl on success ERR_PTR on failure |
| 1125 | */ | 1131 | */ |
| 1126 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id) | 1132 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev) |
| 1127 | { | 1133 | { |
| 1128 | struct mei_cl *cl; | 1134 | struct mei_cl *cl; |
| 1129 | int ret; | 1135 | int ret; |
| @@ -1134,7 +1140,7 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id) | |||
| 1134 | goto err; | 1140 | goto err; |
| 1135 | } | 1141 | } |
| 1136 | 1142 | ||
| 1137 | ret = mei_cl_link(cl, id); | 1143 | ret = mei_cl_link(cl); |
| 1138 | if (ret) | 1144 | if (ret) |
| 1139 | goto err; | 1145 | goto err; |
| 1140 | 1146 | ||
| @@ -1149,11 +1155,12 @@ err: | |||
| 1149 | /** | 1155 | /** |
| 1150 | * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. | 1156 | * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. |
| 1151 | * | 1157 | * |
| 1152 | * @cl: private data of the file object | 1158 | * @cl: host client |
| 1159 | * @fp: the file pointer associated with the pointer | ||
| 1153 | * | 1160 | * |
| 1154 | * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise. | 1161 | * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise. |
| 1155 | */ | 1162 | */ |
| 1156 | int mei_cl_flow_ctrl_creds(struct mei_cl *cl) | 1163 | static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp) |
| 1157 | { | 1164 | { |
| 1158 | int rets; | 1165 | int rets; |
| 1159 | 1166 | ||
| @@ -1164,7 +1171,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) | |||
| 1164 | return 1; | 1171 | return 1; |
| 1165 | 1172 | ||
| 1166 | if (mei_cl_is_fixed_address(cl)) { | 1173 | if (mei_cl_is_fixed_address(cl)) { |
| 1167 | rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL); | 1174 | rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp); |
| 1168 | if (rets && rets != -EBUSY) | 1175 | if (rets && rets != -EBUSY) |
| 1169 | return rets; | 1176 | return rets; |
| 1170 | return 1; | 1177 | return 1; |
| @@ -1186,7 +1193,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl) | |||
| 1186 | * 0 on success | 1193 | * 0 on success |
| 1187 | * -EINVAL when ctrl credits are <= 0 | 1194 | * -EINVAL when ctrl credits are <= 0 |
| 1188 | */ | 1195 | */ |
| 1189 | int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) | 1196 | static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) |
| 1190 | { | 1197 | { |
| 1191 | if (WARN_ON(!cl || !cl->me_cl)) | 1198 | if (WARN_ON(!cl || !cl->me_cl)) |
| 1192 | return -EINVAL; | 1199 | return -EINVAL; |
| @@ -1283,7 +1290,8 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 1283 | * | 1290 | * |
| 1284 | * Return: 0 on such and error otherwise. | 1291 | * Return: 0 on such and error otherwise. |
| 1285 | */ | 1292 | */ |
| 1286 | int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request) | 1293 | int mei_cl_notify_request(struct mei_cl *cl, |
| 1294 | const struct file *file, u8 request) | ||
| 1287 | { | 1295 | { |
| 1288 | struct mei_device *dev; | 1296 | struct mei_device *dev; |
| 1289 | struct mei_cl_cb *cb; | 1297 | struct mei_cl_cb *cb; |
| @@ -1368,12 +1376,12 @@ void mei_cl_notify(struct mei_cl *cl) | |||
| 1368 | 1376 | ||
| 1369 | cl_dbg(dev, cl, "notify event"); | 1377 | cl_dbg(dev, cl, "notify event"); |
| 1370 | cl->notify_ev = true; | 1378 | cl->notify_ev = true; |
| 1371 | wake_up_interruptible_all(&cl->ev_wait); | 1379 | if (!mei_cl_bus_notify_event(cl)) |
| 1380 | wake_up_interruptible(&cl->ev_wait); | ||
| 1372 | 1381 | ||
| 1373 | if (cl->ev_async) | 1382 | if (cl->ev_async) |
| 1374 | kill_fasync(&cl->ev_async, SIGIO, POLL_PRI); | 1383 | kill_fasync(&cl->ev_async, SIGIO, POLL_PRI); |
| 1375 | 1384 | ||
| 1376 | mei_cl_bus_notify_event(cl); | ||
| 1377 | } | 1385 | } |
| 1378 | 1386 | ||
| 1379 | /** | 1387 | /** |
| @@ -1422,6 +1430,25 @@ out: | |||
| 1422 | } | 1430 | } |
| 1423 | 1431 | ||
| 1424 | /** | 1432 | /** |
| 1433 | * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control | ||
| 1434 | * for given host client | ||
| 1435 | * | ||
| 1436 | * @cl: host client | ||
| 1437 | * | ||
| 1438 | * Return: true, if found at least one cb. | ||
| 1439 | */ | ||
| 1440 | static bool mei_cl_is_read_fc_cb(struct mei_cl *cl) | ||
| 1441 | { | ||
| 1442 | struct mei_device *dev = cl->dev; | ||
| 1443 | struct mei_cl_cb *cb; | ||
| 1444 | |||
| 1445 | list_for_each_entry(cb, &dev->ctrl_wr_list.list, list) | ||
| 1446 | if (cb->fop_type == MEI_FOP_READ && cb->cl == cl) | ||
| 1447 | return true; | ||
| 1448 | return false; | ||
| 1449 | } | ||
| 1450 | |||
| 1451 | /** | ||
| 1425 | * mei_cl_read_start - the start read client message function. | 1452 | * mei_cl_read_start - the start read client message function. |
| 1426 | * | 1453 | * |
| 1427 | * @cl: host client | 1454 | * @cl: host client |
| @@ -1430,7 +1457,7 @@ out: | |||
| 1430 | * | 1457 | * |
| 1431 | * Return: 0 on success, <0 on failure. | 1458 | * Return: 0 on success, <0 on failure. |
| 1432 | */ | 1459 | */ |
| 1433 | int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp) | 1460 | int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) |
| 1434 | { | 1461 | { |
| 1435 | struct mei_device *dev; | 1462 | struct mei_device *dev; |
| 1436 | struct mei_cl_cb *cb; | 1463 | struct mei_cl_cb *cb; |
| @@ -1445,7 +1472,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp) | |||
| 1445 | return -ENODEV; | 1472 | return -ENODEV; |
| 1446 | 1473 | ||
| 1447 | /* HW currently supports only one pending read */ | 1474 | /* HW currently supports only one pending read */ |
| 1448 | if (!list_empty(&cl->rd_pending)) | 1475 | if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl)) |
| 1449 | return -EBUSY; | 1476 | return -EBUSY; |
| 1450 | 1477 | ||
| 1451 | if (!mei_me_cl_is_active(cl->me_cl)) { | 1478 | if (!mei_me_cl_is_active(cl->me_cl)) { |
| @@ -1524,7 +1551,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 1524 | 1551 | ||
| 1525 | first_chunk = cb->buf_idx == 0; | 1552 | first_chunk = cb->buf_idx == 0; |
| 1526 | 1553 | ||
| 1527 | rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1; | 1554 | rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1; |
| 1528 | if (rets < 0) | 1555 | if (rets < 0) |
| 1529 | return rets; | 1556 | return rets; |
| 1530 | 1557 | ||
| @@ -1556,7 +1583,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 1556 | return 0; | 1583 | return 0; |
| 1557 | } | 1584 | } |
| 1558 | 1585 | ||
| 1559 | cl_dbg(dev, cl, "buf: size = %d idx = %lu\n", | 1586 | cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n", |
| 1560 | cb->buf.size, cb->buf_idx); | 1587 | cb->buf.size, cb->buf_idx); |
| 1561 | 1588 | ||
| 1562 | rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); | 1589 | rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); |
| @@ -1618,7 +1645,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | |||
| 1618 | if (rets < 0 && rets != -EINPROGRESS) { | 1645 | if (rets < 0 && rets != -EINPROGRESS) { |
| 1619 | pm_runtime_put_noidle(dev->dev); | 1646 | pm_runtime_put_noidle(dev->dev); |
| 1620 | cl_err(dev, cl, "rpm: get failed %d\n", rets); | 1647 | cl_err(dev, cl, "rpm: get failed %d\n", rets); |
| 1621 | return rets; | 1648 | goto free; |
| 1622 | } | 1649 | } |
| 1623 | 1650 | ||
| 1624 | cb->buf_idx = 0; | 1651 | cb->buf_idx = 0; |
| @@ -1630,7 +1657,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) | |||
| 1630 | mei_hdr.msg_complete = 0; | 1657 | mei_hdr.msg_complete = 0; |
| 1631 | mei_hdr.internal = cb->internal; | 1658 | mei_hdr.internal = cb->internal; |
| 1632 | 1659 | ||
| 1633 | rets = mei_cl_flow_ctrl_creds(cl); | 1660 | rets = mei_cl_flow_ctrl_creds(cl, cb->fp); |
| 1634 | if (rets < 0) | 1661 | if (rets < 0) |
| 1635 | goto err; | 1662 | goto err; |
| 1636 | 1663 | ||
| @@ -1677,7 +1704,8 @@ out: | |||
| 1677 | 1704 | ||
| 1678 | mutex_unlock(&dev->device_lock); | 1705 | mutex_unlock(&dev->device_lock); |
| 1679 | rets = wait_event_interruptible(cl->tx_wait, | 1706 | rets = wait_event_interruptible(cl->tx_wait, |
| 1680 | cl->writing_state == MEI_WRITE_COMPLETE); | 1707 | cl->writing_state == MEI_WRITE_COMPLETE || |
| 1708 | (!mei_cl_is_connected(cl))); | ||
| 1681 | mutex_lock(&dev->device_lock); | 1709 | mutex_lock(&dev->device_lock); |
| 1682 | /* wait_event_interruptible returns -ERESTARTSYS */ | 1710 | /* wait_event_interruptible returns -ERESTARTSYS */ |
| 1683 | if (rets) { | 1711 | if (rets) { |
| @@ -1685,6 +1713,10 @@ out: | |||
| 1685 | rets = -EINTR; | 1713 | rets = -EINTR; |
| 1686 | goto err; | 1714 | goto err; |
| 1687 | } | 1715 | } |
| 1716 | if (cl->writing_state != MEI_WRITE_COMPLETE) { | ||
| 1717 | rets = -EFAULT; | ||
| 1718 | goto err; | ||
| 1719 | } | ||
| 1688 | } | 1720 | } |
| 1689 | 1721 | ||
| 1690 | rets = size; | 1722 | rets = size; |
| @@ -1692,6 +1724,8 @@ err: | |||
| 1692 | cl_dbg(dev, cl, "rpm: autosuspend\n"); | 1724 | cl_dbg(dev, cl, "rpm: autosuspend\n"); |
| 1693 | pm_runtime_mark_last_busy(dev->dev); | 1725 | pm_runtime_mark_last_busy(dev->dev); |
| 1694 | pm_runtime_put_autosuspend(dev->dev); | 1726 | pm_runtime_put_autosuspend(dev->dev); |
| 1727 | free: | ||
| 1728 | mei_io_cb_free(cb); | ||
| 1695 | 1729 | ||
| 1696 | return rets; | 1730 | return rets; |
| 1697 | } | 1731 | } |
| @@ -1721,10 +1755,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) | |||
| 1721 | 1755 | ||
| 1722 | case MEI_FOP_READ: | 1756 | case MEI_FOP_READ: |
| 1723 | list_add_tail(&cb->list, &cl->rd_completed); | 1757 | list_add_tail(&cb->list, &cl->rd_completed); |
| 1724 | if (waitqueue_active(&cl->rx_wait)) | 1758 | if (!mei_cl_bus_rx_event(cl)) |
| 1725 | wake_up_interruptible_all(&cl->rx_wait); | 1759 | wake_up_interruptible(&cl->rx_wait); |
| 1726 | else | ||
| 1727 | mei_cl_bus_rx_event(cl); | ||
| 1728 | break; | 1760 | break; |
| 1729 | 1761 | ||
| 1730 | case MEI_FOP_CONNECT: | 1762 | case MEI_FOP_CONNECT: |
| @@ -1753,44 +1785,3 @@ void mei_cl_all_disconnect(struct mei_device *dev) | |||
| 1753 | list_for_each_entry(cl, &dev->file_list, link) | 1785 | list_for_each_entry(cl, &dev->file_list, link) |
| 1754 | mei_cl_set_disconnected(cl); | 1786 | mei_cl_set_disconnected(cl); |
| 1755 | } | 1787 | } |
| 1756 | |||
| 1757 | |||
| 1758 | /** | ||
| 1759 | * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted | ||
| 1760 | * | ||
| 1761 | * @dev: mei device | ||
| 1762 | */ | ||
| 1763 | void mei_cl_all_wakeup(struct mei_device *dev) | ||
| 1764 | { | ||
| 1765 | struct mei_cl *cl; | ||
| 1766 | |||
| 1767 | list_for_each_entry(cl, &dev->file_list, link) { | ||
| 1768 | if (waitqueue_active(&cl->rx_wait)) { | ||
| 1769 | cl_dbg(dev, cl, "Waking up reading client!\n"); | ||
| 1770 | wake_up_interruptible(&cl->rx_wait); | ||
| 1771 | } | ||
| 1772 | if (waitqueue_active(&cl->tx_wait)) { | ||
| 1773 | cl_dbg(dev, cl, "Waking up writing client!\n"); | ||
| 1774 | wake_up_interruptible(&cl->tx_wait); | ||
| 1775 | } | ||
| 1776 | |||
| 1777 | /* synchronized under device mutex */ | ||
| 1778 | if (waitqueue_active(&cl->ev_wait)) { | ||
| 1779 | cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); | ||
| 1780 | wake_up_interruptible(&cl->ev_wait); | ||
| 1781 | } | ||
| 1782 | } | ||
| 1783 | } | ||
| 1784 | |||
| 1785 | /** | ||
| 1786 | * mei_cl_all_write_clear - clear all pending writes | ||
| 1787 | * | ||
| 1788 | * @dev: mei device | ||
| 1789 | */ | ||
| 1790 | void mei_cl_all_write_clear(struct mei_device *dev) | ||
| 1791 | { | ||
| 1792 | mei_io_list_free(&dev->write_list, NULL); | ||
| 1793 | mei_io_list_free(&dev->write_waiting_list, NULL); | ||
| 1794 | } | ||
| 1795 | |||
| 1796 | |||
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h index 04e1aa39243f..0d7a3a1fef78 100644 --- a/drivers/misc/mei/client.h +++ b/drivers/misc/mei/client.h | |||
| @@ -18,7 +18,6 @@ | |||
| 18 | #define _MEI_CLIENT_H_ | 18 | #define _MEI_CLIENT_H_ |
| 19 | 19 | ||
| 20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
| 21 | #include <linux/watchdog.h> | ||
| 22 | #include <linux/poll.h> | 21 | #include <linux/poll.h> |
| 23 | #include <linux/mei.h> | 22 | #include <linux/mei.h> |
| 24 | 23 | ||
| @@ -84,7 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl) | |||
| 84 | * MEI IO Functions | 83 | * MEI IO Functions |
| 85 | */ | 84 | */ |
| 86 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, | 85 | struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type, |
| 87 | struct file *fp); | 86 | const struct file *fp); |
| 88 | void mei_io_cb_free(struct mei_cl_cb *priv_cb); | 87 | void mei_io_cb_free(struct mei_cl_cb *priv_cb); |
| 89 | int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length); | 88 | int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length); |
| 90 | 89 | ||
| @@ -108,21 +107,19 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev); | |||
| 108 | void mei_cl_init(struct mei_cl *cl, struct mei_device *dev); | 107 | void mei_cl_init(struct mei_cl *cl, struct mei_device *dev); |
| 109 | 108 | ||
| 110 | 109 | ||
| 111 | int mei_cl_link(struct mei_cl *cl, int id); | 110 | int mei_cl_link(struct mei_cl *cl); |
| 112 | int mei_cl_unlink(struct mei_cl *cl); | 111 | int mei_cl_unlink(struct mei_cl *cl); |
| 113 | 112 | ||
| 114 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id); | 113 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev); |
| 115 | 114 | ||
| 116 | struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, | 115 | struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, |
| 117 | const struct file *fp); | 116 | const struct file *fp); |
| 118 | void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp); | 117 | void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp); |
| 119 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, | 118 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, |
| 120 | enum mei_cb_file_ops type, struct file *fp); | 119 | enum mei_cb_file_ops type, |
| 120 | const struct file *fp); | ||
| 121 | int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp); | 121 | int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp); |
| 122 | 122 | ||
| 123 | int mei_cl_flow_ctrl_creds(struct mei_cl *cl); | ||
| 124 | |||
| 125 | int mei_cl_flow_ctrl_reduce(struct mei_cl *cl); | ||
| 126 | /* | 123 | /* |
| 127 | * MEI input output function prototype | 124 | * MEI input output function prototype |
| 128 | */ | 125 | */ |
| @@ -217,10 +214,10 @@ void mei_cl_set_disconnected(struct mei_cl *cl); | |||
| 217 | int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, | 214 | int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, |
| 218 | struct mei_cl_cb *cmpl_list); | 215 | struct mei_cl_cb *cmpl_list); |
| 219 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, | 216 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, |
| 220 | struct file *file); | 217 | const struct file *file); |
| 221 | int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, | 218 | int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, |
| 222 | struct mei_cl_cb *cmpl_list); | 219 | struct mei_cl_cb *cmpl_list); |
| 223 | int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp); | 220 | int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); |
| 224 | int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr, | 221 | int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr, |
| 225 | struct mei_cl_cb *cmpl_list); | 222 | struct mei_cl_cb *cmpl_list); |
| 226 | int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); | 223 | int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking); |
| @@ -229,19 +226,18 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 229 | 226 | ||
| 230 | void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); | 227 | void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb); |
| 231 | 228 | ||
| 232 | void mei_host_client_init(struct work_struct *work); | 229 | void mei_host_client_init(struct mei_device *dev); |
| 233 | 230 | ||
| 234 | u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop); | 231 | u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop); |
| 235 | enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request); | 232 | enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request); |
| 236 | int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request); | 233 | int mei_cl_notify_request(struct mei_cl *cl, |
| 234 | const struct file *file, u8 request); | ||
| 237 | int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, | 235 | int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, |
| 238 | struct mei_cl_cb *cmpl_list); | 236 | struct mei_cl_cb *cmpl_list); |
| 239 | int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev); | 237 | int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev); |
| 240 | void mei_cl_notify(struct mei_cl *cl); | 238 | void mei_cl_notify(struct mei_cl *cl); |
| 241 | 239 | ||
| 242 | void mei_cl_all_disconnect(struct mei_device *dev); | 240 | void mei_cl_all_disconnect(struct mei_device *dev); |
| 243 | void mei_cl_all_wakeup(struct mei_device *dev); | ||
| 244 | void mei_cl_all_write_clear(struct mei_device *dev); | ||
| 245 | 241 | ||
| 246 | #define MEI_CL_FMT "cl:host=%02d me=%02d " | 242 | #define MEI_CL_FMT "cl:host=%02d me=%02d " |
| 247 | #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl) | 243 | #define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl) |
| @@ -249,6 +245,9 @@ void mei_cl_all_write_clear(struct mei_device *dev); | |||
| 249 | #define cl_dbg(dev, cl, format, arg...) \ | 245 | #define cl_dbg(dev, cl, format, arg...) \ |
| 250 | dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) | 246 | dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) |
| 251 | 247 | ||
| 248 | #define cl_warn(dev, cl, format, arg...) \ | ||
| 249 | dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) | ||
| 250 | |||
| 252 | #define cl_err(dev, cl, format, arg...) \ | 251 | #define cl_err(dev, cl, format, arg...) \ |
| 253 | dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) | 252 | dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg) |
| 254 | 253 | ||
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index a138d8a27ab5..c6c051b52f55 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c | |||
| @@ -50,6 +50,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf, | |||
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | pos += scnprintf(buf + pos, bufsz - pos, HDR); | 52 | pos += scnprintf(buf + pos, bufsz - pos, HDR); |
| 53 | #undef HDR | ||
| 53 | 54 | ||
| 54 | /* if the driver is not enabled the list won't be consistent */ | 55 | /* if the driver is not enabled the list won't be consistent */ |
| 55 | if (dev->dev_state != MEI_DEV_ENABLED) | 56 | if (dev->dev_state != MEI_DEV_ENABLED) |
| @@ -90,23 +91,37 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf, | |||
| 90 | { | 91 | { |
| 91 | struct mei_device *dev = fp->private_data; | 92 | struct mei_device *dev = fp->private_data; |
| 92 | struct mei_cl *cl; | 93 | struct mei_cl *cl; |
| 93 | const size_t bufsz = 1024; | 94 | size_t bufsz = 1; |
| 94 | char *buf; | 95 | char *buf; |
| 95 | int i = 0; | 96 | int i = 0; |
| 96 | int pos = 0; | 97 | int pos = 0; |
| 97 | int ret; | 98 | int ret; |
| 98 | 99 | ||
| 100 | #define HDR " |me|host|state|rd|wr|\n" | ||
| 101 | |||
| 99 | if (!dev) | 102 | if (!dev) |
| 100 | return -ENODEV; | 103 | return -ENODEV; |
| 101 | 104 | ||
| 105 | mutex_lock(&dev->device_lock); | ||
| 106 | |||
| 107 | /* | ||
| 108 | * if the driver is not enabled the list won't be consistent, | ||
| 109 | * we output empty table | ||
| 110 | */ | ||
| 111 | if (dev->dev_state == MEI_DEV_ENABLED) | ||
| 112 | list_for_each_entry(cl, &dev->file_list, link) | ||
| 113 | bufsz++; | ||
| 114 | |||
| 115 | bufsz *= sizeof(HDR) + 1; | ||
| 116 | |||
| 102 | buf = kzalloc(bufsz, GFP_KERNEL); | 117 | buf = kzalloc(bufsz, GFP_KERNEL); |
| 103 | if (!buf) | 118 | if (!buf) { |
| 119 | mutex_unlock(&dev->device_lock); | ||
| 104 | return -ENOMEM; | 120 | return -ENOMEM; |
| 121 | } | ||
| 105 | 122 | ||
| 106 | pos += scnprintf(buf + pos, bufsz - pos, | 123 | pos += scnprintf(buf + pos, bufsz - pos, HDR); |
| 107 | " |me|host|state|rd|wr|\n"); | 124 | #undef HDR |
| 108 | |||
| 109 | mutex_lock(&dev->device_lock); | ||
| 110 | 125 | ||
| 111 | /* if the driver is not enabled the list won't be consistent */ | 126 | /* if the driver is not enabled the list won't be consistent */ |
| 112 | if (dev->dev_state != MEI_DEV_ENABLED) | 127 | if (dev->dev_state != MEI_DEV_ENABLED) |
| @@ -115,7 +130,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf, | |||
| 115 | list_for_each_entry(cl, &dev->file_list, link) { | 130 | list_for_each_entry(cl, &dev->file_list, link) { |
| 116 | 131 | ||
| 117 | pos += scnprintf(buf + pos, bufsz - pos, | 132 | pos += scnprintf(buf + pos, bufsz - pos, |
| 118 | "%2d|%2d|%4d|%5d|%2d|%2d|\n", | 133 | "%3d|%2d|%4d|%5d|%2d|%2d|\n", |
| 119 | i, mei_cl_me_id(cl), cl->host_client_id, cl->state, | 134 | i, mei_cl_me_id(cl), cl->host_client_id, cl->state, |
| 120 | !list_empty(&cl->rd_completed), cl->writing_state); | 135 | !list_empty(&cl->rd_completed), cl->writing_state); |
| 121 | i++; | 136 | i++; |
| @@ -150,16 +165,21 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf, | |||
| 150 | pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n", | 165 | pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n", |
| 151 | mei_hbm_state_str(dev->hbm_state)); | 166 | mei_hbm_state_str(dev->hbm_state)); |
| 152 | 167 | ||
| 153 | if (dev->hbm_state == MEI_HBM_STARTED) { | 168 | if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS && |
| 169 | dev->hbm_state <= MEI_HBM_STARTED) { | ||
| 154 | pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n"); | 170 | pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n"); |
| 155 | pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n", | 171 | pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n", |
| 156 | dev->hbm_f_pg_supported); | 172 | dev->hbm_f_pg_supported); |
| 157 | pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n", | 173 | pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n", |
| 158 | dev->hbm_f_dc_supported); | 174 | dev->hbm_f_dc_supported); |
| 175 | pos += scnprintf(buf + pos, bufsz - pos, "\tIE: %01d\n", | ||
| 176 | dev->hbm_f_ie_supported); | ||
| 159 | pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n", | 177 | pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n", |
| 160 | dev->hbm_f_dot_supported); | 178 | dev->hbm_f_dot_supported); |
| 161 | pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n", | 179 | pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n", |
| 162 | dev->hbm_f_ev_supported); | 180 | dev->hbm_f_ev_supported); |
| 181 | pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n", | ||
| 182 | dev->hbm_f_fa_supported); | ||
| 163 | } | 183 | } |
| 164 | 184 | ||
| 165 | pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", | 185 | pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", |
| @@ -175,6 +195,30 @@ static const struct file_operations mei_dbgfs_fops_devstate = { | |||
| 175 | .llseek = generic_file_llseek, | 195 | .llseek = generic_file_llseek, |
| 176 | }; | 196 | }; |
| 177 | 197 | ||
| 198 | static ssize_t mei_dbgfs_write_allow_fa(struct file *file, | ||
| 199 | const char __user *user_buf, | ||
| 200 | size_t count, loff_t *ppos) | ||
| 201 | { | ||
| 202 | struct mei_device *dev; | ||
| 203 | int ret; | ||
| 204 | |||
| 205 | dev = container_of(file->private_data, | ||
| 206 | struct mei_device, allow_fixed_address); | ||
| 207 | |||
| 208 | ret = debugfs_write_file_bool(file, user_buf, count, ppos); | ||
| 209 | if (ret < 0) | ||
| 210 | return ret; | ||
| 211 | dev->override_fixed_address = true; | ||
| 212 | return ret; | ||
| 213 | } | ||
| 214 | |||
| 215 | static const struct file_operations mei_dbgfs_fops_allow_fa = { | ||
| 216 | .open = simple_open, | ||
| 217 | .read = debugfs_read_file_bool, | ||
| 218 | .write = mei_dbgfs_write_allow_fa, | ||
| 219 | .llseek = generic_file_llseek, | ||
| 220 | }; | ||
| 221 | |||
| 178 | /** | 222 | /** |
| 179 | * mei_dbgfs_deregister - Remove the debugfs files and directories | 223 | * mei_dbgfs_deregister - Remove the debugfs files and directories |
| 180 | * | 224 | * |
| @@ -224,8 +268,9 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name) | |||
| 224 | dev_err(dev->dev, "devstate: registration failed\n"); | 268 | dev_err(dev->dev, "devstate: registration failed\n"); |
| 225 | goto err; | 269 | goto err; |
| 226 | } | 270 | } |
| 227 | f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir, | 271 | f = debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir, |
| 228 | &dev->allow_fixed_address); | 272 | &dev->allow_fixed_address, |
| 273 | &mei_dbgfs_fops_allow_fa); | ||
| 229 | if (!f) { | 274 | if (!f) { |
| 230 | dev_err(dev->dev, "allow_fixed_address: registration failed\n"); | 275 | dev_err(dev->dev, "allow_fixed_address: registration failed\n"); |
| 231 | goto err; | 276 | goto err; |
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index e7b7aad0999b..5e305d2605f3 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
| @@ -301,7 +301,10 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) | |||
| 301 | enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; | 301 | enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data; |
| 302 | memset(enum_req, 0, len); | 302 | memset(enum_req, 0, len); |
| 303 | enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; | 303 | enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; |
| 304 | enum_req->allow_add = dev->hbm_f_dc_supported; | 304 | enum_req->flags |= dev->hbm_f_dc_supported ? |
| 305 | MEI_HBM_ENUM_F_ALLOW_ADD : 0; | ||
| 306 | enum_req->flags |= dev->hbm_f_ie_supported ? | ||
| 307 | MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0; | ||
| 305 | 308 | ||
| 306 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); | 309 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
| 307 | if (ret) { | 310 | if (ret) { |
| @@ -401,6 +404,9 @@ static int mei_hbm_fw_add_cl_req(struct mei_device *dev, | |||
| 401 | if (ret) | 404 | if (ret) |
| 402 | status = !MEI_HBMS_SUCCESS; | 405 | status = !MEI_HBMS_SUCCESS; |
| 403 | 406 | ||
| 407 | if (dev->dev_state == MEI_DEV_ENABLED) | ||
| 408 | schedule_work(&dev->bus_rescan_work); | ||
| 409 | |||
| 404 | return mei_hbm_add_cl_resp(dev, req->me_addr, status); | 410 | return mei_hbm_add_cl_resp(dev, req->me_addr, status); |
| 405 | } | 411 | } |
| 406 | 412 | ||
| @@ -543,7 +549,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
| 543 | /* We got all client properties */ | 549 | /* We got all client properties */ |
| 544 | if (next_client_index == MEI_CLIENTS_MAX) { | 550 | if (next_client_index == MEI_CLIENTS_MAX) { |
| 545 | dev->hbm_state = MEI_HBM_STARTED; | 551 | dev->hbm_state = MEI_HBM_STARTED; |
| 546 | schedule_work(&dev->init_work); | 552 | mei_host_client_init(dev); |
| 547 | 553 | ||
| 548 | return 0; | 554 | return 0; |
| 549 | } | 555 | } |
| @@ -789,8 +795,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl, | |||
| 789 | cl->state = MEI_FILE_CONNECTED; | 795 | cl->state = MEI_FILE_CONNECTED; |
| 790 | else { | 796 | else { |
| 791 | cl->state = MEI_FILE_DISCONNECT_REPLY; | 797 | cl->state = MEI_FILE_DISCONNECT_REPLY; |
| 792 | if (rs->status == MEI_CL_CONN_NOT_FOUND) | 798 | if (rs->status == MEI_CL_CONN_NOT_FOUND) { |
| 793 | mei_me_cl_del(dev, cl->me_cl); | 799 | mei_me_cl_del(dev, cl->me_cl); |
| 800 | if (dev->dev_state == MEI_DEV_ENABLED) | ||
| 801 | schedule_work(&dev->bus_rescan_work); | ||
| 802 | } | ||
| 794 | } | 803 | } |
| 795 | cl->status = mei_cl_conn_status_to_errno(rs->status); | 804 | cl->status = mei_cl_conn_status_to_errno(rs->status); |
| 796 | } | 805 | } |
| @@ -866,7 +875,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev, | |||
| 866 | 875 | ||
| 867 | cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req); | 876 | cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req); |
| 868 | if (cl) { | 877 | if (cl) { |
| 869 | cl_dbg(dev, cl, "fw disconnect request received\n"); | 878 | cl_warn(dev, cl, "fw disconnect request received\n"); |
| 870 | cl->state = MEI_FILE_DISCONNECTING; | 879 | cl->state = MEI_FILE_DISCONNECTING; |
| 871 | cl->timer_count = 0; | 880 | cl->timer_count = 0; |
| 872 | 881 | ||
| @@ -972,6 +981,9 @@ static void mei_hbm_config_features(struct mei_device *dev) | |||
| 972 | if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) | 981 | if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) |
| 973 | dev->hbm_f_dc_supported = 1; | 982 | dev->hbm_f_dc_supported = 1; |
| 974 | 983 | ||
| 984 | if (dev->version.major_version >= HBM_MAJOR_VERSION_IE) | ||
| 985 | dev->hbm_f_ie_supported = 1; | ||
| 986 | |||
| 975 | /* disconnect on connect timeout instead of link reset */ | 987 | /* disconnect on connect timeout instead of link reset */ |
| 976 | if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) | 988 | if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) |
| 977 | dev->hbm_f_dot_supported = 1; | 989 | dev->hbm_f_dot_supported = 1; |
| @@ -979,6 +991,10 @@ static void mei_hbm_config_features(struct mei_device *dev) | |||
| 979 | /* Notification Event Support */ | 991 | /* Notification Event Support */ |
| 980 | if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) | 992 | if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) |
| 981 | dev->hbm_f_ev_supported = 1; | 993 | dev->hbm_f_ev_supported = 1; |
| 994 | |||
| 995 | /* Fixed Address Client Support */ | ||
| 996 | if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) | ||
| 997 | dev->hbm_f_fa_supported = 1; | ||
| 982 | } | 998 | } |
| 983 | 999 | ||
| 984 | /** | 1000 | /** |
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 25b1997a62cb..e2fb44cc5c37 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c | |||
| @@ -189,8 +189,11 @@ static int mei_me_fw_status(struct mei_device *dev, | |||
| 189 | 189 | ||
| 190 | fw_status->count = fw_src->count; | 190 | fw_status->count = fw_src->count; |
| 191 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { | 191 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { |
| 192 | ret = pci_read_config_dword(pdev, | 192 | ret = pci_read_config_dword(pdev, fw_src->status[i], |
| 193 | fw_src->status[i], &fw_status->status[i]); | 193 | &fw_status->status[i]); |
| 194 | trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X", | ||
| 195 | fw_src->status[i], | ||
| 196 | fw_status->status[i]); | ||
| 194 | if (ret) | 197 | if (ret) |
| 195 | return ret; | 198 | return ret; |
| 196 | } | 199 | } |
| @@ -215,6 +218,7 @@ static void mei_me_hw_config(struct mei_device *dev) | |||
| 215 | 218 | ||
| 216 | reg = 0; | 219 | reg = 0; |
| 217 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); | 220 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); |
| 221 | trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); | ||
| 218 | hw->d0i3_supported = | 222 | hw->d0i3_supported = |
| 219 | ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK); | 223 | ((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK); |
| 220 | 224 | ||
| @@ -1248,6 +1252,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev) | |||
| 1248 | u32 reg; | 1252 | u32 reg; |
| 1249 | 1253 | ||
| 1250 | pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); | 1254 | pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); |
| 1255 | trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg); | ||
| 1251 | /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ | 1256 | /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ |
| 1252 | return (reg & 0x600) == 0x200; | 1257 | return (reg & 0x600) == 0x200; |
| 1253 | } | 1258 | } |
| @@ -1260,6 +1265,7 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev) | |||
| 1260 | u32 reg; | 1265 | u32 reg; |
| 1261 | /* Read ME FW Status check for SPS Firmware */ | 1266 | /* Read ME FW Status check for SPS Firmware */ |
| 1262 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); | 1267 | pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); |
| 1268 | trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg); | ||
| 1263 | /* if bits [19:16] = 15, running SPS Firmware */ | 1269 | /* if bits [19:16] = 15, running SPS Firmware */ |
| 1264 | return (reg & 0xf0000) == 0xf0000; | 1270 | return (reg & 0xf0000) == 0xf0000; |
| 1265 | } | 1271 | } |
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index bae680c648ff..4a6c1b85f11e 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c | |||
| @@ -28,6 +28,9 @@ | |||
| 28 | #include "client.h" | 28 | #include "client.h" |
| 29 | #include "hbm.h" | 29 | #include "hbm.h" |
| 30 | 30 | ||
| 31 | #include "mei-trace.h" | ||
| 32 | |||
| 33 | |||
| 31 | /** | 34 | /** |
| 32 | * mei_txe_reg_read - Reads 32bit data from the txe device | 35 | * mei_txe_reg_read - Reads 32bit data from the txe device |
| 33 | * | 36 | * |
| @@ -640,8 +643,11 @@ static int mei_txe_fw_status(struct mei_device *dev, | |||
| 640 | 643 | ||
| 641 | fw_status->count = fw_src->count; | 644 | fw_status->count = fw_src->count; |
| 642 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { | 645 | for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) { |
| 643 | ret = pci_read_config_dword(pdev, | 646 | ret = pci_read_config_dword(pdev, fw_src->status[i], |
| 644 | fw_src->status[i], &fw_status->status[i]); | 647 | &fw_status->status[i]); |
| 648 | trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X", | ||
| 649 | fw_src->status[i], | ||
| 650 | fw_status->status[i]); | ||
| 645 | if (ret) | 651 | if (ret) |
| 646 | return ret; | 652 | return ret; |
| 647 | } | 653 | } |
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 4cebde85924f..9daf3f9aed25 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h | |||
| @@ -29,7 +29,6 @@ | |||
| 29 | #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ | 29 | #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ |
| 30 | 30 | ||
| 31 | #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ | 31 | #define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */ |
| 32 | #define MEI_IAMTHIF_READ_TIMER 10 /* HPS */ | ||
| 33 | 32 | ||
| 34 | #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ | 33 | #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ |
| 35 | #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ | 34 | #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ |
| @@ -54,6 +53,12 @@ | |||
| 54 | #define HBM_MAJOR_VERSION_DC 2 | 53 | #define HBM_MAJOR_VERSION_DC 2 |
| 55 | 54 | ||
| 56 | /* | 55 | /* |
| 56 | * MEI version with immediate reply to enum request support | ||
| 57 | */ | ||
| 58 | #define HBM_MINOR_VERSION_IE 0 | ||
| 59 | #define HBM_MAJOR_VERSION_IE 2 | ||
| 60 | |||
| 61 | /* | ||
| 57 | * MEI version with disconnect on connection timeout support | 62 | * MEI version with disconnect on connection timeout support |
| 58 | */ | 63 | */ |
| 59 | #define HBM_MINOR_VERSION_DOT 0 | 64 | #define HBM_MINOR_VERSION_DOT 0 |
| @@ -65,6 +70,12 @@ | |||
| 65 | #define HBM_MINOR_VERSION_EV 0 | 70 | #define HBM_MINOR_VERSION_EV 0 |
| 66 | #define HBM_MAJOR_VERSION_EV 2 | 71 | #define HBM_MAJOR_VERSION_EV 2 |
| 67 | 72 | ||
| 73 | /* | ||
| 74 | * MEI version with fixed address client support | ||
| 75 | */ | ||
| 76 | #define HBM_MINOR_VERSION_FA 0 | ||
| 77 | #define HBM_MAJOR_VERSION_FA 2 | ||
| 78 | |||
| 68 | /* Host bus message command opcode */ | 79 | /* Host bus message command opcode */ |
| 69 | #define MEI_HBM_CMD_OP_MSK 0x7f | 80 | #define MEI_HBM_CMD_OP_MSK 0x7f |
| 70 | /* Host bus message command RESPONSE */ | 81 | /* Host bus message command RESPONSE */ |
| @@ -241,15 +252,26 @@ struct hbm_me_stop_request { | |||
| 241 | } __packed; | 252 | } __packed; |
| 242 | 253 | ||
| 243 | /** | 254 | /** |
| 244 | * struct hbm_host_enum_request - enumeration request from host to fw | 255 | * enum hbm_host_enum_flags - enumeration request flags (HBM version >= 2.0) |
| 245 | * | 256 | * |
| 246 | * @hbm_cmd: bus message command header | 257 | * @MEI_HBM_ENUM_F_ALLOW_ADD: allow dynamic clients add |
| 247 | * @allow_add: allow dynamic clients add HBM version >= 2.0 | 258 | * @MEI_HBM_ENUM_F_IMMEDIATE_ENUM: allow FW to send answer immediately |
| 259 | */ | ||
| 260 | enum hbm_host_enum_flags { | ||
| 261 | MEI_HBM_ENUM_F_ALLOW_ADD = BIT(0), | ||
| 262 | MEI_HBM_ENUM_F_IMMEDIATE_ENUM = BIT(1), | ||
| 263 | }; | ||
| 264 | |||
| 265 | /** | ||
| 266 | * struct hbm_host_enum_request - enumeration request from host to fw | ||
| 267 | * | ||
| 268 | * @hbm_cmd : bus message command header | ||
| 269 | * @flags : request flags | ||
| 248 | * @reserved: reserved | 270 | * @reserved: reserved |
| 249 | */ | 271 | */ |
| 250 | struct hbm_host_enum_request { | 272 | struct hbm_host_enum_request { |
| 251 | u8 hbm_cmd; | 273 | u8 hbm_cmd; |
| 252 | u8 allow_add; | 274 | u8 flags; |
| 253 | u8 reserved[2]; | 275 | u8 reserved[2]; |
| 254 | } __packed; | 276 | } __packed; |
| 255 | 277 | ||
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 3edafc8d3ad4..f7c8dfdb6a12 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
| @@ -91,8 +91,8 @@ EXPORT_SYMBOL_GPL(mei_fw_status2str); | |||
| 91 | */ | 91 | */ |
| 92 | void mei_cancel_work(struct mei_device *dev) | 92 | void mei_cancel_work(struct mei_device *dev) |
| 93 | { | 93 | { |
| 94 | cancel_work_sync(&dev->init_work); | ||
| 95 | cancel_work_sync(&dev->reset_work); | 94 | cancel_work_sync(&dev->reset_work); |
| 95 | cancel_work_sync(&dev->bus_rescan_work); | ||
| 96 | 96 | ||
| 97 | cancel_delayed_work(&dev->timer_work); | 97 | cancel_delayed_work(&dev->timer_work); |
| 98 | } | 98 | } |
| @@ -148,16 +148,10 @@ int mei_reset(struct mei_device *dev) | |||
| 148 | state != MEI_DEV_POWER_UP) { | 148 | state != MEI_DEV_POWER_UP) { |
| 149 | 149 | ||
| 150 | /* remove all waiting requests */ | 150 | /* remove all waiting requests */ |
| 151 | mei_cl_all_write_clear(dev); | ||
| 152 | |||
| 153 | mei_cl_all_disconnect(dev); | 151 | mei_cl_all_disconnect(dev); |
| 154 | 152 | ||
| 155 | /* wake up all readers and writers so they can be interrupted */ | ||
| 156 | mei_cl_all_wakeup(dev); | ||
| 157 | |||
| 158 | /* remove entry if already in list */ | 153 | /* remove entry if already in list */ |
| 159 | dev_dbg(dev->dev, "remove iamthif and wd from the file list.\n"); | 154 | dev_dbg(dev->dev, "remove iamthif from the file list.\n"); |
| 160 | mei_cl_unlink(&dev->wd_cl); | ||
| 161 | mei_cl_unlink(&dev->iamthif_cl); | 155 | mei_cl_unlink(&dev->iamthif_cl); |
| 162 | mei_amthif_reset_params(dev); | 156 | mei_amthif_reset_params(dev); |
| 163 | } | 157 | } |
| @@ -165,7 +159,6 @@ int mei_reset(struct mei_device *dev) | |||
| 165 | mei_hbm_reset(dev); | 159 | mei_hbm_reset(dev); |
| 166 | 160 | ||
| 167 | dev->rd_msg_hdr = 0; | 161 | dev->rd_msg_hdr = 0; |
| 168 | dev->wd_pending = false; | ||
| 169 | 162 | ||
| 170 | if (ret) { | 163 | if (ret) { |
| 171 | dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); | 164 | dev_err(dev->dev, "hw_reset failed ret = %d\n", ret); |
| @@ -335,16 +328,12 @@ void mei_stop(struct mei_device *dev) | |||
| 335 | 328 | ||
| 336 | mutex_lock(&dev->device_lock); | 329 | mutex_lock(&dev->device_lock); |
| 337 | 330 | ||
| 338 | mei_wd_stop(dev); | ||
| 339 | |||
| 340 | dev->dev_state = MEI_DEV_POWER_DOWN; | 331 | dev->dev_state = MEI_DEV_POWER_DOWN; |
| 341 | mei_reset(dev); | 332 | mei_reset(dev); |
| 342 | /* move device to disabled state unconditionally */ | 333 | /* move device to disabled state unconditionally */ |
| 343 | dev->dev_state = MEI_DEV_DISABLED; | 334 | dev->dev_state = MEI_DEV_DISABLED; |
| 344 | 335 | ||
| 345 | mutex_unlock(&dev->device_lock); | 336 | mutex_unlock(&dev->device_lock); |
| 346 | |||
| 347 | mei_watchdog_unregister(dev); | ||
| 348 | } | 337 | } |
| 349 | EXPORT_SYMBOL_GPL(mei_stop); | 338 | EXPORT_SYMBOL_GPL(mei_stop); |
| 350 | 339 | ||
| @@ -394,7 +383,6 @@ void mei_device_init(struct mei_device *dev, | |||
| 394 | init_waitqueue_head(&dev->wait_hw_ready); | 383 | init_waitqueue_head(&dev->wait_hw_ready); |
| 395 | init_waitqueue_head(&dev->wait_pg); | 384 | init_waitqueue_head(&dev->wait_pg); |
| 396 | init_waitqueue_head(&dev->wait_hbm_start); | 385 | init_waitqueue_head(&dev->wait_hbm_start); |
| 397 | init_waitqueue_head(&dev->wait_stop_wd); | ||
| 398 | dev->dev_state = MEI_DEV_INITIALIZING; | 386 | dev->dev_state = MEI_DEV_INITIALIZING; |
| 399 | dev->reset_count = 0; | 387 | dev->reset_count = 0; |
| 400 | 388 | ||
| @@ -404,13 +392,11 @@ void mei_device_init(struct mei_device *dev, | |||
| 404 | mei_io_list_init(&dev->ctrl_rd_list); | 392 | mei_io_list_init(&dev->ctrl_rd_list); |
| 405 | 393 | ||
| 406 | INIT_DELAYED_WORK(&dev->timer_work, mei_timer); | 394 | INIT_DELAYED_WORK(&dev->timer_work, mei_timer); |
| 407 | INIT_WORK(&dev->init_work, mei_host_client_init); | ||
| 408 | INIT_WORK(&dev->reset_work, mei_reset_work); | 395 | INIT_WORK(&dev->reset_work, mei_reset_work); |
| 396 | INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work); | ||
| 409 | 397 | ||
| 410 | INIT_LIST_HEAD(&dev->wd_cl.link); | ||
| 411 | INIT_LIST_HEAD(&dev->iamthif_cl.link); | 398 | INIT_LIST_HEAD(&dev->iamthif_cl.link); |
| 412 | mei_io_list_init(&dev->amthif_cmd_list); | 399 | mei_io_list_init(&dev->amthif_cmd_list); |
| 413 | mei_io_list_init(&dev->amthif_rd_complete_list); | ||
| 414 | 400 | ||
| 415 | bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); | 401 | bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); |
| 416 | dev->open_handle_count = 0; | 402 | dev->open_handle_count = 0; |
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 64b568a0268d..1e5cb1f704f8 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c | |||
| @@ -48,7 +48,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list) | |||
| 48 | 48 | ||
| 49 | dev_dbg(dev->dev, "completing call back.\n"); | 49 | dev_dbg(dev->dev, "completing call back.\n"); |
| 50 | if (cl == &dev->iamthif_cl) | 50 | if (cl == &dev->iamthif_cl) |
| 51 | mei_amthif_complete(dev, cb); | 51 | mei_amthif_complete(cl, cb); |
| 52 | else | 52 | else |
| 53 | mei_cl_complete(cl, cb); | 53 | mei_cl_complete(cl, cb); |
| 54 | } | 54 | } |
| @@ -104,6 +104,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl, | |||
| 104 | struct mei_device *dev = cl->dev; | 104 | struct mei_device *dev = cl->dev; |
| 105 | struct mei_cl_cb *cb; | 105 | struct mei_cl_cb *cb; |
| 106 | unsigned char *buffer = NULL; | 106 | unsigned char *buffer = NULL; |
| 107 | size_t buf_sz; | ||
| 107 | 108 | ||
| 108 | cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); | 109 | cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); |
| 109 | if (!cb) { | 110 | if (!cb) { |
| @@ -124,11 +125,21 @@ int mei_cl_irq_read_msg(struct mei_cl *cl, | |||
| 124 | goto out; | 125 | goto out; |
| 125 | } | 126 | } |
| 126 | 127 | ||
| 127 | if (cb->buf.size < mei_hdr->length + cb->buf_idx) { | 128 | buf_sz = mei_hdr->length + cb->buf_idx; |
| 128 | cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n", | 129 | /* catch for integer overflow */ |
| 130 | if (buf_sz < cb->buf_idx) { | ||
| 131 | cl_err(dev, cl, "message is too big len %d idx %zu\n", | ||
| 132 | mei_hdr->length, cb->buf_idx); | ||
| 133 | |||
| 134 | list_move_tail(&cb->list, &complete_list->list); | ||
| 135 | cb->status = -EMSGSIZE; | ||
| 136 | goto out; | ||
| 137 | } | ||
| 138 | |||
| 139 | if (cb->buf.size < buf_sz) { | ||
| 140 | cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n", | ||
| 129 | cb->buf.size, mei_hdr->length, cb->buf_idx); | 141 | cb->buf.size, mei_hdr->length, cb->buf_idx); |
| 130 | buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx, | 142 | buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL); |
| 131 | GFP_KERNEL); | ||
| 132 | 143 | ||
| 133 | if (!buffer) { | 144 | if (!buffer) { |
| 134 | cb->status = -ENOMEM; | 145 | cb->status = -ENOMEM; |
| @@ -136,7 +147,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl, | |||
| 136 | goto out; | 147 | goto out; |
| 137 | } | 148 | } |
| 138 | cb->buf.data = buffer; | 149 | cb->buf.data = buffer; |
| 139 | cb->buf.size = mei_hdr->length + cb->buf_idx; | 150 | cb->buf.size = buf_sz; |
| 140 | } | 151 | } |
| 141 | 152 | ||
| 142 | buffer = cb->buf.data + cb->buf_idx; | 153 | buffer = cb->buf.data + cb->buf_idx; |
| @@ -145,8 +156,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl, | |||
| 145 | cb->buf_idx += mei_hdr->length; | 156 | cb->buf_idx += mei_hdr->length; |
| 146 | 157 | ||
| 147 | if (mei_hdr->msg_complete) { | 158 | if (mei_hdr->msg_complete) { |
| 148 | cb->read_time = jiffies; | 159 | cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx); |
| 149 | cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx); | ||
| 150 | list_move_tail(&cb->list, &complete_list->list); | 160 | list_move_tail(&cb->list, &complete_list->list); |
| 151 | } else { | 161 | } else { |
| 152 | pm_runtime_mark_last_busy(dev->dev); | 162 | pm_runtime_mark_last_busy(dev->dev); |
| @@ -229,6 +239,16 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 229 | return 0; | 239 | return 0; |
| 230 | } | 240 | } |
| 231 | 241 | ||
| 242 | static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr) | ||
| 243 | { | ||
| 244 | return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0; | ||
| 245 | } | ||
| 246 | |||
| 247 | static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr) | ||
| 248 | { | ||
| 249 | return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0; | ||
| 250 | } | ||
| 251 | |||
| 232 | /** | 252 | /** |
| 233 | * mei_irq_read_handler - bottom half read routine after ISR to | 253 | * mei_irq_read_handler - bottom half read routine after ISR to |
| 234 | * handle the read processing. | 254 | * handle the read processing. |
| @@ -270,7 +290,7 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
| 270 | } | 290 | } |
| 271 | 291 | ||
| 272 | /* HBM message */ | 292 | /* HBM message */ |
| 273 | if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { | 293 | if (hdr_is_hbm(mei_hdr)) { |
| 274 | ret = mei_hbm_dispatch(dev, mei_hdr); | 294 | ret = mei_hbm_dispatch(dev, mei_hdr); |
| 275 | if (ret) { | 295 | if (ret) { |
| 276 | dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n", | 296 | dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n", |
| @@ -290,6 +310,14 @@ int mei_irq_read_handler(struct mei_device *dev, | |||
| 290 | 310 | ||
| 291 | /* if no recipient cl was found we assume corrupted header */ | 311 | /* if no recipient cl was found we assume corrupted header */ |
| 292 | if (&cl->link == &dev->file_list) { | 312 | if (&cl->link == &dev->file_list) { |
| 313 | /* A message for not connected fixed address clients | ||
| 314 | * should be silently discarded | ||
| 315 | */ | ||
| 316 | if (hdr_is_fixed(mei_hdr)) { | ||
| 317 | mei_irq_discard_msg(dev, mei_hdr); | ||
| 318 | ret = 0; | ||
| 319 | goto reset_slots; | ||
| 320 | } | ||
| 293 | dev_err(dev->dev, "no destination client found 0x%08X\n", | 321 | dev_err(dev->dev, "no destination client found 0x%08X\n", |
| 294 | dev->rd_msg_hdr); | 322 | dev->rd_msg_hdr); |
| 295 | ret = -EBADMSG; | 323 | ret = -EBADMSG; |
| @@ -360,21 +388,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list) | |||
| 360 | list_move_tail(&cb->list, &cmpl_list->list); | 388 | list_move_tail(&cb->list, &cmpl_list->list); |
| 361 | } | 389 | } |
| 362 | 390 | ||
| 363 | if (dev->wd_state == MEI_WD_STOPPING) { | ||
| 364 | dev->wd_state = MEI_WD_IDLE; | ||
| 365 | wake_up(&dev->wait_stop_wd); | ||
| 366 | } | ||
| 367 | |||
| 368 | if (mei_cl_is_connected(&dev->wd_cl)) { | ||
| 369 | if (dev->wd_pending && | ||
| 370 | mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) { | ||
| 371 | ret = mei_wd_send(dev); | ||
| 372 | if (ret) | ||
| 373 | return ret; | ||
| 374 | dev->wd_pending = false; | ||
| 375 | } | ||
| 376 | } | ||
| 377 | |||
| 378 | /* complete control write list CB */ | 391 | /* complete control write list CB */ |
| 379 | dev_dbg(dev->dev, "complete control write list cb.\n"); | 392 | dev_dbg(dev->dev, "complete control write list cb.\n"); |
| 380 | list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { | 393 | list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { |
| @@ -462,7 +475,6 @@ static void mei_connect_timeout(struct mei_cl *cl) | |||
| 462 | */ | 475 | */ |
| 463 | void mei_timer(struct work_struct *work) | 476 | void mei_timer(struct work_struct *work) |
| 464 | { | 477 | { |
| 465 | unsigned long timeout; | ||
| 466 | struct mei_cl *cl; | 478 | struct mei_cl *cl; |
| 467 | 479 | ||
| 468 | struct mei_device *dev = container_of(work, | 480 | struct mei_device *dev = container_of(work, |
| @@ -508,45 +520,15 @@ void mei_timer(struct work_struct *work) | |||
| 508 | mei_reset(dev); | 520 | mei_reset(dev); |
| 509 | dev->iamthif_canceled = false; | 521 | dev->iamthif_canceled = false; |
| 510 | dev->iamthif_state = MEI_IAMTHIF_IDLE; | 522 | dev->iamthif_state = MEI_IAMTHIF_IDLE; |
| 511 | dev->iamthif_timer = 0; | ||
| 512 | 523 | ||
| 513 | mei_io_cb_free(dev->iamthif_current_cb); | 524 | mei_io_cb_free(dev->iamthif_current_cb); |
| 514 | dev->iamthif_current_cb = NULL; | 525 | dev->iamthif_current_cb = NULL; |
| 515 | 526 | ||
| 516 | dev->iamthif_file_object = NULL; | 527 | dev->iamthif_fp = NULL; |
| 517 | mei_amthif_run_next_cmd(dev); | 528 | mei_amthif_run_next_cmd(dev); |
| 518 | } | 529 | } |
| 519 | } | 530 | } |
| 520 | 531 | ||
| 521 | if (dev->iamthif_timer) { | ||
| 522 | |||
| 523 | timeout = dev->iamthif_timer + | ||
| 524 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); | ||
| 525 | |||
| 526 | dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n", | ||
| 527 | dev->iamthif_timer); | ||
| 528 | dev_dbg(dev->dev, "timeout = %ld\n", timeout); | ||
| 529 | dev_dbg(dev->dev, "jiffies = %ld\n", jiffies); | ||
| 530 | if (time_after(jiffies, timeout)) { | ||
| 531 | /* | ||
| 532 | * User didn't read the AMTHI data on time (15sec) | ||
| 533 | * freeing AMTHI for other requests | ||
| 534 | */ | ||
| 535 | |||
| 536 | dev_dbg(dev->dev, "freeing AMTHI for other requests\n"); | ||
| 537 | |||
| 538 | mei_io_list_flush(&dev->amthif_rd_complete_list, | ||
| 539 | &dev->iamthif_cl); | ||
| 540 | mei_io_cb_free(dev->iamthif_current_cb); | ||
| 541 | dev->iamthif_current_cb = NULL; | ||
| 542 | |||
| 543 | dev->iamthif_file_object->private_data = NULL; | ||
| 544 | dev->iamthif_file_object = NULL; | ||
| 545 | dev->iamthif_timer = 0; | ||
| 546 | mei_amthif_run_next_cmd(dev); | ||
| 547 | |||
| 548 | } | ||
| 549 | } | ||
| 550 | out: | 532 | out: |
| 551 | if (dev->dev_state != MEI_DEV_DISABLED) | 533 | if (dev->dev_state != MEI_DEV_DISABLED) |
| 552 | schedule_delayed_work(&dev->timer_work, 2 * HZ); | 534 | schedule_delayed_work(&dev->timer_work, 2 * HZ); |
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 677d0362f334..52635b063873 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c | |||
| @@ -65,7 +65,7 @@ static int mei_open(struct inode *inode, struct file *file) | |||
| 65 | goto err_unlock; | 65 | goto err_unlock; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| 68 | cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY); | 68 | cl = mei_cl_alloc_linked(dev); |
| 69 | if (IS_ERR(cl)) { | 69 | if (IS_ERR(cl)) { |
| 70 | err = PTR_ERR(cl); | 70 | err = PTR_ERR(cl); |
| 71 | goto err_unlock; | 71 | goto err_unlock; |
| @@ -159,27 +159,22 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, | |||
| 159 | goto out; | 159 | goto out; |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | if (ubuf == NULL) { | ||
| 163 | rets = -EMSGSIZE; | ||
| 164 | goto out; | ||
| 165 | } | ||
| 166 | |||
| 162 | if (cl == &dev->iamthif_cl) { | 167 | if (cl == &dev->iamthif_cl) { |
| 163 | rets = mei_amthif_read(dev, file, ubuf, length, offset); | 168 | rets = mei_amthif_read(dev, file, ubuf, length, offset); |
| 164 | goto out; | 169 | goto out; |
| 165 | } | 170 | } |
| 166 | 171 | ||
| 167 | cb = mei_cl_read_cb(cl, file); | 172 | cb = mei_cl_read_cb(cl, file); |
| 168 | if (cb) { | 173 | if (cb) |
| 169 | /* read what left */ | 174 | goto copy_buffer; |
| 170 | if (cb->buf_idx > *offset) | 175 | |
| 171 | goto copy_buffer; | 176 | if (*offset > 0) |
| 172 | /* offset is beyond buf_idx we have no more data return 0 */ | ||
| 173 | if (cb->buf_idx > 0 && cb->buf_idx <= *offset) { | ||
| 174 | rets = 0; | ||
| 175 | goto free; | ||
| 176 | } | ||
| 177 | /* Offset needs to be cleaned for contiguous reads*/ | ||
| 178 | if (cb->buf_idx == 0 && *offset > 0) | ||
| 179 | *offset = 0; | ||
| 180 | } else if (*offset > 0) { | ||
| 181 | *offset = 0; | 177 | *offset = 0; |
| 182 | } | ||
| 183 | 178 | ||
| 184 | err = mei_cl_read_start(cl, length, file); | 179 | err = mei_cl_read_start(cl, length, file); |
| 185 | if (err && err != -EBUSY) { | 180 | if (err && err != -EBUSY) { |
| @@ -214,11 +209,6 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, | |||
| 214 | 209 | ||
| 215 | cb = mei_cl_read_cb(cl, file); | 210 | cb = mei_cl_read_cb(cl, file); |
| 216 | if (!cb) { | 211 | if (!cb) { |
| 217 | if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) { | ||
| 218 | cb = mei_cl_read_cb(cl, NULL); | ||
| 219 | if (cb) | ||
| 220 | goto copy_buffer; | ||
| 221 | } | ||
| 222 | rets = 0; | 212 | rets = 0; |
| 223 | goto out; | 213 | goto out; |
| 224 | } | 214 | } |
| @@ -231,10 +221,10 @@ copy_buffer: | |||
| 231 | goto free; | 221 | goto free; |
| 232 | } | 222 | } |
| 233 | 223 | ||
| 234 | cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n", | 224 | cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n", |
| 235 | cb->buf.size, cb->buf_idx); | 225 | cb->buf.size, cb->buf_idx, *offset); |
| 236 | if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) { | 226 | if (*offset >= cb->buf_idx) { |
| 237 | rets = -EMSGSIZE; | 227 | rets = 0; |
| 238 | goto free; | 228 | goto free; |
| 239 | } | 229 | } |
| 240 | 230 | ||
| @@ -250,11 +240,13 @@ copy_buffer: | |||
| 250 | 240 | ||
| 251 | rets = length; | 241 | rets = length; |
| 252 | *offset += length; | 242 | *offset += length; |
| 253 | if ((unsigned long)*offset < cb->buf_idx) | 243 | /* not all data was read, keep the cb */ |
| 244 | if (*offset < cb->buf_idx) | ||
| 254 | goto out; | 245 | goto out; |
| 255 | 246 | ||
| 256 | free: | 247 | free: |
| 257 | mei_io_cb_free(cb); | 248 | mei_io_cb_free(cb); |
| 249 | *offset = 0; | ||
| 258 | 250 | ||
| 259 | out: | 251 | out: |
| 260 | cl_dbg(dev, cl, "end mei read rets = %d\n", rets); | 252 | cl_dbg(dev, cl, "end mei read rets = %d\n", rets); |
| @@ -275,9 +267,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
| 275 | size_t length, loff_t *offset) | 267 | size_t length, loff_t *offset) |
| 276 | { | 268 | { |
| 277 | struct mei_cl *cl = file->private_data; | 269 | struct mei_cl *cl = file->private_data; |
| 278 | struct mei_cl_cb *write_cb = NULL; | 270 | struct mei_cl_cb *cb; |
| 279 | struct mei_device *dev; | 271 | struct mei_device *dev; |
| 280 | unsigned long timeout = 0; | ||
| 281 | int rets; | 272 | int rets; |
| 282 | 273 | ||
| 283 | if (WARN_ON(!cl || !cl->dev)) | 274 | if (WARN_ON(!cl || !cl->dev)) |
| @@ -313,52 +304,31 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, | |||
| 313 | goto out; | 304 | goto out; |
| 314 | } | 305 | } |
| 315 | 306 | ||
| 316 | if (cl == &dev->iamthif_cl) { | ||
| 317 | write_cb = mei_amthif_find_read_list_entry(dev, file); | ||
| 318 | |||
| 319 | if (write_cb) { | ||
| 320 | timeout = write_cb->read_time + | ||
| 321 | mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER); | ||
| 322 | |||
| 323 | if (time_after(jiffies, timeout)) { | ||
| 324 | *offset = 0; | ||
| 325 | mei_io_cb_free(write_cb); | ||
| 326 | write_cb = NULL; | ||
| 327 | } | ||
| 328 | } | ||
| 329 | } | ||
| 330 | |||
| 331 | *offset = 0; | 307 | *offset = 0; |
| 332 | write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); | 308 | cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file); |
| 333 | if (!write_cb) { | 309 | if (!cb) { |
| 334 | rets = -ENOMEM; | 310 | rets = -ENOMEM; |
| 335 | goto out; | 311 | goto out; |
| 336 | } | 312 | } |
| 337 | 313 | ||
| 338 | rets = copy_from_user(write_cb->buf.data, ubuf, length); | 314 | rets = copy_from_user(cb->buf.data, ubuf, length); |
| 339 | if (rets) { | 315 | if (rets) { |
| 340 | dev_dbg(dev->dev, "failed to copy data from userland\n"); | 316 | dev_dbg(dev->dev, "failed to copy data from userland\n"); |
| 341 | rets = -EFAULT; | 317 | rets = -EFAULT; |
| 318 | mei_io_cb_free(cb); | ||
| 342 | goto out; | 319 | goto out; |
| 343 | } | 320 | } |
| 344 | 321 | ||
| 345 | if (cl == &dev->iamthif_cl) { | 322 | if (cl == &dev->iamthif_cl) { |
| 346 | rets = mei_amthif_write(cl, write_cb); | 323 | rets = mei_amthif_write(cl, cb); |
| 347 | 324 | if (!rets) | |
| 348 | if (rets) { | 325 | rets = length; |
| 349 | dev_err(dev->dev, | 326 | goto out; |
| 350 | "amthif write failed with status = %d\n", rets); | ||
| 351 | goto out; | ||
| 352 | } | ||
| 353 | mutex_unlock(&dev->device_lock); | ||
| 354 | return length; | ||
| 355 | } | 327 | } |
| 356 | 328 | ||
| 357 | rets = mei_cl_write(cl, write_cb, false); | 329 | rets = mei_cl_write(cl, cb, false); |
| 358 | out: | 330 | out: |
| 359 | mutex_unlock(&dev->device_lock); | 331 | mutex_unlock(&dev->device_lock); |
| 360 | if (rets < 0) | ||
| 361 | mei_io_cb_free(write_cb); | ||
| 362 | return rets; | 332 | return rets; |
| 363 | } | 333 | } |
| 364 | 334 | ||
| @@ -393,12 +363,22 @@ static int mei_ioctl_connect_client(struct file *file, | |||
| 393 | 363 | ||
| 394 | /* find ME client we're trying to connect to */ | 364 | /* find ME client we're trying to connect to */ |
| 395 | me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); | 365 | me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); |
| 396 | if (!me_cl || | 366 | if (!me_cl) { |
| 397 | (me_cl->props.fixed_address && !dev->allow_fixed_address)) { | ||
| 398 | dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", | 367 | dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", |
| 399 | &data->in_client_uuid); | 368 | &data->in_client_uuid); |
| 400 | mei_me_cl_put(me_cl); | 369 | rets = -ENOTTY; |
| 401 | return -ENOTTY; | 370 | goto end; |
| 371 | } | ||
| 372 | |||
| 373 | if (me_cl->props.fixed_address) { | ||
| 374 | bool forbidden = dev->override_fixed_address ? | ||
| 375 | !dev->allow_fixed_address : !dev->hbm_f_fa_supported; | ||
| 376 | if (forbidden) { | ||
| 377 | dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n", | ||
| 378 | &data->in_client_uuid); | ||
| 379 | rets = -ENOTTY; | ||
| 380 | goto end; | ||
| 381 | } | ||
| 402 | } | 382 | } |
| 403 | 383 | ||
| 404 | dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", | 384 | dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", |
| @@ -454,11 +434,15 @@ end: | |||
| 454 | * | 434 | * |
| 455 | * Return: 0 on success , <0 on error | 435 | * Return: 0 on success , <0 on error |
| 456 | */ | 436 | */ |
| 457 | static int mei_ioctl_client_notify_request(struct file *file, u32 request) | 437 | static int mei_ioctl_client_notify_request(const struct file *file, u32 request) |
| 458 | { | 438 | { |
| 459 | struct mei_cl *cl = file->private_data; | 439 | struct mei_cl *cl = file->private_data; |
| 460 | 440 | ||
| 461 | return mei_cl_notify_request(cl, file, request); | 441 | if (request != MEI_HBM_NOTIFICATION_START && |
| 442 | request != MEI_HBM_NOTIFICATION_STOP) | ||
| 443 | return -EINVAL; | ||
| 444 | |||
| 445 | return mei_cl_notify_request(cl, file, (u8)request); | ||
| 462 | } | 446 | } |
| 463 | 447 | ||
| 464 | /** | 448 | /** |
| @@ -469,7 +453,7 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request) | |||
| 469 | * | 453 | * |
| 470 | * Return: 0 on success , <0 on error | 454 | * Return: 0 on success , <0 on error |
| 471 | */ | 455 | */ |
| 472 | static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get) | 456 | static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get) |
| 473 | { | 457 | { |
| 474 | struct mei_cl *cl = file->private_data; | 458 | struct mei_cl *cl = file->private_data; |
| 475 | bool notify_ev; | 459 | bool notify_ev; |
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c index 388efb519138..e19e6acb191b 100644 --- a/drivers/misc/mei/mei-trace.c +++ b/drivers/misc/mei/mei-trace.c | |||
| @@ -22,4 +22,6 @@ | |||
| 22 | 22 | ||
| 23 | EXPORT_TRACEPOINT_SYMBOL(mei_reg_read); | 23 | EXPORT_TRACEPOINT_SYMBOL(mei_reg_read); |
| 24 | EXPORT_TRACEPOINT_SYMBOL(mei_reg_write); | 24 | EXPORT_TRACEPOINT_SYMBOL(mei_reg_write); |
| 25 | EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read); | ||
| 26 | EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write); | ||
| 25 | #endif /* __CHECKER__ */ | 27 | #endif /* __CHECKER__ */ |
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h index 47e1bc6551d4..7d2d5d4a1624 100644 --- a/drivers/misc/mei/mei-trace.h +++ b/drivers/misc/mei/mei-trace.h | |||
| @@ -60,7 +60,45 @@ TRACE_EVENT(mei_reg_write, | |||
| 60 | __entry->offs = offs; | 60 | __entry->offs = offs; |
| 61 | __entry->val = val; | 61 | __entry->val = val; |
| 62 | ), | 62 | ), |
| 63 | TP_printk("[%s] write %s[%#x] = %#x)", | 63 | TP_printk("[%s] write %s[%#x] = %#x", |
| 64 | __get_str(dev), __entry->reg, __entry->offs, __entry->val) | ||
| 65 | ); | ||
| 66 | |||
| 67 | TRACE_EVENT(mei_pci_cfg_read, | ||
| 68 | TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val), | ||
| 69 | TP_ARGS(dev, reg, offs, val), | ||
| 70 | TP_STRUCT__entry( | ||
| 71 | __string(dev, dev_name(dev)) | ||
| 72 | __field(const char *, reg) | ||
| 73 | __field(u32, offs) | ||
| 74 | __field(u32, val) | ||
| 75 | ), | ||
| 76 | TP_fast_assign( | ||
| 77 | __assign_str(dev, dev_name(dev)) | ||
| 78 | __entry->reg = reg; | ||
| 79 | __entry->offs = offs; | ||
| 80 | __entry->val = val; | ||
| 81 | ), | ||
| 82 | TP_printk("[%s] pci cfg read %s:[%#x] = %#x", | ||
| 83 | __get_str(dev), __entry->reg, __entry->offs, __entry->val) | ||
| 84 | ); | ||
| 85 | |||
| 86 | TRACE_EVENT(mei_pci_cfg_write, | ||
| 87 | TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val), | ||
| 88 | TP_ARGS(dev, reg, offs, val), | ||
| 89 | TP_STRUCT__entry( | ||
| 90 | __string(dev, dev_name(dev)) | ||
| 91 | __field(const char *, reg) | ||
| 92 | __field(u32, offs) | ||
| 93 | __field(u32, val) | ||
| 94 | ), | ||
| 95 | TP_fast_assign( | ||
| 96 | __assign_str(dev, dev_name(dev)) | ||
| 97 | __entry->reg = reg; | ||
| 98 | __entry->offs = offs; | ||
| 99 | __entry->val = val; | ||
| 100 | ), | ||
| 101 | TP_printk("[%s] pci cfg write %s[%#x] = %#x", | ||
| 64 | __get_str(dev), __entry->reg, __entry->offs, __entry->val) | 102 | __get_str(dev), __entry->reg, __entry->offs, __entry->val) |
| 65 | ); | 103 | ); |
| 66 | 104 | ||
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 4250555d5e72..db78e6d99456 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | #define _MEI_DEV_H_ | 18 | #define _MEI_DEV_H_ |
| 19 | 19 | ||
| 20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
| 21 | #include <linux/watchdog.h> | 21 | #include <linux/cdev.h> |
| 22 | #include <linux/poll.h> | 22 | #include <linux/poll.h> |
| 23 | #include <linux/mei.h> | 23 | #include <linux/mei.h> |
| 24 | #include <linux/mei_cl_bus.h> | 24 | #include <linux/mei_cl_bus.h> |
| @@ -26,33 +26,13 @@ | |||
| 26 | #include "hw.h" | 26 | #include "hw.h" |
| 27 | #include "hbm.h" | 27 | #include "hbm.h" |
| 28 | 28 | ||
| 29 | /* | ||
| 30 | * watch dog definition | ||
| 31 | */ | ||
| 32 | #define MEI_WD_HDR_SIZE 4 | ||
| 33 | #define MEI_WD_STOP_MSG_SIZE MEI_WD_HDR_SIZE | ||
| 34 | #define MEI_WD_START_MSG_SIZE (MEI_WD_HDR_SIZE + 16) | ||
| 35 | |||
| 36 | #define MEI_WD_DEFAULT_TIMEOUT 120 /* seconds */ | ||
| 37 | #define MEI_WD_MIN_TIMEOUT 120 /* seconds */ | ||
| 38 | #define MEI_WD_MAX_TIMEOUT 65535 /* seconds */ | ||
| 39 | |||
| 40 | #define MEI_WD_STOP_TIMEOUT 10 /* msecs */ | ||
| 41 | |||
| 42 | #define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0) | ||
| 43 | |||
| 44 | #define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32)) | ||
| 45 | |||
| 46 | 29 | ||
| 47 | /* | 30 | /* |
| 48 | * AMTHI Client UUID | 31 | * AMTHI Client UUID |
| 49 | */ | 32 | */ |
| 50 | extern const uuid_le mei_amthif_guid; | 33 | extern const uuid_le mei_amthif_guid; |
| 51 | 34 | ||
| 52 | /* | 35 | #define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32)) |
| 53 | * Watchdog Client UUID | ||
| 54 | */ | ||
| 55 | extern const uuid_le mei_wd_guid; | ||
| 56 | 36 | ||
| 57 | /* | 37 | /* |
| 58 | * Number of Maximum MEI Clients | 38 | * Number of Maximum MEI Clients |
| @@ -73,15 +53,6 @@ extern const uuid_le mei_wd_guid; | |||
| 73 | */ | 53 | */ |
| 74 | #define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) | 54 | #define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) |
| 75 | 55 | ||
| 76 | /* | ||
| 77 | * Internal Clients Number | ||
| 78 | */ | ||
| 79 | #define MEI_HOST_CLIENT_ID_ANY (-1) | ||
| 80 | #define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */ | ||
| 81 | #define MEI_WD_HOST_CLIENT_ID 1 | ||
| 82 | #define MEI_IAMTHIF_HOST_CLIENT_ID 2 | ||
| 83 | |||
| 84 | |||
| 85 | /* File state */ | 56 | /* File state */ |
| 86 | enum file_state { | 57 | enum file_state { |
| 87 | MEI_FILE_INITIALIZING = 0, | 58 | MEI_FILE_INITIALIZING = 0, |
| @@ -123,12 +94,6 @@ enum mei_file_transaction_states { | |||
| 123 | MEI_READ_COMPLETE | 94 | MEI_READ_COMPLETE |
| 124 | }; | 95 | }; |
| 125 | 96 | ||
| 126 | enum mei_wd_states { | ||
| 127 | MEI_WD_IDLE, | ||
| 128 | MEI_WD_RUNNING, | ||
| 129 | MEI_WD_STOPPING, | ||
| 130 | }; | ||
| 131 | |||
| 132 | /** | 97 | /** |
| 133 | * enum mei_cb_file_ops - file operation associated with the callback | 98 | * enum mei_cb_file_ops - file operation associated with the callback |
| 134 | * @MEI_FOP_READ: read | 99 | * @MEI_FOP_READ: read |
| @@ -153,7 +118,7 @@ enum mei_cb_file_ops { | |||
| 153 | * Intel MEI message data struct | 118 | * Intel MEI message data struct |
| 154 | */ | 119 | */ |
| 155 | struct mei_msg_data { | 120 | struct mei_msg_data { |
| 156 | u32 size; | 121 | size_t size; |
| 157 | unsigned char *data; | 122 | unsigned char *data; |
| 158 | }; | 123 | }; |
| 159 | 124 | ||
| @@ -206,8 +171,7 @@ struct mei_cl; | |||
| 206 | * @fop_type: file operation type | 171 | * @fop_type: file operation type |
| 207 | * @buf: buffer for data associated with the callback | 172 | * @buf: buffer for data associated with the callback |
| 208 | * @buf_idx: last read index | 173 | * @buf_idx: last read index |
| 209 | * @read_time: last read operation time stamp (iamthif) | 174 | * @fp: pointer to file structure |
| 210 | * @file_object: pointer to file structure | ||
| 211 | * @status: io status of the cb | 175 | * @status: io status of the cb |
| 212 | * @internal: communication between driver and FW flag | 176 | * @internal: communication between driver and FW flag |
| 213 | * @completed: the transfer or reception has completed | 177 | * @completed: the transfer or reception has completed |
| @@ -217,9 +181,8 @@ struct mei_cl_cb { | |||
| 217 | struct mei_cl *cl; | 181 | struct mei_cl *cl; |
| 218 | enum mei_cb_file_ops fop_type; | 182 | enum mei_cb_file_ops fop_type; |
| 219 | struct mei_msg_data buf; | 183 | struct mei_msg_data buf; |
| 220 | unsigned long buf_idx; | 184 | size_t buf_idx; |
| 221 | unsigned long read_time; | 185 | const struct file *fp; |
| 222 | struct file *file_object; | ||
| 223 | int status; | 186 | int status; |
| 224 | u32 internal:1; | 187 | u32 internal:1; |
| 225 | u32 completed:1; | 188 | u32 completed:1; |
| @@ -341,12 +304,13 @@ struct mei_hw_ops { | |||
| 341 | 304 | ||
| 342 | /* MEI bus API*/ | 305 | /* MEI bus API*/ |
| 343 | void mei_cl_bus_rescan(struct mei_device *bus); | 306 | void mei_cl_bus_rescan(struct mei_device *bus); |
| 307 | void mei_cl_bus_rescan_work(struct work_struct *work); | ||
| 344 | void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); | 308 | void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); |
| 345 | ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, | 309 | ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, |
| 346 | bool blocking); | 310 | bool blocking); |
| 347 | ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); | 311 | ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length); |
| 348 | void mei_cl_bus_rx_event(struct mei_cl *cl); | 312 | bool mei_cl_bus_rx_event(struct mei_cl *cl); |
| 349 | void mei_cl_bus_notify_event(struct mei_cl *cl); | 313 | bool mei_cl_bus_notify_event(struct mei_cl *cl); |
| 350 | void mei_cl_bus_remove_devices(struct mei_device *bus); | 314 | void mei_cl_bus_remove_devices(struct mei_device *bus); |
| 351 | int mei_cl_bus_init(void); | 315 | int mei_cl_bus_init(void); |
| 352 | void mei_cl_bus_exit(void); | 316 | void mei_cl_bus_exit(void); |
| @@ -404,7 +368,6 @@ const char *mei_pg_state_str(enum mei_pg_state state); | |||
| 404 | * @wait_hw_ready : wait queue for receive HW ready message form FW | 368 | * @wait_hw_ready : wait queue for receive HW ready message form FW |
| 405 | * @wait_pg : wait queue for receive PG message from FW | 369 | * @wait_pg : wait queue for receive PG message from FW |
| 406 | * @wait_hbm_start : wait queue for receive HBM start message from FW | 370 | * @wait_hbm_start : wait queue for receive HBM start message from FW |
| 407 | * @wait_stop_wd : wait queue for receive WD stop message from FW | ||
| 408 | * | 371 | * |
| 409 | * @reset_count : number of consecutive resets | 372 | * @reset_count : number of consecutive resets |
| 410 | * @dev_state : device state | 373 | * @dev_state : device state |
| @@ -426,6 +389,8 @@ const char *mei_pg_state_str(enum mei_pg_state state); | |||
| 426 | * @hbm_f_dc_supported : hbm feature dynamic clients | 389 | * @hbm_f_dc_supported : hbm feature dynamic clients |
| 427 | * @hbm_f_dot_supported : hbm feature disconnect on timeout | 390 | * @hbm_f_dot_supported : hbm feature disconnect on timeout |
| 428 | * @hbm_f_ev_supported : hbm feature event notification | 391 | * @hbm_f_ev_supported : hbm feature event notification |
| 392 | * @hbm_f_fa_supported : hbm feature fixed address client | ||
| 393 | * @hbm_f_ie_supported : hbm feature immediate reply to enum request | ||
| 429 | * | 394 | * |
| 430 | * @me_clients_rwsem: rw lock over me_clients list | 395 | * @me_clients_rwsem: rw lock over me_clients list |
| 431 | * @me_clients : list of FW clients | 396 | * @me_clients : list of FW clients |
| @@ -434,26 +399,19 @@ const char *mei_pg_state_str(enum mei_pg_state state); | |||
| 434 | * @me_client_index : last FW client index in enumeration | 399 | * @me_client_index : last FW client index in enumeration |
| 435 | * | 400 | * |
| 436 | * @allow_fixed_address: allow user space to connect a fixed client | 401 | * @allow_fixed_address: allow user space to connect a fixed client |
| 437 | * | 402 | * @override_fixed_address: force allow fixed address behavior |
| 438 | * @wd_cl : watchdog client | ||
| 439 | * @wd_state : watchdog client state | ||
| 440 | * @wd_pending : watchdog command is pending | ||
| 441 | * @wd_timeout : watchdog expiration timeout | ||
| 442 | * @wd_data : watchdog message buffer | ||
| 443 | * | 403 | * |
| 444 | * @amthif_cmd_list : amthif list for cmd waiting | 404 | * @amthif_cmd_list : amthif list for cmd waiting |
| 445 | * @amthif_rd_complete_list : amthif list for reading completed cmd data | 405 | * @iamthif_fp : file for current amthif operation |
| 446 | * @iamthif_file_object : file for current amthif operation | ||
| 447 | * @iamthif_cl : amthif host client | 406 | * @iamthif_cl : amthif host client |
| 448 | * @iamthif_current_cb : amthif current operation callback | 407 | * @iamthif_current_cb : amthif current operation callback |
| 449 | * @iamthif_open_count : number of opened amthif connections | 408 | * @iamthif_open_count : number of opened amthif connections |
| 450 | * @iamthif_timer : time stamp of current amthif command completion | ||
| 451 | * @iamthif_stall_timer : timer to detect amthif hang | 409 | * @iamthif_stall_timer : timer to detect amthif hang |
| 452 | * @iamthif_state : amthif processor state | 410 | * @iamthif_state : amthif processor state |
| 453 | * @iamthif_canceled : current amthif command is canceled | 411 | * @iamthif_canceled : current amthif command is canceled |
| 454 | * | 412 | * |
| 455 | * @init_work : work item for the device init | ||
| 456 | * @reset_work : work item for the device reset | 413 | * @reset_work : work item for the device reset |
| 414 | * @bus_rescan_work : work item for the bus rescan | ||
| 457 | * | 415 | * |
| 458 | * @device_list : mei client bus list | 416 | * @device_list : mei client bus list |
| 459 | * @cl_bus_lock : client bus list lock | 417 | * @cl_bus_lock : client bus list lock |
| @@ -486,7 +444,6 @@ struct mei_device { | |||
| 486 | wait_queue_head_t wait_hw_ready; | 444 | wait_queue_head_t wait_hw_ready; |
| 487 | wait_queue_head_t wait_pg; | 445 | wait_queue_head_t wait_pg; |
| 488 | wait_queue_head_t wait_hbm_start; | 446 | wait_queue_head_t wait_hbm_start; |
| 489 | wait_queue_head_t wait_stop_wd; | ||
| 490 | 447 | ||
| 491 | /* | 448 | /* |
| 492 | * mei device states | 449 | * mei device states |
| @@ -522,6 +479,8 @@ struct mei_device { | |||
| 522 | unsigned int hbm_f_dc_supported:1; | 479 | unsigned int hbm_f_dc_supported:1; |
| 523 | unsigned int hbm_f_dot_supported:1; | 480 | unsigned int hbm_f_dot_supported:1; |
| 524 | unsigned int hbm_f_ev_supported:1; | 481 | unsigned int hbm_f_ev_supported:1; |
| 482 | unsigned int hbm_f_fa_supported:1; | ||
| 483 | unsigned int hbm_f_ie_supported:1; | ||
| 525 | 484 | ||
| 526 | struct rw_semaphore me_clients_rwsem; | 485 | struct rw_semaphore me_clients_rwsem; |
| 527 | struct list_head me_clients; | 486 | struct list_head me_clients; |
| @@ -530,29 +489,21 @@ struct mei_device { | |||
| 530 | unsigned long me_client_index; | 489 | unsigned long me_client_index; |
| 531 | 490 | ||
| 532 | bool allow_fixed_address; | 491 | bool allow_fixed_address; |
| 533 | 492 | bool override_fixed_address; | |
| 534 | struct mei_cl wd_cl; | ||
| 535 | enum mei_wd_states wd_state; | ||
| 536 | bool wd_pending; | ||
| 537 | u16 wd_timeout; | ||
| 538 | unsigned char wd_data[MEI_WD_START_MSG_SIZE]; | ||
| 539 | |||
| 540 | 493 | ||
| 541 | /* amthif list for cmd waiting */ | 494 | /* amthif list for cmd waiting */ |
| 542 | struct mei_cl_cb amthif_cmd_list; | 495 | struct mei_cl_cb amthif_cmd_list; |
| 543 | /* driver managed amthif list for reading completed amthif cmd data */ | 496 | /* driver managed amthif list for reading completed amthif cmd data */ |
| 544 | struct mei_cl_cb amthif_rd_complete_list; | 497 | const struct file *iamthif_fp; |
| 545 | struct file *iamthif_file_object; | ||
| 546 | struct mei_cl iamthif_cl; | 498 | struct mei_cl iamthif_cl; |
| 547 | struct mei_cl_cb *iamthif_current_cb; | 499 | struct mei_cl_cb *iamthif_current_cb; |
| 548 | long iamthif_open_count; | 500 | long iamthif_open_count; |
| 549 | unsigned long iamthif_timer; | ||
| 550 | u32 iamthif_stall_timer; | 501 | u32 iamthif_stall_timer; |
| 551 | enum iamthif_states iamthif_state; | 502 | enum iamthif_states iamthif_state; |
| 552 | bool iamthif_canceled; | 503 | bool iamthif_canceled; |
| 553 | 504 | ||
| 554 | struct work_struct init_work; | ||
| 555 | struct work_struct reset_work; | 505 | struct work_struct reset_work; |
| 506 | struct work_struct bus_rescan_work; | ||
| 556 | 507 | ||
| 557 | /* List of bus devices */ | 508 | /* List of bus devices */ |
| 558 | struct list_head device_list; | 509 | struct list_head device_list; |
| @@ -635,47 +586,18 @@ unsigned int mei_amthif_poll(struct mei_device *dev, | |||
| 635 | 586 | ||
| 636 | int mei_amthif_release(struct mei_device *dev, struct file *file); | 587 | int mei_amthif_release(struct mei_device *dev, struct file *file); |
| 637 | 588 | ||
| 638 | struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev, | ||
| 639 | struct file *file); | ||
| 640 | |||
| 641 | int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb); | 589 | int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb); |
| 642 | int mei_amthif_run_next_cmd(struct mei_device *dev); | 590 | int mei_amthif_run_next_cmd(struct mei_device *dev); |
| 643 | int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, | 591 | int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, |
| 644 | struct mei_cl_cb *cmpl_list); | 592 | struct mei_cl_cb *cmpl_list); |
| 645 | 593 | ||
| 646 | void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb); | 594 | void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb); |
| 647 | int mei_amthif_irq_read_msg(struct mei_cl *cl, | 595 | int mei_amthif_irq_read_msg(struct mei_cl *cl, |
| 648 | struct mei_msg_hdr *mei_hdr, | 596 | struct mei_msg_hdr *mei_hdr, |
| 649 | struct mei_cl_cb *complete_list); | 597 | struct mei_cl_cb *complete_list); |
| 650 | int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); | 598 | int mei_amthif_irq_read(struct mei_device *dev, s32 *slots); |
| 651 | 599 | ||
| 652 | /* | 600 | /* |
| 653 | * NFC functions | ||
| 654 | */ | ||
| 655 | int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl); | ||
| 656 | void mei_nfc_host_exit(struct mei_device *dev); | ||
| 657 | |||
| 658 | /* | ||
| 659 | * NFC Client UUID | ||
| 660 | */ | ||
| 661 | extern const uuid_le mei_nfc_guid; | ||
| 662 | |||
| 663 | int mei_wd_send(struct mei_device *dev); | ||
| 664 | int mei_wd_stop(struct mei_device *dev); | ||
| 665 | int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl); | ||
| 666 | /* | ||
| 667 | * mei_watchdog_register - Registering watchdog interface | ||
| 668 | * once we got connection to the WD Client | ||
| 669 | * @dev: mei device | ||
| 670 | */ | ||
| 671 | int mei_watchdog_register(struct mei_device *dev); | ||
| 672 | /* | ||
| 673 | * mei_watchdog_unregister - Unregistering watchdog interface | ||
| 674 | * @dev: mei device | ||
| 675 | */ | ||
| 676 | void mei_watchdog_unregister(struct mei_device *dev); | ||
| 677 | |||
| 678 | /* | ||
| 679 | * Register Access Function | 601 | * Register Access Function |
| 680 | */ | 602 | */ |
| 681 | 603 | ||
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 75fc9c688df8..996344f6c32d 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
| @@ -210,7 +210,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 210 | 210 | ||
| 211 | err = mei_register(dev, &pdev->dev); | 211 | err = mei_register(dev, &pdev->dev); |
| 212 | if (err) | 212 | if (err) |
| 213 | goto release_irq; | 213 | goto stop; |
| 214 | 214 | ||
| 215 | pci_set_drvdata(pdev, dev); | 215 | pci_set_drvdata(pdev, dev); |
| 216 | 216 | ||
| @@ -231,6 +231,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 231 | 231 | ||
| 232 | return 0; | 232 | return 0; |
| 233 | 233 | ||
| 234 | stop: | ||
| 235 | mei_stop(dev); | ||
| 234 | release_irq: | 236 | release_irq: |
| 235 | mei_cancel_work(dev); | 237 | mei_cancel_work(dev); |
| 236 | mei_disable_interrupts(dev); | 238 | mei_disable_interrupts(dev); |
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index 71f8a7475717..30cc30683c07 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c | |||
| @@ -154,7 +154,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 154 | 154 | ||
| 155 | err = mei_register(dev, &pdev->dev); | 155 | err = mei_register(dev, &pdev->dev); |
| 156 | if (err) | 156 | if (err) |
| 157 | goto release_irq; | 157 | goto stop; |
| 158 | 158 | ||
| 159 | pci_set_drvdata(pdev, dev); | 159 | pci_set_drvdata(pdev, dev); |
| 160 | 160 | ||
| @@ -170,6 +170,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 170 | 170 | ||
| 171 | return 0; | 171 | return 0; |
| 172 | 172 | ||
| 173 | stop: | ||
| 174 | mei_stop(dev); | ||
| 173 | release_irq: | 175 | release_irq: |
| 174 | 176 | ||
| 175 | mei_cancel_work(dev); | 177 | mei_cancel_work(dev); |
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c deleted file mode 100644 index b346638833b0..000000000000 --- a/drivers/misc/mei/wd.c +++ /dev/null | |||
| @@ -1,391 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * | ||
| 3 | * Intel Management Engine Interface (Intel MEI) Linux driver | ||
| 4 | * Copyright (c) 2003-2012, Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | */ | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/moduleparam.h> | ||
| 19 | #include <linux/device.h> | ||
| 20 | #include <linux/sched.h> | ||
| 21 | #include <linux/watchdog.h> | ||
| 22 | |||
| 23 | #include <linux/mei.h> | ||
| 24 | |||
| 25 | #include "mei_dev.h" | ||
| 26 | #include "hbm.h" | ||
| 27 | #include "client.h" | ||
| 28 | |||
| 29 | static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 }; | ||
| 30 | static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 }; | ||
| 31 | |||
| 32 | /* | ||
| 33 | * AMT Watchdog Device | ||
| 34 | */ | ||
| 35 | #define INTEL_AMT_WATCHDOG_ID "INTCAMT" | ||
| 36 | |||
| 37 | /* UUIDs for AMT F/W clients */ | ||
| 38 | const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89, | ||
| 39 | 0x9D, 0xA9, 0x15, 0x14, 0xCB, | ||
| 40 | 0x32, 0xAB); | ||
| 41 | |||
| 42 | static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout) | ||
| 43 | { | ||
| 44 | dev_dbg(dev->dev, "wd: set timeout=%d.\n", timeout); | ||
| 45 | memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE); | ||
| 46 | memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16)); | ||
| 47 | } | ||
| 48 | |||
| 49 | /** | ||
| 50 | * mei_wd_host_init - connect to the watchdog client | ||
| 51 | * | ||
| 52 | * @dev: the device structure | ||
| 53 | * @me_cl: me client | ||
| 54 | * | ||
| 55 | * Return: -ENOTTY if wd client cannot be found | ||
| 56 | * -EIO if write has failed | ||
| 57 | * 0 on success | ||
| 58 | */ | ||
| 59 | int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl) | ||
| 60 | { | ||
| 61 | struct mei_cl *cl = &dev->wd_cl; | ||
| 62 | int ret; | ||
| 63 | |||
| 64 | mei_cl_init(cl, dev); | ||
| 65 | |||
| 66 | dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT; | ||
| 67 | dev->wd_state = MEI_WD_IDLE; | ||
| 68 | |||
| 69 | ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID); | ||
| 70 | if (ret < 0) { | ||
| 71 | dev_info(dev->dev, "wd: failed link client\n"); | ||
| 72 | return ret; | ||
| 73 | } | ||
| 74 | |||
| 75 | ret = mei_cl_connect(cl, me_cl, NULL); | ||
| 76 | if (ret) { | ||
| 77 | dev_err(dev->dev, "wd: failed to connect = %d\n", ret); | ||
| 78 | mei_cl_unlink(cl); | ||
| 79 | return ret; | ||
| 80 | } | ||
| 81 | |||
| 82 | ret = mei_watchdog_register(dev); | ||
| 83 | if (ret) { | ||
| 84 | mei_cl_disconnect(cl); | ||
| 85 | mei_cl_unlink(cl); | ||
| 86 | } | ||
| 87 | return ret; | ||
| 88 | } | ||
| 89 | |||
| 90 | /** | ||
| 91 | * mei_wd_send - sends watch dog message to fw. | ||
| 92 | * | ||
| 93 | * @dev: the device structure | ||
| 94 | * | ||
| 95 | * Return: 0 if success, | ||
| 96 | * -EIO when message send fails | ||
| 97 | * -EINVAL when invalid message is to be sent | ||
| 98 | * -ENODEV on flow control failure | ||
| 99 | */ | ||
| 100 | int mei_wd_send(struct mei_device *dev) | ||
| 101 | { | ||
| 102 | struct mei_cl *cl = &dev->wd_cl; | ||
| 103 | struct mei_msg_hdr hdr; | ||
| 104 | int ret; | ||
| 105 | |||
| 106 | hdr.host_addr = cl->host_client_id; | ||
| 107 | hdr.me_addr = mei_cl_me_id(cl); | ||
| 108 | hdr.msg_complete = 1; | ||
| 109 | hdr.reserved = 0; | ||
| 110 | hdr.internal = 0; | ||
| 111 | |||
| 112 | if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) | ||
| 113 | hdr.length = MEI_WD_START_MSG_SIZE; | ||
| 114 | else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE)) | ||
| 115 | hdr.length = MEI_WD_STOP_MSG_SIZE; | ||
| 116 | else { | ||
| 117 | dev_err(dev->dev, "wd: invalid message is to be sent, aborting\n"); | ||
| 118 | return -EINVAL; | ||
| 119 | } | ||
| 120 | |||
| 121 | ret = mei_write_message(dev, &hdr, dev->wd_data); | ||
| 122 | if (ret) { | ||
| 123 | dev_err(dev->dev, "wd: write message failed\n"); | ||
| 124 | return ret; | ||
| 125 | } | ||
| 126 | |||
| 127 | ret = mei_cl_flow_ctrl_reduce(cl); | ||
| 128 | if (ret) { | ||
| 129 | dev_err(dev->dev, "wd: flow_ctrl_reduce failed.\n"); | ||
| 130 | return ret; | ||
| 131 | } | ||
| 132 | |||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | /** | ||
| 137 | * mei_wd_stop - sends watchdog stop message to fw. | ||
| 138 | * | ||
| 139 | * @dev: the device structure | ||
| 140 | * | ||
| 141 | * Return: 0 if success | ||
| 142 | * on error: | ||
| 143 | * -EIO when message send fails | ||
| 144 | * -EINVAL when invalid message is to be sent | ||
| 145 | * -ETIME on message timeout | ||
| 146 | */ | ||
| 147 | int mei_wd_stop(struct mei_device *dev) | ||
| 148 | { | ||
| 149 | struct mei_cl *cl = &dev->wd_cl; | ||
| 150 | int ret; | ||
| 151 | |||
| 152 | if (!mei_cl_is_connected(cl) || | ||
| 153 | dev->wd_state != MEI_WD_RUNNING) | ||
| 154 | return 0; | ||
| 155 | |||
| 156 | memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_STOP_MSG_SIZE); | ||
| 157 | |||
| 158 | dev->wd_state = MEI_WD_STOPPING; | ||
| 159 | |||
| 160 | ret = mei_cl_flow_ctrl_creds(cl); | ||
| 161 | if (ret < 0) | ||
| 162 | goto err; | ||
| 163 | |||
| 164 | if (ret && mei_hbuf_acquire(dev)) { | ||
| 165 | ret = mei_wd_send(dev); | ||
| 166 | if (ret) | ||
| 167 | goto err; | ||
| 168 | dev->wd_pending = false; | ||
| 169 | } else { | ||
| 170 | dev->wd_pending = true; | ||
| 171 | } | ||
| 172 | |||
| 173 | mutex_unlock(&dev->device_lock); | ||
| 174 | |||
| 175 | ret = wait_event_timeout(dev->wait_stop_wd, | ||
| 176 | dev->wd_state == MEI_WD_IDLE, | ||
| 177 | msecs_to_jiffies(MEI_WD_STOP_TIMEOUT)); | ||
| 178 | mutex_lock(&dev->device_lock); | ||
| 179 | if (dev->wd_state != MEI_WD_IDLE) { | ||
| 180 | /* timeout */ | ||
| 181 | ret = -ETIME; | ||
| 182 | dev_warn(dev->dev, "wd: stop failed to complete ret=%d\n", ret); | ||
| 183 | goto err; | ||
| 184 | } | ||
| 185 | dev_dbg(dev->dev, "wd: stop completed after %u msec\n", | ||
| 186 | MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret)); | ||
| 187 | return 0; | ||
| 188 | err: | ||
| 189 | return ret; | ||
| 190 | } | ||
| 191 | |||
| 192 | /** | ||
| 193 | * mei_wd_ops_start - wd start command from the watchdog core. | ||
| 194 | * | ||
| 195 | * @wd_dev: watchdog device struct | ||
| 196 | * | ||
| 197 | * Return: 0 if success, negative errno code for failure | ||
| 198 | */ | ||
| 199 | static int mei_wd_ops_start(struct watchdog_device *wd_dev) | ||
| 200 | { | ||
| 201 | struct mei_device *dev; | ||
| 202 | struct mei_cl *cl; | ||
| 203 | int err = -ENODEV; | ||
| 204 | |||
| 205 | dev = watchdog_get_drvdata(wd_dev); | ||
| 206 | if (!dev) | ||
| 207 | return -ENODEV; | ||
| 208 | |||
| 209 | cl = &dev->wd_cl; | ||
| 210 | |||
| 211 | mutex_lock(&dev->device_lock); | ||
| 212 | |||
| 213 | if (dev->dev_state != MEI_DEV_ENABLED) { | ||
| 214 | dev_dbg(dev->dev, "wd: dev_state != MEI_DEV_ENABLED dev_state = %s\n", | ||
| 215 | mei_dev_state_str(dev->dev_state)); | ||
| 216 | goto end_unlock; | ||
| 217 | } | ||
| 218 | |||
| 219 | if (!mei_cl_is_connected(cl)) { | ||
| 220 | cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n"); | ||
| 221 | goto end_unlock; | ||
| 222 | } | ||
| 223 | |||
| 224 | mei_wd_set_start_timeout(dev, dev->wd_timeout); | ||
| 225 | |||
| 226 | err = 0; | ||
| 227 | end_unlock: | ||
| 228 | mutex_unlock(&dev->device_lock); | ||
| 229 | return err; | ||
| 230 | } | ||
| 231 | |||
| 232 | /** | ||
| 233 | * mei_wd_ops_stop - wd stop command from the watchdog core. | ||
| 234 | * | ||
| 235 | * @wd_dev: watchdog device struct | ||
| 236 | * | ||
| 237 | * Return: 0 if success, negative errno code for failure | ||
| 238 | */ | ||
| 239 | static int mei_wd_ops_stop(struct watchdog_device *wd_dev) | ||
| 240 | { | ||
| 241 | struct mei_device *dev; | ||
| 242 | |||
| 243 | dev = watchdog_get_drvdata(wd_dev); | ||
| 244 | if (!dev) | ||
| 245 | return -ENODEV; | ||
| 246 | |||
| 247 | mutex_lock(&dev->device_lock); | ||
| 248 | mei_wd_stop(dev); | ||
| 249 | mutex_unlock(&dev->device_lock); | ||
| 250 | |||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | |||
| 254 | /** | ||
| 255 | * mei_wd_ops_ping - wd ping command from the watchdog core. | ||
| 256 | * | ||
| 257 | * @wd_dev: watchdog device struct | ||
| 258 | * | ||
| 259 | * Return: 0 if success, negative errno code for failure | ||
| 260 | */ | ||
| 261 | static int mei_wd_ops_ping(struct watchdog_device *wd_dev) | ||
| 262 | { | ||
| 263 | struct mei_device *dev; | ||
| 264 | struct mei_cl *cl; | ||
| 265 | int ret; | ||
| 266 | |||
| 267 | dev = watchdog_get_drvdata(wd_dev); | ||
| 268 | if (!dev) | ||
| 269 | return -ENODEV; | ||
| 270 | |||
| 271 | cl = &dev->wd_cl; | ||
| 272 | |||
| 273 | mutex_lock(&dev->device_lock); | ||
| 274 | |||
| 275 | if (!mei_cl_is_connected(cl)) { | ||
| 276 | cl_err(dev, cl, "wd: not connected.\n"); | ||
| 277 | ret = -ENODEV; | ||
| 278 | goto end; | ||
| 279 | } | ||
| 280 | |||
| 281 | dev->wd_state = MEI_WD_RUNNING; | ||
| 282 | |||
| 283 | ret = mei_cl_flow_ctrl_creds(cl); | ||
| 284 | if (ret < 0) | ||
| 285 | goto end; | ||
| 286 | |||
| 287 | /* Check if we can send the ping to HW*/ | ||
| 288 | if (ret && mei_hbuf_acquire(dev)) { | ||
| 289 | dev_dbg(dev->dev, "wd: sending ping\n"); | ||
| 290 | |||
| 291 | ret = mei_wd_send(dev); | ||
| 292 | if (ret) | ||
| 293 | goto end; | ||
| 294 | dev->wd_pending = false; | ||
| 295 | } else { | ||
| 296 | dev->wd_pending = true; | ||
| 297 | } | ||
| 298 | |||
| 299 | end: | ||
| 300 | mutex_unlock(&dev->device_lock); | ||
| 301 | return ret; | ||
| 302 | } | ||
| 303 | |||
| 304 | /** | ||
| 305 | * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core. | ||
| 306 | * | ||
| 307 | * @wd_dev: watchdog device struct | ||
| 308 | * @timeout: timeout value to set | ||
| 309 | * | ||
| 310 | * Return: 0 if success, negative errno code for failure | ||
| 311 | */ | ||
| 312 | static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, | ||
| 313 | unsigned int timeout) | ||
| 314 | { | ||
| 315 | struct mei_device *dev; | ||
| 316 | |||
| 317 | dev = watchdog_get_drvdata(wd_dev); | ||
| 318 | if (!dev) | ||
| 319 | return -ENODEV; | ||
| 320 | |||
| 321 | /* Check Timeout value */ | ||
| 322 | if (timeout < MEI_WD_MIN_TIMEOUT || timeout > MEI_WD_MAX_TIMEOUT) | ||
| 323 | return -EINVAL; | ||
| 324 | |||
| 325 | mutex_lock(&dev->device_lock); | ||
| 326 | |||
| 327 | dev->wd_timeout = timeout; | ||
| 328 | wd_dev->timeout = timeout; | ||
| 329 | mei_wd_set_start_timeout(dev, dev->wd_timeout); | ||
| 330 | |||
| 331 | mutex_unlock(&dev->device_lock); | ||
| 332 | |||
| 333 | return 0; | ||
| 334 | } | ||
| 335 | |||
| 336 | /* | ||
| 337 | * Watchdog Device structs | ||
| 338 | */ | ||
| 339 | static const struct watchdog_ops wd_ops = { | ||
| 340 | .owner = THIS_MODULE, | ||
| 341 | .start = mei_wd_ops_start, | ||
| 342 | .stop = mei_wd_ops_stop, | ||
| 343 | .ping = mei_wd_ops_ping, | ||
| 344 | .set_timeout = mei_wd_ops_set_timeout, | ||
| 345 | }; | ||
| 346 | static const struct watchdog_info wd_info = { | ||
| 347 | .identity = INTEL_AMT_WATCHDOG_ID, | ||
| 348 | .options = WDIOF_KEEPALIVEPING | | ||
| 349 | WDIOF_SETTIMEOUT | | ||
| 350 | WDIOF_ALARMONLY, | ||
| 351 | }; | ||
| 352 | |||
| 353 | static struct watchdog_device amt_wd_dev = { | ||
| 354 | .info = &wd_info, | ||
| 355 | .ops = &wd_ops, | ||
| 356 | .timeout = MEI_WD_DEFAULT_TIMEOUT, | ||
| 357 | .min_timeout = MEI_WD_MIN_TIMEOUT, | ||
| 358 | .max_timeout = MEI_WD_MAX_TIMEOUT, | ||
| 359 | }; | ||
| 360 | |||
| 361 | |||
| 362 | int mei_watchdog_register(struct mei_device *dev) | ||
| 363 | { | ||
| 364 | |||
| 365 | int ret; | ||
| 366 | |||
| 367 | amt_wd_dev.parent = dev->dev; | ||
| 368 | /* unlock to perserve correct locking order */ | ||
| 369 | mutex_unlock(&dev->device_lock); | ||
| 370 | ret = watchdog_register_device(&amt_wd_dev); | ||
| 371 | mutex_lock(&dev->device_lock); | ||
| 372 | if (ret) { | ||
| 373 | dev_err(dev->dev, "wd: unable to register watchdog device = %d.\n", | ||
| 374 | ret); | ||
| 375 | return ret; | ||
| 376 | } | ||
| 377 | |||
| 378 | dev_dbg(dev->dev, "wd: successfully register watchdog interface.\n"); | ||
| 379 | watchdog_set_drvdata(&amt_wd_dev, dev); | ||
| 380 | return 0; | ||
| 381 | } | ||
| 382 | |||
| 383 | void mei_watchdog_unregister(struct mei_device *dev) | ||
| 384 | { | ||
| 385 | if (watchdog_get_drvdata(&amt_wd_dev) == NULL) | ||
| 386 | return; | ||
| 387 | |||
| 388 | watchdog_set_drvdata(&amt_wd_dev, NULL); | ||
| 389 | watchdog_unregister_device(&amt_wd_dev); | ||
| 390 | } | ||
| 391 | |||
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig index 40677df7f996..2e4f3ba75c8e 100644 --- a/drivers/misc/mic/Kconfig +++ b/drivers/misc/mic/Kconfig | |||
| @@ -32,12 +32,29 @@ config SCIF_BUS | |||
| 32 | OS and tools for MIC to use with this driver are available from | 32 | OS and tools for MIC to use with this driver are available from |
| 33 | <http://software.intel.com/en-us/mic-developer>. | 33 | <http://software.intel.com/en-us/mic-developer>. |
| 34 | 34 | ||
| 35 | comment "VOP Bus Driver" | ||
| 36 | |||
| 37 | config VOP_BUS | ||
| 38 | tristate "VOP Bus Driver" | ||
| 39 | depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS | ||
| 40 | help | ||
| 41 | This option is selected by any driver which registers a | ||
| 42 | device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST | ||
| 43 | and CONFIG_INTEL_MIC_CARD. | ||
| 44 | |||
| 45 | If you are building a host/card kernel with an Intel MIC device | ||
| 46 | then say M (recommended) or Y, else say N. If unsure say N. | ||
| 47 | |||
| 48 | More information about the Intel MIC family as well as the Linux | ||
| 49 | OS and tools for MIC to use with this driver are available from | ||
| 50 | <http://software.intel.com/en-us/mic-developer>. | ||
| 51 | |||
| 35 | comment "Intel MIC Host Driver" | 52 | comment "Intel MIC Host Driver" |
| 36 | 53 | ||
| 37 | config INTEL_MIC_HOST | 54 | config INTEL_MIC_HOST |
| 38 | tristate "Intel MIC Host Driver" | 55 | tristate "Intel MIC Host Driver" |
| 39 | depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM | 56 | depends on 64BIT && PCI && X86 |
| 40 | select VHOST_RING | 57 | depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS |
| 41 | help | 58 | help |
| 42 | This enables Host Driver support for the Intel Many Integrated | 59 | This enables Host Driver support for the Intel Many Integrated |
| 43 | Core (MIC) family of PCIe form factor coprocessor devices that | 60 | Core (MIC) family of PCIe form factor coprocessor devices that |
| @@ -56,7 +73,8 @@ comment "Intel MIC Card Driver" | |||
| 56 | 73 | ||
| 57 | config INTEL_MIC_CARD | 74 | config INTEL_MIC_CARD |
| 58 | tristate "Intel MIC Card Driver" | 75 | tristate "Intel MIC Card Driver" |
| 59 | depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM | 76 | depends on 64BIT && X86 |
| 77 | depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS | ||
| 60 | select VIRTIO | 78 | select VIRTIO |
| 61 | help | 79 | help |
| 62 | This enables card driver support for the Intel Many Integrated | 80 | This enables card driver support for the Intel Many Integrated |
| @@ -107,3 +125,23 @@ config MIC_COSM | |||
| 107 | More information about the Intel MIC family as well as the Linux | 125 | More information about the Intel MIC family as well as the Linux |
| 108 | OS and tools for MIC to use with this driver are available from | 126 | OS and tools for MIC to use with this driver are available from |
| 109 | <http://software.intel.com/en-us/mic-developer>. | 127 | <http://software.intel.com/en-us/mic-developer>. |
| 128 | |||
| 129 | comment "VOP Driver" | ||
| 130 | |||
| 131 | config VOP | ||
| 132 | tristate "VOP Driver" | ||
| 133 | depends on 64BIT && PCI && X86 && VOP_BUS | ||
| 134 | select VHOST_RING | ||
| 135 | help | ||
| 136 | This enables VOP (Virtio over PCIe) Driver support for the Intel | ||
| 137 | Many Integrated Core (MIC) family of PCIe form factor coprocessor | ||
| 138 | devices. The VOP driver allows virtio drivers, e.g. net, console | ||
| 139 | and block drivers, on the card connect to user space virtio | ||
| 140 | devices on the host. | ||
| 141 | |||
| 142 | If you are building a host kernel with an Intel MIC device then | ||
| 143 | say M (recommended) or Y, else say N. If unsure say N. | ||
| 144 | |||
| 145 | More information about the Intel MIC family as well as the Linux | ||
| 146 | OS and tools for MIC to use with this driver are available from | ||
| 147 | <http://software.intel.com/en-us/mic-developer>. | ||
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile index e288a1106738..f2b1323ff96c 100644 --- a/drivers/misc/mic/Makefile +++ b/drivers/misc/mic/Makefile | |||
| @@ -8,3 +8,4 @@ obj-y += bus/ | |||
| 8 | obj-$(CONFIG_SCIF) += scif/ | 8 | obj-$(CONFIG_SCIF) += scif/ |
| 9 | obj-$(CONFIG_MIC_COSM) += cosm/ | 9 | obj-$(CONFIG_MIC_COSM) += cosm/ |
| 10 | obj-$(CONFIG_MIC_COSM) += cosm_client/ | 10 | obj-$(CONFIG_MIC_COSM) += cosm_client/ |
| 11 | obj-$(CONFIG_VOP) += vop/ | ||
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile index 761842b0d0bb..8758a7daa52c 100644 --- a/drivers/misc/mic/bus/Makefile +++ b/drivers/misc/mic/bus/Makefile | |||
| @@ -5,3 +5,4 @@ | |||
| 5 | obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o | 5 | obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o |
| 6 | obj-$(CONFIG_SCIF_BUS) += scif_bus.o | 6 | obj-$(CONFIG_SCIF_BUS) += scif_bus.o |
| 7 | obj-$(CONFIG_MIC_COSM) += cosm_bus.o | 7 | obj-$(CONFIG_MIC_COSM) += cosm_bus.o |
| 8 | obj-$(CONFIG_VOP_BUS) += vop_bus.o | ||
diff --git a/drivers/misc/mic/bus/cosm_bus.h b/drivers/misc/mic/bus/cosm_bus.h index f7c57f266916..8b6341855dc3 100644 --- a/drivers/misc/mic/bus/cosm_bus.h +++ b/drivers/misc/mic/bus/cosm_bus.h | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | * @attr_group: Pointer to list of sysfs attribute groups. | 30 | * @attr_group: Pointer to list of sysfs attribute groups. |
| 31 | * @sdev: Device for sysfs entries. | 31 | * @sdev: Device for sysfs entries. |
| 32 | * @state: MIC state. | 32 | * @state: MIC state. |
| 33 | * @prev_state: MIC state previous to MIC_RESETTING | ||
| 33 | * @shutdown_status: MIC status reported by card for shutdown/crashes. | 34 | * @shutdown_status: MIC status reported by card for shutdown/crashes. |
| 34 | * @shutdown_status_int: Internal shutdown status maintained by the driver | 35 | * @shutdown_status_int: Internal shutdown status maintained by the driver |
| 35 | * @cosm_mutex: Mutex for synchronizing access to data structures. | 36 | * @cosm_mutex: Mutex for synchronizing access to data structures. |
| @@ -55,6 +56,7 @@ struct cosm_device { | |||
| 55 | const struct attribute_group **attr_group; | 56 | const struct attribute_group **attr_group; |
| 56 | struct device *sdev; | 57 | struct device *sdev; |
| 57 | u8 state; | 58 | u8 state; |
| 59 | u8 prev_state; | ||
| 58 | u8 shutdown_status; | 60 | u8 shutdown_status; |
| 59 | u8 shutdown_status_int; | 61 | u8 shutdown_status_int; |
| 60 | struct mutex cosm_mutex; | 62 | struct mutex cosm_mutex; |
diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c new file mode 100644 index 000000000000..303da222f5b6 --- /dev/null +++ b/drivers/misc/mic/bus/vop_bus.c | |||
| @@ -0,0 +1,203 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2016 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Intel Virtio Over PCIe (VOP) Bus driver. | ||
| 19 | */ | ||
| 20 | #include <linux/slab.h> | ||
| 21 | #include <linux/module.h> | ||
| 22 | #include <linux/idr.h> | ||
| 23 | #include <linux/dma-mapping.h> | ||
| 24 | |||
| 25 | #include "vop_bus.h" | ||
| 26 | |||
| 27 | static ssize_t device_show(struct device *d, | ||
| 28 | struct device_attribute *attr, char *buf) | ||
| 29 | { | ||
| 30 | struct vop_device *dev = dev_to_vop(d); | ||
| 31 | |||
| 32 | return sprintf(buf, "0x%04x\n", dev->id.device); | ||
| 33 | } | ||
| 34 | static DEVICE_ATTR_RO(device); | ||
| 35 | |||
| 36 | static ssize_t vendor_show(struct device *d, | ||
| 37 | struct device_attribute *attr, char *buf) | ||
| 38 | { | ||
| 39 | struct vop_device *dev = dev_to_vop(d); | ||
| 40 | |||
| 41 | return sprintf(buf, "0x%04x\n", dev->id.vendor); | ||
| 42 | } | ||
| 43 | static DEVICE_ATTR_RO(vendor); | ||
| 44 | |||
| 45 | static ssize_t modalias_show(struct device *d, | ||
| 46 | struct device_attribute *attr, char *buf) | ||
| 47 | { | ||
| 48 | struct vop_device *dev = dev_to_vop(d); | ||
| 49 | |||
| 50 | return sprintf(buf, "vop:d%08Xv%08X\n", | ||
| 51 | dev->id.device, dev->id.vendor); | ||
| 52 | } | ||
| 53 | static DEVICE_ATTR_RO(modalias); | ||
| 54 | |||
| 55 | static struct attribute *vop_dev_attrs[] = { | ||
| 56 | &dev_attr_device.attr, | ||
| 57 | &dev_attr_vendor.attr, | ||
| 58 | &dev_attr_modalias.attr, | ||
| 59 | NULL, | ||
| 60 | }; | ||
| 61 | ATTRIBUTE_GROUPS(vop_dev); | ||
| 62 | |||
| 63 | static inline int vop_id_match(const struct vop_device *dev, | ||
| 64 | const struct vop_device_id *id) | ||
| 65 | { | ||
| 66 | if (id->device != dev->id.device && id->device != VOP_DEV_ANY_ID) | ||
| 67 | return 0; | ||
| 68 | |||
| 69 | return id->vendor == VOP_DEV_ANY_ID || id->vendor == dev->id.vendor; | ||
| 70 | } | ||
| 71 | |||
| 72 | /* | ||
| 73 | * This looks through all the IDs a driver claims to support. If any of them | ||
| 74 | * match, we return 1 and the kernel will call vop_dev_probe(). | ||
| 75 | */ | ||
| 76 | static int vop_dev_match(struct device *dv, struct device_driver *dr) | ||
| 77 | { | ||
| 78 | unsigned int i; | ||
| 79 | struct vop_device *dev = dev_to_vop(dv); | ||
| 80 | const struct vop_device_id *ids; | ||
| 81 | |||
| 82 | ids = drv_to_vop(dr)->id_table; | ||
| 83 | for (i = 0; ids[i].device; i++) | ||
| 84 | if (vop_id_match(dev, &ids[i])) | ||
| 85 | return 1; | ||
| 86 | return 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | static int vop_uevent(struct device *dv, struct kobj_uevent_env *env) | ||
| 90 | { | ||
| 91 | struct vop_device *dev = dev_to_vop(dv); | ||
| 92 | |||
| 93 | return add_uevent_var(env, "MODALIAS=vop:d%08Xv%08X", | ||
| 94 | dev->id.device, dev->id.vendor); | ||
| 95 | } | ||
| 96 | |||
| 97 | static int vop_dev_probe(struct device *d) | ||
| 98 | { | ||
| 99 | struct vop_device *dev = dev_to_vop(d); | ||
| 100 | struct vop_driver *drv = drv_to_vop(dev->dev.driver); | ||
| 101 | |||
| 102 | return drv->probe(dev); | ||
| 103 | } | ||
| 104 | |||
| 105 | static int vop_dev_remove(struct device *d) | ||
| 106 | { | ||
| 107 | struct vop_device *dev = dev_to_vop(d); | ||
| 108 | struct vop_driver *drv = drv_to_vop(dev->dev.driver); | ||
| 109 | |||
| 110 | drv->remove(dev); | ||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | static struct bus_type vop_bus = { | ||
| 115 | .name = "vop_bus", | ||
| 116 | .match = vop_dev_match, | ||
| 117 | .dev_groups = vop_dev_groups, | ||
| 118 | .uevent = vop_uevent, | ||
| 119 | .probe = vop_dev_probe, | ||
| 120 | .remove = vop_dev_remove, | ||
| 121 | }; | ||
| 122 | |||
| 123 | int vop_register_driver(struct vop_driver *driver) | ||
| 124 | { | ||
| 125 | driver->driver.bus = &vop_bus; | ||
| 126 | return driver_register(&driver->driver); | ||
| 127 | } | ||
| 128 | EXPORT_SYMBOL_GPL(vop_register_driver); | ||
| 129 | |||
| 130 | void vop_unregister_driver(struct vop_driver *driver) | ||
| 131 | { | ||
| 132 | driver_unregister(&driver->driver); | ||
| 133 | } | ||
| 134 | EXPORT_SYMBOL_GPL(vop_unregister_driver); | ||
| 135 | |||
| 136 | static void vop_release_dev(struct device *d) | ||
| 137 | { | ||
| 138 | put_device(d); | ||
| 139 | } | ||
| 140 | |||
| 141 | struct vop_device * | ||
| 142 | vop_register_device(struct device *pdev, int id, | ||
| 143 | const struct dma_map_ops *dma_ops, | ||
| 144 | struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper, | ||
| 145 | struct dma_chan *chan) | ||
| 146 | { | ||
| 147 | int ret; | ||
| 148 | struct vop_device *vdev; | ||
| 149 | |||
| 150 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); | ||
| 151 | if (!vdev) | ||
| 152 | return ERR_PTR(-ENOMEM); | ||
| 153 | |||
| 154 | vdev->dev.parent = pdev; | ||
| 155 | vdev->id.device = id; | ||
| 156 | vdev->id.vendor = VOP_DEV_ANY_ID; | ||
| 157 | vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops; | ||
| 158 | vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask; | ||
| 159 | dma_set_mask(&vdev->dev, DMA_BIT_MASK(64)); | ||
| 160 | vdev->dev.release = vop_release_dev; | ||
| 161 | vdev->hw_ops = hw_ops; | ||
| 162 | vdev->dev.bus = &vop_bus; | ||
| 163 | vdev->dnode = dnode; | ||
| 164 | vdev->aper = aper; | ||
| 165 | vdev->dma_ch = chan; | ||
| 166 | vdev->index = dnode - 1; | ||
| 167 | dev_set_name(&vdev->dev, "vop-dev%u", vdev->index); | ||
| 168 | /* | ||
| 169 | * device_register() causes the bus infrastructure to look for a | ||
| 170 | * matching driver. | ||
| 171 | */ | ||
| 172 | ret = device_register(&vdev->dev); | ||
| 173 | if (ret) | ||
| 174 | goto free_vdev; | ||
| 175 | return vdev; | ||
| 176 | free_vdev: | ||
| 177 | kfree(vdev); | ||
| 178 | return ERR_PTR(ret); | ||
| 179 | } | ||
| 180 | EXPORT_SYMBOL_GPL(vop_register_device); | ||
| 181 | |||
| 182 | void vop_unregister_device(struct vop_device *dev) | ||
| 183 | { | ||
| 184 | device_unregister(&dev->dev); | ||
| 185 | } | ||
| 186 | EXPORT_SYMBOL_GPL(vop_unregister_device); | ||
| 187 | |||
| 188 | static int __init vop_init(void) | ||
| 189 | { | ||
| 190 | return bus_register(&vop_bus); | ||
| 191 | } | ||
| 192 | |||
| 193 | static void __exit vop_exit(void) | ||
| 194 | { | ||
| 195 | bus_unregister(&vop_bus); | ||
| 196 | } | ||
| 197 | |||
| 198 | core_initcall(vop_init); | ||
| 199 | module_exit(vop_exit); | ||
| 200 | |||
| 201 | MODULE_AUTHOR("Intel Corporation"); | ||
| 202 | MODULE_DESCRIPTION("Intel(R) VOP Bus driver"); | ||
| 203 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h new file mode 100644 index 000000000000..fff7a865d721 --- /dev/null +++ b/drivers/misc/mic/bus/vop_bus.h | |||
| @@ -0,0 +1,140 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2016 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Intel Virtio over PCIe Bus driver. | ||
| 19 | */ | ||
| 20 | #ifndef _VOP_BUS_H_ | ||
| 21 | #define _VOP_BUS_H_ | ||
| 22 | /* | ||
| 23 | * Everything a vop driver needs to work with any particular vop | ||
| 24 | * implementation. | ||
| 25 | */ | ||
| 26 | #include <linux/dmaengine.h> | ||
| 27 | #include <linux/interrupt.h> | ||
| 28 | |||
| 29 | #include "../common/mic_dev.h" | ||
| 30 | |||
| 31 | struct vop_device_id { | ||
| 32 | u32 device; | ||
| 33 | u32 vendor; | ||
| 34 | }; | ||
| 35 | |||
| 36 | #define VOP_DEV_TRNSP 1 | ||
| 37 | #define VOP_DEV_ANY_ID 0xffffffff | ||
| 38 | /* | ||
| 39 | * Size of the internal buffer used during DMA's as an intermediate buffer | ||
| 40 | * for copy to/from user. Must be an integral number of pages. | ||
| 41 | */ | ||
| 42 | #define VOP_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL) | ||
| 43 | |||
| 44 | /** | ||
| 45 | * vop_device - representation of a device using vop | ||
| 46 | * @hw_ops: the hardware ops supported by this device. | ||
| 47 | * @id: the device type identification (used to match it with a driver). | ||
| 48 | * @dev: underlying device. | ||
| 49 | * @dnode - The destination node which this device will communicate with. | ||
| 50 | * @aper: Aperture memory window | ||
| 51 | * @dma_ch - DMA channel | ||
| 52 | * @index: unique position on the vop bus | ||
| 53 | */ | ||
| 54 | struct vop_device { | ||
| 55 | struct vop_hw_ops *hw_ops; | ||
| 56 | struct vop_device_id id; | ||
| 57 | struct device dev; | ||
| 58 | u8 dnode; | ||
| 59 | struct mic_mw *aper; | ||
| 60 | struct dma_chan *dma_ch; | ||
| 61 | int index; | ||
| 62 | }; | ||
| 63 | |||
| 64 | /** | ||
| 65 | * vop_driver - operations for a vop I/O driver | ||
| 66 | * @driver: underlying device driver (populate name and owner). | ||
| 67 | * @id_table: the ids serviced by this driver. | ||
| 68 | * @probe: the function to call when a device is found. Returns 0 or -errno. | ||
| 69 | * @remove: the function to call when a device is removed. | ||
| 70 | */ | ||
| 71 | struct vop_driver { | ||
| 72 | struct device_driver driver; | ||
| 73 | const struct vop_device_id *id_table; | ||
| 74 | int (*probe)(struct vop_device *dev); | ||
| 75 | void (*remove)(struct vop_device *dev); | ||
| 76 | }; | ||
| 77 | |||
| 78 | /** | ||
| 79 | * vop_hw_ops - Hardware operations for accessing a VOP device on the VOP bus. | ||
| 80 | * | ||
| 81 | * @next_db: Obtain the next available doorbell. | ||
| 82 | * @request_irq: Request an interrupt on a particular doorbell. | ||
| 83 | * @free_irq: Free an interrupt requested previously. | ||
| 84 | * @ack_interrupt: acknowledge an interrupt in the ISR. | ||
| 85 | * @get_remote_dp: Get access to the virtio device page used by the remote | ||
| 86 | * node to add/remove/configure virtio devices. | ||
| 87 | * @get_dp: Get access to the virtio device page used by the self | ||
| 88 | * node to add/remove/configure virtio devices. | ||
| 89 | * @send_intr: Send an interrupt to the peer node on a specified doorbell. | ||
| 90 | * @ioremap: Map a buffer with the specified DMA address and length. | ||
| 91 | * @iounmap: Unmap a buffer previously mapped. | ||
| 92 | * @dma_filter: The DMA filter function to use for obtaining access to | ||
| 93 | * a DMA channel on the peer node. | ||
| 94 | */ | ||
| 95 | struct vop_hw_ops { | ||
| 96 | int (*next_db)(struct vop_device *vpdev); | ||
| 97 | struct mic_irq *(*request_irq)(struct vop_device *vpdev, | ||
| 98 | irqreturn_t (*func)(int irq, void *data), | ||
| 99 | const char *name, void *data, | ||
| 100 | int intr_src); | ||
| 101 | void (*free_irq)(struct vop_device *vpdev, | ||
| 102 | struct mic_irq *cookie, void *data); | ||
| 103 | void (*ack_interrupt)(struct vop_device *vpdev, int num); | ||
| 104 | void __iomem * (*get_remote_dp)(struct vop_device *vpdev); | ||
| 105 | void * (*get_dp)(struct vop_device *vpdev); | ||
| 106 | void (*send_intr)(struct vop_device *vpdev, int db); | ||
| 107 | void __iomem * (*ioremap)(struct vop_device *vpdev, | ||
| 108 | dma_addr_t pa, size_t len); | ||
| 109 | void (*iounmap)(struct vop_device *vpdev, void __iomem *va); | ||
| 110 | }; | ||
| 111 | |||
| 112 | struct vop_device * | ||
| 113 | vop_register_device(struct device *pdev, int id, | ||
| 114 | const struct dma_map_ops *dma_ops, | ||
| 115 | struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper, | ||
| 116 | struct dma_chan *chan); | ||
| 117 | void vop_unregister_device(struct vop_device *dev); | ||
| 118 | int vop_register_driver(struct vop_driver *drv); | ||
| 119 | void vop_unregister_driver(struct vop_driver *drv); | ||
| 120 | |||
| 121 | /* | ||
| 122 | * module_vop_driver() - Helper macro for drivers that don't do | ||
| 123 | * anything special in module init/exit. This eliminates a lot of | ||
| 124 | * boilerplate. Each module may only use this macro once, and | ||
| 125 | * calling it replaces module_init() and module_exit() | ||
| 126 | */ | ||
| 127 | #define module_vop_driver(__vop_driver) \ | ||
| 128 | module_driver(__vop_driver, vop_register_driver, \ | ||
| 129 | vop_unregister_driver) | ||
| 130 | |||
| 131 | static inline struct vop_device *dev_to_vop(struct device *dev) | ||
| 132 | { | ||
| 133 | return container_of(dev, struct vop_device, dev); | ||
| 134 | } | ||
| 135 | |||
| 136 | static inline struct vop_driver *drv_to_vop(struct device_driver *drv) | ||
| 137 | { | ||
| 138 | return container_of(drv, struct vop_driver, driver); | ||
| 139 | } | ||
| 140 | #endif /* _VOP_BUS_H */ | ||
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile index 69d58bef92ce..6e9675e12a09 100644 --- a/drivers/misc/mic/card/Makefile +++ b/drivers/misc/mic/card/Makefile | |||
| @@ -8,4 +8,3 @@ obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o | |||
| 8 | mic_card-y += mic_x100.o | 8 | mic_card-y += mic_x100.o |
| 9 | mic_card-y += mic_device.o | 9 | mic_card-y += mic_device.o |
| 10 | mic_card-y += mic_debugfs.o | 10 | mic_card-y += mic_debugfs.o |
| 11 | mic_card-y += mic_virtio.o | ||
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c index d0edaf7e0cd5..e749af48f736 100644 --- a/drivers/misc/mic/card/mic_device.c +++ b/drivers/misc/mic/card/mic_device.c | |||
| @@ -34,7 +34,6 @@ | |||
| 34 | #include <linux/mic_common.h> | 34 | #include <linux/mic_common.h> |
| 35 | #include "../common/mic_dev.h" | 35 | #include "../common/mic_dev.h" |
| 36 | #include "mic_device.h" | 36 | #include "mic_device.h" |
| 37 | #include "mic_virtio.h" | ||
| 38 | 37 | ||
| 39 | static struct mic_driver *g_drv; | 38 | static struct mic_driver *g_drv; |
| 40 | 39 | ||
| @@ -250,12 +249,82 @@ static struct scif_hw_ops scif_hw_ops = { | |||
| 250 | .iounmap = ___mic_iounmap, | 249 | .iounmap = ___mic_iounmap, |
| 251 | }; | 250 | }; |
| 252 | 251 | ||
| 252 | static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev) | ||
| 253 | { | ||
| 254 | return dev_get_drvdata(vpdev->dev.parent); | ||
| 255 | } | ||
| 256 | |||
| 257 | static struct mic_irq * | ||
| 258 | __mic_request_irq(struct vop_device *vpdev, | ||
| 259 | irqreturn_t (*func)(int irq, void *data), | ||
| 260 | const char *name, void *data, int intr_src) | ||
| 261 | { | ||
| 262 | return mic_request_card_irq(func, NULL, name, data, intr_src); | ||
| 263 | } | ||
| 264 | |||
| 265 | static void __mic_free_irq(struct vop_device *vpdev, | ||
| 266 | struct mic_irq *cookie, void *data) | ||
| 267 | { | ||
| 268 | return mic_free_card_irq(cookie, data); | ||
| 269 | } | ||
| 270 | |||
| 271 | static void __mic_ack_interrupt(struct vop_device *vpdev, int num) | ||
| 272 | { | ||
| 273 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
| 274 | |||
| 275 | mic_ack_interrupt(&mdrv->mdev); | ||
| 276 | } | ||
| 277 | |||
| 278 | static int __mic_next_db(struct vop_device *vpdev) | ||
| 279 | { | ||
| 280 | return mic_next_card_db(); | ||
| 281 | } | ||
| 282 | |||
| 283 | static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev) | ||
| 284 | { | ||
| 285 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
| 286 | |||
| 287 | return mdrv->dp; | ||
| 288 | } | ||
| 289 | |||
| 290 | static void __mic_send_intr(struct vop_device *vpdev, int db) | ||
| 291 | { | ||
| 292 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
| 293 | |||
| 294 | mic_send_intr(&mdrv->mdev, db); | ||
| 295 | } | ||
| 296 | |||
| 297 | static void __iomem *__mic_ioremap(struct vop_device *vpdev, | ||
| 298 | dma_addr_t pa, size_t len) | ||
| 299 | { | ||
| 300 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
| 301 | |||
| 302 | return mic_card_map(&mdrv->mdev, pa, len); | ||
| 303 | } | ||
| 304 | |||
| 305 | static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va) | ||
| 306 | { | ||
| 307 | struct mic_driver *mdrv = vpdev_to_mdrv(vpdev); | ||
| 308 | |||
| 309 | mic_card_unmap(&mdrv->mdev, va); | ||
| 310 | } | ||
| 311 | |||
| 312 | static struct vop_hw_ops vop_hw_ops = { | ||
| 313 | .request_irq = __mic_request_irq, | ||
| 314 | .free_irq = __mic_free_irq, | ||
| 315 | .ack_interrupt = __mic_ack_interrupt, | ||
| 316 | .next_db = __mic_next_db, | ||
| 317 | .get_remote_dp = __mic_get_remote_dp, | ||
| 318 | .send_intr = __mic_send_intr, | ||
| 319 | .ioremap = __mic_ioremap, | ||
| 320 | .iounmap = __mic_iounmap, | ||
| 321 | }; | ||
| 322 | |||
| 253 | static int mic_request_dma_chans(struct mic_driver *mdrv) | 323 | static int mic_request_dma_chans(struct mic_driver *mdrv) |
| 254 | { | 324 | { |
| 255 | dma_cap_mask_t mask; | 325 | dma_cap_mask_t mask; |
| 256 | struct dma_chan *chan; | 326 | struct dma_chan *chan; |
| 257 | 327 | ||
| 258 | request_module("mic_x100_dma"); | ||
| 259 | dma_cap_zero(mask); | 328 | dma_cap_zero(mask); |
| 260 | dma_cap_set(DMA_MEMCPY, mask); | 329 | dma_cap_set(DMA_MEMCPY, mask); |
| 261 | 330 | ||
| @@ -309,9 +378,13 @@ int __init mic_driver_init(struct mic_driver *mdrv) | |||
| 309 | rc = -ENODEV; | 378 | rc = -ENODEV; |
| 310 | goto irq_uninit; | 379 | goto irq_uninit; |
| 311 | } | 380 | } |
| 312 | rc = mic_devices_init(mdrv); | 381 | mdrv->vpdev = vop_register_device(mdrv->dev, VOP_DEV_TRNSP, |
| 313 | if (rc) | 382 | NULL, &vop_hw_ops, 0, |
| 383 | NULL, mdrv->dma_ch[0]); | ||
| 384 | if (IS_ERR(mdrv->vpdev)) { | ||
| 385 | rc = PTR_ERR(mdrv->vpdev); | ||
| 314 | goto dma_free; | 386 | goto dma_free; |
| 387 | } | ||
| 315 | bootparam = mdrv->dp; | 388 | bootparam = mdrv->dp; |
| 316 | node_id = ioread8(&bootparam->node_id); | 389 | node_id = ioread8(&bootparam->node_id); |
| 317 | mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV, | 390 | mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV, |
| @@ -321,13 +394,13 @@ int __init mic_driver_init(struct mic_driver *mdrv) | |||
| 321 | mdrv->num_dma_ch, true); | 394 | mdrv->num_dma_ch, true); |
| 322 | if (IS_ERR(mdrv->scdev)) { | 395 | if (IS_ERR(mdrv->scdev)) { |
| 323 | rc = PTR_ERR(mdrv->scdev); | 396 | rc = PTR_ERR(mdrv->scdev); |
| 324 | goto device_uninit; | 397 | goto vop_remove; |
| 325 | } | 398 | } |
| 326 | mic_create_card_debug_dir(mdrv); | 399 | mic_create_card_debug_dir(mdrv); |
| 327 | done: | 400 | done: |
| 328 | return rc; | 401 | return rc; |
| 329 | device_uninit: | 402 | vop_remove: |
| 330 | mic_devices_uninit(mdrv); | 403 | vop_unregister_device(mdrv->vpdev); |
| 331 | dma_free: | 404 | dma_free: |
| 332 | mic_free_dma_chans(mdrv); | 405 | mic_free_dma_chans(mdrv); |
| 333 | irq_uninit: | 406 | irq_uninit: |
| @@ -348,7 +421,7 @@ void mic_driver_uninit(struct mic_driver *mdrv) | |||
| 348 | { | 421 | { |
| 349 | mic_delete_card_debug_dir(mdrv); | 422 | mic_delete_card_debug_dir(mdrv); |
| 350 | scif_unregister_device(mdrv->scdev); | 423 | scif_unregister_device(mdrv->scdev); |
| 351 | mic_devices_uninit(mdrv); | 424 | vop_unregister_device(mdrv->vpdev); |
| 352 | mic_free_dma_chans(mdrv); | 425 | mic_free_dma_chans(mdrv); |
| 353 | mic_uninit_irq(); | 426 | mic_uninit_irq(); |
| 354 | mic_dp_uninit(); | 427 | mic_dp_uninit(); |
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h index 1dbf83c41289..333dbed972f6 100644 --- a/drivers/misc/mic/card/mic_device.h +++ b/drivers/misc/mic/card/mic_device.h | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/interrupt.h> | 32 | #include <linux/interrupt.h> |
| 33 | #include <linux/mic_bus.h> | 33 | #include <linux/mic_bus.h> |
| 34 | #include "../bus/scif_bus.h" | 34 | #include "../bus/scif_bus.h" |
| 35 | #include "../bus/vop_bus.h" | ||
| 35 | 36 | ||
| 36 | /** | 37 | /** |
| 37 | * struct mic_intr_info - Contains h/w specific interrupt sources info | 38 | * struct mic_intr_info - Contains h/w specific interrupt sources info |
| @@ -76,6 +77,7 @@ struct mic_device { | |||
| 76 | * @dma_ch - Array of DMA channels | 77 | * @dma_ch - Array of DMA channels |
| 77 | * @num_dma_ch - Number of DMA channels available | 78 | * @num_dma_ch - Number of DMA channels available |
| 78 | * @scdev: SCIF device on the SCIF virtual bus. | 79 | * @scdev: SCIF device on the SCIF virtual bus. |
| 80 | * @vpdev: Virtio over PCIe device on the VOP virtual bus. | ||
| 79 | */ | 81 | */ |
| 80 | struct mic_driver { | 82 | struct mic_driver { |
| 81 | char name[20]; | 83 | char name[20]; |
| @@ -90,6 +92,7 @@ struct mic_driver { | |||
| 90 | struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN]; | 92 | struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN]; |
| 91 | int num_dma_ch; | 93 | int num_dma_ch; |
| 92 | struct scif_hw_dev *scdev; | 94 | struct scif_hw_dev *scdev; |
| 95 | struct vop_device *vpdev; | ||
| 93 | }; | 96 | }; |
| 94 | 97 | ||
| 95 | /** | 98 | /** |
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c deleted file mode 100644 index f6ed57d3125c..000000000000 --- a/drivers/misc/mic/card/mic_virtio.c +++ /dev/null | |||
| @@ -1,634 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2013 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Disclaimer: The codes contained in these modules may be specific to | ||
| 19 | * the Intel Software Development Platform codenamed: Knights Ferry, and | ||
| 20 | * the Intel product codenamed: Knights Corner, and are not backward | ||
| 21 | * compatible with other Intel products. Additionally, Intel will NOT | ||
| 22 | * support the codes or instruction set in future products. | ||
| 23 | * | ||
| 24 | * Adapted from: | ||
| 25 | * | ||
| 26 | * virtio for kvm on s390 | ||
| 27 | * | ||
| 28 | * Copyright IBM Corp. 2008 | ||
| 29 | * | ||
| 30 | * This program is free software; you can redistribute it and/or modify | ||
| 31 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 32 | * as published by the Free Software Foundation. | ||
| 33 | * | ||
| 34 | * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> | ||
| 35 | * | ||
| 36 | * Intel MIC Card driver. | ||
| 37 | * | ||
| 38 | */ | ||
| 39 | #include <linux/delay.h> | ||
| 40 | #include <linux/slab.h> | ||
| 41 | #include <linux/virtio_config.h> | ||
| 42 | |||
| 43 | #include "../common/mic_dev.h" | ||
| 44 | #include "mic_virtio.h" | ||
| 45 | |||
| 46 | #define VIRTIO_SUBCODE_64 0x0D00 | ||
| 47 | |||
| 48 | #define MIC_MAX_VRINGS 4 | ||
| 49 | struct mic_vdev { | ||
| 50 | struct virtio_device vdev; | ||
| 51 | struct mic_device_desc __iomem *desc; | ||
| 52 | struct mic_device_ctrl __iomem *dc; | ||
| 53 | struct mic_device *mdev; | ||
| 54 | void __iomem *vr[MIC_MAX_VRINGS]; | ||
| 55 | int used_size[MIC_MAX_VRINGS]; | ||
| 56 | struct completion reset_done; | ||
| 57 | struct mic_irq *virtio_cookie; | ||
| 58 | int c2h_vdev_db; | ||
| 59 | }; | ||
| 60 | |||
| 61 | static struct mic_irq *virtio_config_cookie; | ||
| 62 | #define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev) | ||
| 63 | |||
| 64 | /* Helper API to obtain the parent of the virtio device */ | ||
| 65 | static inline struct device *mic_dev(struct mic_vdev *mvdev) | ||
| 66 | { | ||
| 67 | return mvdev->vdev.dev.parent; | ||
| 68 | } | ||
| 69 | |||
| 70 | /* This gets the device's feature bits. */ | ||
| 71 | static u64 mic_get_features(struct virtio_device *vdev) | ||
| 72 | { | ||
| 73 | unsigned int i, bits; | ||
| 74 | u32 features = 0; | ||
| 75 | struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; | ||
| 76 | u8 __iomem *in_features = mic_vq_features(desc); | ||
| 77 | int feature_len = ioread8(&desc->feature_len); | ||
| 78 | |||
| 79 | bits = min_t(unsigned, feature_len, sizeof(features)) * 8; | ||
| 80 | for (i = 0; i < bits; i++) | ||
| 81 | if (ioread8(&in_features[i / 8]) & (BIT(i % 8))) | ||
| 82 | features |= BIT(i); | ||
| 83 | |||
| 84 | return features; | ||
| 85 | } | ||
| 86 | |||
| 87 | static int mic_finalize_features(struct virtio_device *vdev) | ||
| 88 | { | ||
| 89 | unsigned int i, bits; | ||
| 90 | struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; | ||
| 91 | u8 feature_len = ioread8(&desc->feature_len); | ||
| 92 | /* Second half of bitmap is features we accept. */ | ||
| 93 | u8 __iomem *out_features = | ||
| 94 | mic_vq_features(desc) + feature_len; | ||
| 95 | |||
| 96 | /* Give virtio_ring a chance to accept features. */ | ||
| 97 | vring_transport_features(vdev); | ||
| 98 | |||
| 99 | /* Make sure we don't have any features > 32 bits! */ | ||
| 100 | BUG_ON((u32)vdev->features != vdev->features); | ||
| 101 | |||
| 102 | memset_io(out_features, 0, feature_len); | ||
| 103 | bits = min_t(unsigned, feature_len, | ||
| 104 | sizeof(vdev->features)) * 8; | ||
| 105 | for (i = 0; i < bits; i++) { | ||
| 106 | if (__virtio_test_bit(vdev, i)) | ||
| 107 | iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)), | ||
| 108 | &out_features[i / 8]); | ||
| 109 | } | ||
| 110 | |||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | /* | ||
| 115 | * Reading and writing elements in config space | ||
| 116 | */ | ||
| 117 | static void mic_get(struct virtio_device *vdev, unsigned int offset, | ||
| 118 | void *buf, unsigned len) | ||
| 119 | { | ||
| 120 | struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; | ||
| 121 | |||
| 122 | if (offset + len > ioread8(&desc->config_len)) | ||
| 123 | return; | ||
| 124 | memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len); | ||
| 125 | } | ||
| 126 | |||
| 127 | static void mic_set(struct virtio_device *vdev, unsigned int offset, | ||
| 128 | const void *buf, unsigned len) | ||
| 129 | { | ||
| 130 | struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc; | ||
| 131 | |||
| 132 | if (offset + len > ioread8(&desc->config_len)) | ||
| 133 | return; | ||
| 134 | memcpy_toio(mic_vq_configspace(desc) + offset, buf, len); | ||
| 135 | } | ||
| 136 | |||
| 137 | /* | ||
| 138 | * The operations to get and set the status word just access the status | ||
| 139 | * field of the device descriptor. set_status also interrupts the host | ||
| 140 | * to tell about status changes. | ||
| 141 | */ | ||
| 142 | static u8 mic_get_status(struct virtio_device *vdev) | ||
| 143 | { | ||
| 144 | return ioread8(&to_micvdev(vdev)->desc->status); | ||
| 145 | } | ||
| 146 | |||
| 147 | static void mic_set_status(struct virtio_device *vdev, u8 status) | ||
| 148 | { | ||
| 149 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
| 150 | if (!status) | ||
| 151 | return; | ||
| 152 | iowrite8(status, &mvdev->desc->status); | ||
| 153 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | ||
| 154 | } | ||
| 155 | |||
| 156 | /* Inform host on a virtio device reset and wait for ack from host */ | ||
| 157 | static void mic_reset_inform_host(struct virtio_device *vdev) | ||
| 158 | { | ||
| 159 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
| 160 | struct mic_device_ctrl __iomem *dc = mvdev->dc; | ||
| 161 | int retry; | ||
| 162 | |||
| 163 | iowrite8(0, &dc->host_ack); | ||
| 164 | iowrite8(1, &dc->vdev_reset); | ||
| 165 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | ||
| 166 | |||
| 167 | /* Wait till host completes all card accesses and acks the reset */ | ||
| 168 | for (retry = 100; retry--;) { | ||
| 169 | if (ioread8(&dc->host_ack)) | ||
| 170 | break; | ||
| 171 | msleep(100); | ||
| 172 | }; | ||
| 173 | |||
| 174 | dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry); | ||
| 175 | |||
| 176 | /* Reset status to 0 in case we timed out */ | ||
| 177 | iowrite8(0, &mvdev->desc->status); | ||
| 178 | } | ||
| 179 | |||
| 180 | static void mic_reset(struct virtio_device *vdev) | ||
| 181 | { | ||
| 182 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
| 183 | |||
| 184 | dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n", | ||
| 185 | __func__, vdev->id.device); | ||
| 186 | |||
| 187 | mic_reset_inform_host(vdev); | ||
| 188 | complete_all(&mvdev->reset_done); | ||
| 189 | } | ||
| 190 | |||
| 191 | /* | ||
| 192 | * The virtio_ring code calls this API when it wants to notify the Host. | ||
| 193 | */ | ||
| 194 | static bool mic_notify(struct virtqueue *vq) | ||
| 195 | { | ||
| 196 | struct mic_vdev *mvdev = vq->priv; | ||
| 197 | |||
| 198 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | ||
| 199 | return true; | ||
| 200 | } | ||
| 201 | |||
| 202 | static void mic_del_vq(struct virtqueue *vq, int n) | ||
| 203 | { | ||
| 204 | struct mic_vdev *mvdev = to_micvdev(vq->vdev); | ||
| 205 | struct vring *vr = (struct vring *)(vq + 1); | ||
| 206 | |||
| 207 | free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n])); | ||
| 208 | vring_del_virtqueue(vq); | ||
| 209 | mic_card_unmap(mvdev->mdev, mvdev->vr[n]); | ||
| 210 | mvdev->vr[n] = NULL; | ||
| 211 | } | ||
| 212 | |||
| 213 | static void mic_del_vqs(struct virtio_device *vdev) | ||
| 214 | { | ||
| 215 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
| 216 | struct virtqueue *vq, *n; | ||
| 217 | int idx = 0; | ||
| 218 | |||
| 219 | dev_dbg(mic_dev(mvdev), "%s\n", __func__); | ||
| 220 | |||
| 221 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) | ||
| 222 | mic_del_vq(vq, idx++); | ||
| 223 | } | ||
| 224 | |||
| 225 | /* | ||
| 226 | * This routine will assign vring's allocated in host/io memory. Code in | ||
| 227 | * virtio_ring.c however continues to access this io memory as if it were local | ||
| 228 | * memory without io accessors. | ||
| 229 | */ | ||
| 230 | static struct virtqueue *mic_find_vq(struct virtio_device *vdev, | ||
| 231 | unsigned index, | ||
| 232 | void (*callback)(struct virtqueue *vq), | ||
| 233 | const char *name) | ||
| 234 | { | ||
| 235 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
| 236 | struct mic_vqconfig __iomem *vqconfig; | ||
| 237 | struct mic_vqconfig config; | ||
| 238 | struct virtqueue *vq; | ||
| 239 | void __iomem *va; | ||
| 240 | struct _mic_vring_info __iomem *info; | ||
| 241 | void *used; | ||
| 242 | int vr_size, _vr_size, err, magic; | ||
| 243 | struct vring *vr; | ||
| 244 | u8 type = ioread8(&mvdev->desc->type); | ||
| 245 | |||
| 246 | if (index >= ioread8(&mvdev->desc->num_vq)) | ||
| 247 | return ERR_PTR(-ENOENT); | ||
| 248 | |||
| 249 | if (!name) | ||
| 250 | return ERR_PTR(-ENOENT); | ||
| 251 | |||
| 252 | /* First assign the vring's allocated in host memory */ | ||
| 253 | vqconfig = mic_vq_config(mvdev->desc) + index; | ||
| 254 | memcpy_fromio(&config, vqconfig, sizeof(config)); | ||
| 255 | _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); | ||
| 256 | vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); | ||
| 257 | va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size); | ||
| 258 | if (!va) | ||
| 259 | return ERR_PTR(-ENOMEM); | ||
| 260 | mvdev->vr[index] = va; | ||
| 261 | memset_io(va, 0x0, _vr_size); | ||
| 262 | vq = vring_new_virtqueue(index, le16_to_cpu(config.num), | ||
| 263 | MIC_VIRTIO_RING_ALIGN, vdev, false, | ||
| 264 | (void __force *)va, mic_notify, callback, | ||
| 265 | name); | ||
| 266 | if (!vq) { | ||
| 267 | err = -ENOMEM; | ||
| 268 | goto unmap; | ||
| 269 | } | ||
| 270 | info = va + _vr_size; | ||
| 271 | magic = ioread32(&info->magic); | ||
| 272 | |||
| 273 | if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) { | ||
| 274 | err = -EIO; | ||
| 275 | goto unmap; | ||
| 276 | } | ||
| 277 | |||
| 278 | /* Allocate and reassign used ring now */ | ||
| 279 | mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + | ||
| 280 | sizeof(struct vring_used_elem) * | ||
| 281 | le16_to_cpu(config.num)); | ||
| 282 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
| 283 | get_order(mvdev->used_size[index])); | ||
| 284 | if (!used) { | ||
| 285 | err = -ENOMEM; | ||
| 286 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 287 | __func__, __LINE__, err); | ||
| 288 | goto del_vq; | ||
| 289 | } | ||
| 290 | iowrite64(virt_to_phys(used), &vqconfig->used_address); | ||
| 291 | |||
| 292 | /* | ||
| 293 | * To reassign the used ring here we are directly accessing | ||
| 294 | * struct vring_virtqueue which is a private data structure | ||
| 295 | * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in | ||
| 296 | * vring_new_virtqueue() would ensure that | ||
| 297 | * (&vq->vring == (struct vring *) (&vq->vq + 1)); | ||
| 298 | */ | ||
| 299 | vr = (struct vring *)(vq + 1); | ||
| 300 | vr->used = used; | ||
| 301 | |||
| 302 | vq->priv = mvdev; | ||
| 303 | return vq; | ||
| 304 | del_vq: | ||
| 305 | vring_del_virtqueue(vq); | ||
| 306 | unmap: | ||
| 307 | mic_card_unmap(mvdev->mdev, mvdev->vr[index]); | ||
| 308 | return ERR_PTR(err); | ||
| 309 | } | ||
| 310 | |||
| 311 | static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, | ||
| 312 | struct virtqueue *vqs[], | ||
| 313 | vq_callback_t *callbacks[], | ||
| 314 | const char * const names[]) | ||
| 315 | { | ||
| 316 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
| 317 | struct mic_device_ctrl __iomem *dc = mvdev->dc; | ||
| 318 | int i, err, retry; | ||
| 319 | |||
| 320 | /* We must have this many virtqueues. */ | ||
| 321 | if (nvqs > ioread8(&mvdev->desc->num_vq)) | ||
| 322 | return -ENOENT; | ||
| 323 | |||
| 324 | for (i = 0; i < nvqs; ++i) { | ||
| 325 | dev_dbg(mic_dev(mvdev), "%s: %d: %s\n", | ||
| 326 | __func__, i, names[i]); | ||
| 327 | vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]); | ||
| 328 | if (IS_ERR(vqs[i])) { | ||
| 329 | err = PTR_ERR(vqs[i]); | ||
| 330 | goto error; | ||
| 331 | } | ||
| 332 | } | ||
| 333 | |||
| 334 | iowrite8(1, &dc->used_address_updated); | ||
| 335 | /* | ||
| 336 | * Send an interrupt to the host to inform it that used | ||
| 337 | * rings have been re-assigned. | ||
| 338 | */ | ||
| 339 | mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db); | ||
| 340 | for (retry = 100; retry--;) { | ||
| 341 | if (!ioread8(&dc->used_address_updated)) | ||
| 342 | break; | ||
| 343 | msleep(100); | ||
| 344 | }; | ||
| 345 | |||
| 346 | dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry); | ||
| 347 | if (!retry) { | ||
| 348 | err = -ENODEV; | ||
| 349 | goto error; | ||
| 350 | } | ||
| 351 | |||
| 352 | return 0; | ||
| 353 | error: | ||
| 354 | mic_del_vqs(vdev); | ||
| 355 | return err; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* | ||
| 359 | * The config ops structure as defined by virtio config | ||
| 360 | */ | ||
| 361 | static struct virtio_config_ops mic_vq_config_ops = { | ||
| 362 | .get_features = mic_get_features, | ||
| 363 | .finalize_features = mic_finalize_features, | ||
| 364 | .get = mic_get, | ||
| 365 | .set = mic_set, | ||
| 366 | .get_status = mic_get_status, | ||
| 367 | .set_status = mic_set_status, | ||
| 368 | .reset = mic_reset, | ||
| 369 | .find_vqs = mic_find_vqs, | ||
| 370 | .del_vqs = mic_del_vqs, | ||
| 371 | }; | ||
| 372 | |||
| 373 | static irqreturn_t | ||
| 374 | mic_virtio_intr_handler(int irq, void *data) | ||
| 375 | { | ||
| 376 | struct mic_vdev *mvdev = data; | ||
| 377 | struct virtqueue *vq; | ||
| 378 | |||
| 379 | mic_ack_interrupt(mvdev->mdev); | ||
| 380 | list_for_each_entry(vq, &mvdev->vdev.vqs, list) | ||
| 381 | vring_interrupt(0, vq); | ||
| 382 | |||
| 383 | return IRQ_HANDLED; | ||
| 384 | } | ||
| 385 | |||
| 386 | static void mic_virtio_release_dev(struct device *_d) | ||
| 387 | { | ||
| 388 | /* | ||
| 389 | * No need for a release method similar to virtio PCI. | ||
| 390 | * Provide an empty one to avoid getting a warning from core. | ||
| 391 | */ | ||
| 392 | } | ||
| 393 | |||
| 394 | /* | ||
| 395 | * adds a new device and register it with virtio | ||
| 396 | * appropriate drivers are loaded by the device model | ||
| 397 | */ | ||
| 398 | static int mic_add_device(struct mic_device_desc __iomem *d, | ||
| 399 | unsigned int offset, struct mic_driver *mdrv) | ||
| 400 | { | ||
| 401 | struct mic_vdev *mvdev; | ||
| 402 | int ret; | ||
| 403 | int virtio_db; | ||
| 404 | u8 type = ioread8(&d->type); | ||
| 405 | |||
| 406 | mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL); | ||
| 407 | if (!mvdev) { | ||
| 408 | dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n", | ||
| 409 | offset, type); | ||
| 410 | return -ENOMEM; | ||
| 411 | } | ||
| 412 | |||
| 413 | mvdev->mdev = &mdrv->mdev; | ||
| 414 | mvdev->vdev.dev.parent = mdrv->dev; | ||
| 415 | mvdev->vdev.dev.release = mic_virtio_release_dev; | ||
| 416 | mvdev->vdev.id.device = type; | ||
| 417 | mvdev->vdev.config = &mic_vq_config_ops; | ||
| 418 | mvdev->desc = d; | ||
| 419 | mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d); | ||
| 420 | init_completion(&mvdev->reset_done); | ||
| 421 | |||
| 422 | virtio_db = mic_next_card_db(); | ||
| 423 | mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler, | ||
| 424 | NULL, "virtio intr", mvdev, virtio_db); | ||
| 425 | if (IS_ERR(mvdev->virtio_cookie)) { | ||
| 426 | ret = PTR_ERR(mvdev->virtio_cookie); | ||
| 427 | goto kfree; | ||
| 428 | } | ||
| 429 | iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db); | ||
| 430 | mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db); | ||
| 431 | |||
| 432 | ret = register_virtio_device(&mvdev->vdev); | ||
| 433 | if (ret) { | ||
| 434 | dev_err(mic_dev(mvdev), | ||
| 435 | "Failed to register mic device %u type %u\n", | ||
| 436 | offset, type); | ||
| 437 | goto free_irq; | ||
| 438 | } | ||
| 439 | iowrite64((u64)mvdev, &mvdev->dc->vdev); | ||
| 440 | dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n", | ||
| 441 | __func__, offset, type, mvdev); | ||
| 442 | |||
| 443 | return 0; | ||
| 444 | |||
| 445 | free_irq: | ||
| 446 | mic_free_card_irq(mvdev->virtio_cookie, mvdev); | ||
| 447 | kfree: | ||
| 448 | kfree(mvdev); | ||
| 449 | return ret; | ||
| 450 | } | ||
| 451 | |||
| 452 | /* | ||
| 453 | * match for a mic device with a specific desc pointer | ||
| 454 | */ | ||
| 455 | static int mic_match_desc(struct device *dev, void *data) | ||
| 456 | { | ||
| 457 | struct virtio_device *vdev = dev_to_virtio(dev); | ||
| 458 | struct mic_vdev *mvdev = to_micvdev(vdev); | ||
| 459 | |||
| 460 | return mvdev->desc == (void __iomem *)data; | ||
| 461 | } | ||
| 462 | |||
| 463 | static void mic_handle_config_change(struct mic_device_desc __iomem *d, | ||
| 464 | unsigned int offset, struct mic_driver *mdrv) | ||
| 465 | { | ||
| 466 | struct mic_device_ctrl __iomem *dc | ||
| 467 | = (void __iomem *)d + mic_aligned_desc_size(d); | ||
| 468 | struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev); | ||
| 469 | |||
| 470 | if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED) | ||
| 471 | return; | ||
| 472 | |||
| 473 | dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__); | ||
| 474 | virtio_config_changed(&mvdev->vdev); | ||
| 475 | iowrite8(1, &dc->guest_ack); | ||
| 476 | } | ||
| 477 | |||
| 478 | /* | ||
| 479 | * removes a virtio device if a hot remove event has been | ||
| 480 | * requested by the host. | ||
| 481 | */ | ||
| 482 | static int mic_remove_device(struct mic_device_desc __iomem *d, | ||
| 483 | unsigned int offset, struct mic_driver *mdrv) | ||
| 484 | { | ||
| 485 | struct mic_device_ctrl __iomem *dc | ||
| 486 | = (void __iomem *)d + mic_aligned_desc_size(d); | ||
| 487 | struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev); | ||
| 488 | u8 status; | ||
| 489 | int ret = -1; | ||
| 490 | |||
| 491 | if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { | ||
| 492 | dev_dbg(mdrv->dev, | ||
| 493 | "%s %d config_change %d type %d mvdev %p\n", | ||
| 494 | __func__, __LINE__, | ||
| 495 | ioread8(&dc->config_change), ioread8(&d->type), mvdev); | ||
| 496 | |||
| 497 | status = ioread8(&d->status); | ||
| 498 | reinit_completion(&mvdev->reset_done); | ||
| 499 | unregister_virtio_device(&mvdev->vdev); | ||
| 500 | mic_free_card_irq(mvdev->virtio_cookie, mvdev); | ||
| 501 | if (status & VIRTIO_CONFIG_S_DRIVER_OK) | ||
| 502 | wait_for_completion(&mvdev->reset_done); | ||
| 503 | kfree(mvdev); | ||
| 504 | iowrite8(1, &dc->guest_ack); | ||
| 505 | dev_dbg(mdrv->dev, "%s %d guest_ack %d\n", | ||
| 506 | __func__, __LINE__, ioread8(&dc->guest_ack)); | ||
| 507 | ret = 0; | ||
| 508 | } | ||
| 509 | |||
| 510 | return ret; | ||
| 511 | } | ||
| 512 | |||
| 513 | #define REMOVE_DEVICES true | ||
| 514 | |||
| 515 | static void mic_scan_devices(struct mic_driver *mdrv, bool remove) | ||
| 516 | { | ||
| 517 | s8 type; | ||
| 518 | unsigned int i; | ||
| 519 | struct mic_device_desc __iomem *d; | ||
| 520 | struct mic_device_ctrl __iomem *dc; | ||
| 521 | struct device *dev; | ||
| 522 | int ret; | ||
| 523 | |||
| 524 | for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE; | ||
| 525 | i += mic_total_desc_size(d)) { | ||
| 526 | d = mdrv->dp + i; | ||
| 527 | dc = (void __iomem *)d + mic_aligned_desc_size(d); | ||
| 528 | /* | ||
| 529 | * This read barrier is paired with the corresponding write | ||
| 530 | * barrier on the host which is inserted before adding or | ||
| 531 | * removing a virtio device descriptor, by updating the type. | ||
| 532 | */ | ||
| 533 | rmb(); | ||
| 534 | type = ioread8(&d->type); | ||
| 535 | |||
| 536 | /* end of list */ | ||
| 537 | if (type == 0) | ||
| 538 | break; | ||
| 539 | |||
| 540 | if (type == -1) | ||
| 541 | continue; | ||
| 542 | |||
| 543 | /* device already exists */ | ||
| 544 | dev = device_find_child(mdrv->dev, (void __force *)d, | ||
| 545 | mic_match_desc); | ||
| 546 | if (dev) { | ||
| 547 | if (remove) | ||
| 548 | iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE, | ||
| 549 | &dc->config_change); | ||
| 550 | put_device(dev); | ||
| 551 | mic_handle_config_change(d, i, mdrv); | ||
| 552 | ret = mic_remove_device(d, i, mdrv); | ||
| 553 | if (!ret && !remove) | ||
| 554 | iowrite8(-1, &d->type); | ||
| 555 | if (remove) { | ||
| 556 | iowrite8(0, &dc->config_change); | ||
| 557 | iowrite8(0, &dc->guest_ack); | ||
| 558 | } | ||
| 559 | continue; | ||
| 560 | } | ||
| 561 | |||
| 562 | /* new device */ | ||
| 563 | dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n", | ||
| 564 | __func__, __LINE__, d); | ||
| 565 | if (!remove) | ||
| 566 | mic_add_device(d, i, mdrv); | ||
| 567 | } | ||
| 568 | } | ||
| 569 | |||
| 570 | /* | ||
| 571 | * mic_hotplug_device tries to find changes in the device page. | ||
| 572 | */ | ||
| 573 | static void mic_hotplug_devices(struct work_struct *work) | ||
| 574 | { | ||
| 575 | struct mic_driver *mdrv = container_of(work, | ||
| 576 | struct mic_driver, hotplug_work); | ||
| 577 | |||
| 578 | mic_scan_devices(mdrv, !REMOVE_DEVICES); | ||
| 579 | } | ||
| 580 | |||
| 581 | /* | ||
| 582 | * Interrupt handler for hot plug/config changes etc. | ||
| 583 | */ | ||
| 584 | static irqreturn_t | ||
| 585 | mic_extint_handler(int irq, void *data) | ||
| 586 | { | ||
| 587 | struct mic_driver *mdrv = (struct mic_driver *)data; | ||
| 588 | |||
| 589 | dev_dbg(mdrv->dev, "%s %d hotplug work\n", | ||
| 590 | __func__, __LINE__); | ||
| 591 | mic_ack_interrupt(&mdrv->mdev); | ||
| 592 | schedule_work(&mdrv->hotplug_work); | ||
| 593 | return IRQ_HANDLED; | ||
| 594 | } | ||
| 595 | |||
| 596 | /* | ||
| 597 | * Init function for virtio | ||
| 598 | */ | ||
| 599 | int mic_devices_init(struct mic_driver *mdrv) | ||
| 600 | { | ||
| 601 | int rc; | ||
| 602 | struct mic_bootparam __iomem *bootparam; | ||
| 603 | int config_db; | ||
| 604 | |||
| 605 | INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices); | ||
| 606 | mic_scan_devices(mdrv, !REMOVE_DEVICES); | ||
| 607 | |||
| 608 | config_db = mic_next_card_db(); | ||
| 609 | virtio_config_cookie = mic_request_card_irq(mic_extint_handler, NULL, | ||
| 610 | "virtio_config_intr", mdrv, | ||
| 611 | config_db); | ||
| 612 | if (IS_ERR(virtio_config_cookie)) { | ||
| 613 | rc = PTR_ERR(virtio_config_cookie); | ||
| 614 | goto exit; | ||
| 615 | } | ||
| 616 | |||
| 617 | bootparam = mdrv->dp; | ||
| 618 | iowrite8(config_db, &bootparam->h2c_config_db); | ||
| 619 | return 0; | ||
| 620 | exit: | ||
| 621 | return rc; | ||
| 622 | } | ||
| 623 | |||
| 624 | /* | ||
| 625 | * Uninit function for virtio | ||
| 626 | */ | ||
| 627 | void mic_devices_uninit(struct mic_driver *mdrv) | ||
| 628 | { | ||
| 629 | struct mic_bootparam __iomem *bootparam = mdrv->dp; | ||
| 630 | iowrite8(-1, &bootparam->h2c_config_db); | ||
| 631 | mic_free_card_irq(virtio_config_cookie, mdrv); | ||
| 632 | flush_work(&mdrv->hotplug_work); | ||
| 633 | mic_scan_devices(mdrv, REMOVE_DEVICES); | ||
| 634 | } | ||
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h deleted file mode 100644 index d0407ba53bb7..000000000000 --- a/drivers/misc/mic/card/mic_virtio.h +++ /dev/null | |||
| @@ -1,76 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2013 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Disclaimer: The codes contained in these modules may be specific to | ||
| 19 | * the Intel Software Development Platform codenamed: Knights Ferry, and | ||
| 20 | * the Intel product codenamed: Knights Corner, and are not backward | ||
| 21 | * compatible with other Intel products. Additionally, Intel will NOT | ||
| 22 | * support the codes or instruction set in future products. | ||
| 23 | * | ||
| 24 | * Intel MIC Card driver. | ||
| 25 | * | ||
| 26 | */ | ||
| 27 | #ifndef __MIC_CARD_VIRTIO_H | ||
| 28 | #define __MIC_CARD_VIRTIO_H | ||
| 29 | |||
| 30 | #include <linux/mic_common.h> | ||
| 31 | #include "mic_device.h" | ||
| 32 | |||
| 33 | /* | ||
| 34 | * 64 bit I/O access | ||
| 35 | */ | ||
| 36 | #ifndef ioread64 | ||
| 37 | #define ioread64 readq | ||
| 38 | #endif | ||
| 39 | #ifndef iowrite64 | ||
| 40 | #define iowrite64 writeq | ||
| 41 | #endif | ||
| 42 | |||
| 43 | static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) | ||
| 44 | { | ||
| 45 | return sizeof(*desc) | ||
| 46 | + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) | ||
| 47 | + ioread8(&desc->feature_len) * 2 | ||
| 48 | + ioread8(&desc->config_len); | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline struct mic_vqconfig __iomem * | ||
| 52 | mic_vq_config(struct mic_device_desc __iomem *desc) | ||
| 53 | { | ||
| 54 | return (struct mic_vqconfig __iomem *)(desc + 1); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline __u8 __iomem * | ||
| 58 | mic_vq_features(struct mic_device_desc __iomem *desc) | ||
| 59 | { | ||
| 60 | return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq)); | ||
| 61 | } | ||
| 62 | |||
| 63 | static inline __u8 __iomem * | ||
| 64 | mic_vq_configspace(struct mic_device_desc __iomem *desc) | ||
| 65 | { | ||
| 66 | return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2; | ||
| 67 | } | ||
| 68 | static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) | ||
| 69 | { | ||
| 70 | return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); | ||
| 71 | } | ||
| 72 | |||
| 73 | int mic_devices_init(struct mic_driver *mdrv); | ||
| 74 | void mic_devices_uninit(struct mic_driver *mdrv); | ||
| 75 | |||
| 76 | #endif | ||
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c index b2958ce2368c..b9f0710ffa6b 100644 --- a/drivers/misc/mic/card/mic_x100.c +++ b/drivers/misc/mic/card/mic_x100.c | |||
| @@ -326,6 +326,7 @@ static int __init mic_init(void) | |||
| 326 | goto done; | 326 | goto done; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | request_module("mic_x100_dma"); | ||
| 329 | mic_init_card_debugfs(); | 330 | mic_init_card_debugfs(); |
| 330 | ret = platform_device_register(&mic_platform_dev); | 331 | ret = platform_device_register(&mic_platform_dev); |
| 331 | if (ret) { | 332 | if (ret) { |
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c index 4b4b356c797d..7005cb1e01d2 100644 --- a/drivers/misc/mic/cosm/cosm_main.c +++ b/drivers/misc/mic/cosm/cosm_main.c | |||
| @@ -153,8 +153,10 @@ void cosm_stop(struct cosm_device *cdev, bool force) | |||
| 153 | * stop(..) calls device_unregister and will crash the system if | 153 | * stop(..) calls device_unregister and will crash the system if |
| 154 | * called multiple times. | 154 | * called multiple times. |
| 155 | */ | 155 | */ |
| 156 | bool call_hw_ops = cdev->state != MIC_RESET_FAILED && | 156 | u8 state = cdev->state == MIC_RESETTING ? |
| 157 | cdev->state != MIC_READY; | 157 | cdev->prev_state : cdev->state; |
| 158 | bool call_hw_ops = state != MIC_RESET_FAILED && | ||
| 159 | state != MIC_READY; | ||
| 158 | 160 | ||
| 159 | if (cdev->state != MIC_RESETTING) | 161 | if (cdev->state != MIC_RESETTING) |
| 160 | cosm_set_state(cdev, MIC_RESETTING); | 162 | cosm_set_state(cdev, MIC_RESETTING); |
| @@ -195,8 +197,11 @@ int cosm_reset(struct cosm_device *cdev) | |||
| 195 | 197 | ||
| 196 | mutex_lock(&cdev->cosm_mutex); | 198 | mutex_lock(&cdev->cosm_mutex); |
| 197 | if (cdev->state != MIC_READY) { | 199 | if (cdev->state != MIC_READY) { |
| 198 | cosm_set_state(cdev, MIC_RESETTING); | 200 | if (cdev->state != MIC_RESETTING) { |
| 199 | schedule_work(&cdev->reset_trigger_work); | 201 | cdev->prev_state = cdev->state; |
| 202 | cosm_set_state(cdev, MIC_RESETTING); | ||
| 203 | schedule_work(&cdev->reset_trigger_work); | ||
| 204 | } | ||
| 200 | } else { | 205 | } else { |
| 201 | dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__); | 206 | dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__); |
| 202 | rc = -EINVAL; | 207 | rc = -EINVAL; |
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile index 004d3db0f990..f3b502333ded 100644 --- a/drivers/misc/mic/host/Makefile +++ b/drivers/misc/mic/host/Makefile | |||
| @@ -9,5 +9,3 @@ mic_host-objs += mic_smpt.o | |||
| 9 | mic_host-objs += mic_intr.o | 9 | mic_host-objs += mic_intr.o |
| 10 | mic_host-objs += mic_boot.o | 10 | mic_host-objs += mic_boot.o |
| 11 | mic_host-objs += mic_debugfs.o | 11 | mic_host-objs += mic_debugfs.o |
| 12 | mic_host-objs += mic_fops.o | ||
| 13 | mic_host-objs += mic_virtio.o | ||
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 7845564dff64..8c91c9950b54 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c | |||
| @@ -25,10 +25,117 @@ | |||
| 25 | #include <linux/mic_common.h> | 25 | #include <linux/mic_common.h> |
| 26 | #include <linux/mic_bus.h> | 26 | #include <linux/mic_bus.h> |
| 27 | #include "../bus/scif_bus.h" | 27 | #include "../bus/scif_bus.h" |
| 28 | #include "../bus/vop_bus.h" | ||
| 28 | #include "../common/mic_dev.h" | 29 | #include "../common/mic_dev.h" |
| 29 | #include "mic_device.h" | 30 | #include "mic_device.h" |
| 30 | #include "mic_smpt.h" | 31 | #include "mic_smpt.h" |
| 31 | #include "mic_virtio.h" | 32 | |
| 33 | static inline struct mic_device *vpdev_to_mdev(struct device *dev) | ||
| 34 | { | ||
| 35 | return dev_get_drvdata(dev->parent); | ||
| 36 | } | ||
| 37 | |||
| 38 | static dma_addr_t | ||
| 39 | _mic_dma_map_page(struct device *dev, struct page *page, | ||
| 40 | unsigned long offset, size_t size, | ||
| 41 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
| 42 | { | ||
| 43 | void *va = phys_to_virt(page_to_phys(page)) + offset; | ||
| 44 | struct mic_device *mdev = vpdev_to_mdev(dev); | ||
| 45 | |||
| 46 | return mic_map_single(mdev, va, size); | ||
| 47 | } | ||
| 48 | |||
| 49 | static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, | ||
| 50 | size_t size, enum dma_data_direction dir, | ||
| 51 | struct dma_attrs *attrs) | ||
| 52 | { | ||
| 53 | struct mic_device *mdev = vpdev_to_mdev(dev); | ||
| 54 | |||
| 55 | mic_unmap_single(mdev, dma_addr, size); | ||
| 56 | } | ||
| 57 | |||
| 58 | static const struct dma_map_ops _mic_dma_ops = { | ||
| 59 | .map_page = _mic_dma_map_page, | ||
| 60 | .unmap_page = _mic_dma_unmap_page, | ||
| 61 | }; | ||
| 62 | |||
| 63 | static struct mic_irq * | ||
| 64 | __mic_request_irq(struct vop_device *vpdev, | ||
| 65 | irqreturn_t (*func)(int irq, void *data), | ||
| 66 | const char *name, void *data, int intr_src) | ||
| 67 | { | ||
| 68 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
| 69 | |||
| 70 | return mic_request_threaded_irq(mdev, func, NULL, name, data, | ||
| 71 | intr_src, MIC_INTR_DB); | ||
| 72 | } | ||
| 73 | |||
| 74 | static void __mic_free_irq(struct vop_device *vpdev, | ||
| 75 | struct mic_irq *cookie, void *data) | ||
| 76 | { | ||
| 77 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
| 78 | |||
| 79 | return mic_free_irq(mdev, cookie, data); | ||
| 80 | } | ||
| 81 | |||
| 82 | static void __mic_ack_interrupt(struct vop_device *vpdev, int num) | ||
| 83 | { | ||
| 84 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
| 85 | |||
| 86 | mdev->ops->intr_workarounds(mdev); | ||
| 87 | } | ||
| 88 | |||
| 89 | static int __mic_next_db(struct vop_device *vpdev) | ||
| 90 | { | ||
| 91 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
| 92 | |||
| 93 | return mic_next_db(mdev); | ||
| 94 | } | ||
| 95 | |||
| 96 | static void *__mic_get_dp(struct vop_device *vpdev) | ||
| 97 | { | ||
| 98 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
| 99 | |||
| 100 | return mdev->dp; | ||
| 101 | } | ||
| 102 | |||
| 103 | static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev) | ||
| 104 | { | ||
| 105 | return NULL; | ||
| 106 | } | ||
| 107 | |||
| 108 | static void __mic_send_intr(struct vop_device *vpdev, int db) | ||
| 109 | { | ||
| 110 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
| 111 | |||
| 112 | mdev->ops->send_intr(mdev, db); | ||
| 113 | } | ||
| 114 | |||
| 115 | static void __iomem *__mic_ioremap(struct vop_device *vpdev, | ||
| 116 | dma_addr_t pa, size_t len) | ||
| 117 | { | ||
| 118 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | ||
| 119 | |||
| 120 | return mdev->aper.va + pa; | ||
| 121 | } | ||
| 122 | |||
| 123 | static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va) | ||
| 124 | { | ||
| 125 | /* nothing to do */ | ||
| 126 | } | ||
| 127 | |||
| 128 | static struct vop_hw_ops vop_hw_ops = { | ||
| 129 | .request_irq = __mic_request_irq, | ||
| 130 | .free_irq = __mic_free_irq, | ||
| 131 | .ack_interrupt = __mic_ack_interrupt, | ||
| 132 | .next_db = __mic_next_db, | ||
| 133 | .get_dp = __mic_get_dp, | ||
| 134 | .get_remote_dp = __mic_get_remote_dp, | ||
| 135 | .send_intr = __mic_send_intr, | ||
| 136 | .ioremap = __mic_ioremap, | ||
| 137 | .iounmap = __mic_iounmap, | ||
| 138 | }; | ||
| 32 | 139 | ||
| 33 | static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) | 140 | static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) |
| 34 | { | 141 | { |
| @@ -315,7 +422,6 @@ static int mic_request_dma_chans(struct mic_device *mdev) | |||
| 315 | dma_cap_mask_t mask; | 422 | dma_cap_mask_t mask; |
| 316 | struct dma_chan *chan; | 423 | struct dma_chan *chan; |
| 317 | 424 | ||
| 318 | request_module("mic_x100_dma"); | ||
| 319 | dma_cap_zero(mask); | 425 | dma_cap_zero(mask); |
| 320 | dma_cap_set(DMA_MEMCPY, mask); | 426 | dma_cap_set(DMA_MEMCPY, mask); |
| 321 | 427 | ||
| @@ -387,9 +493,18 @@ static int _mic_start(struct cosm_device *cdev, int id) | |||
| 387 | goto dma_free; | 493 | goto dma_free; |
| 388 | } | 494 | } |
| 389 | 495 | ||
| 496 | mdev->vpdev = vop_register_device(&mdev->pdev->dev, | ||
| 497 | VOP_DEV_TRNSP, &_mic_dma_ops, | ||
| 498 | &vop_hw_ops, id + 1, &mdev->aper, | ||
| 499 | mdev->dma_ch[0]); | ||
| 500 | if (IS_ERR(mdev->vpdev)) { | ||
| 501 | rc = PTR_ERR(mdev->vpdev); | ||
| 502 | goto scif_remove; | ||
| 503 | } | ||
| 504 | |||
| 390 | rc = mdev->ops->load_mic_fw(mdev, NULL); | 505 | rc = mdev->ops->load_mic_fw(mdev, NULL); |
| 391 | if (rc) | 506 | if (rc) |
| 392 | goto scif_remove; | 507 | goto vop_remove; |
| 393 | mic_smpt_restore(mdev); | 508 | mic_smpt_restore(mdev); |
| 394 | mic_intr_restore(mdev); | 509 | mic_intr_restore(mdev); |
| 395 | mdev->intr_ops->enable_interrupts(mdev); | 510 | mdev->intr_ops->enable_interrupts(mdev); |
| @@ -397,6 +512,8 @@ static int _mic_start(struct cosm_device *cdev, int id) | |||
| 397 | mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); | 512 | mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32); |
| 398 | mdev->ops->send_firmware_intr(mdev); | 513 | mdev->ops->send_firmware_intr(mdev); |
| 399 | goto unlock_ret; | 514 | goto unlock_ret; |
| 515 | vop_remove: | ||
| 516 | vop_unregister_device(mdev->vpdev); | ||
| 400 | scif_remove: | 517 | scif_remove: |
| 401 | scif_unregister_device(mdev->scdev); | 518 | scif_unregister_device(mdev->scdev); |
| 402 | dma_free: | 519 | dma_free: |
| @@ -423,7 +540,7 @@ static void _mic_stop(struct cosm_device *cdev, bool force) | |||
| 423 | * will be the first to be registered and the last to be | 540 | * will be the first to be registered and the last to be |
| 424 | * unregistered. | 541 | * unregistered. |
| 425 | */ | 542 | */ |
| 426 | mic_virtio_reset_devices(mdev); | 543 | vop_unregister_device(mdev->vpdev); |
| 427 | scif_unregister_device(mdev->scdev); | 544 | scif_unregister_device(mdev->scdev); |
| 428 | mic_free_dma_chans(mdev); | 545 | mic_free_dma_chans(mdev); |
| 429 | mbus_unregister_device(mdev->dma_mbdev); | 546 | mbus_unregister_device(mdev->dma_mbdev); |
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c index 10581600777a..0a9daba8bb5d 100644 --- a/drivers/misc/mic/host/mic_debugfs.c +++ b/drivers/misc/mic/host/mic_debugfs.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include "../common/mic_dev.h" | 26 | #include "../common/mic_dev.h" |
| 27 | #include "mic_device.h" | 27 | #include "mic_device.h" |
| 28 | #include "mic_smpt.h" | 28 | #include "mic_smpt.h" |
| 29 | #include "mic_virtio.h" | ||
| 30 | 29 | ||
| 31 | /* Debugfs parent dir */ | 30 | /* Debugfs parent dir */ |
| 32 | static struct dentry *mic_dbg; | 31 | static struct dentry *mic_dbg; |
| @@ -100,190 +99,6 @@ static const struct file_operations post_code_ops = { | |||
| 100 | .release = mic_post_code_debug_release | 99 | .release = mic_post_code_debug_release |
| 101 | }; | 100 | }; |
| 102 | 101 | ||
| 103 | static int mic_dp_show(struct seq_file *s, void *pos) | ||
| 104 | { | ||
| 105 | struct mic_device *mdev = s->private; | ||
| 106 | struct mic_device_desc *d; | ||
| 107 | struct mic_device_ctrl *dc; | ||
| 108 | struct mic_vqconfig *vqconfig; | ||
| 109 | __u32 *features; | ||
| 110 | __u8 *config; | ||
| 111 | struct mic_bootparam *bootparam = mdev->dp; | ||
| 112 | int i, j; | ||
| 113 | |||
| 114 | seq_printf(s, "Bootparam: magic 0x%x\n", | ||
| 115 | bootparam->magic); | ||
| 116 | seq_printf(s, "Bootparam: h2c_config_db %d\n", | ||
| 117 | bootparam->h2c_config_db); | ||
| 118 | seq_printf(s, "Bootparam: node_id %d\n", | ||
| 119 | bootparam->node_id); | ||
| 120 | seq_printf(s, "Bootparam: c2h_scif_db %d\n", | ||
| 121 | bootparam->c2h_scif_db); | ||
| 122 | seq_printf(s, "Bootparam: h2c_scif_db %d\n", | ||
| 123 | bootparam->h2c_scif_db); | ||
| 124 | seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n", | ||
| 125 | bootparam->scif_host_dma_addr); | ||
| 126 | seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n", | ||
| 127 | bootparam->scif_card_dma_addr); | ||
| 128 | |||
| 129 | |||
| 130 | for (i = sizeof(*bootparam); i < MIC_DP_SIZE; | ||
| 131 | i += mic_total_desc_size(d)) { | ||
| 132 | d = mdev->dp + i; | ||
| 133 | dc = (void *)d + mic_aligned_desc_size(d); | ||
| 134 | |||
| 135 | /* end of list */ | ||
| 136 | if (d->type == 0) | ||
| 137 | break; | ||
| 138 | |||
| 139 | if (d->type == -1) | ||
| 140 | continue; | ||
| 141 | |||
| 142 | seq_printf(s, "Type %d ", d->type); | ||
| 143 | seq_printf(s, "Num VQ %d ", d->num_vq); | ||
| 144 | seq_printf(s, "Feature Len %d\n", d->feature_len); | ||
| 145 | seq_printf(s, "Config Len %d ", d->config_len); | ||
| 146 | seq_printf(s, "Shutdown Status %d\n", d->status); | ||
| 147 | |||
| 148 | for (j = 0; j < d->num_vq; j++) { | ||
| 149 | vqconfig = mic_vq_config(d) + j; | ||
| 150 | seq_printf(s, "vqconfig[%d]: ", j); | ||
| 151 | seq_printf(s, "address 0x%llx ", vqconfig->address); | ||
| 152 | seq_printf(s, "num %d ", vqconfig->num); | ||
| 153 | seq_printf(s, "used address 0x%llx\n", | ||
| 154 | vqconfig->used_address); | ||
| 155 | } | ||
| 156 | |||
| 157 | features = (__u32 *)mic_vq_features(d); | ||
| 158 | seq_printf(s, "Features: Host 0x%x ", features[0]); | ||
| 159 | seq_printf(s, "Guest 0x%x\n", features[1]); | ||
| 160 | |||
| 161 | config = mic_vq_configspace(d); | ||
| 162 | for (j = 0; j < d->config_len; j++) | ||
| 163 | seq_printf(s, "config[%d]=%d\n", j, config[j]); | ||
| 164 | |||
| 165 | seq_puts(s, "Device control:\n"); | ||
| 166 | seq_printf(s, "Config Change %d ", dc->config_change); | ||
| 167 | seq_printf(s, "Vdev reset %d\n", dc->vdev_reset); | ||
| 168 | seq_printf(s, "Guest Ack %d ", dc->guest_ack); | ||
| 169 | seq_printf(s, "Host ack %d\n", dc->host_ack); | ||
| 170 | seq_printf(s, "Used address updated %d ", | ||
| 171 | dc->used_address_updated); | ||
| 172 | seq_printf(s, "Vdev 0x%llx\n", dc->vdev); | ||
| 173 | seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db); | ||
| 174 | seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db); | ||
| 175 | } | ||
| 176 | |||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 180 | static int mic_dp_debug_open(struct inode *inode, struct file *file) | ||
| 181 | { | ||
| 182 | return single_open(file, mic_dp_show, inode->i_private); | ||
| 183 | } | ||
| 184 | |||
| 185 | static int mic_dp_debug_release(struct inode *inode, struct file *file) | ||
| 186 | { | ||
| 187 | return single_release(inode, file); | ||
| 188 | } | ||
| 189 | |||
| 190 | static const struct file_operations dp_ops = { | ||
| 191 | .owner = THIS_MODULE, | ||
| 192 | .open = mic_dp_debug_open, | ||
| 193 | .read = seq_read, | ||
| 194 | .llseek = seq_lseek, | ||
| 195 | .release = mic_dp_debug_release | ||
| 196 | }; | ||
| 197 | |||
| 198 | static int mic_vdev_info_show(struct seq_file *s, void *unused) | ||
| 199 | { | ||
| 200 | struct mic_device *mdev = s->private; | ||
| 201 | struct list_head *pos, *tmp; | ||
| 202 | struct mic_vdev *mvdev; | ||
| 203 | int i, j; | ||
| 204 | |||
| 205 | mutex_lock(&mdev->mic_mutex); | ||
| 206 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | ||
| 207 | mvdev = list_entry(pos, struct mic_vdev, list); | ||
| 208 | seq_printf(s, "VDEV type %d state %s in %ld out %ld\n", | ||
| 209 | mvdev->virtio_id, | ||
| 210 | mic_vdevup(mvdev) ? "UP" : "DOWN", | ||
| 211 | mvdev->in_bytes, | ||
| 212 | mvdev->out_bytes); | ||
| 213 | for (i = 0; i < MIC_MAX_VRINGS; i++) { | ||
| 214 | struct vring_desc *desc; | ||
| 215 | struct vring_avail *avail; | ||
| 216 | struct vring_used *used; | ||
| 217 | struct mic_vringh *mvr = &mvdev->mvr[i]; | ||
| 218 | struct vringh *vrh = &mvr->vrh; | ||
| 219 | int num = vrh->vring.num; | ||
| 220 | if (!num) | ||
| 221 | continue; | ||
| 222 | desc = vrh->vring.desc; | ||
| 223 | seq_printf(s, "vring i %d avail_idx %d", | ||
| 224 | i, mvr->vring.info->avail_idx & (num - 1)); | ||
| 225 | seq_printf(s, " vring i %d avail_idx %d\n", | ||
| 226 | i, mvr->vring.info->avail_idx); | ||
| 227 | seq_printf(s, "vrh i %d weak_barriers %d", | ||
| 228 | i, vrh->weak_barriers); | ||
| 229 | seq_printf(s, " last_avail_idx %d last_used_idx %d", | ||
| 230 | vrh->last_avail_idx, vrh->last_used_idx); | ||
| 231 | seq_printf(s, " completed %d\n", vrh->completed); | ||
| 232 | for (j = 0; j < num; j++) { | ||
| 233 | seq_printf(s, "desc[%d] addr 0x%llx len %d", | ||
| 234 | j, desc->addr, desc->len); | ||
| 235 | seq_printf(s, " flags 0x%x next %d\n", | ||
| 236 | desc->flags, desc->next); | ||
| 237 | desc++; | ||
| 238 | } | ||
| 239 | avail = vrh->vring.avail; | ||
| 240 | seq_printf(s, "avail flags 0x%x idx %d\n", | ||
| 241 | vringh16_to_cpu(vrh, avail->flags), | ||
| 242 | vringh16_to_cpu(vrh, avail->idx) & (num - 1)); | ||
| 243 | seq_printf(s, "avail flags 0x%x idx %d\n", | ||
| 244 | vringh16_to_cpu(vrh, avail->flags), | ||
| 245 | vringh16_to_cpu(vrh, avail->idx)); | ||
| 246 | for (j = 0; j < num; j++) | ||
| 247 | seq_printf(s, "avail ring[%d] %d\n", | ||
| 248 | j, avail->ring[j]); | ||
| 249 | used = vrh->vring.used; | ||
| 250 | seq_printf(s, "used flags 0x%x idx %d\n", | ||
| 251 | vringh16_to_cpu(vrh, used->flags), | ||
| 252 | vringh16_to_cpu(vrh, used->idx) & (num - 1)); | ||
| 253 | seq_printf(s, "used flags 0x%x idx %d\n", | ||
| 254 | vringh16_to_cpu(vrh, used->flags), | ||
| 255 | vringh16_to_cpu(vrh, used->idx)); | ||
| 256 | for (j = 0; j < num; j++) | ||
| 257 | seq_printf(s, "used ring[%d] id %d len %d\n", | ||
| 258 | j, vringh32_to_cpu(vrh, | ||
| 259 | used->ring[j].id), | ||
| 260 | vringh32_to_cpu(vrh, | ||
| 261 | used->ring[j].len)); | ||
| 262 | } | ||
| 263 | } | ||
| 264 | mutex_unlock(&mdev->mic_mutex); | ||
| 265 | |||
| 266 | return 0; | ||
| 267 | } | ||
| 268 | |||
| 269 | static int mic_vdev_info_debug_open(struct inode *inode, struct file *file) | ||
| 270 | { | ||
| 271 | return single_open(file, mic_vdev_info_show, inode->i_private); | ||
| 272 | } | ||
| 273 | |||
| 274 | static int mic_vdev_info_debug_release(struct inode *inode, struct file *file) | ||
| 275 | { | ||
| 276 | return single_release(inode, file); | ||
| 277 | } | ||
| 278 | |||
| 279 | static const struct file_operations vdev_info_ops = { | ||
| 280 | .owner = THIS_MODULE, | ||
| 281 | .open = mic_vdev_info_debug_open, | ||
| 282 | .read = seq_read, | ||
| 283 | .llseek = seq_lseek, | ||
| 284 | .release = mic_vdev_info_debug_release | ||
| 285 | }; | ||
| 286 | |||
| 287 | static int mic_msi_irq_info_show(struct seq_file *s, void *pos) | 102 | static int mic_msi_irq_info_show(struct seq_file *s, void *pos) |
| 288 | { | 103 | { |
| 289 | struct mic_device *mdev = s->private; | 104 | struct mic_device *mdev = s->private; |
| @@ -367,11 +182,6 @@ void mic_create_debug_dir(struct mic_device *mdev) | |||
| 367 | debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev, | 182 | debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev, |
| 368 | &post_code_ops); | 183 | &post_code_ops); |
| 369 | 184 | ||
| 370 | debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops); | ||
| 371 | |||
| 372 | debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev, | ||
| 373 | &vdev_info_ops); | ||
| 374 | |||
| 375 | debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev, | 185 | debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev, |
| 376 | &msi_irq_info_ops); | 186 | &msi_irq_info_ops); |
| 377 | } | 187 | } |
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h index 461184a12fbb..52b12b22f4ae 100644 --- a/drivers/misc/mic/host/mic_device.h +++ b/drivers/misc/mic/host/mic_device.h | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/miscdevice.h> | 29 | #include <linux/miscdevice.h> |
| 30 | #include <linux/mic_bus.h> | 30 | #include <linux/mic_bus.h> |
| 31 | #include "../bus/scif_bus.h" | 31 | #include "../bus/scif_bus.h" |
| 32 | #include "../bus/vop_bus.h" | ||
| 32 | #include "../bus/cosm_bus.h" | 33 | #include "../bus/cosm_bus.h" |
| 33 | #include "mic_intr.h" | 34 | #include "mic_intr.h" |
| 34 | 35 | ||
| @@ -64,13 +65,11 @@ extern struct cosm_hw_ops cosm_hw_ops; | |||
| 64 | * @bootaddr: MIC boot address. | 65 | * @bootaddr: MIC boot address. |
| 65 | * @dp: virtio device page | 66 | * @dp: virtio device page |
| 66 | * @dp_dma_addr: virtio device page DMA address. | 67 | * @dp_dma_addr: virtio device page DMA address. |
| 67 | * @name: name for the misc char device | ||
| 68 | * @miscdev: registered misc char device | ||
| 69 | * @vdev_list: list of virtio devices. | ||
| 70 | * @dma_mbdev: MIC BUS DMA device. | 68 | * @dma_mbdev: MIC BUS DMA device. |
| 71 | * @dma_ch - Array of DMA channels | 69 | * @dma_ch - Array of DMA channels |
| 72 | * @num_dma_ch - Number of DMA channels available | 70 | * @num_dma_ch - Number of DMA channels available |
| 73 | * @scdev: SCIF device on the SCIF virtual bus. | 71 | * @scdev: SCIF device on the SCIF virtual bus. |
| 72 | * @vpdev: Virtio over PCIe device on the VOP virtual bus. | ||
| 74 | * @cosm_dev: COSM device | 73 | * @cosm_dev: COSM device |
| 75 | */ | 74 | */ |
| 76 | struct mic_device { | 75 | struct mic_device { |
| @@ -91,13 +90,11 @@ struct mic_device { | |||
| 91 | u32 bootaddr; | 90 | u32 bootaddr; |
| 92 | void *dp; | 91 | void *dp; |
| 93 | dma_addr_t dp_dma_addr; | 92 | dma_addr_t dp_dma_addr; |
| 94 | char name[16]; | ||
| 95 | struct miscdevice miscdev; | ||
| 96 | struct list_head vdev_list; | ||
| 97 | struct mbus_device *dma_mbdev; | 93 | struct mbus_device *dma_mbdev; |
| 98 | struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN]; | 94 | struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN]; |
| 99 | int num_dma_ch; | 95 | int num_dma_ch; |
| 100 | struct scif_hw_dev *scdev; | 96 | struct scif_hw_dev *scdev; |
| 97 | struct vop_device *vpdev; | ||
| 101 | struct cosm_device *cosm_dev; | 98 | struct cosm_device *cosm_dev; |
| 102 | }; | 99 | }; |
| 103 | 100 | ||
diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c deleted file mode 100644 index 8cc1d90cd949..000000000000 --- a/drivers/misc/mic/host/mic_fops.c +++ /dev/null | |||
| @@ -1,222 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2013 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Intel MIC Host driver. | ||
| 19 | * | ||
| 20 | */ | ||
| 21 | #include <linux/poll.h> | ||
| 22 | #include <linux/pci.h> | ||
| 23 | |||
| 24 | #include <linux/mic_common.h> | ||
| 25 | #include "../common/mic_dev.h" | ||
| 26 | #include "mic_device.h" | ||
| 27 | #include "mic_fops.h" | ||
| 28 | #include "mic_virtio.h" | ||
| 29 | |||
| 30 | int mic_open(struct inode *inode, struct file *f) | ||
| 31 | { | ||
| 32 | struct mic_vdev *mvdev; | ||
| 33 | struct mic_device *mdev = container_of(f->private_data, | ||
| 34 | struct mic_device, miscdev); | ||
| 35 | |||
| 36 | mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL); | ||
| 37 | if (!mvdev) | ||
| 38 | return -ENOMEM; | ||
| 39 | |||
| 40 | init_waitqueue_head(&mvdev->waitq); | ||
| 41 | INIT_LIST_HEAD(&mvdev->list); | ||
| 42 | mvdev->mdev = mdev; | ||
| 43 | mvdev->virtio_id = -1; | ||
| 44 | |||
| 45 | f->private_data = mvdev; | ||
| 46 | return 0; | ||
| 47 | } | ||
| 48 | |||
| 49 | int mic_release(struct inode *inode, struct file *f) | ||
| 50 | { | ||
| 51 | struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; | ||
| 52 | |||
| 53 | if (-1 != mvdev->virtio_id) | ||
| 54 | mic_virtio_del_device(mvdev); | ||
| 55 | f->private_data = NULL; | ||
| 56 | kfree(mvdev); | ||
| 57 | return 0; | ||
| 58 | } | ||
| 59 | |||
| 60 | long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | ||
| 61 | { | ||
| 62 | struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; | ||
| 63 | void __user *argp = (void __user *)arg; | ||
| 64 | int ret; | ||
| 65 | |||
| 66 | switch (cmd) { | ||
| 67 | case MIC_VIRTIO_ADD_DEVICE: | ||
| 68 | { | ||
| 69 | ret = mic_virtio_add_device(mvdev, argp); | ||
| 70 | if (ret < 0) { | ||
| 71 | dev_err(mic_dev(mvdev), | ||
| 72 | "%s %d errno ret %d\n", | ||
| 73 | __func__, __LINE__, ret); | ||
| 74 | return ret; | ||
| 75 | } | ||
| 76 | break; | ||
| 77 | } | ||
| 78 | case MIC_VIRTIO_COPY_DESC: | ||
| 79 | { | ||
| 80 | struct mic_copy_desc copy; | ||
| 81 | |||
| 82 | ret = mic_vdev_inited(mvdev); | ||
| 83 | if (ret) | ||
| 84 | return ret; | ||
| 85 | |||
| 86 | if (copy_from_user(©, argp, sizeof(copy))) | ||
| 87 | return -EFAULT; | ||
| 88 | |||
| 89 | dev_dbg(mic_dev(mvdev), | ||
| 90 | "%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n", | ||
| 91 | __func__, __LINE__, copy.iovcnt, copy.vr_idx, | ||
| 92 | copy.update_used); | ||
| 93 | |||
| 94 | ret = mic_virtio_copy_desc(mvdev, ©); | ||
| 95 | if (ret < 0) { | ||
| 96 | dev_err(mic_dev(mvdev), | ||
| 97 | "%s %d errno ret %d\n", | ||
| 98 | __func__, __LINE__, ret); | ||
| 99 | return ret; | ||
| 100 | } | ||
| 101 | if (copy_to_user( | ||
| 102 | &((struct mic_copy_desc __user *)argp)->out_len, | ||
| 103 | ©.out_len, sizeof(copy.out_len))) { | ||
| 104 | dev_err(mic_dev(mvdev), "%s %d errno ret %d\n", | ||
| 105 | __func__, __LINE__, -EFAULT); | ||
| 106 | return -EFAULT; | ||
| 107 | } | ||
| 108 | break; | ||
| 109 | } | ||
| 110 | case MIC_VIRTIO_CONFIG_CHANGE: | ||
| 111 | { | ||
| 112 | ret = mic_vdev_inited(mvdev); | ||
| 113 | if (ret) | ||
| 114 | return ret; | ||
| 115 | |||
| 116 | ret = mic_virtio_config_change(mvdev, argp); | ||
| 117 | if (ret < 0) { | ||
| 118 | dev_err(mic_dev(mvdev), | ||
| 119 | "%s %d errno ret %d\n", | ||
| 120 | __func__, __LINE__, ret); | ||
| 121 | return ret; | ||
| 122 | } | ||
| 123 | break; | ||
| 124 | } | ||
| 125 | default: | ||
| 126 | return -ENOIOCTLCMD; | ||
| 127 | }; | ||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | /* | ||
| 132 | * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and | ||
| 133 | * not when previously enqueued buffers may be available. This means that | ||
| 134 | * in the card->host (TX) path, when userspace is unblocked by poll it | ||
| 135 | * must drain all available descriptors or it can stall. | ||
| 136 | */ | ||
| 137 | unsigned int mic_poll(struct file *f, poll_table *wait) | ||
| 138 | { | ||
| 139 | struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; | ||
| 140 | int mask = 0; | ||
| 141 | |||
| 142 | poll_wait(f, &mvdev->waitq, wait); | ||
| 143 | |||
| 144 | if (mic_vdev_inited(mvdev)) { | ||
| 145 | mask = POLLERR; | ||
| 146 | } else if (mvdev->poll_wake) { | ||
| 147 | mvdev->poll_wake = 0; | ||
| 148 | mask = POLLIN | POLLOUT; | ||
| 149 | } | ||
| 150 | |||
| 151 | return mask; | ||
| 152 | } | ||
| 153 | |||
| 154 | static inline int | ||
| 155 | mic_query_offset(struct mic_vdev *mvdev, unsigned long offset, | ||
| 156 | unsigned long *size, unsigned long *pa) | ||
| 157 | { | ||
| 158 | struct mic_device *mdev = mvdev->mdev; | ||
| 159 | unsigned long start = MIC_DP_SIZE; | ||
| 160 | int i; | ||
| 161 | |||
| 162 | /* | ||
| 163 | * MMAP interface is as follows: | ||
| 164 | * offset region | ||
| 165 | * 0x0 virtio device_page | ||
| 166 | * 0x1000 first vring | ||
| 167 | * 0x1000 + size of 1st vring second vring | ||
| 168 | * .... | ||
| 169 | */ | ||
| 170 | if (!offset) { | ||
| 171 | *pa = virt_to_phys(mdev->dp); | ||
| 172 | *size = MIC_DP_SIZE; | ||
| 173 | return 0; | ||
| 174 | } | ||
| 175 | |||
| 176 | for (i = 0; i < mvdev->dd->num_vq; i++) { | ||
| 177 | struct mic_vringh *mvr = &mvdev->mvr[i]; | ||
| 178 | if (offset == start) { | ||
| 179 | *pa = virt_to_phys(mvr->vring.va); | ||
| 180 | *size = mvr->vring.len; | ||
| 181 | return 0; | ||
| 182 | } | ||
| 183 | start += mvr->vring.len; | ||
| 184 | } | ||
| 185 | return -1; | ||
| 186 | } | ||
| 187 | |||
| 188 | /* | ||
| 189 | * Maps the device page and virtio rings to user space for readonly access. | ||
| 190 | */ | ||
| 191 | int | ||
| 192 | mic_mmap(struct file *f, struct vm_area_struct *vma) | ||
| 193 | { | ||
| 194 | struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data; | ||
| 195 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
| 196 | unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; | ||
| 197 | int i, err; | ||
| 198 | |||
| 199 | err = mic_vdev_inited(mvdev); | ||
| 200 | if (err) | ||
| 201 | return err; | ||
| 202 | |||
| 203 | if (vma->vm_flags & VM_WRITE) | ||
| 204 | return -EACCES; | ||
| 205 | |||
| 206 | while (size_rem) { | ||
| 207 | i = mic_query_offset(mvdev, offset, &size, &pa); | ||
| 208 | if (i < 0) | ||
| 209 | return -EINVAL; | ||
| 210 | err = remap_pfn_range(vma, vma->vm_start + offset, | ||
| 211 | pa >> PAGE_SHIFT, size, vma->vm_page_prot); | ||
| 212 | if (err) | ||
| 213 | return err; | ||
| 214 | dev_dbg(mic_dev(mvdev), | ||
| 215 | "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n", | ||
| 216 | __func__, __LINE__, mvdev->virtio_id, size, offset, | ||
| 217 | pa, vma->vm_start + offset); | ||
| 218 | size_rem -= size; | ||
| 219 | offset += size; | ||
| 220 | } | ||
| 221 | return 0; | ||
| 222 | } | ||
diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h deleted file mode 100644 index dc3893dff667..000000000000 --- a/drivers/misc/mic/host/mic_fops.h +++ /dev/null | |||
| @@ -1,32 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2013 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Intel MIC Host driver. | ||
| 19 | * | ||
| 20 | */ | ||
| 21 | #ifndef _MIC_FOPS_H_ | ||
| 22 | #define _MIC_FOPS_H_ | ||
| 23 | |||
| 24 | int mic_open(struct inode *inode, struct file *filp); | ||
| 25 | int mic_release(struct inode *inode, struct file *filp); | ||
| 26 | ssize_t mic_read(struct file *filp, char __user *buf, | ||
| 27 | size_t count, loff_t *pos); | ||
| 28 | long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); | ||
| 29 | int mic_mmap(struct file *f, struct vm_area_struct *vma); | ||
| 30 | unsigned int mic_poll(struct file *f, poll_table *wait); | ||
| 31 | |||
| 32 | #endif | ||
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c index 153894e7ed5b..035be3e9ceba 100644 --- a/drivers/misc/mic/host/mic_main.c +++ b/drivers/misc/mic/host/mic_main.c | |||
| @@ -27,8 +27,6 @@ | |||
| 27 | #include "mic_device.h" | 27 | #include "mic_device.h" |
| 28 | #include "mic_x100.h" | 28 | #include "mic_x100.h" |
| 29 | #include "mic_smpt.h" | 29 | #include "mic_smpt.h" |
| 30 | #include "mic_fops.h" | ||
| 31 | #include "mic_virtio.h" | ||
| 32 | 30 | ||
| 33 | static const char mic_driver_name[] = "mic"; | 31 | static const char mic_driver_name[] = "mic"; |
| 34 | 32 | ||
| @@ -57,17 +55,6 @@ MODULE_DEVICE_TABLE(pci, mic_pci_tbl); | |||
| 57 | 55 | ||
| 58 | /* ID allocator for MIC devices */ | 56 | /* ID allocator for MIC devices */ |
| 59 | static struct ida g_mic_ida; | 57 | static struct ida g_mic_ida; |
| 60 | /* Base device node number for MIC devices */ | ||
| 61 | static dev_t g_mic_devno; | ||
| 62 | |||
| 63 | static const struct file_operations mic_fops = { | ||
| 64 | .open = mic_open, | ||
| 65 | .release = mic_release, | ||
| 66 | .unlocked_ioctl = mic_ioctl, | ||
| 67 | .poll = mic_poll, | ||
| 68 | .mmap = mic_mmap, | ||
| 69 | .owner = THIS_MODULE, | ||
| 70 | }; | ||
| 71 | 58 | ||
| 72 | /* Initialize the device page */ | 59 | /* Initialize the device page */ |
| 73 | static int mic_dp_init(struct mic_device *mdev) | 60 | static int mic_dp_init(struct mic_device *mdev) |
| @@ -169,7 +156,6 @@ mic_device_init(struct mic_device *mdev, struct pci_dev *pdev) | |||
| 169 | mic_ops_init(mdev); | 156 | mic_ops_init(mdev); |
| 170 | mutex_init(&mdev->mic_mutex); | 157 | mutex_init(&mdev->mic_mutex); |
| 171 | mdev->irq_info.next_avail_src = 0; | 158 | mdev->irq_info.next_avail_src = 0; |
| 172 | INIT_LIST_HEAD(&mdev->vdev_list); | ||
| 173 | } | 159 | } |
| 174 | 160 | ||
| 175 | /** | 161 | /** |
| @@ -259,30 +245,15 @@ static int mic_probe(struct pci_dev *pdev, | |||
| 259 | goto smpt_uninit; | 245 | goto smpt_uninit; |
| 260 | } | 246 | } |
| 261 | mic_bootparam_init(mdev); | 247 | mic_bootparam_init(mdev); |
| 262 | |||
| 263 | mic_create_debug_dir(mdev); | 248 | mic_create_debug_dir(mdev); |
| 264 | 249 | ||
| 265 | mdev->miscdev.minor = MISC_DYNAMIC_MINOR; | ||
| 266 | snprintf(mdev->name, sizeof(mdev->name), "mic%d", mdev->id); | ||
| 267 | mdev->miscdev.name = mdev->name; | ||
| 268 | mdev->miscdev.fops = &mic_fops; | ||
| 269 | mdev->miscdev.parent = &mdev->pdev->dev; | ||
| 270 | rc = misc_register(&mdev->miscdev); | ||
| 271 | if (rc) { | ||
| 272 | dev_err(&pdev->dev, "misc_register err id %d rc %d\n", | ||
| 273 | mdev->id, rc); | ||
| 274 | goto cleanup_debug_dir; | ||
| 275 | } | ||
| 276 | |||
| 277 | mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops); | 250 | mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops); |
| 278 | if (IS_ERR(mdev->cosm_dev)) { | 251 | if (IS_ERR(mdev->cosm_dev)) { |
| 279 | rc = PTR_ERR(mdev->cosm_dev); | 252 | rc = PTR_ERR(mdev->cosm_dev); |
| 280 | dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc); | 253 | dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc); |
| 281 | goto misc_dereg; | 254 | goto cleanup_debug_dir; |
| 282 | } | 255 | } |
| 283 | return 0; | 256 | return 0; |
| 284 | misc_dereg: | ||
| 285 | misc_deregister(&mdev->miscdev); | ||
| 286 | cleanup_debug_dir: | 257 | cleanup_debug_dir: |
| 287 | mic_delete_debug_dir(mdev); | 258 | mic_delete_debug_dir(mdev); |
| 288 | mic_dp_uninit(mdev); | 259 | mic_dp_uninit(mdev); |
| @@ -323,7 +294,6 @@ static void mic_remove(struct pci_dev *pdev) | |||
| 323 | return; | 294 | return; |
| 324 | 295 | ||
| 325 | cosm_unregister_device(mdev->cosm_dev); | 296 | cosm_unregister_device(mdev->cosm_dev); |
| 326 | misc_deregister(&mdev->miscdev); | ||
| 327 | mic_delete_debug_dir(mdev); | 297 | mic_delete_debug_dir(mdev); |
| 328 | mic_dp_uninit(mdev); | 298 | mic_dp_uninit(mdev); |
| 329 | mic_smpt_uninit(mdev); | 299 | mic_smpt_uninit(mdev); |
| @@ -347,26 +317,18 @@ static int __init mic_init(void) | |||
| 347 | { | 317 | { |
| 348 | int ret; | 318 | int ret; |
| 349 | 319 | ||
| 350 | ret = alloc_chrdev_region(&g_mic_devno, 0, | 320 | request_module("mic_x100_dma"); |
| 351 | MIC_MAX_NUM_DEVS, mic_driver_name); | ||
| 352 | if (ret) { | ||
| 353 | pr_err("alloc_chrdev_region failed ret %d\n", ret); | ||
| 354 | goto error; | ||
| 355 | } | ||
| 356 | |||
| 357 | mic_init_debugfs(); | 321 | mic_init_debugfs(); |
| 358 | ida_init(&g_mic_ida); | 322 | ida_init(&g_mic_ida); |
| 359 | ret = pci_register_driver(&mic_driver); | 323 | ret = pci_register_driver(&mic_driver); |
| 360 | if (ret) { | 324 | if (ret) { |
| 361 | pr_err("pci_register_driver failed ret %d\n", ret); | 325 | pr_err("pci_register_driver failed ret %d\n", ret); |
| 362 | goto cleanup_chrdev; | 326 | goto cleanup_debugfs; |
| 363 | } | 327 | } |
| 364 | return ret; | 328 | return 0; |
| 365 | cleanup_chrdev: | 329 | cleanup_debugfs: |
| 366 | ida_destroy(&g_mic_ida); | 330 | ida_destroy(&g_mic_ida); |
| 367 | mic_exit_debugfs(); | 331 | mic_exit_debugfs(); |
| 368 | unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS); | ||
| 369 | error: | ||
| 370 | return ret; | 332 | return ret; |
| 371 | } | 333 | } |
| 372 | 334 | ||
| @@ -375,7 +337,6 @@ static void __exit mic_exit(void) | |||
| 375 | pci_unregister_driver(&mic_driver); | 337 | pci_unregister_driver(&mic_driver); |
| 376 | ida_destroy(&g_mic_ida); | 338 | ida_destroy(&g_mic_ida); |
| 377 | mic_exit_debugfs(); | 339 | mic_exit_debugfs(); |
| 378 | unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS); | ||
| 379 | } | 340 | } |
| 380 | 341 | ||
| 381 | module_init(mic_init); | 342 | module_init(mic_init); |
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c deleted file mode 100644 index 58b107a24a8b..000000000000 --- a/drivers/misc/mic/host/mic_virtio.c +++ /dev/null | |||
| @@ -1,811 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2013 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Intel MIC Host driver. | ||
| 19 | * | ||
| 20 | */ | ||
| 21 | #include <linux/pci.h> | ||
| 22 | #include <linux/sched.h> | ||
| 23 | #include <linux/uaccess.h> | ||
| 24 | #include <linux/dmaengine.h> | ||
| 25 | #include <linux/mic_common.h> | ||
| 26 | #include "../common/mic_dev.h" | ||
| 27 | #include "mic_device.h" | ||
| 28 | #include "mic_smpt.h" | ||
| 29 | #include "mic_virtio.h" | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Size of the internal buffer used during DMA's as an intermediate buffer | ||
| 33 | * for copy to/from user. | ||
| 34 | */ | ||
| 35 | #define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL) | ||
| 36 | |||
| 37 | static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst, | ||
| 38 | dma_addr_t src, size_t len) | ||
| 39 | { | ||
| 40 | int err = 0; | ||
| 41 | struct dma_async_tx_descriptor *tx; | ||
| 42 | struct dma_chan *mic_ch = mdev->dma_ch[0]; | ||
| 43 | |||
| 44 | if (!mic_ch) { | ||
| 45 | err = -EBUSY; | ||
| 46 | goto error; | ||
| 47 | } | ||
| 48 | |||
| 49 | tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len, | ||
| 50 | DMA_PREP_FENCE); | ||
| 51 | if (!tx) { | ||
| 52 | err = -ENOMEM; | ||
| 53 | goto error; | ||
| 54 | } else { | ||
| 55 | dma_cookie_t cookie = tx->tx_submit(tx); | ||
| 56 | |||
| 57 | err = dma_submit_error(cookie); | ||
| 58 | if (err) | ||
| 59 | goto error; | ||
| 60 | err = dma_sync_wait(mic_ch, cookie); | ||
| 61 | } | ||
| 62 | error: | ||
| 63 | if (err) | ||
| 64 | dev_err(&mdev->pdev->dev, "%s %d err %d\n", | ||
| 65 | __func__, __LINE__, err); | ||
| 66 | return err; | ||
| 67 | } | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Initiates the copies across the PCIe bus from card memory to a user | ||
| 71 | * space buffer. When transfers are done using DMA, source/destination | ||
| 72 | * addresses and transfer length must follow the alignment requirements of | ||
| 73 | * the MIC DMA engine. | ||
| 74 | */ | ||
| 75 | static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf, | ||
| 76 | size_t len, u64 daddr, size_t dlen, | ||
| 77 | int vr_idx) | ||
| 78 | { | ||
| 79 | struct mic_device *mdev = mvdev->mdev; | ||
| 80 | void __iomem *dbuf = mdev->aper.va + daddr; | ||
| 81 | struct mic_vringh *mvr = &mvdev->mvr[vr_idx]; | ||
| 82 | size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align; | ||
| 83 | size_t dma_offset; | ||
| 84 | size_t partlen; | ||
| 85 | int err; | ||
| 86 | |||
| 87 | dma_offset = daddr - round_down(daddr, dma_alignment); | ||
| 88 | daddr -= dma_offset; | ||
| 89 | len += dma_offset; | ||
| 90 | |||
| 91 | while (len) { | ||
| 92 | partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE); | ||
| 93 | |||
| 94 | err = mic_sync_dma(mdev, mvr->buf_da, daddr, | ||
| 95 | ALIGN(partlen, dma_alignment)); | ||
| 96 | if (err) | ||
| 97 | goto err; | ||
| 98 | |||
| 99 | if (copy_to_user(ubuf, mvr->buf + dma_offset, | ||
| 100 | partlen - dma_offset)) { | ||
| 101 | err = -EFAULT; | ||
| 102 | goto err; | ||
| 103 | } | ||
| 104 | daddr += partlen; | ||
| 105 | ubuf += partlen; | ||
| 106 | dbuf += partlen; | ||
| 107 | mvdev->in_bytes_dma += partlen; | ||
| 108 | mvdev->in_bytes += partlen; | ||
| 109 | len -= partlen; | ||
| 110 | dma_offset = 0; | ||
| 111 | } | ||
| 112 | return 0; | ||
| 113 | err: | ||
| 114 | dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err); | ||
| 115 | return err; | ||
| 116 | } | ||
| 117 | |||
| 118 | /* | ||
| 119 | * Initiates copies across the PCIe bus from a user space buffer to card | ||
| 120 | * memory. When transfers are done using DMA, source/destination addresses | ||
| 121 | * and transfer length must follow the alignment requirements of the MIC | ||
| 122 | * DMA engine. | ||
| 123 | */ | ||
| 124 | static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf, | ||
| 125 | size_t len, u64 daddr, size_t dlen, | ||
| 126 | int vr_idx) | ||
| 127 | { | ||
| 128 | struct mic_device *mdev = mvdev->mdev; | ||
| 129 | void __iomem *dbuf = mdev->aper.va + daddr; | ||
| 130 | struct mic_vringh *mvr = &mvdev->mvr[vr_idx]; | ||
| 131 | size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align; | ||
| 132 | size_t partlen; | ||
| 133 | int err; | ||
| 134 | |||
| 135 | if (daddr & (dma_alignment - 1)) { | ||
| 136 | mvdev->tx_dst_unaligned += len; | ||
| 137 | goto memcpy; | ||
| 138 | } else if (ALIGN(len, dma_alignment) > dlen) { | ||
| 139 | mvdev->tx_len_unaligned += len; | ||
| 140 | goto memcpy; | ||
| 141 | } | ||
| 142 | |||
| 143 | while (len) { | ||
| 144 | partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE); | ||
| 145 | |||
| 146 | if (copy_from_user(mvr->buf, ubuf, partlen)) { | ||
| 147 | err = -EFAULT; | ||
| 148 | goto err; | ||
| 149 | } | ||
| 150 | err = mic_sync_dma(mdev, daddr, mvr->buf_da, | ||
| 151 | ALIGN(partlen, dma_alignment)); | ||
| 152 | if (err) | ||
| 153 | goto err; | ||
| 154 | daddr += partlen; | ||
| 155 | ubuf += partlen; | ||
| 156 | dbuf += partlen; | ||
| 157 | mvdev->out_bytes_dma += partlen; | ||
| 158 | mvdev->out_bytes += partlen; | ||
| 159 | len -= partlen; | ||
| 160 | } | ||
| 161 | memcpy: | ||
| 162 | /* | ||
| 163 | * We are copying to IO below and should ideally use something | ||
| 164 | * like copy_from_user_toio(..) if it existed. | ||
| 165 | */ | ||
| 166 | if (copy_from_user((void __force *)dbuf, ubuf, len)) { | ||
| 167 | err = -EFAULT; | ||
| 168 | goto err; | ||
| 169 | } | ||
| 170 | mvdev->out_bytes += len; | ||
| 171 | return 0; | ||
| 172 | err: | ||
| 173 | dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err); | ||
| 174 | return err; | ||
| 175 | } | ||
| 176 | |||
| 177 | #define MIC_VRINGH_READ true | ||
| 178 | |||
| 179 | /* The function to call to notify the card about added buffers */ | ||
| 180 | static void mic_notify(struct vringh *vrh) | ||
| 181 | { | ||
| 182 | struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh); | ||
| 183 | struct mic_vdev *mvdev = mvrh->mvdev; | ||
| 184 | s8 db = mvdev->dc->h2c_vdev_db; | ||
| 185 | |||
| 186 | if (db != -1) | ||
| 187 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | ||
| 188 | } | ||
| 189 | |||
| 190 | /* Determine the total number of bytes consumed in a VRINGH KIOV */ | ||
| 191 | static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov) | ||
| 192 | { | ||
| 193 | int i; | ||
| 194 | u32 total = iov->consumed; | ||
| 195 | |||
| 196 | for (i = 0; i < iov->i; i++) | ||
| 197 | total += iov->iov[i].iov_len; | ||
| 198 | return total; | ||
| 199 | } | ||
| 200 | |||
| 201 | /* | ||
| 202 | * Traverse the VRINGH KIOV and issue the APIs to trigger the copies. | ||
| 203 | * This API is heavily based on the vringh_iov_xfer(..) implementation | ||
| 204 | * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..) | ||
| 205 | * and vringh_iov_push_kern(..) directly is because there is no | ||
| 206 | * way to override the VRINGH xfer(..) routines as of v3.10. | ||
| 207 | */ | ||
| 208 | static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov, | ||
| 209 | void __user *ubuf, size_t len, bool read, int vr_idx, | ||
| 210 | size_t *out_len) | ||
| 211 | { | ||
| 212 | int ret = 0; | ||
| 213 | size_t partlen, tot_len = 0; | ||
| 214 | |||
| 215 | while (len && iov->i < iov->used) { | ||
| 216 | partlen = min(iov->iov[iov->i].iov_len, len); | ||
| 217 | if (read) | ||
| 218 | ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen, | ||
| 219 | (u64)iov->iov[iov->i].iov_base, | ||
| 220 | iov->iov[iov->i].iov_len, | ||
| 221 | vr_idx); | ||
| 222 | else | ||
| 223 | ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen, | ||
| 224 | (u64)iov->iov[iov->i].iov_base, | ||
| 225 | iov->iov[iov->i].iov_len, | ||
| 226 | vr_idx); | ||
| 227 | if (ret) { | ||
| 228 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 229 | __func__, __LINE__, ret); | ||
| 230 | break; | ||
| 231 | } | ||
| 232 | len -= partlen; | ||
| 233 | ubuf += partlen; | ||
| 234 | tot_len += partlen; | ||
| 235 | iov->consumed += partlen; | ||
| 236 | iov->iov[iov->i].iov_len -= partlen; | ||
| 237 | iov->iov[iov->i].iov_base += partlen; | ||
| 238 | if (!iov->iov[iov->i].iov_len) { | ||
| 239 | /* Fix up old iov element then increment. */ | ||
| 240 | iov->iov[iov->i].iov_len = iov->consumed; | ||
| 241 | iov->iov[iov->i].iov_base -= iov->consumed; | ||
| 242 | |||
| 243 | iov->consumed = 0; | ||
| 244 | iov->i++; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | *out_len = tot_len; | ||
| 248 | return ret; | ||
| 249 | } | ||
| 250 | |||
| 251 | /* | ||
| 252 | * Use the standard VRINGH infrastructure in the kernel to fetch new | ||
| 253 | * descriptors, initiate the copies and update the used ring. | ||
| 254 | */ | ||
| 255 | static int _mic_virtio_copy(struct mic_vdev *mvdev, | ||
| 256 | struct mic_copy_desc *copy) | ||
| 257 | { | ||
| 258 | int ret = 0; | ||
| 259 | u32 iovcnt = copy->iovcnt; | ||
| 260 | struct iovec iov; | ||
| 261 | struct iovec __user *u_iov = copy->iov; | ||
| 262 | void __user *ubuf = NULL; | ||
| 263 | struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; | ||
| 264 | struct vringh_kiov *riov = &mvr->riov; | ||
| 265 | struct vringh_kiov *wiov = &mvr->wiov; | ||
| 266 | struct vringh *vrh = &mvr->vrh; | ||
| 267 | u16 *head = &mvr->head; | ||
| 268 | struct mic_vring *vr = &mvr->vring; | ||
| 269 | size_t len = 0, out_len; | ||
| 270 | |||
| 271 | copy->out_len = 0; | ||
| 272 | /* Fetch a new IOVEC if all previous elements have been processed */ | ||
| 273 | if (riov->i == riov->used && wiov->i == wiov->used) { | ||
| 274 | ret = vringh_getdesc_kern(vrh, riov, wiov, | ||
| 275 | head, GFP_KERNEL); | ||
| 276 | /* Check if there are available descriptors */ | ||
| 277 | if (ret <= 0) | ||
| 278 | return ret; | ||
| 279 | } | ||
| 280 | while (iovcnt) { | ||
| 281 | if (!len) { | ||
| 282 | /* Copy over a new iovec from user space. */ | ||
| 283 | ret = copy_from_user(&iov, u_iov, sizeof(*u_iov)); | ||
| 284 | if (ret) { | ||
| 285 | ret = -EINVAL; | ||
| 286 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 287 | __func__, __LINE__, ret); | ||
| 288 | break; | ||
| 289 | } | ||
| 290 | len = iov.iov_len; | ||
| 291 | ubuf = iov.iov_base; | ||
| 292 | } | ||
| 293 | /* Issue all the read descriptors first */ | ||
| 294 | ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ, | ||
| 295 | copy->vr_idx, &out_len); | ||
| 296 | if (ret) { | ||
| 297 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 298 | __func__, __LINE__, ret); | ||
| 299 | break; | ||
| 300 | } | ||
| 301 | len -= out_len; | ||
| 302 | ubuf += out_len; | ||
| 303 | copy->out_len += out_len; | ||
| 304 | /* Issue the write descriptors next */ | ||
| 305 | ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ, | ||
| 306 | copy->vr_idx, &out_len); | ||
| 307 | if (ret) { | ||
| 308 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 309 | __func__, __LINE__, ret); | ||
| 310 | break; | ||
| 311 | } | ||
| 312 | len -= out_len; | ||
| 313 | ubuf += out_len; | ||
| 314 | copy->out_len += out_len; | ||
| 315 | if (!len) { | ||
| 316 | /* One user space iovec is now completed */ | ||
| 317 | iovcnt--; | ||
| 318 | u_iov++; | ||
| 319 | } | ||
| 320 | /* Exit loop if all elements in KIOVs have been processed. */ | ||
| 321 | if (riov->i == riov->used && wiov->i == wiov->used) | ||
| 322 | break; | ||
| 323 | } | ||
| 324 | /* | ||
| 325 | * Update the used ring if a descriptor was available and some data was | ||
| 326 | * copied in/out and the user asked for a used ring update. | ||
| 327 | */ | ||
| 328 | if (*head != USHRT_MAX && copy->out_len && copy->update_used) { | ||
| 329 | u32 total = 0; | ||
| 330 | |||
| 331 | /* Determine the total data consumed */ | ||
| 332 | total += mic_vringh_iov_consumed(riov); | ||
| 333 | total += mic_vringh_iov_consumed(wiov); | ||
| 334 | vringh_complete_kern(vrh, *head, total); | ||
| 335 | *head = USHRT_MAX; | ||
| 336 | if (vringh_need_notify_kern(vrh) > 0) | ||
| 337 | vringh_notify(vrh); | ||
| 338 | vringh_kiov_cleanup(riov); | ||
| 339 | vringh_kiov_cleanup(wiov); | ||
| 340 | /* Update avail idx for user space */ | ||
| 341 | vr->info->avail_idx = vrh->last_avail_idx; | ||
| 342 | } | ||
| 343 | return ret; | ||
| 344 | } | ||
| 345 | |||
| 346 | static inline int mic_verify_copy_args(struct mic_vdev *mvdev, | ||
| 347 | struct mic_copy_desc *copy) | ||
| 348 | { | ||
| 349 | if (copy->vr_idx >= mvdev->dd->num_vq) { | ||
| 350 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 351 | __func__, __LINE__, -EINVAL); | ||
| 352 | return -EINVAL; | ||
| 353 | } | ||
| 354 | return 0; | ||
| 355 | } | ||
| 356 | |||
| 357 | /* Copy a specified number of virtio descriptors in a chain */ | ||
| 358 | int mic_virtio_copy_desc(struct mic_vdev *mvdev, | ||
| 359 | struct mic_copy_desc *copy) | ||
| 360 | { | ||
| 361 | int err; | ||
| 362 | struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx]; | ||
| 363 | |||
| 364 | err = mic_verify_copy_args(mvdev, copy); | ||
| 365 | if (err) | ||
| 366 | return err; | ||
| 367 | |||
| 368 | mutex_lock(&mvr->vr_mutex); | ||
| 369 | if (!mic_vdevup(mvdev)) { | ||
| 370 | err = -ENODEV; | ||
| 371 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 372 | __func__, __LINE__, err); | ||
| 373 | goto err; | ||
| 374 | } | ||
| 375 | err = _mic_virtio_copy(mvdev, copy); | ||
| 376 | if (err) { | ||
| 377 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 378 | __func__, __LINE__, err); | ||
| 379 | } | ||
| 380 | err: | ||
| 381 | mutex_unlock(&mvr->vr_mutex); | ||
| 382 | return err; | ||
| 383 | } | ||
| 384 | |||
| 385 | static void mic_virtio_init_post(struct mic_vdev *mvdev) | ||
| 386 | { | ||
| 387 | struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd); | ||
| 388 | int i; | ||
| 389 | |||
| 390 | for (i = 0; i < mvdev->dd->num_vq; i++) { | ||
| 391 | if (!le64_to_cpu(vqconfig[i].used_address)) { | ||
| 392 | dev_warn(mic_dev(mvdev), "used_address zero??\n"); | ||
| 393 | continue; | ||
| 394 | } | ||
| 395 | mvdev->mvr[i].vrh.vring.used = | ||
| 396 | (void __force *)mvdev->mdev->aper.va + | ||
| 397 | le64_to_cpu(vqconfig[i].used_address); | ||
| 398 | } | ||
| 399 | |||
| 400 | mvdev->dc->used_address_updated = 0; | ||
| 401 | |||
| 402 | dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n", | ||
| 403 | __func__, mvdev->virtio_id); | ||
| 404 | } | ||
| 405 | |||
| 406 | static inline void mic_virtio_device_reset(struct mic_vdev *mvdev) | ||
| 407 | { | ||
| 408 | int i; | ||
| 409 | |||
| 410 | dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n", | ||
| 411 | __func__, mvdev->dd->status, mvdev->virtio_id); | ||
| 412 | |||
| 413 | for (i = 0; i < mvdev->dd->num_vq; i++) | ||
| 414 | /* | ||
| 415 | * Avoid lockdep false positive. The + 1 is for the mic | ||
| 416 | * mutex which is held in the reset devices code path. | ||
| 417 | */ | ||
| 418 | mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); | ||
| 419 | |||
| 420 | /* 0 status means "reset" */ | ||
| 421 | mvdev->dd->status = 0; | ||
| 422 | mvdev->dc->vdev_reset = 0; | ||
| 423 | mvdev->dc->host_ack = 1; | ||
| 424 | |||
| 425 | for (i = 0; i < mvdev->dd->num_vq; i++) { | ||
| 426 | struct vringh *vrh = &mvdev->mvr[i].vrh; | ||
| 427 | mvdev->mvr[i].vring.info->avail_idx = 0; | ||
| 428 | vrh->completed = 0; | ||
| 429 | vrh->last_avail_idx = 0; | ||
| 430 | vrh->last_used_idx = 0; | ||
| 431 | } | ||
| 432 | |||
| 433 | for (i = 0; i < mvdev->dd->num_vq; i++) | ||
| 434 | mutex_unlock(&mvdev->mvr[i].vr_mutex); | ||
| 435 | } | ||
| 436 | |||
| 437 | void mic_virtio_reset_devices(struct mic_device *mdev) | ||
| 438 | { | ||
| 439 | struct list_head *pos, *tmp; | ||
| 440 | struct mic_vdev *mvdev; | ||
| 441 | |||
| 442 | dev_dbg(&mdev->pdev->dev, "%s\n", __func__); | ||
| 443 | |||
| 444 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | ||
| 445 | mvdev = list_entry(pos, struct mic_vdev, list); | ||
| 446 | mic_virtio_device_reset(mvdev); | ||
| 447 | mvdev->poll_wake = 1; | ||
| 448 | wake_up(&mvdev->waitq); | ||
| 449 | } | ||
| 450 | } | ||
| 451 | |||
| 452 | void mic_bh_handler(struct work_struct *work) | ||
| 453 | { | ||
| 454 | struct mic_vdev *mvdev = container_of(work, struct mic_vdev, | ||
| 455 | virtio_bh_work); | ||
| 456 | |||
| 457 | if (mvdev->dc->used_address_updated) | ||
| 458 | mic_virtio_init_post(mvdev); | ||
| 459 | |||
| 460 | if (mvdev->dc->vdev_reset) | ||
| 461 | mic_virtio_device_reset(mvdev); | ||
| 462 | |||
| 463 | mvdev->poll_wake = 1; | ||
| 464 | wake_up(&mvdev->waitq); | ||
| 465 | } | ||
| 466 | |||
| 467 | static irqreturn_t mic_virtio_intr_handler(int irq, void *data) | ||
| 468 | { | ||
| 469 | struct mic_vdev *mvdev = data; | ||
| 470 | struct mic_device *mdev = mvdev->mdev; | ||
| 471 | |||
| 472 | mdev->ops->intr_workarounds(mdev); | ||
| 473 | schedule_work(&mvdev->virtio_bh_work); | ||
| 474 | return IRQ_HANDLED; | ||
| 475 | } | ||
| 476 | |||
| 477 | int mic_virtio_config_change(struct mic_vdev *mvdev, | ||
| 478 | void __user *argp) | ||
| 479 | { | ||
| 480 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | ||
| 481 | int ret = 0, retry, i; | ||
| 482 | struct mic_bootparam *bootparam = mvdev->mdev->dp; | ||
| 483 | s8 db = bootparam->h2c_config_db; | ||
| 484 | |||
| 485 | mutex_lock(&mvdev->mdev->mic_mutex); | ||
| 486 | for (i = 0; i < mvdev->dd->num_vq; i++) | ||
| 487 | mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1); | ||
| 488 | |||
| 489 | if (db == -1 || mvdev->dd->type == -1) { | ||
| 490 | ret = -EIO; | ||
| 491 | goto exit; | ||
| 492 | } | ||
| 493 | |||
| 494 | if (copy_from_user(mic_vq_configspace(mvdev->dd), | ||
| 495 | argp, mvdev->dd->config_len)) { | ||
| 496 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 497 | __func__, __LINE__, -EFAULT); | ||
| 498 | ret = -EFAULT; | ||
| 499 | goto exit; | ||
| 500 | } | ||
| 501 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED; | ||
| 502 | mvdev->mdev->ops->send_intr(mvdev->mdev, db); | ||
| 503 | |||
| 504 | for (retry = 100; retry--;) { | ||
| 505 | ret = wait_event_timeout(wake, | ||
| 506 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | ||
| 507 | if (ret) | ||
| 508 | break; | ||
| 509 | } | ||
| 510 | |||
| 511 | dev_dbg(mic_dev(mvdev), | ||
| 512 | "%s %d retry: %d\n", __func__, __LINE__, retry); | ||
| 513 | mvdev->dc->config_change = 0; | ||
| 514 | mvdev->dc->guest_ack = 0; | ||
| 515 | exit: | ||
| 516 | for (i = 0; i < mvdev->dd->num_vq; i++) | ||
| 517 | mutex_unlock(&mvdev->mvr[i].vr_mutex); | ||
| 518 | mutex_unlock(&mvdev->mdev->mic_mutex); | ||
| 519 | return ret; | ||
| 520 | } | ||
| 521 | |||
| 522 | static int mic_copy_dp_entry(struct mic_vdev *mvdev, | ||
| 523 | void __user *argp, | ||
| 524 | __u8 *type, | ||
| 525 | struct mic_device_desc **devpage) | ||
| 526 | { | ||
| 527 | struct mic_device *mdev = mvdev->mdev; | ||
| 528 | struct mic_device_desc dd, *dd_config, *devp; | ||
| 529 | struct mic_vqconfig *vqconfig; | ||
| 530 | int ret = 0, i; | ||
| 531 | bool slot_found = false; | ||
| 532 | |||
| 533 | if (copy_from_user(&dd, argp, sizeof(dd))) { | ||
| 534 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 535 | __func__, __LINE__, -EFAULT); | ||
| 536 | return -EFAULT; | ||
| 537 | } | ||
| 538 | |||
| 539 | if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE || | ||
| 540 | dd.num_vq > MIC_MAX_VRINGS) { | ||
| 541 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 542 | __func__, __LINE__, -EINVAL); | ||
| 543 | return -EINVAL; | ||
| 544 | } | ||
| 545 | |||
| 546 | dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL); | ||
| 547 | if (dd_config == NULL) { | ||
| 548 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 549 | __func__, __LINE__, -ENOMEM); | ||
| 550 | return -ENOMEM; | ||
| 551 | } | ||
| 552 | if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) { | ||
| 553 | ret = -EFAULT; | ||
| 554 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 555 | __func__, __LINE__, ret); | ||
| 556 | goto exit; | ||
| 557 | } | ||
| 558 | |||
| 559 | vqconfig = mic_vq_config(dd_config); | ||
| 560 | for (i = 0; i < dd.num_vq; i++) { | ||
| 561 | if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) { | ||
| 562 | ret = -EINVAL; | ||
| 563 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 564 | __func__, __LINE__, ret); | ||
| 565 | goto exit; | ||
| 566 | } | ||
| 567 | } | ||
| 568 | |||
| 569 | /* Find the first free device page entry */ | ||
| 570 | for (i = sizeof(struct mic_bootparam); | ||
| 571 | i < MIC_DP_SIZE - mic_total_desc_size(dd_config); | ||
| 572 | i += mic_total_desc_size(devp)) { | ||
| 573 | devp = mdev->dp + i; | ||
| 574 | if (devp->type == 0 || devp->type == -1) { | ||
| 575 | slot_found = true; | ||
| 576 | break; | ||
| 577 | } | ||
| 578 | } | ||
| 579 | if (!slot_found) { | ||
| 580 | ret = -EINVAL; | ||
| 581 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 582 | __func__, __LINE__, ret); | ||
| 583 | goto exit; | ||
| 584 | } | ||
| 585 | /* | ||
| 586 | * Save off the type before doing the memcpy. Type will be set in the | ||
| 587 | * end after completing all initialization for the new device. | ||
| 588 | */ | ||
| 589 | *type = dd_config->type; | ||
| 590 | dd_config->type = 0; | ||
| 591 | memcpy(devp, dd_config, mic_desc_size(dd_config)); | ||
| 592 | |||
| 593 | *devpage = devp; | ||
| 594 | exit: | ||
| 595 | kfree(dd_config); | ||
| 596 | return ret; | ||
| 597 | } | ||
| 598 | |||
| 599 | static void mic_init_device_ctrl(struct mic_vdev *mvdev, | ||
| 600 | struct mic_device_desc *devpage) | ||
| 601 | { | ||
| 602 | struct mic_device_ctrl *dc; | ||
| 603 | |||
| 604 | dc = (void *)devpage + mic_aligned_desc_size(devpage); | ||
| 605 | |||
| 606 | dc->config_change = 0; | ||
| 607 | dc->guest_ack = 0; | ||
| 608 | dc->vdev_reset = 0; | ||
| 609 | dc->host_ack = 0; | ||
| 610 | dc->used_address_updated = 0; | ||
| 611 | dc->c2h_vdev_db = -1; | ||
| 612 | dc->h2c_vdev_db = -1; | ||
| 613 | mvdev->dc = dc; | ||
| 614 | } | ||
| 615 | |||
| 616 | int mic_virtio_add_device(struct mic_vdev *mvdev, | ||
| 617 | void __user *argp) | ||
| 618 | { | ||
| 619 | struct mic_device *mdev = mvdev->mdev; | ||
| 620 | struct mic_device_desc *dd = NULL; | ||
| 621 | struct mic_vqconfig *vqconfig; | ||
| 622 | int vr_size, i, j, ret; | ||
| 623 | u8 type = 0; | ||
| 624 | s8 db; | ||
| 625 | char irqname[10]; | ||
| 626 | struct mic_bootparam *bootparam = mdev->dp; | ||
| 627 | u16 num; | ||
| 628 | dma_addr_t vr_addr; | ||
| 629 | |||
| 630 | mutex_lock(&mdev->mic_mutex); | ||
| 631 | |||
| 632 | ret = mic_copy_dp_entry(mvdev, argp, &type, &dd); | ||
| 633 | if (ret) { | ||
| 634 | mutex_unlock(&mdev->mic_mutex); | ||
| 635 | return ret; | ||
| 636 | } | ||
| 637 | |||
| 638 | mic_init_device_ctrl(mvdev, dd); | ||
| 639 | |||
| 640 | mvdev->dd = dd; | ||
| 641 | mvdev->virtio_id = type; | ||
| 642 | vqconfig = mic_vq_config(dd); | ||
| 643 | INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler); | ||
| 644 | |||
| 645 | for (i = 0; i < dd->num_vq; i++) { | ||
| 646 | struct mic_vringh *mvr = &mvdev->mvr[i]; | ||
| 647 | struct mic_vring *vr = &mvdev->mvr[i].vring; | ||
| 648 | num = le16_to_cpu(vqconfig[i].num); | ||
| 649 | mutex_init(&mvr->vr_mutex); | ||
| 650 | vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) + | ||
| 651 | sizeof(struct _mic_vring_info)); | ||
| 652 | vr->va = (void *) | ||
| 653 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
| 654 | get_order(vr_size)); | ||
| 655 | if (!vr->va) { | ||
| 656 | ret = -ENOMEM; | ||
| 657 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 658 | __func__, __LINE__, ret); | ||
| 659 | goto err; | ||
| 660 | } | ||
| 661 | vr->len = vr_size; | ||
| 662 | vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN); | ||
| 663 | vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i); | ||
| 664 | vr_addr = mic_map_single(mdev, vr->va, vr_size); | ||
| 665 | if (mic_map_error(vr_addr)) { | ||
| 666 | free_pages((unsigned long)vr->va, get_order(vr_size)); | ||
| 667 | ret = -ENOMEM; | ||
| 668 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 669 | __func__, __LINE__, ret); | ||
| 670 | goto err; | ||
| 671 | } | ||
| 672 | vqconfig[i].address = cpu_to_le64(vr_addr); | ||
| 673 | |||
| 674 | vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN); | ||
| 675 | ret = vringh_init_kern(&mvr->vrh, | ||
| 676 | *(u32 *)mic_vq_features(mvdev->dd), num, false, | ||
| 677 | vr->vr.desc, vr->vr.avail, vr->vr.used); | ||
| 678 | if (ret) { | ||
| 679 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 680 | __func__, __LINE__, ret); | ||
| 681 | goto err; | ||
| 682 | } | ||
| 683 | vringh_kiov_init(&mvr->riov, NULL, 0); | ||
| 684 | vringh_kiov_init(&mvr->wiov, NULL, 0); | ||
| 685 | mvr->head = USHRT_MAX; | ||
| 686 | mvr->mvdev = mvdev; | ||
| 687 | mvr->vrh.notify = mic_notify; | ||
| 688 | dev_dbg(&mdev->pdev->dev, | ||
| 689 | "%s %d index %d va %p info %p vr_size 0x%x\n", | ||
| 690 | __func__, __LINE__, i, vr->va, vr->info, vr_size); | ||
| 691 | mvr->buf = (void *)__get_free_pages(GFP_KERNEL, | ||
| 692 | get_order(MIC_INT_DMA_BUF_SIZE)); | ||
| 693 | mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf, | ||
| 694 | MIC_INT_DMA_BUF_SIZE); | ||
| 695 | } | ||
| 696 | |||
| 697 | snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id, | ||
| 698 | mvdev->virtio_id); | ||
| 699 | mvdev->virtio_db = mic_next_db(mdev); | ||
| 700 | mvdev->virtio_cookie = mic_request_threaded_irq(mdev, | ||
| 701 | mic_virtio_intr_handler, | ||
| 702 | NULL, irqname, mvdev, | ||
| 703 | mvdev->virtio_db, MIC_INTR_DB); | ||
| 704 | if (IS_ERR(mvdev->virtio_cookie)) { | ||
| 705 | ret = PTR_ERR(mvdev->virtio_cookie); | ||
| 706 | dev_dbg(&mdev->pdev->dev, "request irq failed\n"); | ||
| 707 | goto err; | ||
| 708 | } | ||
| 709 | |||
| 710 | mvdev->dc->c2h_vdev_db = mvdev->virtio_db; | ||
| 711 | |||
| 712 | list_add_tail(&mvdev->list, &mdev->vdev_list); | ||
| 713 | /* | ||
| 714 | * Order the type update with previous stores. This write barrier | ||
| 715 | * is paired with the corresponding read barrier before the uncached | ||
| 716 | * system memory read of the type, on the card while scanning the | ||
| 717 | * device page. | ||
| 718 | */ | ||
| 719 | smp_wmb(); | ||
| 720 | dd->type = type; | ||
| 721 | |||
| 722 | dev_dbg(&mdev->pdev->dev, "Added virtio device id %d\n", dd->type); | ||
| 723 | |||
| 724 | db = bootparam->h2c_config_db; | ||
| 725 | if (db != -1) | ||
| 726 | mdev->ops->send_intr(mdev, db); | ||
| 727 | mutex_unlock(&mdev->mic_mutex); | ||
| 728 | return 0; | ||
| 729 | err: | ||
| 730 | vqconfig = mic_vq_config(dd); | ||
| 731 | for (j = 0; j < i; j++) { | ||
| 732 | struct mic_vringh *mvr = &mvdev->mvr[j]; | ||
| 733 | mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address), | ||
| 734 | mvr->vring.len); | ||
| 735 | free_pages((unsigned long)mvr->vring.va, | ||
| 736 | get_order(mvr->vring.len)); | ||
| 737 | } | ||
| 738 | mutex_unlock(&mdev->mic_mutex); | ||
| 739 | return ret; | ||
| 740 | } | ||
| 741 | |||
| 742 | void mic_virtio_del_device(struct mic_vdev *mvdev) | ||
| 743 | { | ||
| 744 | struct list_head *pos, *tmp; | ||
| 745 | struct mic_vdev *tmp_mvdev; | ||
| 746 | struct mic_device *mdev = mvdev->mdev; | ||
| 747 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake); | ||
| 748 | int i, ret, retry; | ||
| 749 | struct mic_vqconfig *vqconfig; | ||
| 750 | struct mic_bootparam *bootparam = mdev->dp; | ||
| 751 | s8 db; | ||
| 752 | |||
| 753 | mutex_lock(&mdev->mic_mutex); | ||
| 754 | db = bootparam->h2c_config_db; | ||
| 755 | if (db == -1) | ||
| 756 | goto skip_hot_remove; | ||
| 757 | dev_dbg(&mdev->pdev->dev, | ||
| 758 | "Requesting hot remove id %d\n", mvdev->virtio_id); | ||
| 759 | mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE; | ||
| 760 | mdev->ops->send_intr(mdev, db); | ||
| 761 | for (retry = 100; retry--;) { | ||
| 762 | ret = wait_event_timeout(wake, | ||
| 763 | mvdev->dc->guest_ack, msecs_to_jiffies(100)); | ||
| 764 | if (ret) | ||
| 765 | break; | ||
| 766 | } | ||
| 767 | dev_dbg(&mdev->pdev->dev, | ||
| 768 | "Device id %d config_change %d guest_ack %d retry %d\n", | ||
| 769 | mvdev->virtio_id, mvdev->dc->config_change, | ||
| 770 | mvdev->dc->guest_ack, retry); | ||
| 771 | mvdev->dc->config_change = 0; | ||
| 772 | mvdev->dc->guest_ack = 0; | ||
| 773 | skip_hot_remove: | ||
| 774 | mic_free_irq(mdev, mvdev->virtio_cookie, mvdev); | ||
| 775 | flush_work(&mvdev->virtio_bh_work); | ||
| 776 | vqconfig = mic_vq_config(mvdev->dd); | ||
| 777 | for (i = 0; i < mvdev->dd->num_vq; i++) { | ||
| 778 | struct mic_vringh *mvr = &mvdev->mvr[i]; | ||
| 779 | |||
| 780 | mic_unmap_single(mvdev->mdev, mvr->buf_da, | ||
| 781 | MIC_INT_DMA_BUF_SIZE); | ||
| 782 | free_pages((unsigned long)mvr->buf, | ||
| 783 | get_order(MIC_INT_DMA_BUF_SIZE)); | ||
| 784 | vringh_kiov_cleanup(&mvr->riov); | ||
| 785 | vringh_kiov_cleanup(&mvr->wiov); | ||
| 786 | mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address), | ||
| 787 | mvr->vring.len); | ||
| 788 | free_pages((unsigned long)mvr->vring.va, | ||
| 789 | get_order(mvr->vring.len)); | ||
| 790 | } | ||
| 791 | |||
| 792 | list_for_each_safe(pos, tmp, &mdev->vdev_list) { | ||
| 793 | tmp_mvdev = list_entry(pos, struct mic_vdev, list); | ||
| 794 | if (tmp_mvdev == mvdev) { | ||
| 795 | list_del(pos); | ||
| 796 | dev_dbg(&mdev->pdev->dev, | ||
| 797 | "Removing virtio device id %d\n", | ||
| 798 | mvdev->virtio_id); | ||
| 799 | break; | ||
| 800 | } | ||
| 801 | } | ||
| 802 | /* | ||
| 803 | * Order the type update with previous stores. This write barrier | ||
| 804 | * is paired with the corresponding read barrier before the uncached | ||
| 805 | * system memory read of the type, on the card while scanning the | ||
| 806 | * device page. | ||
| 807 | */ | ||
| 808 | smp_wmb(); | ||
| 809 | mvdev->dd->type = -1; | ||
| 810 | mutex_unlock(&mdev->mic_mutex); | ||
| 811 | } | ||
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c index 8118ac48c764..82a973c85b5d 100644 --- a/drivers/misc/mic/host/mic_x100.c +++ b/drivers/misc/mic/host/mic_x100.c | |||
| @@ -450,26 +450,29 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf) | |||
| 450 | 450 | ||
| 451 | rc = mic_x100_get_boot_addr(mdev); | 451 | rc = mic_x100_get_boot_addr(mdev); |
| 452 | if (rc) | 452 | if (rc) |
| 453 | goto error; | 453 | return rc; |
| 454 | /* load OS */ | 454 | /* load OS */ |
| 455 | rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev); | 455 | rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev); |
| 456 | if (rc < 0) { | 456 | if (rc < 0) { |
| 457 | dev_err(&mdev->pdev->dev, | 457 | dev_err(&mdev->pdev->dev, |
| 458 | "ramdisk request_firmware failed: %d %s\n", | 458 | "ramdisk request_firmware failed: %d %s\n", |
| 459 | rc, mdev->cosm_dev->firmware); | 459 | rc, mdev->cosm_dev->firmware); |
| 460 | goto error; | 460 | return rc; |
| 461 | } | 461 | } |
| 462 | if (mdev->bootaddr > mdev->aper.len - fw->size) { | 462 | if (mdev->bootaddr > mdev->aper.len - fw->size) { |
| 463 | rc = -EINVAL; | 463 | rc = -EINVAL; |
| 464 | dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n", | 464 | dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n", |
| 465 | __func__, __LINE__, rc, mdev->bootaddr); | 465 | __func__, __LINE__, rc, mdev->bootaddr); |
| 466 | release_firmware(fw); | ||
| 467 | goto error; | 466 | goto error; |
| 468 | } | 467 | } |
| 469 | memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size); | 468 | memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size); |
| 470 | mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size); | 469 | mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size); |
| 471 | if (!strcmp(mdev->cosm_dev->bootmode, "flash")) | 470 | if (!strcmp(mdev->cosm_dev->bootmode, "flash")) { |
| 472 | goto done; | 471 | rc = -EINVAL; |
| 472 | dev_err(&mdev->pdev->dev, "%s %d rc %d\n", | ||
| 473 | __func__, __LINE__, rc); | ||
| 474 | goto error; | ||
| 475 | } | ||
| 473 | /* load command line */ | 476 | /* load command line */ |
| 474 | rc = mic_x100_load_command_line(mdev, fw); | 477 | rc = mic_x100_load_command_line(mdev, fw); |
| 475 | if (rc) { | 478 | if (rc) { |
| @@ -481,9 +484,11 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf) | |||
| 481 | /* load ramdisk */ | 484 | /* load ramdisk */ |
| 482 | if (mdev->cosm_dev->ramdisk) | 485 | if (mdev->cosm_dev->ramdisk) |
| 483 | rc = mic_x100_load_ramdisk(mdev); | 486 | rc = mic_x100_load_ramdisk(mdev); |
| 487 | |||
| 488 | return rc; | ||
| 489 | |||
| 484 | error: | 490 | error: |
| 485 | dev_dbg(&mdev->pdev->dev, "%s %d rc %d\n", __func__, __LINE__, rc); | 491 | release_firmware(fw); |
| 486 | done: | ||
| 487 | return rc; | 492 | return rc; |
| 488 | } | 493 | } |
| 489 | 494 | ||
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c index 95a13c629a8e..cd01a0efda6b 100644 --- a/drivers/misc/mic/scif/scif_dma.c +++ b/drivers/misc/mic/scif/scif_dma.c | |||
| @@ -74,11 +74,6 @@ struct scif_copy_work { | |||
| 74 | bool ordered; | 74 | bool ordered; |
| 75 | }; | 75 | }; |
| 76 | 76 | ||
| 77 | #ifndef list_entry_next | ||
| 78 | #define list_entry_next(pos, member) \ | ||
| 79 | list_entry(pos->member.next, typeof(*pos), member) | ||
| 80 | #endif | ||
| 81 | |||
| 82 | /** | 77 | /** |
| 83 | * scif_reserve_dma_chan: | 78 | * scif_reserve_dma_chan: |
| 84 | * @ep: Endpoint Descriptor. | 79 | * @ep: Endpoint Descriptor. |
| @@ -276,13 +271,10 @@ static struct scif_mmu_notif * | |||
| 276 | scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma) | 271 | scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma) |
| 277 | { | 272 | { |
| 278 | struct scif_mmu_notif *mmn; | 273 | struct scif_mmu_notif *mmn; |
| 279 | struct list_head *item; | ||
| 280 | 274 | ||
| 281 | list_for_each(item, &rma->mmn_list) { | 275 | list_for_each_entry(mmn, &rma->mmn_list, list) |
| 282 | mmn = list_entry(item, struct scif_mmu_notif, list); | ||
| 283 | if (mmn->mm == mm) | 276 | if (mmn->mm == mm) |
| 284 | return mmn; | 277 | return mmn; |
| 285 | } | ||
| 286 | return NULL; | 278 | return NULL; |
| 287 | } | 279 | } |
| 288 | 280 | ||
| @@ -293,13 +285,12 @@ scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep) | |||
| 293 | = kzalloc(sizeof(*mmn), GFP_KERNEL); | 285 | = kzalloc(sizeof(*mmn), GFP_KERNEL); |
| 294 | 286 | ||
| 295 | if (!mmn) | 287 | if (!mmn) |
| 296 | return ERR_PTR(ENOMEM); | 288 | return ERR_PTR(-ENOMEM); |
| 297 | 289 | ||
| 298 | scif_init_mmu_notifier(mmn, current->mm, ep); | 290 | scif_init_mmu_notifier(mmn, current->mm, ep); |
| 299 | if (mmu_notifier_register(&mmn->ep_mmu_notifier, | 291 | if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) { |
| 300 | current->mm)) { | ||
| 301 | kfree(mmn); | 292 | kfree(mmn); |
| 302 | return ERR_PTR(EBUSY); | 293 | return ERR_PTR(-EBUSY); |
| 303 | } | 294 | } |
| 304 | list_add(&mmn->list, &ep->rma_info.mmn_list); | 295 | list_add(&mmn->list, &ep->rma_info.mmn_list); |
| 305 | return mmn; | 296 | return mmn; |
| @@ -851,7 +842,7 @@ static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window, | |||
| 851 | (window->nr_pages << PAGE_SHIFT); | 842 | (window->nr_pages << PAGE_SHIFT); |
| 852 | while (rem_len) { | 843 | while (rem_len) { |
| 853 | if (offset == end_offset) { | 844 | if (offset == end_offset) { |
| 854 | window = list_entry_next(window, list); | 845 | window = list_next_entry(window, list); |
| 855 | end_offset = window->offset + | 846 | end_offset = window->offset + |
| 856 | (window->nr_pages << PAGE_SHIFT); | 847 | (window->nr_pages << PAGE_SHIFT); |
| 857 | } | 848 | } |
| @@ -957,7 +948,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work, | |||
| 957 | remaining_len -= tail_len; | 948 | remaining_len -= tail_len; |
| 958 | while (remaining_len) { | 949 | while (remaining_len) { |
| 959 | if (offset == end_offset) { | 950 | if (offset == end_offset) { |
| 960 | window = list_entry_next(window, list); | 951 | window = list_next_entry(window, list); |
| 961 | end_offset = window->offset + | 952 | end_offset = window->offset + |
| 962 | (window->nr_pages << PAGE_SHIFT); | 953 | (window->nr_pages << PAGE_SHIFT); |
| 963 | } | 954 | } |
| @@ -1064,7 +1055,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work, | |||
| 1064 | } | 1055 | } |
| 1065 | if (tail_len) { | 1056 | if (tail_len) { |
| 1066 | if (offset == end_offset) { | 1057 | if (offset == end_offset) { |
| 1067 | window = list_entry_next(window, list); | 1058 | window = list_next_entry(window, list); |
| 1068 | end_offset = window->offset + | 1059 | end_offset = window->offset + |
| 1069 | (window->nr_pages << PAGE_SHIFT); | 1060 | (window->nr_pages << PAGE_SHIFT); |
| 1070 | } | 1061 | } |
| @@ -1147,13 +1138,13 @@ static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, | |||
| 1147 | (dst_window->nr_pages << PAGE_SHIFT); | 1138 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1148 | while (remaining_len) { | 1139 | while (remaining_len) { |
| 1149 | if (src_offset == end_src_offset) { | 1140 | if (src_offset == end_src_offset) { |
| 1150 | src_window = list_entry_next(src_window, list); | 1141 | src_window = list_next_entry(src_window, list); |
| 1151 | end_src_offset = src_window->offset + | 1142 | end_src_offset = src_window->offset + |
| 1152 | (src_window->nr_pages << PAGE_SHIFT); | 1143 | (src_window->nr_pages << PAGE_SHIFT); |
| 1153 | scif_init_window_iter(src_window, &src_win_iter); | 1144 | scif_init_window_iter(src_window, &src_win_iter); |
| 1154 | } | 1145 | } |
| 1155 | if (dst_offset == end_dst_offset) { | 1146 | if (dst_offset == end_dst_offset) { |
| 1156 | dst_window = list_entry_next(dst_window, list); | 1147 | dst_window = list_next_entry(dst_window, list); |
| 1157 | end_dst_offset = dst_window->offset + | 1148 | end_dst_offset = dst_window->offset + |
| 1158 | (dst_window->nr_pages << PAGE_SHIFT); | 1149 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1159 | scif_init_window_iter(dst_window, &dst_win_iter); | 1150 | scif_init_window_iter(dst_window, &dst_win_iter); |
| @@ -1314,13 +1305,13 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, | |||
| 1314 | remaining_len -= tail_len; | 1305 | remaining_len -= tail_len; |
| 1315 | while (remaining_len) { | 1306 | while (remaining_len) { |
| 1316 | if (src_offset == end_src_offset) { | 1307 | if (src_offset == end_src_offset) { |
| 1317 | src_window = list_entry_next(src_window, list); | 1308 | src_window = list_next_entry(src_window, list); |
| 1318 | end_src_offset = src_window->offset + | 1309 | end_src_offset = src_window->offset + |
| 1319 | (src_window->nr_pages << PAGE_SHIFT); | 1310 | (src_window->nr_pages << PAGE_SHIFT); |
| 1320 | scif_init_window_iter(src_window, &src_win_iter); | 1311 | scif_init_window_iter(src_window, &src_win_iter); |
| 1321 | } | 1312 | } |
| 1322 | if (dst_offset == end_dst_offset) { | 1313 | if (dst_offset == end_dst_offset) { |
| 1323 | dst_window = list_entry_next(dst_window, list); | 1314 | dst_window = list_next_entry(dst_window, list); |
| 1324 | end_dst_offset = dst_window->offset + | 1315 | end_dst_offset = dst_window->offset + |
| 1325 | (dst_window->nr_pages << PAGE_SHIFT); | 1316 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1326 | scif_init_window_iter(dst_window, &dst_win_iter); | 1317 | scif_init_window_iter(dst_window, &dst_win_iter); |
| @@ -1405,9 +1396,9 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, | |||
| 1405 | if (remaining_len) { | 1396 | if (remaining_len) { |
| 1406 | loop_len = remaining_len; | 1397 | loop_len = remaining_len; |
| 1407 | if (src_offset == end_src_offset) | 1398 | if (src_offset == end_src_offset) |
| 1408 | src_window = list_entry_next(src_window, list); | 1399 | src_window = list_next_entry(src_window, list); |
| 1409 | if (dst_offset == end_dst_offset) | 1400 | if (dst_offset == end_dst_offset) |
| 1410 | dst_window = list_entry_next(dst_window, list); | 1401 | dst_window = list_next_entry(dst_window, list); |
| 1411 | 1402 | ||
| 1412 | src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset); | 1403 | src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset); |
| 1413 | dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset); | 1404 | dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset); |
| @@ -1550,12 +1541,12 @@ static int scif_rma_list_cpu_copy(struct scif_copy_work *work) | |||
| 1550 | end_dst_offset = dst_window->offset + | 1541 | end_dst_offset = dst_window->offset + |
| 1551 | (dst_window->nr_pages << PAGE_SHIFT); | 1542 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1552 | if (src_offset == end_src_offset) { | 1543 | if (src_offset == end_src_offset) { |
| 1553 | src_window = list_entry_next(src_window, list); | 1544 | src_window = list_next_entry(src_window, list); |
| 1554 | scif_init_window_iter(src_window, | 1545 | scif_init_window_iter(src_window, |
| 1555 | &src_win_iter); | 1546 | &src_win_iter); |
| 1556 | } | 1547 | } |
| 1557 | if (dst_offset == end_dst_offset) { | 1548 | if (dst_offset == end_dst_offset) { |
| 1558 | dst_window = list_entry_next(dst_window, list); | 1549 | dst_window = list_next_entry(dst_window, list); |
| 1559 | scif_init_window_iter(dst_window, | 1550 | scif_init_window_iter(dst_window, |
| 1560 | &dst_win_iter); | 1551 | &dst_win_iter); |
| 1561 | } | 1552 | } |
| @@ -1730,7 +1721,7 @@ static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr, | |||
| 1730 | mutex_lock(&ep->rma_info.mmn_lock); | 1721 | mutex_lock(&ep->rma_info.mmn_lock); |
| 1731 | mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); | 1722 | mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); |
| 1732 | if (!mmn) | 1723 | if (!mmn) |
| 1733 | scif_add_mmu_notifier(current->mm, ep); | 1724 | mmn = scif_add_mmu_notifier(current->mm, ep); |
| 1734 | mutex_unlock(&ep->rma_info.mmn_lock); | 1725 | mutex_unlock(&ep->rma_info.mmn_lock); |
| 1735 | if (IS_ERR(mmn)) { | 1726 | if (IS_ERR(mmn)) { |
| 1736 | scif_put_peer_dev(spdev); | 1727 | scif_put_peer_dev(spdev); |
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index 8310b4dbff06..6a451bd65bf3 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c | |||
| @@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd, | |||
| 1511 | if ((map_flags & SCIF_MAP_FIXED) && | 1511 | if ((map_flags & SCIF_MAP_FIXED) && |
| 1512 | ((ALIGN(offset, PAGE_SIZE) != offset) || | 1512 | ((ALIGN(offset, PAGE_SIZE) != offset) || |
| 1513 | (offset < 0) || | 1513 | (offset < 0) || |
| 1514 | (offset + (off_t)len < offset))) | 1514 | (len > LONG_MAX - offset))) |
| 1515 | return -EINVAL; | 1515 | return -EINVAL; |
| 1516 | 1516 | ||
| 1517 | might_sleep(); | 1517 | might_sleep(); |
| @@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset, | |||
| 1614 | if ((map_flags & SCIF_MAP_FIXED) && | 1614 | if ((map_flags & SCIF_MAP_FIXED) && |
| 1615 | ((ALIGN(offset, PAGE_SIZE) != offset) || | 1615 | ((ALIGN(offset, PAGE_SIZE) != offset) || |
| 1616 | (offset < 0) || | 1616 | (offset < 0) || |
| 1617 | (offset + (off_t)len < offset))) | 1617 | (len > LONG_MAX - offset))) |
| 1618 | return -EINVAL; | 1618 | return -EINVAL; |
| 1619 | 1619 | ||
| 1620 | /* Unsupported protection requested */ | 1620 | /* Unsupported protection requested */ |
| @@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len) | |||
| 1732 | 1732 | ||
| 1733 | /* Offset is not page aligned or offset+len wraps around */ | 1733 | /* Offset is not page aligned or offset+len wraps around */ |
| 1734 | if ((ALIGN(offset, PAGE_SIZE) != offset) || | 1734 | if ((ALIGN(offset, PAGE_SIZE) != offset) || |
| 1735 | (offset + (off_t)len < offset)) | 1735 | (offset < 0) || |
| 1736 | (len > LONG_MAX - offset)) | ||
| 1736 | return -EINVAL; | 1737 | return -EINVAL; |
| 1737 | 1738 | ||
| 1738 | err = scif_verify_epd(ep); | 1739 | err = scif_verify_epd(ep); |
diff --git a/drivers/misc/mic/vop/Makefile b/drivers/misc/mic/vop/Makefile new file mode 100644 index 000000000000..78819c8999f1 --- /dev/null +++ b/drivers/misc/mic/vop/Makefile | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | # | ||
| 2 | # Makefile - Intel MIC Linux driver. | ||
| 3 | # Copyright(c) 2016, Intel Corporation. | ||
| 4 | # | ||
| 5 | obj-m := vop.o | ||
| 6 | |||
| 7 | vop-objs += vop_main.o | ||
| 8 | vop-objs += vop_debugfs.o | ||
| 9 | vop-objs += vop_vringh.o | ||
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c new file mode 100644 index 000000000000..ab43884e5cd7 --- /dev/null +++ b/drivers/misc/mic/vop/vop_debugfs.c | |||
| @@ -0,0 +1,232 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2016 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Intel Virtio Over PCIe (VOP) driver. | ||
| 19 | * | ||
| 20 | */ | ||
| 21 | #include <linux/debugfs.h> | ||
| 22 | #include <linux/seq_file.h> | ||
| 23 | |||
| 24 | #include "vop_main.h" | ||
| 25 | |||
| 26 | static int vop_dp_show(struct seq_file *s, void *pos) | ||
| 27 | { | ||
| 28 | struct mic_device_desc *d; | ||
| 29 | struct mic_device_ctrl *dc; | ||
| 30 | struct mic_vqconfig *vqconfig; | ||
| 31 | __u32 *features; | ||
| 32 | __u8 *config; | ||
| 33 | struct vop_info *vi = s->private; | ||
| 34 | struct vop_device *vpdev = vi->vpdev; | ||
| 35 | struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev); | ||
| 36 | int j, k; | ||
| 37 | |||
| 38 | seq_printf(s, "Bootparam: magic 0x%x\n", | ||
| 39 | bootparam->magic); | ||
| 40 | seq_printf(s, "Bootparam: h2c_config_db %d\n", | ||
| 41 | bootparam->h2c_config_db); | ||
| 42 | seq_printf(s, "Bootparam: node_id %d\n", | ||
| 43 | bootparam->node_id); | ||
| 44 | seq_printf(s, "Bootparam: c2h_scif_db %d\n", | ||
| 45 | bootparam->c2h_scif_db); | ||
| 46 | seq_printf(s, "Bootparam: h2c_scif_db %d\n", | ||
| 47 | bootparam->h2c_scif_db); | ||
| 48 | seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n", | ||
| 49 | bootparam->scif_host_dma_addr); | ||
| 50 | seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n", | ||
| 51 | bootparam->scif_card_dma_addr); | ||
| 52 | |||
| 53 | for (j = sizeof(*bootparam); | ||
| 54 | j < MIC_DP_SIZE; j += mic_total_desc_size(d)) { | ||
| 55 | d = (void *)bootparam + j; | ||
| 56 | dc = (void *)d + mic_aligned_desc_size(d); | ||
| 57 | |||
| 58 | /* end of list */ | ||
| 59 | if (d->type == 0) | ||
| 60 | break; | ||
| 61 | |||
| 62 | if (d->type == -1) | ||
| 63 | continue; | ||
| 64 | |||
| 65 | seq_printf(s, "Type %d ", d->type); | ||
| 66 | seq_printf(s, "Num VQ %d ", d->num_vq); | ||
| 67 | seq_printf(s, "Feature Len %d\n", d->feature_len); | ||
| 68 | seq_printf(s, "Config Len %d ", d->config_len); | ||
| 69 | seq_printf(s, "Shutdown Status %d\n", d->status); | ||
| 70 | |||
| 71 | for (k = 0; k < d->num_vq; k++) { | ||
| 72 | vqconfig = mic_vq_config(d) + k; | ||
| 73 | seq_printf(s, "vqconfig[%d]: ", k); | ||
| 74 | seq_printf(s, "address 0x%llx ", | ||
| 75 | vqconfig->address); | ||
| 76 | seq_printf(s, "num %d ", vqconfig->num); | ||
| 77 | seq_printf(s, "used address 0x%llx\n", | ||
| 78 | vqconfig->used_address); | ||
| 79 | } | ||
| 80 | |||
| 81 | features = (__u32 *)mic_vq_features(d); | ||
| 82 | seq_printf(s, "Features: Host 0x%x ", features[0]); | ||
| 83 | seq_printf(s, "Guest 0x%x\n", features[1]); | ||
| 84 | |||
| 85 | config = mic_vq_configspace(d); | ||
| 86 | for (k = 0; k < d->config_len; k++) | ||
| 87 | seq_printf(s, "config[%d]=%d\n", k, config[k]); | ||
| 88 | |||
| 89 | seq_puts(s, "Device control:\n"); | ||
| 90 | seq_printf(s, "Config Change %d ", dc->config_change); | ||
| 91 | seq_printf(s, "Vdev reset %d\n", dc->vdev_reset); | ||
| 92 | seq_printf(s, "Guest Ack %d ", dc->guest_ack); | ||
| 93 | seq_printf(s, "Host ack %d\n", dc->host_ack); | ||
| 94 | seq_printf(s, "Used address updated %d ", | ||
| 95 | dc->used_address_updated); | ||
| 96 | seq_printf(s, "Vdev 0x%llx\n", dc->vdev); | ||
| 97 | seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db); | ||
| 98 | seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db); | ||
| 99 | } | ||
| 100 | schedule_work(&vi->hotplug_work); | ||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | static int vop_dp_debug_open(struct inode *inode, struct file *file) | ||
| 105 | { | ||
| 106 | return single_open(file, vop_dp_show, inode->i_private); | ||
| 107 | } | ||
| 108 | |||
| 109 | static int vop_dp_debug_release(struct inode *inode, struct file *file) | ||
| 110 | { | ||
| 111 | return single_release(inode, file); | ||
| 112 | } | ||
| 113 | |||
| 114 | static const struct file_operations dp_ops = { | ||
| 115 | .owner = THIS_MODULE, | ||
| 116 | .open = vop_dp_debug_open, | ||
| 117 | .read = seq_read, | ||
| 118 | .llseek = seq_lseek, | ||
| 119 | .release = vop_dp_debug_release | ||
| 120 | }; | ||
| 121 | |||
| 122 | static int vop_vdev_info_show(struct seq_file *s, void *unused) | ||
| 123 | { | ||
| 124 | struct vop_info *vi = s->private; | ||
| 125 | struct list_head *pos, *tmp; | ||
| 126 | struct vop_vdev *vdev; | ||
| 127 | int i, j; | ||
| 128 | |||
| 129 | mutex_lock(&vi->vop_mutex); | ||
| 130 | list_for_each_safe(pos, tmp, &vi->vdev_list) { | ||
| 131 | vdev = list_entry(pos, struct vop_vdev, list); | ||
| 132 | seq_printf(s, "VDEV type %d state %s in %ld out %ld in_dma %ld out_dma %ld\n", | ||
| 133 | vdev->virtio_id, | ||
| 134 | vop_vdevup(vdev) ? "UP" : "DOWN", | ||
| 135 | vdev->in_bytes, | ||
| 136 | vdev->out_bytes, | ||
| 137 | vdev->in_bytes_dma, | ||
| 138 | vdev->out_bytes_dma); | ||
| 139 | for (i = 0; i < MIC_MAX_VRINGS; i++) { | ||
| 140 | struct vring_desc *desc; | ||
| 141 | struct vring_avail *avail; | ||
| 142 | struct vring_used *used; | ||
| 143 | struct vop_vringh *vvr = &vdev->vvr[i]; | ||
| 144 | struct vringh *vrh = &vvr->vrh; | ||
| 145 | int num = vrh->vring.num; | ||
| 146 | |||
| 147 | if (!num) | ||
| 148 | continue; | ||
| 149 | desc = vrh->vring.desc; | ||
| 150 | seq_printf(s, "vring i %d avail_idx %d", | ||
| 151 | i, vvr->vring.info->avail_idx & (num - 1)); | ||
| 152 | seq_printf(s, " vring i %d avail_idx %d\n", | ||
| 153 | i, vvr->vring.info->avail_idx); | ||
| 154 | seq_printf(s, "vrh i %d weak_barriers %d", | ||
| 155 | i, vrh->weak_barriers); | ||
| 156 | seq_printf(s, " last_avail_idx %d last_used_idx %d", | ||
| 157 | vrh->last_avail_idx, vrh->last_used_idx); | ||
| 158 | seq_printf(s, " completed %d\n", vrh->completed); | ||
| 159 | for (j = 0; j < num; j++) { | ||
| 160 | seq_printf(s, "desc[%d] addr 0x%llx len %d", | ||
| 161 | j, desc->addr, desc->len); | ||
| 162 | seq_printf(s, " flags 0x%x next %d\n", | ||
| 163 | desc->flags, desc->next); | ||
| 164 | desc++; | ||
| 165 | } | ||
| 166 | avail = vrh->vring.avail; | ||
| 167 | seq_printf(s, "avail flags 0x%x idx %d\n", | ||
| 168 | vringh16_to_cpu(vrh, avail->flags), | ||
| 169 | vringh16_to_cpu(vrh, | ||
| 170 | avail->idx) & (num - 1)); | ||
| 171 | seq_printf(s, "avail flags 0x%x idx %d\n", | ||
| 172 | vringh16_to_cpu(vrh, avail->flags), | ||
| 173 | vringh16_to_cpu(vrh, avail->idx)); | ||
| 174 | for (j = 0; j < num; j++) | ||
| 175 | seq_printf(s, "avail ring[%d] %d\n", | ||
| 176 | j, avail->ring[j]); | ||
| 177 | used = vrh->vring.used; | ||
| 178 | seq_printf(s, "used flags 0x%x idx %d\n", | ||
| 179 | vringh16_to_cpu(vrh, used->flags), | ||
| 180 | vringh16_to_cpu(vrh, used->idx) & (num - 1)); | ||
| 181 | seq_printf(s, "used flags 0x%x idx %d\n", | ||
| 182 | vringh16_to_cpu(vrh, used->flags), | ||
| 183 | vringh16_to_cpu(vrh, used->idx)); | ||
| 184 | for (j = 0; j < num; j++) | ||
| 185 | seq_printf(s, "used ring[%d] id %d len %d\n", | ||
| 186 | j, vringh32_to_cpu(vrh, | ||
| 187 | used->ring[j].id), | ||
| 188 | vringh32_to_cpu(vrh, | ||
| 189 | used->ring[j].len)); | ||
| 190 | } | ||
| 191 | } | ||
| 192 | mutex_unlock(&vi->vop_mutex); | ||
| 193 | |||
| 194 | return 0; | ||
| 195 | } | ||
| 196 | |||
| 197 | static int vop_vdev_info_debug_open(struct inode *inode, struct file *file) | ||
| 198 | { | ||
| 199 | return single_open(file, vop_vdev_info_show, inode->i_private); | ||
| 200 | } | ||
| 201 | |||
| 202 | static int vop_vdev_info_debug_release(struct inode *inode, struct file *file) | ||
| 203 | { | ||
| 204 | return single_release(inode, file); | ||
| 205 | } | ||
| 206 | |||
| 207 | static const struct file_operations vdev_info_ops = { | ||
| 208 | .owner = THIS_MODULE, | ||
| 209 | .open = vop_vdev_info_debug_open, | ||
| 210 | .read = seq_read, | ||
| 211 | .llseek = seq_lseek, | ||
| 212 | .release = vop_vdev_info_debug_release | ||
| 213 | }; | ||
| 214 | |||
| 215 | void vop_init_debugfs(struct vop_info *vi) | ||
| 216 | { | ||
| 217 | char name[16]; | ||
| 218 | |||
| 219 | snprintf(name, sizeof(name), "%s%d", KBUILD_MODNAME, vi->vpdev->dnode); | ||
| 220 | vi->dbg = debugfs_create_dir(name, NULL); | ||
| 221 | if (!vi->dbg) { | ||
| 222 | pr_err("can't create debugfs dir vop\n"); | ||
| 223 | return; | ||
| 224 | } | ||
| 225 | debugfs_create_file("dp", 0444, vi->dbg, vi, &dp_ops); | ||
| 226 | debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vdev_info_ops); | ||
| 227 | } | ||
| 228 | |||
| 229 | void vop_exit_debugfs(struct vop_info *vi) | ||
| 230 | { | ||
| 231 | debugfs_remove_recursive(vi->dbg); | ||
| 232 | } | ||
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c new file mode 100644 index 000000000000..1a2b67f3183d --- /dev/null +++ b/drivers/misc/mic/vop/vop_main.c | |||
| @@ -0,0 +1,755 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2016 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Adapted from: | ||
| 19 | * | ||
| 20 | * virtio for kvm on s390 | ||
| 21 | * | ||
| 22 | * Copyright IBM Corp. 2008 | ||
| 23 | * | ||
| 24 | * This program is free software; you can redistribute it and/or modify | ||
| 25 | * it under the terms of the GNU General Public License (version 2 only) | ||
| 26 | * as published by the Free Software Foundation. | ||
| 27 | * | ||
| 28 | * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> | ||
| 29 | * | ||
| 30 | * Intel Virtio Over PCIe (VOP) driver. | ||
| 31 | * | ||
| 32 | */ | ||
| 33 | #include <linux/delay.h> | ||
| 34 | #include <linux/module.h> | ||
| 35 | #include <linux/sched.h> | ||
| 36 | #include <linux/dma-mapping.h> | ||
| 37 | |||
| 38 | #include "vop_main.h" | ||
| 39 | |||
| 40 | #define VOP_MAX_VRINGS 4 | ||
| 41 | |||
| 42 | /* | ||
| 43 | * _vop_vdev - Allocated per virtio device instance injected by the peer. | ||
| 44 | * | ||
| 45 | * @vdev: Virtio device | ||
| 46 | * @desc: Virtio device page descriptor | ||
| 47 | * @dc: Virtio device control | ||
| 48 | * @vpdev: VOP device which is the parent for this virtio device | ||
| 49 | * @vr: Buffer for accessing the VRING | ||
| 50 | * @used: Buffer for used | ||
| 51 | * @used_size: Size of the used buffer | ||
| 52 | * @reset_done: Track whether VOP reset is complete | ||
| 53 | * @virtio_cookie: Cookie returned upon requesting a interrupt | ||
| 54 | * @c2h_vdev_db: The doorbell used by the guest to interrupt the host | ||
| 55 | * @h2c_vdev_db: The doorbell used by the host to interrupt the guest | ||
| 56 | * @dnode: The destination node | ||
| 57 | */ | ||
| 58 | struct _vop_vdev { | ||
| 59 | struct virtio_device vdev; | ||
| 60 | struct mic_device_desc __iomem *desc; | ||
| 61 | struct mic_device_ctrl __iomem *dc; | ||
| 62 | struct vop_device *vpdev; | ||
| 63 | void __iomem *vr[VOP_MAX_VRINGS]; | ||
| 64 | dma_addr_t used[VOP_MAX_VRINGS]; | ||
| 65 | int used_size[VOP_MAX_VRINGS]; | ||
| 66 | struct completion reset_done; | ||
| 67 | struct mic_irq *virtio_cookie; | ||
| 68 | int c2h_vdev_db; | ||
| 69 | int h2c_vdev_db; | ||
| 70 | int dnode; | ||
| 71 | }; | ||
| 72 | |||
| 73 | #define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev) | ||
| 74 | |||
| 75 | #define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8) | ||
| 76 | |||
| 77 | /* Helper API to obtain the parent of the virtio device */ | ||
| 78 | static inline struct device *_vop_dev(struct _vop_vdev *vdev) | ||
| 79 | { | ||
| 80 | return vdev->vdev.dev.parent; | ||
| 81 | } | ||
| 82 | |||
| 83 | static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc) | ||
| 84 | { | ||
| 85 | return sizeof(*desc) | ||
| 86 | + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) | ||
| 87 | + ioread8(&desc->feature_len) * 2 | ||
| 88 | + ioread8(&desc->config_len); | ||
| 89 | } | ||
| 90 | |||
| 91 | static inline struct mic_vqconfig __iomem * | ||
| 92 | _vop_vq_config(struct mic_device_desc __iomem *desc) | ||
| 93 | { | ||
| 94 | return (struct mic_vqconfig __iomem *)(desc + 1); | ||
| 95 | } | ||
| 96 | |||
| 97 | static inline u8 __iomem * | ||
| 98 | _vop_vq_features(struct mic_device_desc __iomem *desc) | ||
| 99 | { | ||
| 100 | return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq)); | ||
| 101 | } | ||
| 102 | |||
| 103 | static inline u8 __iomem * | ||
| 104 | _vop_vq_configspace(struct mic_device_desc __iomem *desc) | ||
| 105 | { | ||
| 106 | return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline unsigned | ||
| 110 | _vop_total_desc_size(struct mic_device_desc __iomem *desc) | ||
| 111 | { | ||
| 112 | return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); | ||
| 113 | } | ||
| 114 | |||
| 115 | /* This gets the device's feature bits. */ | ||
| 116 | static u64 vop_get_features(struct virtio_device *vdev) | ||
| 117 | { | ||
| 118 | unsigned int i, bits; | ||
| 119 | u32 features = 0; | ||
| 120 | struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; | ||
| 121 | u8 __iomem *in_features = _vop_vq_features(desc); | ||
| 122 | int feature_len = ioread8(&desc->feature_len); | ||
| 123 | |||
| 124 | bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8; | ||
| 125 | for (i = 0; i < bits; i++) | ||
| 126 | if (ioread8(&in_features[i / 8]) & (BIT(i % 8))) | ||
| 127 | features |= BIT(i); | ||
| 128 | |||
| 129 | return features; | ||
| 130 | } | ||
| 131 | |||
| 132 | static int vop_finalize_features(struct virtio_device *vdev) | ||
| 133 | { | ||
| 134 | unsigned int i, bits; | ||
| 135 | struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; | ||
| 136 | u8 feature_len = ioread8(&desc->feature_len); | ||
| 137 | /* Second half of bitmap is features we accept. */ | ||
| 138 | u8 __iomem *out_features = | ||
| 139 | _vop_vq_features(desc) + feature_len; | ||
| 140 | |||
| 141 | /* Give virtio_ring a chance to accept features. */ | ||
| 142 | vring_transport_features(vdev); | ||
| 143 | |||
| 144 | memset_io(out_features, 0, feature_len); | ||
| 145 | bits = min_t(unsigned, feature_len, | ||
| 146 | sizeof(vdev->features)) * 8; | ||
| 147 | for (i = 0; i < bits; i++) { | ||
| 148 | if (__virtio_test_bit(vdev, i)) | ||
| 149 | iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)), | ||
| 150 | &out_features[i / 8]); | ||
| 151 | } | ||
| 152 | return 0; | ||
| 153 | } | ||
| 154 | |||
| 155 | /* | ||
| 156 | * Reading and writing elements in config space | ||
| 157 | */ | ||
| 158 | static void vop_get(struct virtio_device *vdev, unsigned int offset, | ||
| 159 | void *buf, unsigned len) | ||
| 160 | { | ||
| 161 | struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; | ||
| 162 | |||
| 163 | if (offset + len > ioread8(&desc->config_len)) | ||
| 164 | return; | ||
| 165 | memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len); | ||
| 166 | } | ||
| 167 | |||
| 168 | static void vop_set(struct virtio_device *vdev, unsigned int offset, | ||
| 169 | const void *buf, unsigned len) | ||
| 170 | { | ||
| 171 | struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; | ||
| 172 | |||
| 173 | if (offset + len > ioread8(&desc->config_len)) | ||
| 174 | return; | ||
| 175 | memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len); | ||
| 176 | } | ||
| 177 | |||
| 178 | /* | ||
| 179 | * The operations to get and set the status word just access the status | ||
| 180 | * field of the device descriptor. set_status also interrupts the host | ||
| 181 | * to tell about status changes. | ||
| 182 | */ | ||
| 183 | static u8 vop_get_status(struct virtio_device *vdev) | ||
| 184 | { | ||
| 185 | return ioread8(&to_vopvdev(vdev)->desc->status); | ||
| 186 | } | ||
| 187 | |||
| 188 | static void vop_set_status(struct virtio_device *dev, u8 status) | ||
| 189 | { | ||
| 190 | struct _vop_vdev *vdev = to_vopvdev(dev); | ||
| 191 | struct vop_device *vpdev = vdev->vpdev; | ||
| 192 | |||
| 193 | if (!status) | ||
| 194 | return; | ||
| 195 | iowrite8(status, &vdev->desc->status); | ||
| 196 | vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db); | ||
| 197 | } | ||
| 198 | |||
| 199 | /* Inform host on a virtio device reset and wait for ack from host */ | ||
| 200 | static void vop_reset_inform_host(struct virtio_device *dev) | ||
| 201 | { | ||
| 202 | struct _vop_vdev *vdev = to_vopvdev(dev); | ||
| 203 | struct mic_device_ctrl __iomem *dc = vdev->dc; | ||
| 204 | struct vop_device *vpdev = vdev->vpdev; | ||
| 205 | int retry; | ||
| 206 | |||
| 207 | iowrite8(0, &dc->host_ack); | ||
| 208 | iowrite8(1, &dc->vdev_reset); | ||
| 209 | vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db); | ||
| 210 | |||
| 211 | /* Wait till host completes all card accesses and acks the reset */ | ||
| 212 | for (retry = 100; retry--;) { | ||
| 213 | if (ioread8(&dc->host_ack)) | ||
| 214 | break; | ||
| 215 | msleep(100); | ||
| 216 | }; | ||
| 217 | |||
| 218 | dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry); | ||
| 219 | |||
| 220 | /* Reset status to 0 in case we timed out */ | ||
| 221 | iowrite8(0, &vdev->desc->status); | ||
| 222 | } | ||
| 223 | |||
| 224 | static void vop_reset(struct virtio_device *dev) | ||
| 225 | { | ||
| 226 | struct _vop_vdev *vdev = to_vopvdev(dev); | ||
| 227 | |||
| 228 | dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n", | ||
| 229 | __func__, dev->id.device); | ||
| 230 | |||
| 231 | vop_reset_inform_host(dev); | ||
| 232 | complete_all(&vdev->reset_done); | ||
| 233 | } | ||
| 234 | |||
| 235 | /* | ||
| 236 | * The virtio_ring code calls this API when it wants to notify the Host. | ||
| 237 | */ | ||
| 238 | static bool vop_notify(struct virtqueue *vq) | ||
| 239 | { | ||
| 240 | struct _vop_vdev *vdev = vq->priv; | ||
| 241 | struct vop_device *vpdev = vdev->vpdev; | ||
| 242 | |||
| 243 | vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db); | ||
| 244 | return true; | ||
| 245 | } | ||
| 246 | |||
/*
 * Tear down virtqueue @n, undoing vop_find_vq() in reverse order:
 * unmap the used ring's DMA mapping, free the locally allocated used
 * ring pages, delete the virtqueue and finally iounmap the vring that
 * lives in host memory.
 */
static void vop_del_vq(struct virtqueue *vq, int n)
{
	struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
	/*
	 * The vring sits immediately after struct vring_virtqueue; this
	 * relies on the same private layout assumption documented in
	 * vop_find_vq().
	 */
	struct vring *vr = (struct vring *)(vq + 1);
	struct vop_device *vpdev = vdev->vpdev;

	dma_unmap_single(&vpdev->dev, vdev->used[n],
			 vdev->used_size[n], DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
	vring_del_virtqueue(vq);
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
	vdev->vr[n] = NULL;
}
| 260 | |||
| 261 | static void vop_del_vqs(struct virtio_device *dev) | ||
| 262 | { | ||
| 263 | struct _vop_vdev *vdev = to_vopvdev(dev); | ||
| 264 | struct virtqueue *vq, *n; | ||
| 265 | int idx = 0; | ||
| 266 | |||
| 267 | dev_dbg(_vop_dev(vdev), "%s\n", __func__); | ||
| 268 | |||
| 269 | list_for_each_entry_safe(vq, n, &dev->vqs, list) | ||
| 270 | vop_del_vq(vq, idx++); | ||
| 271 | } | ||
| 272 | |||
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 *
 * The descriptor/avail portion of the ring stays in host memory; the
 * used ring is reallocated in card-local memory, DMA-mapped, and its
 * address written back so the host can reach it.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
				     unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name)
{
	struct _vop_vdev *vdev = to_vopvdev(dev);
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_vqconfig __iomem *vqconfig;
	struct mic_vqconfig config;
	struct virtqueue *vq;
	void __iomem *va;
	struct _mic_vring_info __iomem *info;
	void *used;
	int vr_size, _vr_size, err, magic;
	struct vring *vr;
	u8 type = ioread8(&vdev->desc->type);

	if (index >= ioread8(&vdev->desc->num_vq))
		return ERR_PTR(-ENOENT);

	if (!name)
		return ERR_PTR(-ENOENT);

	/* First assign the vring's allocated in host memory */
	vqconfig = _vop_vq_config(vdev->desc) + index;
	/* Snapshot the iomem vq config into a local copy for plain access */
	memcpy_fromio(&config, vqconfig, sizeof(config));
	_vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
	/* The _mic_vring_info block is appended after the ring proper */
	vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
	va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
			vr_size);
	if (!va)
		return ERR_PTR(-ENOMEM);
	vdev->vr[index] = va;
	memset_io(va, 0x0, _vr_size);
	vq = vring_new_virtqueue(
				index,
				le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
				dev,
				false,
				(void __force *)va, vop_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto unmap;
	}
	info = va + _vr_size;
	magic = ioread32(&info->magic);

	/* Sanity-check that the host set this ring up for this device/index */
	if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
		err = -EIO;
		goto unmap;
	}

	/* Allocate and reassign used ring now */
	vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
					     sizeof(struct vring_used_elem) *
					     le16_to_cpu(config.num));
	used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(vdev->used_size[index]));
	if (!used) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto del_vq;
	}
	vdev->used[index] = dma_map_single(&vpdev->dev, used,
					    vdev->used_size[index],
					    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
		err = -ENOMEM;
		dev_err(_vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto free_used;
	}
	/* Publish the DMA address of the new used ring to the host */
	writeq(vdev->used[index], &vqconfig->used_address);
	/*
	 * To reassign the used ring here we are directly accessing
	 * struct vring_virtqueue which is a private data structure
	 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
	 * vring_new_virtqueue() would ensure that
	 * (&vq->vring == (struct vring *) (&vq->vq + 1));
	 */
	vr = (struct vring *)(vq + 1);
	vr->used = used;

	vq->priv = vdev;
	return vq;
free_used:
	free_pages((unsigned long)used,
		   get_order(vdev->used_size[index]));
del_vq:
	vring_del_virtqueue(vq);
unmap:
	vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
	return ERR_PTR(err);
}
| 373 | |||
| 374 | static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, | ||
| 375 | struct virtqueue *vqs[], | ||
| 376 | vq_callback_t *callbacks[], | ||
| 377 | const char * const names[]) | ||
| 378 | { | ||
| 379 | struct _vop_vdev *vdev = to_vopvdev(dev); | ||
| 380 | struct vop_device *vpdev = vdev->vpdev; | ||
| 381 | struct mic_device_ctrl __iomem *dc = vdev->dc; | ||
| 382 | int i, err, retry; | ||
| 383 | |||
| 384 | /* We must have this many virtqueues. */ | ||
| 385 | if (nvqs > ioread8(&vdev->desc->num_vq)) | ||
| 386 | return -ENOENT; | ||
| 387 | |||
| 388 | for (i = 0; i < nvqs; ++i) { | ||
| 389 | dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", | ||
| 390 | __func__, i, names[i]); | ||
| 391 | vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]); | ||
| 392 | if (IS_ERR(vqs[i])) { | ||
| 393 | err = PTR_ERR(vqs[i]); | ||
| 394 | goto error; | ||
| 395 | } | ||
| 396 | } | ||
| 397 | |||
| 398 | iowrite8(1, &dc->used_address_updated); | ||
| 399 | /* | ||
| 400 | * Send an interrupt to the host to inform it that used | ||
| 401 | * rings have been re-assigned. | ||
| 402 | */ | ||
| 403 | vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db); | ||
| 404 | for (retry = 100; --retry;) { | ||
| 405 | if (!ioread8(&dc->used_address_updated)) | ||
| 406 | break; | ||
| 407 | msleep(100); | ||
| 408 | }; | ||
| 409 | |||
| 410 | dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry); | ||
| 411 | if (!retry) { | ||
| 412 | err = -ENODEV; | ||
| 413 | goto error; | ||
| 414 | } | ||
| 415 | |||
| 416 | return 0; | ||
| 417 | error: | ||
| 418 | vop_del_vqs(dev); | ||
| 419 | return err; | ||
| 420 | } | ||
| 421 | |||
/*
 * The config ops structure as defined by virtio config.
 * These callbacks implement the card-side virtio transport over the
 * device descriptor/control pages shared with the host.
 */
static struct virtio_config_ops vop_vq_config_ops = {
	.get_features = vop_get_features,
	.finalize_features = vop_finalize_features,
	.get = vop_get,
	.set = vop_set,
	.get_status = vop_get_status,
	.set_status = vop_set_status,
	.reset = vop_reset,
	.find_vqs = vop_find_vqs,
	.del_vqs = vop_del_vqs,
};
| 436 | |||
| 437 | static irqreturn_t vop_virtio_intr_handler(int irq, void *data) | ||
| 438 | { | ||
| 439 | struct _vop_vdev *vdev = data; | ||
| 440 | struct vop_device *vpdev = vdev->vpdev; | ||
| 441 | struct virtqueue *vq; | ||
| 442 | |||
| 443 | vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db); | ||
| 444 | list_for_each_entry(vq, &vdev->vdev.vqs, list) | ||
| 445 | vring_interrupt(0, vq); | ||
| 446 | |||
| 447 | return IRQ_HANDLED; | ||
| 448 | } | ||
| 449 | |||
/*
 * Release callback for the virtio device.
 *
 * No need for a release method similar to virtio PCI.
 * Provide an empty one to avoid getting a warning from core.
 * Note: struct _vop_vdev is freed explicitly in the remove path,
 * not here.
 */
static void vop_virtio_release_dev(struct device *_d)
{
}
| 457 | |||
/*
 * adds a new device and register it with virtio
 * appropriate drivers are loaded by the device model
 *
 * @d:      iomem pointer to the device descriptor in the device page
 * @offset: byte offset of @d within the device page (used for logging)
 * @vpdev:  the underlying VOP bus device
 * @dnode:  node id stored in the vdev for later use
 *
 * Return: 0 on success, negative errno on failure.
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
			   unsigned int offset, struct vop_device *vpdev,
			   int dnode)
{
	struct _vop_vdev *vdev;
	int ret;
	u8 type = ioread8(&d->type);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	vdev->vpdev = vpdev;
	vdev->vdev.dev.parent = &vpdev->dev;
	vdev->vdev.dev.release = vop_virtio_release_dev;
	vdev->vdev.id.device = type;
	vdev->vdev.config = &vop_vq_config_ops;
	vdev->desc = d;
	/* The control page follows the (aligned) descriptor in memory */
	vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
	vdev->dnode = dnode;
	vdev->vdev.priv = (void *)(u64)dnode;
	init_completion(&vdev->reset_done);

	/* Grab a doorbell and hook up the virtqueue interrupt handler */
	vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
			vop_virtio_intr_handler, "virtio intr",
			vdev, vdev->h2c_vdev_db);
	if (IS_ERR(vdev->virtio_cookie)) {
		ret = PTR_ERR(vdev->virtio_cookie);
		goto kfree;
	}
	/* Exchange doorbell numbers with the host via the control page */
	iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
	vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

	ret = register_virtio_device(&vdev->vdev);
	if (ret) {
		dev_err(_vop_dev(vdev),
			"Failed to register vop device %u type %u\n",
			offset, type);
		goto free_irq;
	}
	/*
	 * Store the vdev pointer in the control page so the hot-remove
	 * and config-change paths can recover it via readq(&dc->vdev).
	 */
	writeq((u64)vdev, &vdev->dc->vdev);
	dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
		__func__, offset, type, vdev);

	return 0;

free_irq:
	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
	kfree(vdev);
	return ret;
}
| 515 | |||
| 516 | /* | ||
| 517 | * match for a vop device with a specific desc pointer | ||
| 518 | */ | ||
| 519 | static int vop_match_desc(struct device *dev, void *data) | ||
| 520 | { | ||
| 521 | struct virtio_device *_dev = dev_to_virtio(dev); | ||
| 522 | struct _vop_vdev *vdev = to_vopvdev(_dev); | ||
| 523 | |||
| 524 | return vdev->desc == (void __iomem *)data; | ||
| 525 | } | ||
| 526 | |||
| 527 | static void _vop_handle_config_change(struct mic_device_desc __iomem *d, | ||
| 528 | unsigned int offset, | ||
| 529 | struct vop_device *vpdev) | ||
| 530 | { | ||
| 531 | struct mic_device_ctrl __iomem *dc | ||
| 532 | = (void __iomem *)d + _vop_aligned_desc_size(d); | ||
| 533 | struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev); | ||
| 534 | |||
| 535 | if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED) | ||
| 536 | return; | ||
| 537 | |||
| 538 | dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__); | ||
| 539 | virtio_config_changed(&vdev->vdev); | ||
| 540 | iowrite8(1, &dc->guest_ack); | ||
| 541 | } | ||
| 542 | |||
/*
 * removes a virtio device if a hot remove event has been
 * requested by the host.
 *
 * Return: 0 when the device was removed, -1 when no remove was
 * requested by the host.
 */
static int _vop_remove_device(struct mic_device_desc __iomem *d,
			      unsigned int offset, struct vop_device *vpdev)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + _vop_aligned_desc_size(d);
	/* Pointer stored by _vop_add_device() at registration time */
	struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
	u8 status;
	int ret = -1;

	if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
		dev_dbg(&vpdev->dev,
			"%s %d config_change %d type %d vdev %p\n",
			__func__, __LINE__,
			ioread8(&dc->config_change), ioread8(&d->type), vdev);
		/* Snapshot status before unregister tears the device down */
		status = ioread8(&d->status);
		reinit_completion(&vdev->reset_done);
		unregister_virtio_device(&vdev->vdev);
		vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
		iowrite8(-1, &dc->h2c_vdev_db);
		/*
		 * Only a driver that was running (DRIVER_OK) will go
		 * through vop_reset(); wait for that reset to complete.
		 */
		if (status & VIRTIO_CONFIG_S_DRIVER_OK)
			wait_for_completion(&vdev->reset_done);
		kfree(vdev);
		/* Ack the removal so the host can reuse the descriptor */
		iowrite8(1, &dc->guest_ack);
		dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
			__func__, __LINE__, ioread8(&dc->guest_ack));
		/* Mark the descriptor slot as deleted (-1) */
		iowrite8(-1, &d->type);
		ret = 0;
	}
	return ret;
}
| 577 | |||
| 578 | #define REMOVE_DEVICES true | ||
| 579 | |||
/*
 * Walk the device page looking for descriptor changes: handle config
 * changes and hot removes for devices that already exist, and add new
 * devices (unless @remove is set, in which case existing devices are
 * asked to remove themselves and no new ones are created).
 */
static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
			      bool remove, int dnode)
{
	s8 type;
	unsigned int i;
	struct mic_device_desc __iomem *d;
	struct mic_device_ctrl __iomem *dc;
	struct device *dev;
	int ret;

	/* Descriptors start after the bootparam header and are variable sized */
	for (i = sizeof(struct mic_bootparam);
	     i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
		d = dp + i;
		dc = (void __iomem *)d + _vop_aligned_desc_size(d);
		/*
		 * This read barrier is paired with the corresponding write
		 * barrier on the host which is inserted before adding or
		 * removing a virtio device descriptor, by updating the type.
		 */
		rmb();
		type = ioread8(&d->type);

		/* end of list */
		if (type == 0)
			break;

		/* type == -1 marks a deleted descriptor slot; skip it */
		if (type == -1)
			continue;

		/* device already exists */
		dev = device_find_child(&vpdev->dev, (void __force *)d,
					vop_match_desc);
		if (dev) {
			if (remove)
				iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
					 &dc->config_change);
			put_device(dev);
			_vop_handle_config_change(d, i, vpdev);
			ret = _vop_remove_device(d, i, vpdev);
			if (remove) {
				/* Clear the handshake fields after removal */
				iowrite8(0, &dc->config_change);
				iowrite8(0, &dc->guest_ack);
			}
			continue;
		}

		/* new device */
		dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
			__func__, __LINE__, d);
		if (!remove)
			_vop_add_device(d, i, vpdev, dnode);
	}
}
| 633 | |||
| 634 | static void vop_scan_devices(struct vop_info *vi, | ||
| 635 | struct vop_device *vpdev, bool remove) | ||
| 636 | { | ||
| 637 | void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev); | ||
| 638 | |||
| 639 | if (!dp) | ||
| 640 | return; | ||
| 641 | mutex_lock(&vi->vop_mutex); | ||
| 642 | _vop_scan_devices(dp, vpdev, remove, vpdev->dnode); | ||
| 643 | mutex_unlock(&vi->vop_mutex); | ||
| 644 | } | ||
| 645 | |||
| 646 | /* | ||
| 647 | * vop_hotplug_device tries to find changes in the device page. | ||
| 648 | */ | ||
| 649 | static void vop_hotplug_devices(struct work_struct *work) | ||
| 650 | { | ||
| 651 | struct vop_info *vi = container_of(work, struct vop_info, | ||
| 652 | hotplug_work); | ||
| 653 | |||
| 654 | vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES); | ||
| 655 | } | ||
| 656 | |||
| 657 | /* | ||
| 658 | * Interrupt handler for hot plug/config changes etc. | ||
| 659 | */ | ||
| 660 | static irqreturn_t vop_extint_handler(int irq, void *data) | ||
| 661 | { | ||
| 662 | struct vop_info *vi = data; | ||
| 663 | struct mic_bootparam __iomem *bp; | ||
| 664 | struct vop_device *vpdev = vi->vpdev; | ||
| 665 | |||
| 666 | bp = vpdev->hw_ops->get_remote_dp(vpdev); | ||
| 667 | dev_dbg(&vpdev->dev, "%s %d hotplug work\n", | ||
| 668 | __func__, __LINE__); | ||
| 669 | vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db)); | ||
| 670 | schedule_work(&vi->hotplug_work); | ||
| 671 | return IRQ_HANDLED; | ||
| 672 | } | ||
| 673 | |||
/*
 * Probe a VOP bus device.  The role depends on dnode: a non-zero dnode
 * means we are on the host side (vop_host_init), otherwise this is the
 * card side, which scans the device page and registers a doorbell for
 * config-change interrupts from the host.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int vop_driver_probe(struct vop_device *vpdev)
{
	struct vop_info *vi;
	int rc;

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi) {
		rc = -ENOMEM;
		goto exit;
	}
	dev_set_drvdata(&vpdev->dev, vi);
	vi->vpdev = vpdev;

	mutex_init(&vi->vop_mutex);
	INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
	if (vpdev->dnode) {
		/* Host side */
		rc = vop_host_init(vi);
		if (rc < 0)
			goto free;
	} else {
		/* Card side */
		struct mic_bootparam __iomem *bootparam;

		vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

		/* Get a doorbell for host-initiated config interrupts */
		vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
		vi->cookie = vpdev->hw_ops->request_irq(vpdev,
							vop_extint_handler,
							"virtio_config_intr",
							vi, vi->h2c_config_db);
		if (IS_ERR(vi->cookie)) {
			rc = PTR_ERR(vi->cookie);
			goto free;
		}
		/* Publish the doorbell number to the host via bootparam */
		bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
		iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
	}
	vop_init_debugfs(vi);
	return 0;
free:
	kfree(vi);
exit:
	return rc;
}
| 717 | |||
/*
 * Remove a VOP bus device, undoing vop_driver_probe().  Host side just
 * uninitializes; card side disables the config doorbell, flushes any
 * pending hotplug work and removes all virtio devices from the page.
 */
static void vop_driver_remove(struct vop_device *vpdev)
{
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

	if (vpdev->dnode) {
		/* Host side */
		vop_host_uninit(vi);
	} else {
		/* Card side */
		struct mic_bootparam __iomem *bootparam =
			vpdev->hw_ops->get_remote_dp(vpdev);
		/* Invalidate the config doorbell so the host stops ringing it */
		if (bootparam)
			iowrite8(-1, &bootparam->h2c_config_db);
		vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
		/* No new work can be scheduled after the irq is freed */
		flush_work(&vi->hotplug_work);
		vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
	}
	vop_exit_debugfs(vi);
	kfree(vi);
}
| 736 | |||
/* VOP bus device ids this driver binds to; zero entry terminates. */
static struct vop_device_id id_table[] = {
	{ VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
	{ 0 },
};
| 741 | |||
/* VOP bus driver registration (see module_vop_driver() below). */
static struct vop_driver vop_driver = {
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table = id_table,
	.probe = vop_driver_probe,
	.remove = vop_driver_remove,
};
| 749 | |||
| 750 | module_vop_driver(vop_driver); | ||
| 751 | |||
| 752 | MODULE_DEVICE_TABLE(mbus, id_table); | ||
| 753 | MODULE_AUTHOR("Intel Corporation"); | ||
| 754 | MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver"); | ||
| 755 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/vop/vop_main.h index a80631f2790d..ba47ec7a6386 100644 --- a/drivers/misc/mic/host/mic_virtio.h +++ b/drivers/misc/mic/vop/vop_main.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Intel MIC Platform Software Stack (MPSS) | 2 | * Intel MIC Platform Software Stack (MPSS) |
| 3 | * | 3 | * |
| 4 | * Copyright(c) 2013 Intel Corporation. | 4 | * Copyright(c) 2016 Intel Corporation. |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License, version 2, as | 7 | * it under the terms of the GNU General Public License, version 2, as |
| @@ -15,14 +15,21 @@ | |||
| 15 | * The full GNU General Public License is included in this distribution in | 15 | * The full GNU General Public License is included in this distribution in |
| 16 | * the file called "COPYING". | 16 | * the file called "COPYING". |
| 17 | * | 17 | * |
| 18 | * Intel MIC Host driver. | 18 | * Intel Virtio Over PCIe (VOP) driver. |
| 19 | * | 19 | * |
| 20 | */ | 20 | */ |
| 21 | #ifndef MIC_VIRTIO_H | 21 | #ifndef _VOP_MAIN_H_ |
| 22 | #define MIC_VIRTIO_H | 22 | #define _VOP_MAIN_H_ |
| 23 | 23 | ||
| 24 | #include <linux/vringh.h> | ||
| 24 | #include <linux/virtio_config.h> | 25 | #include <linux/virtio_config.h> |
| 25 | #include <linux/mic_ioctl.h> | 26 | #include <linux/virtio.h> |
| 27 | #include <linux/miscdevice.h> | ||
| 28 | |||
| 29 | #include <linux/mic_common.h> | ||
| 30 | #include "../common/mic_dev.h" | ||
| 31 | |||
| 32 | #include "../bus/vop_bus.h" | ||
| 26 | 33 | ||
| 27 | /* | 34 | /* |
| 28 | * Note on endianness. | 35 | * Note on endianness. |
| @@ -39,38 +46,68 @@ | |||
| 39 | * in guest endianness. | 46 | * in guest endianness. |
| 40 | */ | 47 | */ |
| 41 | 48 | ||
| 49 | /* | ||
| 50 | * vop_info - Allocated per invocation of VOP probe | ||
| 51 | * | ||
| 52 | * @vpdev: VOP device | ||
| 53 | * @hotplug_work: Handle virtio device creation, deletion and configuration | ||
| 54 | * @cookie: Cookie received upon requesting a virtio configuration interrupt | ||
| 55 | * @h2c_config_db: The doorbell used by the peer to indicate a config change | ||
| 56 | * @vdev_list: List of "active" virtio devices injected in the peer node | ||
| 57 | * @vop_mutex: Synchronize access to the device page as well as serialize | ||
| 58 | * creation/deletion of virtio devices on the peer node | ||
| 59 | * @dp: Peer device page information | ||
| 60 | * @dbg: Debugfs entry | ||
| 61 | * @dma_ch: The DMA channel used by this transport for data transfers. | ||
| 62 | * @name: Name for this transport used in misc device creation. | ||
| 63 | * @miscdev: The misc device registered. | ||
| 64 | */ | ||
| 65 | struct vop_info { | ||
| 66 | struct vop_device *vpdev; | ||
| 67 | struct work_struct hotplug_work; | ||
| 68 | struct mic_irq *cookie; | ||
| 69 | int h2c_config_db; | ||
| 70 | struct list_head vdev_list; | ||
| 71 | struct mutex vop_mutex; | ||
| 72 | void __iomem *dp; | ||
| 73 | struct dentry *dbg; | ||
| 74 | struct dma_chan *dma_ch; | ||
| 75 | char name[16]; | ||
| 76 | struct miscdevice miscdev; | ||
| 77 | }; | ||
| 78 | |||
| 42 | /** | 79 | /** |
| 43 | * struct mic_vringh - Virtio ring host information. | 80 | * struct vop_vringh - Virtio ring host information. |
| 44 | * | 81 | * |
| 45 | * @vring: The MIC vring used for setting up user space mappings. | 82 | * @vring: The VOP vring used for setting up user space mappings. |
| 46 | * @vrh: The host VRINGH used for accessing the card vrings. | 83 | * @vrh: The host VRINGH used for accessing the card vrings. |
| 47 | * @riov: The VRINGH read kernel IOV. | 84 | * @riov: The VRINGH read kernel IOV. |
| 48 | * @wiov: The VRINGH write kernel IOV. | 85 | * @wiov: The VRINGH write kernel IOV. |
| 86 | * @head: The VRINGH head index address passed to vringh_getdesc_kern(..). | ||
| 49 | * @vr_mutex: Mutex for synchronizing access to the VRING. | 87 | * @vr_mutex: Mutex for synchronizing access to the VRING. |
| 50 | * @buf: Temporary kernel buffer used to copy in/out data | 88 | * @buf: Temporary kernel buffer used to copy in/out data |
| 51 | * from/to the card via DMA. | 89 | * from/to the card via DMA. |
| 52 | * @buf_da: dma address of buf. | 90 | * @buf_da: dma address of buf. |
| 53 | * @mvdev: Back pointer to MIC virtio device for vringh_notify(..). | 91 | * @vdev: Back pointer to VOP virtio device for vringh_notify(..). |
| 54 | * @head: The VRINGH head index address passed to vringh_getdesc_kern(..). | ||
| 55 | */ | 92 | */ |
| 56 | struct mic_vringh { | 93 | struct vop_vringh { |
| 57 | struct mic_vring vring; | 94 | struct mic_vring vring; |
| 58 | struct vringh vrh; | 95 | struct vringh vrh; |
| 59 | struct vringh_kiov riov; | 96 | struct vringh_kiov riov; |
| 60 | struct vringh_kiov wiov; | 97 | struct vringh_kiov wiov; |
| 98 | u16 head; | ||
| 61 | struct mutex vr_mutex; | 99 | struct mutex vr_mutex; |
| 62 | void *buf; | 100 | void *buf; |
| 63 | dma_addr_t buf_da; | 101 | dma_addr_t buf_da; |
| 64 | struct mic_vdev *mvdev; | 102 | struct vop_vdev *vdev; |
| 65 | u16 head; | ||
| 66 | }; | 103 | }; |
| 67 | 104 | ||
| 68 | /** | 105 | /** |
| 69 | * struct mic_vdev - Host information for a card Virtio device. | 106 | * struct vop_vdev - Host information for a card Virtio device. |
| 70 | * | 107 | * |
| 71 | * @virtio_id - Virtio device id. | 108 | * @virtio_id - Virtio device id. |
| 72 | * @waitq - Waitqueue to allow ring3 apps to poll. | 109 | * @waitq - Waitqueue to allow ring3 apps to poll. |
| 73 | * @mdev - Back pointer to host MIC device. | 110 | * @vpdev - pointer to VOP bus device. |
| 74 | * @poll_wake - Used for waking up threads blocked in poll. | 111 | * @poll_wake - Used for waking up threads blocked in poll. |
| 75 | * @out_bytes - Debug stats for number of bytes copied from host to card. | 112 | * @out_bytes - Debug stats for number of bytes copied from host to card. |
| 76 | * @in_bytes - Debug stats for number of bytes copied from card to host. | 113 | * @in_bytes - Debug stats for number of bytes copied from card to host. |
| @@ -82,18 +119,23 @@ struct mic_vringh { | |||
| 82 | * the transfer length did not have the required DMA alignment. | 119 | * the transfer length did not have the required DMA alignment. |
| 83 | * @tx_dst_unaligned - Debug stats for number of bytes copied where the | 120 | * @tx_dst_unaligned - Debug stats for number of bytes copied where the |
| 84 | * destination address on the card did not have the required DMA alignment. | 121 | * destination address on the card did not have the required DMA alignment. |
| 85 | * @mvr - Store per VRING data structures. | 122 | * @vvr - Store per VRING data structures. |
| 86 | * @virtio_bh_work - Work struct used to schedule virtio bottom half handling. | 123 | * @virtio_bh_work - Work struct used to schedule virtio bottom half handling. |
| 87 | * @dd - Virtio device descriptor. | 124 | * @dd - Virtio device descriptor. |
| 88 | * @dc - Virtio device control fields. | 125 | * @dc - Virtio device control fields. |
| 89 | * @list - List of Virtio devices. | 126 | * @list - List of Virtio devices. |
| 90 | * @virtio_db - The doorbell used by the card to interrupt the host. | 127 | * @virtio_db - The doorbell used by the card to interrupt the host. |
| 91 | * @virtio_cookie - The cookie returned while requesting interrupts. | 128 | * @virtio_cookie - The cookie returned while requesting interrupts. |
| 129 | * @vi: Transport information. | ||
| 130 | * @vdev_mutex: Mutex synchronizing virtio device injection, | ||
| 131 | * removal and data transfers. | ||
| 132 | * @destroy: Track if a virtio device is being destroyed. | ||
| 133 | * @deleted: The virtio device has been deleted. | ||
| 92 | */ | 134 | */ |
| 93 | struct mic_vdev { | 135 | struct vop_vdev { |
| 94 | int virtio_id; | 136 | int virtio_id; |
| 95 | wait_queue_head_t waitq; | 137 | wait_queue_head_t waitq; |
| 96 | struct mic_device *mdev; | 138 | struct vop_device *vpdev; |
| 97 | int poll_wake; | 139 | int poll_wake; |
| 98 | unsigned long out_bytes; | 140 | unsigned long out_bytes; |
| 99 | unsigned long in_bytes; | 141 | unsigned long in_bytes; |
| @@ -101,55 +143,28 @@ struct mic_vdev { | |||
| 101 | unsigned long in_bytes_dma; | 143 | unsigned long in_bytes_dma; |
| 102 | unsigned long tx_len_unaligned; | 144 | unsigned long tx_len_unaligned; |
| 103 | unsigned long tx_dst_unaligned; | 145 | unsigned long tx_dst_unaligned; |
| 104 | struct mic_vringh mvr[MIC_MAX_VRINGS]; | 146 | unsigned long rx_dst_unaligned; |
| 147 | struct vop_vringh vvr[MIC_MAX_VRINGS]; | ||
| 105 | struct work_struct virtio_bh_work; | 148 | struct work_struct virtio_bh_work; |
| 106 | struct mic_device_desc *dd; | 149 | struct mic_device_desc *dd; |
| 107 | struct mic_device_ctrl *dc; | 150 | struct mic_device_ctrl *dc; |
| 108 | struct list_head list; | 151 | struct list_head list; |
| 109 | int virtio_db; | 152 | int virtio_db; |
| 110 | struct mic_irq *virtio_cookie; | 153 | struct mic_irq *virtio_cookie; |
| 154 | struct vop_info *vi; | ||
| 155 | struct mutex vdev_mutex; | ||
| 156 | struct completion destroy; | ||
| 157 | bool deleted; | ||
| 111 | }; | 158 | }; |
| 112 | 159 | ||
| 113 | void mic_virtio_uninit(struct mic_device *mdev); | ||
| 114 | int mic_virtio_add_device(struct mic_vdev *mvdev, | ||
| 115 | void __user *argp); | ||
| 116 | void mic_virtio_del_device(struct mic_vdev *mvdev); | ||
| 117 | int mic_virtio_config_change(struct mic_vdev *mvdev, | ||
| 118 | void __user *argp); | ||
| 119 | int mic_virtio_copy_desc(struct mic_vdev *mvdev, | ||
| 120 | struct mic_copy_desc *request); | ||
| 121 | void mic_virtio_reset_devices(struct mic_device *mdev); | ||
| 122 | void mic_bh_handler(struct work_struct *work); | ||
| 123 | |||
| 124 | /* Helper API to obtain the MIC PCIe device */ | ||
| 125 | static inline struct device *mic_dev(struct mic_vdev *mvdev) | ||
| 126 | { | ||
| 127 | return &mvdev->mdev->pdev->dev; | ||
| 128 | } | ||
| 129 | |||
| 130 | /* Helper API to check if a virtio device is initialized */ | ||
| 131 | static inline int mic_vdev_inited(struct mic_vdev *mvdev) | ||
| 132 | { | ||
| 133 | /* Device has not been created yet */ | ||
| 134 | if (!mvdev->dd || !mvdev->dd->type) { | ||
| 135 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 136 | __func__, __LINE__, -EINVAL); | ||
| 137 | return -EINVAL; | ||
| 138 | } | ||
| 139 | |||
| 140 | /* Device has been removed/deleted */ | ||
| 141 | if (mvdev->dd->type == -1) { | ||
| 142 | dev_err(mic_dev(mvdev), "%s %d err %d\n", | ||
| 143 | __func__, __LINE__, -ENODEV); | ||
| 144 | return -ENODEV; | ||
| 145 | } | ||
| 146 | |||
| 147 | return 0; | ||
| 148 | } | ||
| 149 | |||
| 150 | /* Helper API to check if a virtio device is running */ | 160 | /* Helper API to check if a virtio device is running */ |
| 151 | static inline bool mic_vdevup(struct mic_vdev *mvdev) | 161 | static inline bool vop_vdevup(struct vop_vdev *vdev) |
| 152 | { | 162 | { |
| 153 | return !!mvdev->dd->status; | 163 | return !!vdev->dd->status; |
| 154 | } | 164 | } |
| 165 | |||
| 166 | void vop_init_debugfs(struct vop_info *vi); | ||
| 167 | void vop_exit_debugfs(struct vop_info *vi); | ||
| 168 | int vop_host_init(struct vop_info *vi); | ||
| 169 | void vop_host_uninit(struct vop_info *vi); | ||
| 155 | #endif | 170 | #endif |
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c new file mode 100644 index 000000000000..e94c7fb6712a --- /dev/null +++ b/drivers/misc/mic/vop/vop_vringh.c | |||
| @@ -0,0 +1,1165 @@ | |||
| 1 | /* | ||
| 2 | * Intel MIC Platform Software Stack (MPSS) | ||
| 3 | * | ||
| 4 | * Copyright(c) 2016 Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License, version 2, as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | * | ||
| 15 | * The full GNU General Public License is included in this distribution in | ||
| 16 | * the file called "COPYING". | ||
| 17 | * | ||
| 18 | * Intel Virtio Over PCIe (VOP) driver. | ||
| 19 | * | ||
| 20 | */ | ||
| 21 | #include <linux/sched.h> | ||
| 22 | #include <linux/poll.h> | ||
| 23 | #include <linux/dma-mapping.h> | ||
| 24 | |||
| 25 | #include <linux/mic_common.h> | ||
| 26 | #include "../common/mic_dev.h" | ||
| 27 | |||
| 28 | #include <linux/mic_ioctl.h> | ||
| 29 | #include "vop_main.h" | ||
| 30 | |||
| 31 | /* Helper API to obtain the VOP PCIe device */ | ||
| 32 | static inline struct device *vop_dev(struct vop_vdev *vdev) | ||
| 33 | { | ||
| 34 | return vdev->vpdev->dev.parent; | ||
| 35 | } | ||
| 36 | |||
/*
 * Helper API to check if a virtio device is initialized.
 *
 * Returns 0 on success, -EINVAL if @vdev is NULL or the device has not
 * been created yet, and -ENODEV if the device has been removed (a
 * descriptor type of -1 marks deletion).
 */
static inline int vop_vdev_inited(struct vop_vdev *vdev)
{
	if (!vdev)
		return -EINVAL;
	/* Device has not been created yet */
	if (!vdev->dd || !vdev->dd->type) {
		dev_err(vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}
	/* Device has been removed/deleted */
	if (vdev->dd->type == -1) {
		dev_dbg(vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, -ENODEV);
		return -ENODEV;
	}
	return 0;
}
| 56 | |||
| 57 | static void _vop_notify(struct vringh *vrh) | ||
| 58 | { | ||
| 59 | struct vop_vringh *vvrh = container_of(vrh, struct vop_vringh, vrh); | ||
| 60 | struct vop_vdev *vdev = vvrh->vdev; | ||
| 61 | struct vop_device *vpdev = vdev->vpdev; | ||
| 62 | s8 db = vdev->dc->h2c_vdev_db; | ||
| 63 | |||
| 64 | if (db != -1) | ||
| 65 | vpdev->hw_ops->send_intr(vpdev, db); | ||
| 66 | } | ||
| 67 | |||
| 68 | static void vop_virtio_init_post(struct vop_vdev *vdev) | ||
| 69 | { | ||
| 70 | struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd); | ||
| 71 | struct vop_device *vpdev = vdev->vpdev; | ||
| 72 | int i, used_size; | ||
| 73 | |||
| 74 | for (i = 0; i < vdev->dd->num_vq; i++) { | ||
| 75 | used_size = PAGE_ALIGN(sizeof(u16) * 3 + | ||
| 76 | sizeof(struct vring_used_elem) * | ||
| 77 | le16_to_cpu(vqconfig->num)); | ||
| 78 | if (!le64_to_cpu(vqconfig[i].used_address)) { | ||
| 79 | dev_warn(vop_dev(vdev), "used_address zero??\n"); | ||
| 80 | continue; | ||
| 81 | } | ||
| 82 | vdev->vvr[i].vrh.vring.used = | ||
| 83 | (void __force *)vpdev->hw_ops->ioremap( | ||
| 84 | vpdev, | ||
| 85 | le64_to_cpu(vqconfig[i].used_address), | ||
| 86 | used_size); | ||
| 87 | } | ||
| 88 | |||
| 89 | vdev->dc->used_address_updated = 0; | ||
| 90 | |||
| 91 | dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n", | ||
| 92 | __func__, vdev->virtio_id); | ||
| 93 | } | ||
| 94 | |||
/*
 * Reset a virtio device: take every vring mutex, zero the device status
 * (0 status means "reset"), ack the reset to the guest via host_ack and
 * rewind all vring indices to their pristine state.
 */
static inline void vop_virtio_device_reset(struct vop_vdev *vdev)
{
	int i;

	dev_dbg(vop_dev(vdev), "%s: status %d device type %d RESET\n",
		__func__, vdev->dd->status, vdev->virtio_id);

	for (i = 0; i < vdev->dd->num_vq; i++)
		/*
		 * Avoid lockdep false positive. The + 1 is for the vop
		 * mutex which is held in the reset devices code path.
		 */
		mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);

	/* 0 status means "reset" */
	vdev->dd->status = 0;
	vdev->dc->vdev_reset = 0;
	vdev->dc->host_ack = 1;

	/* Rewind each vring so the next driver start sees empty rings */
	for (i = 0; i < vdev->dd->num_vq; i++) {
		struct vringh *vrh = &vdev->vvr[i].vrh;

		vdev->vvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}

	for (i = 0; i < vdev->dd->num_vq; i++)
		mutex_unlock(&vdev->vvr[i].vr_mutex);
}
| 126 | |||
/*
 * Reset every virtio device registered with this VOP instance and wake
 * all pollers so user space notices the state change.
 * NOTE(review): per the comment in vop_virtio_device_reset(), the vop
 * mutex is expected to be held on this path — confirm at the caller.
 */
static void vop_virtio_reset_devices(struct vop_info *vi)
{
	struct list_head *pos, *tmp;
	struct vop_vdev *vdev;

	list_for_each_safe(pos, tmp, &vi->vdev_list) {
		vdev = list_entry(pos, struct vop_vdev, list);
		vop_virtio_device_reset(vdev);
		vdev->poll_wake = 1;
		wake_up(&vdev->waitq);
	}
}
| 139 | |||
| 140 | static void vop_bh_handler(struct work_struct *work) | ||
| 141 | { | ||
| 142 | struct vop_vdev *vdev = container_of(work, struct vop_vdev, | ||
| 143 | virtio_bh_work); | ||
| 144 | |||
| 145 | if (vdev->dc->used_address_updated) | ||
| 146 | vop_virtio_init_post(vdev); | ||
| 147 | |||
| 148 | if (vdev->dc->vdev_reset) | ||
| 149 | vop_virtio_device_reset(vdev); | ||
| 150 | |||
| 151 | vdev->poll_wake = 1; | ||
| 152 | wake_up(&vdev->waitq); | ||
| 153 | } | ||
| 154 | |||
| 155 | static irqreturn_t _vop_virtio_intr_handler(int irq, void *data) | ||
| 156 | { | ||
| 157 | struct vop_vdev *vdev = data; | ||
| 158 | struct vop_device *vpdev = vdev->vpdev; | ||
| 159 | |||
| 160 | vpdev->hw_ops->ack_interrupt(vpdev, vdev->virtio_db); | ||
| 161 | schedule_work(&vdev->virtio_bh_work); | ||
| 162 | return IRQ_HANDLED; | ||
| 163 | } | ||
| 164 | |||
/*
 * Push a new virtio config space to the card: copy @argp into the
 * shared config space, interrupt the card and wait for the guest to
 * ack the change.
 *
 * Returns -EIO if no config doorbell is assigned or the device has
 * been removed; otherwise returns the last wait_event_timeout() result.
 * NOTE(review): the on-stack wait queue is never woken by anyone, so
 * the retry loop is effectively a 100 x 100ms poll of dc->guest_ack —
 * confirm this is intentional.
 */
static int vop_virtio_config_change(struct vop_vdev *vdev, void *argp)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry, i;
	struct vop_device *vpdev = vdev->vpdev;
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
	struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
	s8 db = bootparam->h2c_config_db;

	/* Take the vop mutex then every vring mutex (nested, offset +1) */
	mutex_lock(&vi->vop_mutex);
	for (i = 0; i < vdev->dd->num_vq; i++)
		mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);

	if (db == -1 || vdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}

	memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len);
	vdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	vpdev->hw_ops->send_intr(vpdev, db);

	/* Wait up to ~10s (100 x 100ms) for the guest to ack */
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake, vdev->dc->guest_ack,
					 msecs_to_jiffies(100));
		if (ret)
			break;
	}

	dev_dbg(vop_dev(vdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	vdev->dc->config_change = 0;
	vdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < vdev->dd->num_vq; i++)
		mutex_unlock(&vdev->vvr[i].vr_mutex);
	mutex_unlock(&vi->vop_mutex);
	return ret;
}
| 204 | |||
/*
 * Copy a new device descriptor into the first free slot of the shared
 * device page.
 *
 * @vdev:    host-side device instance (used for error logging)
 * @argp:    descriptor supplied by user space; its type field is
 *           cleared here and restored by the caller after full init
 * @type:    out - the original descriptor type saved off for the caller
 * @devpage: out - pointer to the slot used in the device page
 *
 * Returns 0 on success, -EINVAL if any ring is too large or no free
 * slot exists.
 */
static int vop_copy_dp_entry(struct vop_vdev *vdev,
			     struct mic_device_desc *argp, __u8 *type,
			     struct mic_device_desc **devpage)
{
	struct vop_device *vpdev = vdev->vpdev;
	struct mic_device_desc *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;

	/* Reject descriptors whose rings exceed the supported maximum */
	vqconfig = mic_vq_config(argp);
	for (i = 0; i < argp->num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret =  -EINVAL;
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}

	/* Find the first free device page entry */
	for (i = sizeof(struct mic_bootparam);
		i < MIC_DP_SIZE - mic_total_desc_size(argp);
		i += mic_total_desc_size(devp)) {
		devp = vpdev->hw_ops->get_dp(vpdev) + i;
		/* type 0 = never used, type -1 = previously deleted */
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret =  -EINVAL;
		dev_err(vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/*
	 * Save off the type before doing the memcpy. Type will be set in the
	 * end after completing all initialization for the new device.
	 */
	*type = argp->type;
	argp->type = 0;
	memcpy(devp, argp, mic_desc_size(argp));

	*devpage = devp;
exit:
	return ret;
}
| 253 | |||
| 254 | static void vop_init_device_ctrl(struct vop_vdev *vdev, | ||
| 255 | struct mic_device_desc *devpage) | ||
| 256 | { | ||
| 257 | struct mic_device_ctrl *dc; | ||
| 258 | |||
| 259 | dc = (void *)devpage + mic_aligned_desc_size(devpage); | ||
| 260 | |||
| 261 | dc->config_change = 0; | ||
| 262 | dc->guest_ack = 0; | ||
| 263 | dc->vdev_reset = 0; | ||
| 264 | dc->host_ack = 0; | ||
| 265 | dc->used_address_updated = 0; | ||
| 266 | dc->c2h_vdev_db = -1; | ||
| 267 | dc->h2c_vdev_db = -1; | ||
| 268 | vdev->dc = dc; | ||
| 269 | } | ||
| 270 | |||
/*
 * Create a new host-side virtio device: copy its descriptor into the
 * shared device page, allocate and DMA-map one vring plus an internal
 * DMA bounce buffer per virtqueue, hook up the doorbell interrupt and
 * finally publish the device type so the card can discover it.
 *
 * Returns 0 on success, negative errno on failure.  NOTE: if the
 * descriptor copy fails, @vdev is freed here and must not be touched
 * by the caller.
 */
static int vop_virtio_add_device(struct vop_vdev *vdev,
				 struct mic_device_desc *argp)
{
	struct vop_info *vi = vdev->vi;
	struct vop_device *vpdev = vi->vpdev;
	struct mic_device_desc *dd = NULL;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type = 0;
	s8 db = -1;
	char irqname[16];
	struct mic_bootparam *bootparam;
	u16 num;
	dma_addr_t vr_addr;

	bootparam = vpdev->hw_ops->get_dp(vpdev);
	init_waitqueue_head(&vdev->waitq);
	INIT_LIST_HEAD(&vdev->list);
	vdev->vpdev = vpdev;

	ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
	if (ret) {
		dev_err(vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		/* vdev is owned by us on this failure path */
		kfree(vdev);
		return ret;
	}

	vop_init_device_ctrl(vdev, dd);

	vdev->dd = dd;
	vdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler);

	/* Per-virtqueue setup: vring pages, vringh state, bounce buffer */
	for (i = 0; i < dd->num_vq; i++) {
		struct vop_vringh *vvr = &vdev->vvr[i];
		struct mic_vring *vr = &vdev->vvr[i].vring;

		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&vvr->vr_mutex);
		/* Ring plus the trailing _mic_vring_info, page aligned */
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
			sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
		vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&vpdev->dev, vr_addr)) {
			free_pages((unsigned long)vr->va, get_order(vr_size));
			ret = -ENOMEM;
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		/* Publish the ring's DMA address to the card */
		vqconfig[i].address = cpu_to_le64(vr_addr);

		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&vvr->vrh,
				       *(u32 *)mic_vq_features(vdev->dd),
				       num, false, vr->vr.desc, vr->vr.avail,
				       vr->vr.used);
		if (ret) {
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&vvr->riov, NULL, 0);
		vringh_kiov_init(&vvr->wiov, NULL, 0);
		/* USHRT_MAX marks "no descriptor chain in flight" */
		vvr->head = USHRT_MAX;
		vvr->vdev = vdev;
		vvr->vrh.notify = _vop_notify;
		dev_dbg(&vpdev->dev,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
		/*
		 * NOTE(review): neither this allocation nor the following
		 * dma_map_single() is checked for failure — a NULL buf or
		 * bad buf_da would only surface later in the DMA paths;
		 * verify whether error handling should be added here.
		 */
		vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
						    get_order(VOP_INT_DMA_BUF_SIZE));
		vvr->buf_da = dma_map_single(&vpdev->dev,
					     vvr->buf, VOP_INT_DMA_BUF_SIZE,
					     DMA_BIDIRECTIONAL);
	}

	snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
		 vdev->virtio_id);
	vdev->virtio_db = vpdev->hw_ops->next_db(vpdev);
	vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
			_vop_virtio_intr_handler, irqname, vdev,
			vdev->virtio_db);
	if (IS_ERR(vdev->virtio_cookie)) {
		ret = PTR_ERR(vdev->virtio_cookie);
		dev_dbg(&vpdev->dev, "request irq failed\n");
		goto err;
	}

	vdev->dc->c2h_vdev_db = vdev->virtio_db;

	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	dd->type = type;
	argp->type = type;

	/* Notify the card that a new device appeared, if possible */
	if (bootparam) {
		db = bootparam->h2c_config_db;
		if (db != -1)
			vpdev->hw_ops->send_intr(vpdev, db);
	}
	dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
	return 0;
err:
	/* Unwind only the rings fully set up before the failure */
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct vop_vringh *vvr = &vdev->vvr[j];

		dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
				 vvr->vring.len, DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vvr->vring.va,
			   get_order(vvr->vring.len));
	}
	return ret;
}
| 405 | |||
/*
 * Hot-remove handshake: signal DEV_REMOVE to the card via the config
 * doorbell and wait (up to 15 x 1s polls) for the guest to ack.  If no
 * doorbell is assigned the handshake is skipped.  The config_change and
 * guest_ack fields are cleared in all cases.
 * NOTE(review): the on-stack wait queue is never woken, so the loop is
 * effectively polling devp->guest_ack once per second — confirm.
 */
static void vop_dev_remove(struct vop_info *pvi, struct mic_device_ctrl *devp,
			   struct vop_device *vpdev)
{
	struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
	s8 db;
	int ret, retry;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);

	devp->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	db = bootparam->h2c_config_db;
	if (db != -1)
		vpdev->hw_ops->send_intr(vpdev, db);
	else
		goto done;
	for (retry = 15; retry--;) {
		ret = wait_event_timeout(wake, devp->guest_ack,
					 msecs_to_jiffies(1000));
		if (ret)
			break;
	}
done:
	devp->config_change = 0;
	devp->guest_ack = 0;
}
| 430 | |||
/*
 * Tear down a virtio device: run the hot-remove handshake (when a
 * bootparam page exists), free the doorbell IRQ, flush the bottom
 * half, release each ring's DMA bounce buffer, kiovs and vring pages,
 * then mark the device page slot deleted (type = -1).
 */
static void vop_virtio_del_device(struct vop_vdev *vdev)
{
	struct vop_info *vi = vdev->vi;
	struct vop_device *vpdev = vdev->vpdev;
	int i;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);

	if (!bootparam)
		goto skip_hot_remove;
	vop_dev_remove(vi, vdev->dc, vpdev);
skip_hot_remove:
	vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
	/* No more bottom-half work may run once the IRQ is gone */
	flush_work(&vdev->virtio_bh_work);
	vqconfig = mic_vq_config(vdev->dd);
	for (i = 0; i < vdev->dd->num_vq; i++) {
		struct vop_vringh *vvr = &vdev->vvr[i];

		dma_unmap_single(&vpdev->dev,
				 vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
				 DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vvr->buf,
			   get_order(VOP_INT_DMA_BUF_SIZE));
		vringh_kiov_cleanup(&vvr->riov);
		vringh_kiov_cleanup(&vvr->wiov);
		dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
				 vvr->vring.len, DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vvr->vring.va,
			   get_order(vvr->vring.len));
	}
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	vdev->dd->type = -1;
}
| 470 | |||
/*
 * vop_sync_dma - Wrapper for synchronous DMAs.
 *
 * @vdev - The VOP virtio device instance whose PCIe device owns
 *         the DMA channel.
 * @dst - destination DMA address.
 * @src - source DMA address.
 * @len - size of the transfer.
 *
 * Return 0 on success, negative errno on failure (-EBUSY when no
 * DMA channel is available, -ENOMEM on prep/submit failure, or the
 * dma_sync_wait() result).
 */
static int vop_sync_dma(struct vop_vdev *vdev, dma_addr_t dst, dma_addr_t src,
			size_t len)
{
	int err = 0;
	struct dma_device *ddev;
	struct dma_async_tx_descriptor *tx;
	struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
	struct dma_chan *vop_ch = vi->dma_ch;

	if (!vop_ch) {
		err = -EBUSY;
		goto error;
	}
	ddev = vop_ch->device;
	tx = ddev->device_prep_dma_memcpy(vop_ch, dst, src, len,
		DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		goto error;
	} else {
		dma_cookie_t cookie;

		cookie = tx->tx_submit(tx);
		if (dma_submit_error(cookie)) {
			err = -ENOMEM;
			goto error;
		}
		/* Kick the engine and busy-wait for completion */
		dma_async_issue_pending(vop_ch);
		err = dma_sync_wait(vop_ch, cookie);
	}
error:
	if (err)
		dev_err(&vi->vpdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
	return err;
}
| 518 | |||
| 519 | #define VOP_USE_DMA true | ||
| 520 | |||
/*
 * Initiates the copies across the PCIe bus from card memory to a user
 * space buffer. When transfers are done using DMA, source/destination
 * addresses and transfer length must follow the alignment requirements of
 * the MIC DMA engine.
 *
 * @ubuf   - destination user buffer
 * @len    - number of bytes to copy
 * @daddr  - card-side DMA address of the source
 * @dlen   - length of the card-side buffer (unused on this path)
 * @vr_idx - index of the vring whose bounce buffer is used
 *
 * Returns 0 on success, negative errno on failure.
 */
static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
				   size_t len, u64 daddr, size_t dlen,
				   int vr_idx)
{
	struct vop_device *vpdev = vdev->vpdev;
	void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
	struct vop_vringh *vvr = &vdev->vvr[vr_idx];
	struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
	size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
	bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
	size_t dma_offset, partlen;
	int err;

	/* Compile-time switch: fall back to a plain PIO copy */
	if (!VOP_USE_DMA) {
		if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
			err = -EFAULT;
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, err);
			goto err;
		}
		vdev->in_bytes += len;
		err = 0;
		goto err;
	}

	/* Round the source down to the DMA alignment; skip the slack later */
	dma_offset = daddr - round_down(daddr, dma_alignment);
	daddr -= dma_offset;
	len += dma_offset;
	/*
	 * X100 uses DMA addresses as seen by the card so adding
	 * the aperture base is not required for DMA. However x200
	 * requires DMA addresses to be an offset into the bar so
	 * add the aperture base for x200.
	 */
	if (x200)
		daddr += vpdev->aper->pa;
	/* DMA into the per-ring bounce buffer, then copy_to_user from it */
	while (len) {
		partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
		err = vop_sync_dma(vdev, vvr->buf_da, daddr,
				   ALIGN(partlen, dma_alignment));
		if (err) {
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, err);
			goto err;
		}
		if (copy_to_user(ubuf, vvr->buf + dma_offset,
				 partlen - dma_offset)) {
			err = -EFAULT;
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, err);
			goto err;
		}
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		vdev->in_bytes_dma += partlen;
		vdev->in_bytes += partlen;
		len -= partlen;
		dma_offset = 0;
	}
	err = 0;
err:
	vpdev->hw_ops->iounmap(vpdev, dbuf);
	/* NOTE(review): len has been consumed by the loop, so this debug
	 * print shows the residual (normally 0), not the request size. */
	dev_dbg(vop_dev(vdev),
		"%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
		__func__, ubuf, dbuf, len, vr_idx);
	return err;
}
| 595 | |||
/*
 * Initiates copies across the PCIe bus from a user space buffer to card
 * memory. When transfers are done using DMA, source/destination addresses
 * and transfer length must follow the alignment requirements of the MIC
 * DMA engine.
 *
 * @ubuf   - source user buffer
 * @len    - number of bytes to copy
 * @daddr  - card-side DMA address of the destination
 * @dlen   - size of the card-side buffer, used to check DMA alignment
 * @vr_idx - index of the vring whose bounce buffer is used
 *
 * Returns 0 on success, negative errno on failure.  Falls back to a
 * PIO memcpy when the destination or length is not DMA-aligned.
 */
static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
				     size_t len, u64 daddr, size_t dlen,
				     int vr_idx)
{
	struct vop_device *vpdev = vdev->vpdev;
	void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
	struct vop_vringh *vvr = &vdev->vvr[vr_idx];
	struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
	size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
	bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
	size_t partlen;
	bool dma = VOP_USE_DMA;
	int err = 0;

	/* Unaligned destination or over-long aligned length: use PIO */
	if (daddr & (dma_alignment - 1)) {
		vdev->tx_dst_unaligned += len;
		dma = false;
	} else if (ALIGN(len, dma_alignment) > dlen) {
		vdev->tx_len_unaligned += len;
		dma = false;
	}

	if (!dma)
		goto memcpy;

	/*
	 * X100 uses DMA addresses as seen by the card so adding
	 * the aperture base is not required for DMA. However x200
	 * requires DMA addresses to be an offset into the bar so
	 * add the aperture base for x200.
	 */
	if (x200)
		daddr += vpdev->aper->pa;
	/* copy_from_user into the bounce buffer, then DMA to the card */
	while (len) {
		partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);

		if (copy_from_user(vvr->buf, ubuf, partlen)) {
			err = -EFAULT;
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, err);
			goto err;
		}
		err = vop_sync_dma(vdev, daddr, vvr->buf_da,
				   ALIGN(partlen, dma_alignment));
		if (err) {
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, err);
			goto err;
		}
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		vdev->out_bytes_dma += partlen;
		vdev->out_bytes += partlen;
		len -= partlen;
	}
memcpy:
	/*
	 * We are copying to IO below and should ideally use something
	 * like copy_from_user_toio(..) if it existed.  On the DMA path
	 * len is 0 here, so this copy is a no-op.
	 */
	if (copy_from_user((void __force *)dbuf, ubuf, len)) {
		err = -EFAULT;
		dev_err(vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto err;
	}
	vdev->out_bytes += len;
	err = 0;
err:
	vpdev->hw_ops->iounmap(vpdev, dbuf);
	dev_dbg(vop_dev(vdev),
		"%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n",
		__func__, ubuf, dbuf, len, vr_idx);
	return err;
}
| 678 | |||
| 679 | #define MIC_VRINGH_READ true | ||
| 680 | |||
| 681 | /* Determine the total number of bytes consumed in a VRINGH KIOV */ | ||
| 682 | static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov) | ||
| 683 | { | ||
| 684 | int i; | ||
| 685 | u32 total = iov->consumed; | ||
| 686 | |||
| 687 | for (i = 0; i < iov->i; i++) | ||
| 688 | total += iov->iov[i].iov_len; | ||
| 689 | return total; | ||
| 690 | } | ||
| 691 | |||
/*
 * Use the standard VRINGH infrastructure in the kernel to fetch new
 * descriptors, initiate the copies and update the used ring.
 *
 * Walks the user-supplied iovec array (@copy->iov, @copy->iovcnt),
 * draining read descriptors first and then write descriptors for each
 * user iovec, accumulating the bytes moved in @copy->out_len.  The
 * used ring is only updated when a descriptor chain was in flight,
 * data was moved and the caller asked for it (@copy->update_used).
 *
 * Returns 0 (or a non-negative vringh_getdesc_kern() result when no
 * descriptor is available) on success, negative errno on failure.
 */
static int _vop_virtio_copy(struct vop_vdev *vdev, struct mic_copy_desc *copy)
{
	int ret = 0;
	u32 iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct vop_vringh *vvr = &vdev->vvr[copy->vr_idx];
	struct vringh_kiov *riov = &vvr->riov;
	struct vringh_kiov *wiov = &vvr->wiov;
	struct vringh *vrh = &vvr->vrh;
	u16 *head = &vvr->head;
	struct mic_vring *vr = &vvr->vring;
	size_t len = 0, out_len;

	copy->out_len = 0;
	/* Fetch a new IOVEC if all previous elements have been processed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
					  head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space. */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				ret = -EINVAL;
				dev_err(vop_dev(vdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = vop_vringh_copy(vdev, riov, ubuf, len,
				      MIC_VRINGH_READ, copy->vr_idx, &out_len);
		if (ret) {
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = vop_vringh_copy(vdev, wiov, ubuf, len,
				      !MIC_VRINGH_READ, copy->vr_idx, &out_len);
		if (ret) {
			dev_err(vop_dev(vdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit loop if all elements in KIOVs have been processed. */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available and some data was
	 * copied in/out and the user asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
		u32 total = 0;

		/* Determine the total data consumed */
		total += vop_vringh_iov_consumed(riov);
		total += vop_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update avail idx for user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}
| 837 | |||
| 838 | static inline int vop_verify_copy_args(struct vop_vdev *vdev, | ||
| 839 | struct mic_copy_desc *copy) | ||
| 840 | { | ||
| 841 | if (!vdev || copy->vr_idx >= vdev->dd->num_vq) | ||
| 842 | return -EINVAL; | ||
| 843 | return 0; | ||
| 844 | } | ||
| 845 | |||
/*
 * Copy a specified number of virtio descriptors in a chain.
 * Validates the request, then performs the copy under the vring mutex,
 * refusing with -ENODEV if the device link is not up.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int vop_virtio_copy_desc(struct vop_vdev *vdev,
				struct mic_copy_desc *copy)
{
	int err;
	struct vop_vringh *vvr;

	err = vop_verify_copy_args(vdev, copy);
	if (err)
		return err;

	vvr = &vdev->vvr[copy->vr_idx];
	mutex_lock(&vvr->vr_mutex);
	if (!vop_vdevup(vdev)) {
		err = -ENODEV;
		dev_err(vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto err;
	}
	err = _vop_virtio_copy(vdev, copy);
	if (err) {
		dev_err(vop_dev(vdev), "%s %d err %d\n",
			__func__, __LINE__, err);
	}
err:
	mutex_unlock(&vvr->vr_mutex);
	return err;
}
| 874 | |||
| 875 | static int vop_open(struct inode *inode, struct file *f) | ||
| 876 | { | ||
| 877 | struct vop_vdev *vdev; | ||
| 878 | struct vop_info *vi = container_of(f->private_data, | ||
| 879 | struct vop_info, miscdev); | ||
| 880 | |||
| 881 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); | ||
| 882 | if (!vdev) | ||
| 883 | return -ENOMEM; | ||
| 884 | vdev->vi = vi; | ||
| 885 | mutex_init(&vdev->vdev_mutex); | ||
| 886 | f->private_data = vdev; | ||
| 887 | init_completion(&vdev->destroy); | ||
| 888 | complete(&vdev->destroy); | ||
| 889 | return 0; | ||
| 890 | } | ||
| 891 | |||
| 892 | static int vop_release(struct inode *inode, struct file *f) | ||
| 893 | { | ||
| 894 | struct vop_vdev *vdev = f->private_data, *vdev_tmp; | ||
| 895 | struct vop_info *vi = vdev->vi; | ||
| 896 | struct list_head *pos, *tmp; | ||
| 897 | bool found = false; | ||
| 898 | |||
| 899 | mutex_lock(&vdev->vdev_mutex); | ||
| 900 | if (vdev->deleted) | ||
| 901 | goto unlock; | ||
| 902 | mutex_lock(&vi->vop_mutex); | ||
| 903 | list_for_each_safe(pos, tmp, &vi->vdev_list) { | ||
| 904 | vdev_tmp = list_entry(pos, struct vop_vdev, list); | ||
| 905 | if (vdev == vdev_tmp) { | ||
| 906 | vop_virtio_del_device(vdev); | ||
| 907 | list_del(pos); | ||
| 908 | found = true; | ||
| 909 | break; | ||
| 910 | } | ||
| 911 | } | ||
| 912 | mutex_unlock(&vi->vop_mutex); | ||
| 913 | unlock: | ||
| 914 | mutex_unlock(&vdev->vdev_mutex); | ||
| 915 | if (!found) | ||
| 916 | wait_for_completion(&vdev->destroy); | ||
| 917 | f->private_data = NULL; | ||
| 918 | kfree(vdev); | ||
| 919 | return 0; | ||
| 920 | } | ||
| 921 | |||
| 922 | static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | ||
| 923 | { | ||
| 924 | struct vop_vdev *vdev = f->private_data; | ||
| 925 | struct vop_info *vi = vdev->vi; | ||
| 926 | void __user *argp = (void __user *)arg; | ||
| 927 | int ret; | ||
| 928 | |||
| 929 | switch (cmd) { | ||
| 930 | case MIC_VIRTIO_ADD_DEVICE: | ||
| 931 | { | ||
| 932 | struct mic_device_desc dd, *dd_config; | ||
| 933 | |||
| 934 | if (copy_from_user(&dd, argp, sizeof(dd))) | ||
| 935 | return -EFAULT; | ||
| 936 | |||
| 937 | if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE || | ||
| 938 | dd.num_vq > MIC_MAX_VRINGS) | ||
| 939 | return -EINVAL; | ||
| 940 | |||
| 941 | dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL); | ||
| 942 | if (!dd_config) | ||
| 943 | return -ENOMEM; | ||
| 944 | if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) { | ||
| 945 | ret = -EFAULT; | ||
| 946 | goto free_ret; | ||
| 947 | } | ||
| 948 | mutex_lock(&vdev->vdev_mutex); | ||
| 949 | mutex_lock(&vi->vop_mutex); | ||
| 950 | ret = vop_virtio_add_device(vdev, dd_config); | ||
| 951 | if (ret) | ||
| 952 | goto unlock_ret; | ||
| 953 | list_add_tail(&vdev->list, &vi->vdev_list); | ||
| 954 | unlock_ret: | ||
| 955 | mutex_unlock(&vi->vop_mutex); | ||
| 956 | mutex_unlock(&vdev->vdev_mutex); | ||
| 957 | free_ret: | ||
| 958 | kfree(dd_config); | ||
| 959 | return ret; | ||
| 960 | } | ||
| 961 | case MIC_VIRTIO_COPY_DESC: | ||
| 962 | { | ||
| 963 | struct mic_copy_desc copy; | ||
| 964 | |||
| 965 | mutex_lock(&vdev->vdev_mutex); | ||
| 966 | ret = vop_vdev_inited(vdev); | ||
| 967 | if (ret) | ||
| 968 | goto _unlock_ret; | ||
| 969 | |||
| 970 | if (copy_from_user(©, argp, sizeof(copy))) { | ||
| 971 | ret = -EFAULT; | ||
| 972 | goto _unlock_ret; | ||
| 973 | } | ||
| 974 | |||
| 975 | ret = vop_virtio_copy_desc(vdev, ©); | ||
| 976 | if (ret < 0) | ||
| 977 | goto _unlock_ret; | ||
| 978 | if (copy_to_user( | ||
| 979 | &((struct mic_copy_desc __user *)argp)->out_len, | ||
| 980 | ©.out_len, sizeof(copy.out_len))) | ||
| 981 | ret = -EFAULT; | ||
| 982 | _unlock_ret: | ||
| 983 | mutex_unlock(&vdev->vdev_mutex); | ||
| 984 | return ret; | ||
| 985 | } | ||
| 986 | case MIC_VIRTIO_CONFIG_CHANGE: | ||
| 987 | { | ||
| 988 | void *buf; | ||
| 989 | |||
| 990 | mutex_lock(&vdev->vdev_mutex); | ||
| 991 | ret = vop_vdev_inited(vdev); | ||
| 992 | if (ret) | ||
| 993 | goto __unlock_ret; | ||
| 994 | buf = kzalloc(vdev->dd->config_len, GFP_KERNEL); | ||
| 995 | if (!buf) { | ||
| 996 | ret = -ENOMEM; | ||
| 997 | goto __unlock_ret; | ||
| 998 | } | ||
| 999 | if (copy_from_user(buf, argp, vdev->dd->config_len)) { | ||
| 1000 | ret = -EFAULT; | ||
| 1001 | goto done; | ||
| 1002 | } | ||
| 1003 | ret = vop_virtio_config_change(vdev, buf); | ||
| 1004 | done: | ||
| 1005 | kfree(buf); | ||
| 1006 | __unlock_ret: | ||
| 1007 | mutex_unlock(&vdev->vdev_mutex); | ||
| 1008 | return ret; | ||
| 1009 | } | ||
| 1010 | default: | ||
| 1011 | return -ENOIOCTLCMD; | ||
| 1012 | }; | ||
| 1013 | return 0; | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | /* | ||
| 1017 | * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and | ||
| 1018 | * not when previously enqueued buffers may be available. This means that | ||
| 1019 | * in the card->host (TX) path, when userspace is unblocked by poll it | ||
| 1020 | * must drain all available descriptors or it can stall. | ||
| 1021 | */ | ||
| 1022 | static unsigned int vop_poll(struct file *f, poll_table *wait) | ||
| 1023 | { | ||
| 1024 | struct vop_vdev *vdev = f->private_data; | ||
| 1025 | int mask = 0; | ||
| 1026 | |||
| 1027 | mutex_lock(&vdev->vdev_mutex); | ||
| 1028 | if (vop_vdev_inited(vdev)) { | ||
| 1029 | mask = POLLERR; | ||
| 1030 | goto done; | ||
| 1031 | } | ||
| 1032 | poll_wait(f, &vdev->waitq, wait); | ||
| 1033 | if (vop_vdev_inited(vdev)) { | ||
| 1034 | mask = POLLERR; | ||
| 1035 | } else if (vdev->poll_wake) { | ||
| 1036 | vdev->poll_wake = 0; | ||
| 1037 | mask = POLLIN | POLLOUT; | ||
| 1038 | } | ||
| 1039 | done: | ||
| 1040 | mutex_unlock(&vdev->vdev_mutex); | ||
| 1041 | return mask; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | static inline int | ||
| 1045 | vop_query_offset(struct vop_vdev *vdev, unsigned long offset, | ||
| 1046 | unsigned long *size, unsigned long *pa) | ||
| 1047 | { | ||
| 1048 | struct vop_device *vpdev = vdev->vpdev; | ||
| 1049 | unsigned long start = MIC_DP_SIZE; | ||
| 1050 | int i; | ||
| 1051 | |||
| 1052 | /* | ||
| 1053 | * MMAP interface is as follows: | ||
| 1054 | * offset region | ||
| 1055 | * 0x0 virtio device_page | ||
| 1056 | * 0x1000 first vring | ||
| 1057 | * 0x1000 + size of 1st vring second vring | ||
| 1058 | * .... | ||
| 1059 | */ | ||
| 1060 | if (!offset) { | ||
| 1061 | *pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev)); | ||
| 1062 | *size = MIC_DP_SIZE; | ||
| 1063 | return 0; | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | for (i = 0; i < vdev->dd->num_vq; i++) { | ||
| 1067 | struct vop_vringh *vvr = &vdev->vvr[i]; | ||
| 1068 | |||
| 1069 | if (offset == start) { | ||
| 1070 | *pa = virt_to_phys(vvr->vring.va); | ||
| 1071 | *size = vvr->vring.len; | ||
| 1072 | return 0; | ||
| 1073 | } | ||
| 1074 | start += vvr->vring.len; | ||
| 1075 | } | ||
| 1076 | return -1; | ||
| 1077 | } | ||
| 1078 | |||
| 1079 | /* | ||
| 1080 | * Maps the device page and virtio rings to user space for readonly access. | ||
| 1081 | */ | ||
| 1082 | static int vop_mmap(struct file *f, struct vm_area_struct *vma) | ||
| 1083 | { | ||
| 1084 | struct vop_vdev *vdev = f->private_data; | ||
| 1085 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | ||
| 1086 | unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size; | ||
| 1087 | int i, err; | ||
| 1088 | |||
| 1089 | err = vop_vdev_inited(vdev); | ||
| 1090 | if (err) | ||
| 1091 | goto ret; | ||
| 1092 | if (vma->vm_flags & VM_WRITE) { | ||
| 1093 | err = -EACCES; | ||
| 1094 | goto ret; | ||
| 1095 | } | ||
| 1096 | while (size_rem) { | ||
| 1097 | i = vop_query_offset(vdev, offset, &size, &pa); | ||
| 1098 | if (i < 0) { | ||
| 1099 | err = -EINVAL; | ||
| 1100 | goto ret; | ||
| 1101 | } | ||
| 1102 | err = remap_pfn_range(vma, vma->vm_start + offset, | ||
| 1103 | pa >> PAGE_SHIFT, size, | ||
| 1104 | vma->vm_page_prot); | ||
| 1105 | if (err) | ||
| 1106 | goto ret; | ||
| 1107 | size_rem -= size; | ||
| 1108 | offset += size; | ||
| 1109 | } | ||
| 1110 | ret: | ||
| 1111 | return err; | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | static const struct file_operations vop_fops = { | ||
| 1115 | .open = vop_open, | ||
| 1116 | .release = vop_release, | ||
| 1117 | .unlocked_ioctl = vop_ioctl, | ||
| 1118 | .poll = vop_poll, | ||
| 1119 | .mmap = vop_mmap, | ||
| 1120 | .owner = THIS_MODULE, | ||
| 1121 | }; | ||
| 1122 | |||
| 1123 | int vop_host_init(struct vop_info *vi) | ||
| 1124 | { | ||
| 1125 | int rc; | ||
| 1126 | struct miscdevice *mdev; | ||
| 1127 | struct vop_device *vpdev = vi->vpdev; | ||
| 1128 | |||
| 1129 | INIT_LIST_HEAD(&vi->vdev_list); | ||
| 1130 | vi->dma_ch = vpdev->dma_ch; | ||
| 1131 | mdev = &vi->miscdev; | ||
| 1132 | mdev->minor = MISC_DYNAMIC_MINOR; | ||
| 1133 | snprintf(vi->name, sizeof(vi->name), "vop_virtio%d", vpdev->index); | ||
| 1134 | mdev->name = vi->name; | ||
| 1135 | mdev->fops = &vop_fops; | ||
| 1136 | mdev->parent = &vpdev->dev; | ||
| 1137 | |||
| 1138 | rc = misc_register(mdev); | ||
| 1139 | if (rc) | ||
| 1140 | dev_err(&vpdev->dev, "%s failed rc %d\n", __func__, rc); | ||
| 1141 | return rc; | ||
| 1142 | } | ||
| 1143 | |||
| 1144 | void vop_host_uninit(struct vop_info *vi) | ||
| 1145 | { | ||
| 1146 | struct list_head *pos, *tmp; | ||
| 1147 | struct vop_vdev *vdev; | ||
| 1148 | |||
| 1149 | mutex_lock(&vi->vop_mutex); | ||
| 1150 | vop_virtio_reset_devices(vi); | ||
| 1151 | list_for_each_safe(pos, tmp, &vi->vdev_list) { | ||
| 1152 | vdev = list_entry(pos, struct vop_vdev, list); | ||
| 1153 | list_del(pos); | ||
| 1154 | reinit_completion(&vdev->destroy); | ||
| 1155 | mutex_unlock(&vi->vop_mutex); | ||
| 1156 | mutex_lock(&vdev->vdev_mutex); | ||
| 1157 | vop_virtio_del_device(vdev); | ||
| 1158 | vdev->deleted = true; | ||
| 1159 | mutex_unlock(&vdev->vdev_mutex); | ||
| 1160 | complete(&vdev->destroy); | ||
| 1161 | mutex_lock(&vi->vop_mutex); | ||
| 1162 | } | ||
| 1163 | mutex_unlock(&vi->vop_mutex); | ||
| 1164 | misc_deregister(&vi->miscdev); | ||
| 1165 | } | ||
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index 9a17a9bab8d6..15bb0c8cdda3 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c | |||
| @@ -503,8 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj, | |||
| 503 | int err; | 503 | int err; |
| 504 | ssize_t rom_size; | 504 | ssize_t rom_size; |
| 505 | 505 | ||
| 506 | struct pch_phub_reg *chip = | 506 | struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj)); |
| 507 | dev_get_drvdata(container_of(kobj, struct device, kobj)); | ||
| 508 | 507 | ||
| 509 | ret = mutex_lock_interruptible(&pch_phub_mutex); | 508 | ret = mutex_lock_interruptible(&pch_phub_mutex); |
| 510 | if (ret) { | 509 | if (ret) { |
| @@ -567,8 +566,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj, | |||
| 567 | unsigned int addr_offset; | 566 | unsigned int addr_offset; |
| 568 | int ret; | 567 | int ret; |
| 569 | ssize_t rom_size; | 568 | ssize_t rom_size; |
| 570 | struct pch_phub_reg *chip = | 569 | struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj)); |
| 571 | dev_get_drvdata(container_of(kobj, struct device, kobj)); | ||
| 572 | 570 | ||
| 573 | ret = mutex_lock_interruptible(&pch_phub_mutex); | 571 | ret = mutex_lock_interruptible(&pch_phub_mutex); |
| 574 | if (ret) | 572 | if (ret) |
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index 6e3af8b42cdd..dcdbd58672cc 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c | |||
| @@ -632,7 +632,6 @@ long st_register(struct st_proto_s *new_proto) | |||
| 632 | spin_unlock_irqrestore(&st_gdata->lock, flags); | 632 | spin_unlock_irqrestore(&st_gdata->lock, flags); |
| 633 | return err; | 633 | return err; |
| 634 | } | 634 | } |
| 635 | pr_debug("done %s(%d) ", __func__, new_proto->chnl_id); | ||
| 636 | } | 635 | } |
| 637 | EXPORT_SYMBOL_GPL(st_register); | 636 | EXPORT_SYMBOL_GPL(st_register); |
| 638 | 637 | ||
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c index b823f9a6e464..896be150e28f 100644 --- a/drivers/misc/vmw_vmci/vmci_driver.c +++ b/drivers/misc/vmw_vmci/vmci_driver.c | |||
| @@ -113,5 +113,5 @@ module_exit(vmci_drv_exit); | |||
| 113 | 113 | ||
| 114 | MODULE_AUTHOR("VMware, Inc."); | 114 | MODULE_AUTHOR("VMware, Inc."); |
| 115 | MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); | 115 | MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface."); |
| 116 | MODULE_VERSION("1.1.3.0-k"); | 116 | MODULE_VERSION("1.1.4.0-k"); |
| 117 | MODULE_LICENSE("GPL v2"); | 117 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 5914263090fc..fe207e542032 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
| @@ -47,13 +47,10 @@ | |||
| 47 | #include "queue.h" | 47 | #include "queue.h" |
| 48 | 48 | ||
| 49 | MODULE_ALIAS("mmc:block"); | 49 | MODULE_ALIAS("mmc:block"); |
| 50 | |||
| 51 | #ifdef KERNEL | ||
| 52 | #ifdef MODULE_PARAM_PREFIX | 50 | #ifdef MODULE_PARAM_PREFIX |
| 53 | #undef MODULE_PARAM_PREFIX | 51 | #undef MODULE_PARAM_PREFIX |
| 54 | #endif | 52 | #endif |
| 55 | #define MODULE_PARAM_PREFIX "mmcblk." | 53 | #define MODULE_PARAM_PREFIX "mmcblk." |
| 56 | #endif | ||
| 57 | 54 | ||
| 58 | #define INAND_CMD38_ARG_EXT_CSD 113 | 55 | #define INAND_CMD38_ARG_EXT_CSD 113 |
| 59 | #define INAND_CMD38_ARG_ERASE 0x00 | 56 | #define INAND_CMD38_ARG_ERASE 0x00 |
| @@ -655,8 +652,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, | |||
| 655 | } | 652 | } |
| 656 | 653 | ||
| 657 | md = mmc_blk_get(bdev->bd_disk); | 654 | md = mmc_blk_get(bdev->bd_disk); |
| 658 | if (!md) | 655 | if (!md) { |
| 656 | err = -EINVAL; | ||
| 659 | goto cmd_err; | 657 | goto cmd_err; |
| 658 | } | ||
| 660 | 659 | ||
| 661 | card = md->queue.card; | 660 | card = md->queue.card; |
| 662 | if (IS_ERR(card)) { | 661 | if (IS_ERR(card)) { |
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 1c1b45ef3faf..3446097a43c0 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c | |||
| @@ -925,6 +925,10 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, | |||
| 925 | 925 | ||
| 926 | dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, | 926 | dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, |
| 927 | PAGE_SIZE, dir); | 927 | PAGE_SIZE, dir); |
| 928 | if (dma_mapping_error(dma_dev, dma_addr)) { | ||
| 929 | data->error = -EFAULT; | ||
| 930 | break; | ||
| 931 | } | ||
| 928 | if (direction == DMA_TO_DEVICE) | 932 | if (direction == DMA_TO_DEVICE) |
| 929 | t->tx_dma = dma_addr + sg->offset; | 933 | t->tx_dma = dma_addr + sg->offset; |
| 930 | else | 934 | else |
| @@ -1393,10 +1397,12 @@ static int mmc_spi_probe(struct spi_device *spi) | |||
| 1393 | host->dma_dev = dev; | 1397 | host->dma_dev = dev; |
| 1394 | host->ones_dma = dma_map_single(dev, ones, | 1398 | host->ones_dma = dma_map_single(dev, ones, |
| 1395 | MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); | 1399 | MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); |
| 1400 | if (dma_mapping_error(dev, host->ones_dma)) | ||
| 1401 | goto fail_ones_dma; | ||
| 1396 | host->data_dma = dma_map_single(dev, host->data, | 1402 | host->data_dma = dma_map_single(dev, host->data, |
| 1397 | sizeof(*host->data), DMA_BIDIRECTIONAL); | 1403 | sizeof(*host->data), DMA_BIDIRECTIONAL); |
| 1398 | 1404 | if (dma_mapping_error(dev, host->data_dma)) | |
| 1399 | /* REVISIT in theory those map operations can fail... */ | 1405 | goto fail_data_dma; |
| 1400 | 1406 | ||
| 1401 | dma_sync_single_for_cpu(host->dma_dev, | 1407 | dma_sync_single_for_cpu(host->dma_dev, |
| 1402 | host->data_dma, sizeof(*host->data), | 1408 | host->data_dma, sizeof(*host->data), |
| @@ -1462,6 +1468,11 @@ fail_glue_init: | |||
| 1462 | if (host->dma_dev) | 1468 | if (host->dma_dev) |
| 1463 | dma_unmap_single(host->dma_dev, host->data_dma, | 1469 | dma_unmap_single(host->dma_dev, host->data_dma, |
| 1464 | sizeof(*host->data), DMA_BIDIRECTIONAL); | 1470 | sizeof(*host->data), DMA_BIDIRECTIONAL); |
| 1471 | fail_data_dma: | ||
| 1472 | if (host->dma_dev) | ||
| 1473 | dma_unmap_single(host->dma_dev, host->ones_dma, | ||
| 1474 | MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); | ||
| 1475 | fail_ones_dma: | ||
| 1465 | kfree(host->data); | 1476 | kfree(host->data); |
| 1466 | 1477 | ||
| 1467 | fail_nobuf1: | 1478 | fail_nobuf1: |
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index ce08896b9d69..da824772bbb4 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
| @@ -86,7 +86,7 @@ struct pxamci_host { | |||
| 86 | static inline void pxamci_init_ocr(struct pxamci_host *host) | 86 | static inline void pxamci_init_ocr(struct pxamci_host *host) |
| 87 | { | 87 | { |
| 88 | #ifdef CONFIG_REGULATOR | 88 | #ifdef CONFIG_REGULATOR |
| 89 | host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc"); | 89 | host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc"); |
| 90 | 90 | ||
| 91 | if (IS_ERR(host->vcc)) | 91 | if (IS_ERR(host->vcc)) |
| 92 | host->vcc = NULL; | 92 | host->vcc = NULL; |
| @@ -654,12 +654,8 @@ static int pxamci_probe(struct platform_device *pdev) | |||
| 654 | 654 | ||
| 655 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 655 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 656 | irq = platform_get_irq(pdev, 0); | 656 | irq = platform_get_irq(pdev, 0); |
| 657 | if (!r || irq < 0) | 657 | if (irq < 0) |
| 658 | return -ENXIO; | 658 | return irq; |
| 659 | |||
| 660 | r = request_mem_region(r->start, SZ_4K, DRIVER_NAME); | ||
| 661 | if (!r) | ||
| 662 | return -EBUSY; | ||
| 663 | 659 | ||
| 664 | mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev); | 660 | mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev); |
| 665 | if (!mmc) { | 661 | if (!mmc) { |
| @@ -695,7 +691,7 @@ static int pxamci_probe(struct platform_device *pdev) | |||
| 695 | host->pdata = pdev->dev.platform_data; | 691 | host->pdata = pdev->dev.platform_data; |
| 696 | host->clkrt = CLKRT_OFF; | 692 | host->clkrt = CLKRT_OFF; |
| 697 | 693 | ||
| 698 | host->clk = clk_get(&pdev->dev, NULL); | 694 | host->clk = devm_clk_get(&pdev->dev, NULL); |
| 699 | if (IS_ERR(host->clk)) { | 695 | if (IS_ERR(host->clk)) { |
| 700 | ret = PTR_ERR(host->clk); | 696 | ret = PTR_ERR(host->clk); |
| 701 | host->clk = NULL; | 697 | host->clk = NULL; |
| @@ -727,9 +723,9 @@ static int pxamci_probe(struct platform_device *pdev) | |||
| 727 | host->irq = irq; | 723 | host->irq = irq; |
| 728 | host->imask = MMC_I_MASK_ALL; | 724 | host->imask = MMC_I_MASK_ALL; |
| 729 | 725 | ||
| 730 | host->base = ioremap(r->start, SZ_4K); | 726 | host->base = devm_ioremap_resource(&pdev->dev, r); |
| 731 | if (!host->base) { | 727 | if (IS_ERR(host->base)) { |
| 732 | ret = -ENOMEM; | 728 | ret = PTR_ERR(host->base); |
| 733 | goto out; | 729 | goto out; |
| 734 | } | 730 | } |
| 735 | 731 | ||
| @@ -742,7 +738,8 @@ static int pxamci_probe(struct platform_device *pdev) | |||
| 742 | writel(64, host->base + MMC_RESTO); | 738 | writel(64, host->base + MMC_RESTO); |
| 743 | writel(host->imask, host->base + MMC_I_MASK); | 739 | writel(host->imask, host->base + MMC_I_MASK); |
| 744 | 740 | ||
| 745 | ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host); | 741 | ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0, |
| 742 | DRIVER_NAME, host); | ||
| 746 | if (ret) | 743 | if (ret) |
| 747 | goto out; | 744 | goto out; |
| 748 | 745 | ||
| @@ -804,7 +801,7 @@ static int pxamci_probe(struct platform_device *pdev) | |||
| 804 | dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); | 801 | dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); |
| 805 | goto out; | 802 | goto out; |
| 806 | } else { | 803 | } else { |
| 807 | mmc->caps |= host->pdata->gpio_card_ro_invert ? | 804 | mmc->caps2 |= host->pdata->gpio_card_ro_invert ? |
| 808 | 0 : MMC_CAP2_RO_ACTIVE_HIGH; | 805 | 0 : MMC_CAP2_RO_ACTIVE_HIGH; |
| 809 | } | 806 | } |
| 810 | 807 | ||
| @@ -833,14 +830,9 @@ out: | |||
| 833 | dma_release_channel(host->dma_chan_rx); | 830 | dma_release_channel(host->dma_chan_rx); |
| 834 | if (host->dma_chan_tx) | 831 | if (host->dma_chan_tx) |
| 835 | dma_release_channel(host->dma_chan_tx); | 832 | dma_release_channel(host->dma_chan_tx); |
| 836 | if (host->base) | ||
| 837 | iounmap(host->base); | ||
| 838 | if (host->clk) | ||
| 839 | clk_put(host->clk); | ||
| 840 | } | 833 | } |
| 841 | if (mmc) | 834 | if (mmc) |
| 842 | mmc_free_host(mmc); | 835 | mmc_free_host(mmc); |
| 843 | release_resource(r); | ||
| 844 | return ret; | 836 | return ret; |
| 845 | } | 837 | } |
| 846 | 838 | ||
| @@ -859,9 +851,6 @@ static int pxamci_remove(struct platform_device *pdev) | |||
| 859 | gpio_ro = host->pdata->gpio_card_ro; | 851 | gpio_ro = host->pdata->gpio_card_ro; |
| 860 | gpio_power = host->pdata->gpio_power; | 852 | gpio_power = host->pdata->gpio_power; |
| 861 | } | 853 | } |
| 862 | if (host->vcc) | ||
| 863 | regulator_put(host->vcc); | ||
| 864 | |||
| 865 | if (host->pdata && host->pdata->exit) | 854 | if (host->pdata && host->pdata->exit) |
| 866 | host->pdata->exit(&pdev->dev, mmc); | 855 | host->pdata->exit(&pdev->dev, mmc); |
| 867 | 856 | ||
| @@ -870,16 +859,10 @@ static int pxamci_remove(struct platform_device *pdev) | |||
| 870 | END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, | 859 | END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, |
| 871 | host->base + MMC_I_MASK); | 860 | host->base + MMC_I_MASK); |
| 872 | 861 | ||
| 873 | free_irq(host->irq, host); | ||
| 874 | dmaengine_terminate_all(host->dma_chan_rx); | 862 | dmaengine_terminate_all(host->dma_chan_rx); |
| 875 | dmaengine_terminate_all(host->dma_chan_tx); | 863 | dmaengine_terminate_all(host->dma_chan_tx); |
| 876 | dma_release_channel(host->dma_chan_rx); | 864 | dma_release_channel(host->dma_chan_rx); |
| 877 | dma_release_channel(host->dma_chan_tx); | 865 | dma_release_channel(host->dma_chan_tx); |
| 878 | iounmap(host->base); | ||
| 879 | |||
| 880 | clk_put(host->clk); | ||
| 881 | |||
| 882 | release_resource(host->res); | ||
| 883 | 866 | ||
| 884 | mmc_free_host(mmc); | 867 | mmc_free_host(mmc); |
| 885 | } | 868 | } |
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index f6047fc94062..a5cda926d38e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c | |||
| @@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { | |||
| 146 | .ops = &sdhci_acpi_ops_int, | 146 | .ops = &sdhci_acpi_ops_int, |
| 147 | }; | 147 | }; |
| 148 | 148 | ||
| 149 | static int bxt_get_cd(struct mmc_host *mmc) | ||
| 150 | { | ||
| 151 | int gpio_cd = mmc_gpio_get_cd(mmc); | ||
| 152 | struct sdhci_host *host = mmc_priv(mmc); | ||
| 153 | unsigned long flags; | ||
| 154 | int ret = 0; | ||
| 155 | |||
| 156 | if (!gpio_cd) | ||
| 157 | return 0; | ||
| 158 | |||
| 159 | pm_runtime_get_sync(mmc->parent); | ||
| 160 | |||
| 161 | spin_lock_irqsave(&host->lock, flags); | ||
| 162 | |||
| 163 | if (host->flags & SDHCI_DEVICE_DEAD) | ||
| 164 | goto out; | ||
| 165 | |||
| 166 | ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); | ||
| 167 | out: | ||
| 168 | spin_unlock_irqrestore(&host->lock, flags); | ||
| 169 | |||
| 170 | pm_runtime_mark_last_busy(mmc->parent); | ||
| 171 | pm_runtime_put_autosuspend(mmc->parent); | ||
| 172 | |||
| 173 | return ret; | ||
| 174 | } | ||
| 175 | |||
| 149 | static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, | 176 | static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, |
| 150 | const char *hid, const char *uid) | 177 | const char *hid, const char *uid) |
| 151 | { | 178 | { |
| @@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev, | |||
| 196 | 223 | ||
| 197 | /* Platform specific code during sd probe slot goes here */ | 224 | /* Platform specific code during sd probe slot goes here */ |
| 198 | 225 | ||
| 226 | if (hid && !strcmp(hid, "80865ACA")) | ||
| 227 | host->mmc_host_ops.get_cd = bxt_get_cd; | ||
| 228 | |||
| 199 | return 0; | 229 | return 0; |
| 200 | } | 230 | } |
| 201 | 231 | ||
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 7e7d8f0c9438..9cb86fb25976 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c | |||
| @@ -217,6 +217,7 @@ static int sdhci_at91_probe(struct platform_device *pdev) | |||
| 217 | pm_runtime_disable: | 217 | pm_runtime_disable: |
| 218 | pm_runtime_disable(&pdev->dev); | 218 | pm_runtime_disable(&pdev->dev); |
| 219 | pm_runtime_set_suspended(&pdev->dev); | 219 | pm_runtime_set_suspended(&pdev->dev); |
| 220 | pm_runtime_put_noidle(&pdev->dev); | ||
| 220 | clocks_disable_unprepare: | 221 | clocks_disable_unprepare: |
| 221 | clk_disable_unprepare(priv->gck); | 222 | clk_disable_unprepare(priv->gck); |
| 222 | clk_disable_unprepare(priv->mainck); | 223 | clk_disable_unprepare(priv->mainck); |
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index cc851b065d0a..df3b8eced8c4 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
| @@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host) | |||
| 330 | sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); | 330 | sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); |
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | static int bxt_get_cd(struct mmc_host *mmc) | ||
| 334 | { | ||
| 335 | int gpio_cd = mmc_gpio_get_cd(mmc); | ||
| 336 | struct sdhci_host *host = mmc_priv(mmc); | ||
| 337 | unsigned long flags; | ||
| 338 | int ret = 0; | ||
| 339 | |||
| 340 | if (!gpio_cd) | ||
| 341 | return 0; | ||
| 342 | |||
| 343 | pm_runtime_get_sync(mmc->parent); | ||
| 344 | |||
| 345 | spin_lock_irqsave(&host->lock, flags); | ||
| 346 | |||
| 347 | if (host->flags & SDHCI_DEVICE_DEAD) | ||
| 348 | goto out; | ||
| 349 | |||
| 350 | ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); | ||
| 351 | out: | ||
| 352 | spin_unlock_irqrestore(&host->lock, flags); | ||
| 353 | |||
| 354 | pm_runtime_mark_last_busy(mmc->parent); | ||
| 355 | pm_runtime_put_autosuspend(mmc->parent); | ||
| 356 | |||
| 357 | return ret; | ||
| 358 | } | ||
| 359 | |||
| 333 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) | 360 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) |
| 334 | { | 361 | { |
| 335 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | | 362 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | |
| @@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) | |||
| 362 | slot->cd_con_id = NULL; | 389 | slot->cd_con_id = NULL; |
| 363 | slot->cd_idx = 0; | 390 | slot->cd_idx = 0; |
| 364 | slot->cd_override_level = true; | 391 | slot->cd_override_level = true; |
| 392 | if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || | ||
| 393 | slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) | ||
| 394 | slot->host->mmc_host_ops.get_cd = bxt_get_cd; | ||
| 395 | |||
| 365 | return 0; | 396 | return 0; |
| 366 | } | 397 | } |
| 367 | 398 | ||
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index d622435d1bcc..add9fdfd1d8f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
| @@ -1360,7 +1360,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
| 1360 | sdhci_runtime_pm_get(host); | 1360 | sdhci_runtime_pm_get(host); |
| 1361 | 1361 | ||
| 1362 | /* Firstly check card presence */ | 1362 | /* Firstly check card presence */ |
| 1363 | present = sdhci_do_get_cd(host); | 1363 | present = mmc->ops->get_cd(mmc); |
| 1364 | 1364 | ||
| 1365 | spin_lock_irqsave(&host->lock, flags); | 1365 | spin_lock_irqsave(&host->lock, flags); |
| 1366 | 1366 | ||
| @@ -2849,6 +2849,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, | |||
| 2849 | 2849 | ||
| 2850 | host = mmc_priv(mmc); | 2850 | host = mmc_priv(mmc); |
| 2851 | host->mmc = mmc; | 2851 | host->mmc = mmc; |
| 2852 | host->mmc_host_ops = sdhci_ops; | ||
| 2853 | mmc->ops = &host->mmc_host_ops; | ||
| 2852 | 2854 | ||
| 2853 | return host; | 2855 | return host; |
| 2854 | } | 2856 | } |
| @@ -3037,7 +3039,6 @@ int sdhci_add_host(struct sdhci_host *host) | |||
| 3037 | /* | 3039 | /* |
| 3038 | * Set host parameters. | 3040 | * Set host parameters. |
| 3039 | */ | 3041 | */ |
| 3040 | mmc->ops = &sdhci_ops; | ||
| 3041 | max_clk = host->max_clk; | 3042 | max_clk = host->max_clk; |
| 3042 | 3043 | ||
| 3043 | if (host->ops->get_min_clock) | 3044 | if (host->ops->get_min_clock) |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 7654ae5d2b4e..0115e9907bf8 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
| @@ -430,6 +430,7 @@ struct sdhci_host { | |||
| 430 | 430 | ||
| 431 | /* Internal data */ | 431 | /* Internal data */ |
| 432 | struct mmc_host *mmc; /* MMC structure */ | 432 | struct mmc_host *mmc; /* MMC structure */ |
| 433 | struct mmc_host_ops mmc_host_ops; /* MMC host ops */ | ||
| 433 | u64 dma_mask; /* custom DMA mask */ | 434 | u64 dma_mask; /* custom DMA mask */ |
| 434 | 435 | ||
| 435 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) | 436 | #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) |
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 1ca8a1359cbc..6234eab38ff3 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
| @@ -445,7 +445,7 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host) | |||
| 445 | pdata->slave_id_rx); | 445 | pdata->slave_id_rx); |
| 446 | } else { | 446 | } else { |
| 447 | host->chan_tx = dma_request_slave_channel(dev, "tx"); | 447 | host->chan_tx = dma_request_slave_channel(dev, "tx"); |
| 448 | host->chan_tx = dma_request_slave_channel(dev, "rx"); | 448 | host->chan_rx = dma_request_slave_channel(dev, "rx"); |
| 449 | } | 449 | } |
| 450 | dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, | 450 | dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, |
| 451 | host->chan_rx); | 451 | host->chan_rx); |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 49eea8981332..3010080cfeee 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | |||
| 7831 | return ret; | 7831 | return ret; |
| 7832 | } | 7832 | } |
| 7833 | 7833 | ||
| 7834 | static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) | ||
| 7835 | { | ||
| 7836 | /* Check if we will never have enough descriptors, | ||
| 7837 | * as gso_segs can be more than current ring size | ||
| 7838 | */ | ||
| 7839 | return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; | ||
| 7840 | } | ||
| 7841 | |||
| 7834 | static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); | 7842 | static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); |
| 7835 | 7843 | ||
| 7836 | /* Use GSO to workaround all TSO packets that meet HW bug conditions | 7844 | /* Use GSO to workaround all TSO packets that meet HW bug conditions |
| @@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 7934 | * vlan encapsulated. | 7942 | * vlan encapsulated. |
| 7935 | */ | 7943 | */ |
| 7936 | if (skb->protocol == htons(ETH_P_8021Q) || | 7944 | if (skb->protocol == htons(ETH_P_8021Q) || |
| 7937 | skb->protocol == htons(ETH_P_8021AD)) | 7945 | skb->protocol == htons(ETH_P_8021AD)) { |
| 7938 | return tg3_tso_bug(tp, tnapi, txq, skb); | 7946 | if (tg3_tso_bug_gso_check(tnapi, skb)) |
| 7947 | return tg3_tso_bug(tp, tnapi, txq, skb); | ||
| 7948 | goto drop; | ||
| 7949 | } | ||
| 7939 | 7950 | ||
| 7940 | if (!skb_is_gso_v6(skb)) { | 7951 | if (!skb_is_gso_v6(skb)) { |
| 7941 | if (unlikely((ETH_HLEN + hdr_len) > 80) && | 7952 | if (unlikely((ETH_HLEN + hdr_len) > 80) && |
| 7942 | tg3_flag(tp, TSO_BUG)) | 7953 | tg3_flag(tp, TSO_BUG)) { |
| 7943 | return tg3_tso_bug(tp, tnapi, txq, skb); | 7954 | if (tg3_tso_bug_gso_check(tnapi, skb)) |
| 7944 | 7955 | return tg3_tso_bug(tp, tnapi, txq, skb); | |
| 7956 | goto drop; | ||
| 7957 | } | ||
| 7945 | ip_csum = iph->check; | 7958 | ip_csum = iph->check; |
| 7946 | ip_tot_len = iph->tot_len; | 7959 | ip_tot_len = iph->tot_len; |
| 7947 | iph->check = 0; | 7960 | iph->check = 0; |
| @@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 8073 | if (would_hit_hwbug) { | 8086 | if (would_hit_hwbug) { |
| 8074 | tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); | 8087 | tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); |
| 8075 | 8088 | ||
| 8076 | if (mss) { | 8089 | if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { |
| 8077 | /* If it's a TSO packet, do GSO instead of | 8090 | /* If it's a TSO packet, do GSO instead of |
| 8078 | * allocating and copying to a large linear SKB | 8091 | * allocating and copying to a large linear SKB |
| 8079 | */ | 8092 | */ |
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 1671fa3332c2..7ba6d530b0c0 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | 33 | ||
| 34 | #define DRV_NAME "enic" | 34 | #define DRV_NAME "enic" |
| 35 | #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" | 35 | #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" |
| 36 | #define DRV_VERSION "2.3.0.12" | 36 | #define DRV_VERSION "2.3.0.20" |
| 37 | #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" | 37 | #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" |
| 38 | 38 | ||
| 39 | #define ENIC_BARS_MAX 6 | 39 | #define ENIC_BARS_MAX 6 |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 1ffd1050860b..1fdf5fe12a95 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c | |||
| @@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | |||
| 298 | int wait) | 298 | int wait) |
| 299 | { | 299 | { |
| 300 | struct devcmd2_controller *dc2c = vdev->devcmd2; | 300 | struct devcmd2_controller *dc2c = vdev->devcmd2; |
| 301 | struct devcmd2_result *result = dc2c->result + dc2c->next_result; | 301 | struct devcmd2_result *result; |
| 302 | u8 color; | ||
| 302 | unsigned int i; | 303 | unsigned int i; |
| 303 | int delay, err; | 304 | int delay, err; |
| 304 | u32 fetch_index, new_posted; | 305 | u32 fetch_index, new_posted; |
| @@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, | |||
| 336 | if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) | 337 | if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) |
| 337 | return 0; | 338 | return 0; |
| 338 | 339 | ||
| 340 | result = dc2c->result + dc2c->next_result; | ||
| 341 | color = dc2c->color; | ||
| 342 | |||
| 343 | dc2c->next_result++; | ||
| 344 | if (dc2c->next_result == dc2c->result_size) { | ||
| 345 | dc2c->next_result = 0; | ||
| 346 | dc2c->color = dc2c->color ? 0 : 1; | ||
| 347 | } | ||
| 348 | |||
| 339 | for (delay = 0; delay < wait; delay++) { | 349 | for (delay = 0; delay < wait; delay++) { |
| 340 | if (result->color == dc2c->color) { | 350 | if (result->color == color) { |
| 341 | dc2c->next_result++; | ||
| 342 | if (dc2c->next_result == dc2c->result_size) { | ||
| 343 | dc2c->next_result = 0; | ||
| 344 | dc2c->color = dc2c->color ? 0 : 1; | ||
| 345 | } | ||
| 346 | if (result->error) { | 351 | if (result->error) { |
| 347 | err = result->error; | 352 | err = result->error; |
| 348 | if (err != ERR_ECMDUNKNOWN || | 353 | if (err != ERR_ECMDUNKNOWN || |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index d48d5793407d..e94ca1c3fc7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
| @@ -2429,7 +2429,7 @@ err_thread: | |||
| 2429 | flush_workqueue(priv->mfunc.master.comm_wq); | 2429 | flush_workqueue(priv->mfunc.master.comm_wq); |
| 2430 | destroy_workqueue(priv->mfunc.master.comm_wq); | 2430 | destroy_workqueue(priv->mfunc.master.comm_wq); |
| 2431 | err_slaves: | 2431 | err_slaves: |
| 2432 | while (--i) { | 2432 | while (i--) { |
| 2433 | for (port = 1; port <= MLX4_MAX_PORTS; port++) | 2433 | for (port = 1; port <= MLX4_MAX_PORTS; port++) |
| 2434 | kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); | 2434 | kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); |
| 2435 | } | 2435 | } |
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 70814b7386b3..fc8bbff2d7e3 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c | |||
| @@ -1880,9 +1880,9 @@ static int dwceqos_open(struct net_device *ndev) | |||
| 1880 | } | 1880 | } |
| 1881 | netdev_reset_queue(ndev); | 1881 | netdev_reset_queue(ndev); |
| 1882 | 1882 | ||
| 1883 | dwceqos_init_hw(lp); | ||
| 1883 | napi_enable(&lp->napi); | 1884 | napi_enable(&lp->napi); |
| 1884 | phy_start(lp->phy_dev); | 1885 | phy_start(lp->phy_dev); |
| 1885 | dwceqos_init_hw(lp); | ||
| 1886 | 1886 | ||
| 1887 | netif_start_queue(ndev); | 1887 | netif_start_queue(ndev); |
| 1888 | tasklet_enable(&lp->tx_bdreclaim_tasklet); | 1888 | tasklet_enable(&lp->tx_bdreclaim_tasklet); |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 0b14ac3b8d11..028e3873c310 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
| @@ -1039,6 +1039,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1039 | return geneve_xmit_skb(skb, dev, info); | 1039 | return geneve_xmit_skb(skb, dev, info); |
| 1040 | } | 1040 | } |
| 1041 | 1041 | ||
| 1042 | static int geneve_change_mtu(struct net_device *dev, int new_mtu) | ||
| 1043 | { | ||
| 1044 | /* GENEVE overhead is not fixed, so we can't enforce a more | ||
| 1045 | * precise max MTU. | ||
| 1046 | */ | ||
| 1047 | if (new_mtu < 68 || new_mtu > IP_MAX_MTU) | ||
| 1048 | return -EINVAL; | ||
| 1049 | dev->mtu = new_mtu; | ||
| 1050 | return 0; | ||
| 1051 | } | ||
| 1052 | |||
| 1042 | static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) | 1053 | static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) |
| 1043 | { | 1054 | { |
| 1044 | struct ip_tunnel_info *info = skb_tunnel_info(skb); | 1055 | struct ip_tunnel_info *info = skb_tunnel_info(skb); |
| @@ -1083,7 +1094,7 @@ static const struct net_device_ops geneve_netdev_ops = { | |||
| 1083 | .ndo_stop = geneve_stop, | 1094 | .ndo_stop = geneve_stop, |
| 1084 | .ndo_start_xmit = geneve_xmit, | 1095 | .ndo_start_xmit = geneve_xmit, |
| 1085 | .ndo_get_stats64 = ip_tunnel_get_stats64, | 1096 | .ndo_get_stats64 = ip_tunnel_get_stats64, |
| 1086 | .ndo_change_mtu = eth_change_mtu, | 1097 | .ndo_change_mtu = geneve_change_mtu, |
| 1087 | .ndo_validate_addr = eth_validate_addr, | 1098 | .ndo_validate_addr = eth_validate_addr, |
| 1088 | .ndo_set_mac_address = eth_mac_addr, | 1099 | .ndo_set_mac_address = eth_mac_addr, |
| 1089 | .ndo_fill_metadata_dst = geneve_fill_metadata_dst, | 1100 | .ndo_fill_metadata_dst = geneve_fill_metadata_dst, |
| @@ -1442,11 +1453,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, | |||
| 1442 | 1453 | ||
| 1443 | err = geneve_configure(net, dev, &geneve_remote_unspec, | 1454 | err = geneve_configure(net, dev, &geneve_remote_unspec, |
| 1444 | 0, 0, 0, htons(dst_port), true, 0); | 1455 | 0, 0, 0, htons(dst_port), true, 0); |
| 1445 | if (err) { | 1456 | if (err) |
| 1446 | free_netdev(dev); | 1457 | goto err; |
| 1447 | return ERR_PTR(err); | 1458 | |
| 1448 | } | 1459 | /* openvswitch users expect packet sizes to be unrestricted, |
| 1460 | * so set the largest MTU we can. | ||
| 1461 | */ | ||
| 1462 | err = geneve_change_mtu(dev, IP_MAX_MTU); | ||
| 1463 | if (err) | ||
| 1464 | goto err; | ||
| 1465 | |||
| 1449 | return dev; | 1466 | return dev; |
| 1467 | |||
| 1468 | err: | ||
| 1469 | free_netdev(dev); | ||
| 1470 | return ERR_PTR(err); | ||
| 1450 | } | 1471 | } |
| 1451 | EXPORT_SYMBOL_GPL(geneve_dev_create_fb); | 1472 | EXPORT_SYMBOL_GPL(geneve_dev_create_fb); |
| 1452 | 1473 | ||
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 65439188c582..a31cd954b308 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -2367,29 +2367,43 @@ static void vxlan_set_multicast_list(struct net_device *dev) | |||
| 2367 | { | 2367 | { |
| 2368 | } | 2368 | } |
| 2369 | 2369 | ||
| 2370 | static int vxlan_change_mtu(struct net_device *dev, int new_mtu) | 2370 | static int __vxlan_change_mtu(struct net_device *dev, |
| 2371 | struct net_device *lowerdev, | ||
| 2372 | struct vxlan_rdst *dst, int new_mtu, bool strict) | ||
| 2371 | { | 2373 | { |
| 2372 | struct vxlan_dev *vxlan = netdev_priv(dev); | 2374 | int max_mtu = IP_MAX_MTU; |
| 2373 | struct vxlan_rdst *dst = &vxlan->default_dst; | ||
| 2374 | struct net_device *lowerdev; | ||
| 2375 | int max_mtu; | ||
| 2376 | 2375 | ||
| 2377 | lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex); | 2376 | if (lowerdev) |
| 2378 | if (lowerdev == NULL) | 2377 | max_mtu = lowerdev->mtu; |
| 2379 | return eth_change_mtu(dev, new_mtu); | ||
| 2380 | 2378 | ||
| 2381 | if (dst->remote_ip.sa.sa_family == AF_INET6) | 2379 | if (dst->remote_ip.sa.sa_family == AF_INET6) |
| 2382 | max_mtu = lowerdev->mtu - VXLAN6_HEADROOM; | 2380 | max_mtu -= VXLAN6_HEADROOM; |
| 2383 | else | 2381 | else |
| 2384 | max_mtu = lowerdev->mtu - VXLAN_HEADROOM; | 2382 | max_mtu -= VXLAN_HEADROOM; |
| 2385 | 2383 | ||
| 2386 | if (new_mtu < 68 || new_mtu > max_mtu) | 2384 | if (new_mtu < 68) |
| 2387 | return -EINVAL; | 2385 | return -EINVAL; |
| 2388 | 2386 | ||
| 2387 | if (new_mtu > max_mtu) { | ||
| 2388 | if (strict) | ||
| 2389 | return -EINVAL; | ||
| 2390 | |||
| 2391 | new_mtu = max_mtu; | ||
| 2392 | } | ||
| 2393 | |||
| 2389 | dev->mtu = new_mtu; | 2394 | dev->mtu = new_mtu; |
| 2390 | return 0; | 2395 | return 0; |
| 2391 | } | 2396 | } |
| 2392 | 2397 | ||
| 2398 | static int vxlan_change_mtu(struct net_device *dev, int new_mtu) | ||
| 2399 | { | ||
| 2400 | struct vxlan_dev *vxlan = netdev_priv(dev); | ||
| 2401 | struct vxlan_rdst *dst = &vxlan->default_dst; | ||
| 2402 | struct net_device *lowerdev = __dev_get_by_index(vxlan->net, | ||
| 2403 | dst->remote_ifindex); | ||
| 2404 | return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true); | ||
| 2405 | } | ||
| 2406 | |||
| 2393 | static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, | 2407 | static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, |
| 2394 | struct ip_tunnel_info *info, | 2408 | struct ip_tunnel_info *info, |
| 2395 | __be16 sport, __be16 dport) | 2409 | __be16 sport, __be16 dport) |
| @@ -2765,6 +2779,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
| 2765 | int err; | 2779 | int err; |
| 2766 | bool use_ipv6 = false; | 2780 | bool use_ipv6 = false; |
| 2767 | __be16 default_port = vxlan->cfg.dst_port; | 2781 | __be16 default_port = vxlan->cfg.dst_port; |
| 2782 | struct net_device *lowerdev = NULL; | ||
| 2768 | 2783 | ||
| 2769 | vxlan->net = src_net; | 2784 | vxlan->net = src_net; |
| 2770 | 2785 | ||
| @@ -2785,9 +2800,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
| 2785 | } | 2800 | } |
| 2786 | 2801 | ||
| 2787 | if (conf->remote_ifindex) { | 2802 | if (conf->remote_ifindex) { |
| 2788 | struct net_device *lowerdev | 2803 | lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); |
| 2789 | = __dev_get_by_index(src_net, conf->remote_ifindex); | ||
| 2790 | |||
| 2791 | dst->remote_ifindex = conf->remote_ifindex; | 2804 | dst->remote_ifindex = conf->remote_ifindex; |
| 2792 | 2805 | ||
| 2793 | if (!lowerdev) { | 2806 | if (!lowerdev) { |
| @@ -2811,6 +2824,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, | |||
| 2811 | needed_headroom = lowerdev->hard_header_len; | 2824 | needed_headroom = lowerdev->hard_header_len; |
| 2812 | } | 2825 | } |
| 2813 | 2826 | ||
| 2827 | if (conf->mtu) { | ||
| 2828 | err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); | ||
| 2829 | if (err) | ||
| 2830 | return err; | ||
| 2831 | } | ||
| 2832 | |||
| 2814 | if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) | 2833 | if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) |
| 2815 | needed_headroom += VXLAN6_HEADROOM; | 2834 | needed_headroom += VXLAN6_HEADROOM; |
| 2816 | else | 2835 | else |
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index bc4ea585b42e..5bd18cc1a69c 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig | |||
| @@ -25,6 +25,15 @@ config NVMEM_IMX_OCOTP | |||
| 25 | This driver can also be built as a module. If so, the module | 25 | This driver can also be built as a module. If so, the module |
| 26 | will be called nvmem-imx-ocotp. | 26 | will be called nvmem-imx-ocotp. |
| 27 | 27 | ||
| 28 | config NVMEM_LPC18XX_EEPROM | ||
| 29 | tristate "NXP LPC18XX EEPROM Memory Support" | ||
| 30 | depends on ARCH_LPC18XX || COMPILE_TEST | ||
| 31 | help | ||
| 32 | Say Y here to include support for NXP LPC18xx EEPROM memory found in | ||
| 33 | NXP LPC185x/3x and LPC435x/3x/2x/1x devices. | ||
| 34 | To compile this driver as a module, choose M here: the module | ||
| 35 | will be called nvmem_lpc18xx_eeprom. | ||
| 36 | |||
| 28 | config NVMEM_MXS_OCOTP | 37 | config NVMEM_MXS_OCOTP |
| 29 | tristate "Freescale MXS On-Chip OTP Memory Support" | 38 | tristate "Freescale MXS On-Chip OTP Memory Support" |
| 30 | depends on ARCH_MXS || COMPILE_TEST | 39 | depends on ARCH_MXS || COMPILE_TEST |
| @@ -36,6 +45,17 @@ config NVMEM_MXS_OCOTP | |||
| 36 | This driver can also be built as a module. If so, the module | 45 | This driver can also be built as a module. If so, the module |
| 37 | will be called nvmem-mxs-ocotp. | 46 | will be called nvmem-mxs-ocotp. |
| 38 | 47 | ||
| 48 | config MTK_EFUSE | ||
| 49 | tristate "Mediatek SoCs EFUSE support" | ||
| 50 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
| 51 | select REGMAP_MMIO | ||
| 52 | help | ||
| 53 | This is a driver to access hardware related data like sensor | ||
| 54 | calibration, HDMI impedance etc. | ||
| 55 | |||
| 56 | This driver can also be built as a module. If so, the module | ||
| 57 | will be called efuse-mtk. | ||
| 58 | |||
| 39 | config QCOM_QFPROM | 59 | config QCOM_QFPROM |
| 40 | tristate "QCOM QFPROM Support" | 60 | tristate "QCOM QFPROM Support" |
| 41 | depends on ARCH_QCOM || COMPILE_TEST | 61 | depends on ARCH_QCOM || COMPILE_TEST |
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 95dde3f8f085..45ab1ae08fa9 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile | |||
| @@ -8,8 +8,12 @@ nvmem_core-y := core.o | |||
| 8 | # Devices | 8 | # Devices |
| 9 | obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o | 9 | obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o |
| 10 | nvmem-imx-ocotp-y := imx-ocotp.o | 10 | nvmem-imx-ocotp-y := imx-ocotp.o |
| 11 | obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o | ||
| 12 | nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o | ||
| 11 | obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o | 13 | obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o |
| 12 | nvmem-mxs-ocotp-y := mxs-ocotp.o | 14 | nvmem-mxs-ocotp-y := mxs-ocotp.o |
| 15 | obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o | ||
| 16 | nvmem_mtk-efuse-y := mtk-efuse.o | ||
| 13 | obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o | 17 | obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o |
| 14 | nvmem_qfprom-y := qfprom.o | 18 | nvmem_qfprom-y := qfprom.o |
| 15 | obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o | 19 | obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o |
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 6fd4e5a5ef4a..de14fae6f7f6 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c | |||
| @@ -70,6 +70,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, | |||
| 70 | if (pos >= nvmem->size) | 70 | if (pos >= nvmem->size) |
| 71 | return 0; | 71 | return 0; |
| 72 | 72 | ||
| 73 | if (count < nvmem->word_size) | ||
| 74 | return -EINVAL; | ||
| 75 | |||
| 73 | if (pos + count > nvmem->size) | 76 | if (pos + count > nvmem->size) |
| 74 | count = nvmem->size - pos; | 77 | count = nvmem->size - pos; |
| 75 | 78 | ||
| @@ -95,6 +98,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, | |||
| 95 | if (pos >= nvmem->size) | 98 | if (pos >= nvmem->size) |
| 96 | return 0; | 99 | return 0; |
| 97 | 100 | ||
| 101 | if (count < nvmem->word_size) | ||
| 102 | return -EINVAL; | ||
| 103 | |||
| 98 | if (pos + count > nvmem->size) | 104 | if (pos + count > nvmem->size) |
| 99 | count = nvmem->size - pos; | 105 | count = nvmem->size - pos; |
| 100 | 106 | ||
| @@ -288,9 +294,11 @@ static int nvmem_add_cells(struct nvmem_device *nvmem, | |||
| 288 | 294 | ||
| 289 | return 0; | 295 | return 0; |
| 290 | err: | 296 | err: |
| 291 | while (--i) | 297 | while (i--) |
| 292 | nvmem_cell_drop(cells[i]); | 298 | nvmem_cell_drop(cells[i]); |
| 293 | 299 | ||
| 300 | kfree(cells); | ||
| 301 | |||
| 294 | return rval; | 302 | return rval; |
| 295 | } | 303 | } |
| 296 | 304 | ||
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c new file mode 100644 index 000000000000..878fce789341 --- /dev/null +++ b/drivers/nvmem/lpc18xx_eeprom.c | |||
| @@ -0,0 +1,330 @@ | |||
| 1 | /* | ||
| 2 | * NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published by | ||
| 8 | * the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/clk.h> | ||
| 12 | #include <linux/device.h> | ||
| 13 | #include <linux/delay.h> | ||
| 14 | #include <linux/err.h> | ||
| 15 | #include <linux/io.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/nvmem-provider.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/regmap.h> | ||
| 20 | #include <linux/reset.h> | ||
| 21 | |||
| 22 | /* Registers */ | ||
| 23 | #define LPC18XX_EEPROM_AUTOPROG 0x00c | ||
| 24 | #define LPC18XX_EEPROM_AUTOPROG_WORD 0x1 | ||
| 25 | |||
| 26 | #define LPC18XX_EEPROM_CLKDIV 0x014 | ||
| 27 | |||
| 28 | #define LPC18XX_EEPROM_PWRDWN 0x018 | ||
| 29 | #define LPC18XX_EEPROM_PWRDWN_NO 0x0 | ||
| 30 | #define LPC18XX_EEPROM_PWRDWN_YES 0x1 | ||
| 31 | |||
| 32 | #define LPC18XX_EEPROM_INTSTAT 0xfe0 | ||
| 33 | #define LPC18XX_EEPROM_INTSTAT_END_OF_PROG BIT(2) | ||
| 34 | |||
| 35 | #define LPC18XX_EEPROM_INTSTATCLR 0xfe8 | ||
| 36 | #define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST BIT(2) | ||
| 37 | |||
| 38 | /* Fixed page size (bytes) */ | ||
| 39 | #define LPC18XX_EEPROM_PAGE_SIZE 0x80 | ||
| 40 | |||
| 41 | /* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */ | ||
| 42 | #define LPC18XX_EEPROM_CLOCK_HZ 1500000 | ||
| 43 | |||
| 44 | /* EEPROM requires 3 ms of erase/program time between each writing */ | ||
| 45 | #define LPC18XX_EEPROM_PROGRAM_TIME 3 | ||
| 46 | |||
| 47 | struct lpc18xx_eeprom_dev { | ||
| 48 | struct clk *clk; | ||
| 49 | void __iomem *reg_base; | ||
| 50 | void __iomem *mem_base; | ||
| 51 | struct nvmem_device *nvmem; | ||
| 52 | unsigned reg_bytes; | ||
| 53 | unsigned val_bytes; | ||
| 54 | }; | ||
| 55 | |||
| 56 | static struct regmap_config lpc18xx_regmap_config = { | ||
| 57 | .reg_bits = 32, | ||
| 58 | .reg_stride = 4, | ||
| 59 | .val_bits = 32, | ||
| 60 | }; | ||
| 61 | |||
| 62 | static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom, | ||
| 63 | u32 reg, u32 val) | ||
| 64 | { | ||
| 65 | writel(val, eeprom->reg_base + reg); | ||
| 66 | } | ||
| 67 | |||
| 68 | static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom, | ||
| 69 | u32 reg) | ||
| 70 | { | ||
| 71 | return readl(eeprom->reg_base + reg); | ||
| 72 | } | ||
| 73 | |||
| 74 | static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom) | ||
| 75 | { | ||
| 76 | unsigned long end; | ||
| 77 | u32 val; | ||
| 78 | |||
| 79 | /* Wait until EEPROM program operation has finished */ | ||
| 80 | end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10); | ||
| 81 | |||
| 82 | while (time_is_after_jiffies(end)) { | ||
| 83 | val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT); | ||
| 84 | |||
| 85 | if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) { | ||
| 86 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR, | ||
| 87 | LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST); | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | |||
| 91 | usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC, | ||
| 92 | (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC); | ||
| 93 | } | ||
| 94 | |||
| 95 | return -ETIMEDOUT; | ||
| 96 | } | ||
| 97 | |||
| 98 | static int lpc18xx_eeprom_gather_write(void *context, const void *reg, | ||
| 99 | size_t reg_size, const void *val, | ||
| 100 | size_t val_size) | ||
| 101 | { | ||
| 102 | struct lpc18xx_eeprom_dev *eeprom = context; | ||
| 103 | unsigned int offset = *(u32 *)reg; | ||
| 104 | int ret; | ||
| 105 | |||
| 106 | if (offset % lpc18xx_regmap_config.reg_stride) | ||
| 107 | return -EINVAL; | ||
| 108 | |||
| 109 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
| 110 | LPC18XX_EEPROM_PWRDWN_NO); | ||
| 111 | |||
| 112 | /* Wait 100 us while the EEPROM wakes up */ | ||
| 113 | usleep_range(100, 200); | ||
| 114 | |||
| 115 | while (val_size) { | ||
| 116 | writel(*(u32 *)val, eeprom->mem_base + offset); | ||
| 117 | ret = lpc18xx_eeprom_busywait_until_prog(eeprom); | ||
| 118 | if (ret < 0) | ||
| 119 | return ret; | ||
| 120 | |||
| 121 | val_size -= eeprom->val_bytes; | ||
| 122 | val += eeprom->val_bytes; | ||
| 123 | offset += eeprom->val_bytes; | ||
| 124 | } | ||
| 125 | |||
| 126 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
| 127 | LPC18XX_EEPROM_PWRDWN_YES); | ||
| 128 | |||
| 129 | return 0; | ||
| 130 | } | ||
| 131 | |||
| 132 | static int lpc18xx_eeprom_write(void *context, const void *data, size_t count) | ||
| 133 | { | ||
| 134 | struct lpc18xx_eeprom_dev *eeprom = context; | ||
| 135 | unsigned int offset = eeprom->reg_bytes; | ||
| 136 | |||
| 137 | if (count <= offset) | ||
| 138 | return -EINVAL; | ||
| 139 | |||
| 140 | return lpc18xx_eeprom_gather_write(context, data, eeprom->reg_bytes, | ||
| 141 | data + offset, count - offset); | ||
| 142 | } | ||
| 143 | |||
| 144 | static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size, | ||
| 145 | void *val, size_t val_size) | ||
| 146 | { | ||
| 147 | struct lpc18xx_eeprom_dev *eeprom = context; | ||
| 148 | unsigned int offset = *(u32 *)reg; | ||
| 149 | |||
| 150 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
| 151 | LPC18XX_EEPROM_PWRDWN_NO); | ||
| 152 | |||
| 153 | /* Wait 100 us while the EEPROM wakes up */ | ||
| 154 | usleep_range(100, 200); | ||
| 155 | |||
| 156 | while (val_size) { | ||
| 157 | *(u32 *)val = readl(eeprom->mem_base + offset); | ||
| 158 | val_size -= eeprom->val_bytes; | ||
| 159 | val += eeprom->val_bytes; | ||
| 160 | offset += eeprom->val_bytes; | ||
| 161 | } | ||
| 162 | |||
| 163 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
| 164 | LPC18XX_EEPROM_PWRDWN_YES); | ||
| 165 | |||
| 166 | return 0; | ||
| 167 | } | ||
| 168 | |||
| 169 | static struct regmap_bus lpc18xx_eeprom_bus = { | ||
| 170 | .write = lpc18xx_eeprom_write, | ||
| 171 | .gather_write = lpc18xx_eeprom_gather_write, | ||
| 172 | .read = lpc18xx_eeprom_read, | ||
| 173 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 174 | .val_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 175 | }; | ||
| 176 | |||
| 177 | static bool lpc18xx_eeprom_writeable_reg(struct device *dev, unsigned int reg) | ||
| 178 | { | ||
| 179 | /* | ||
| 180 | * The last page contains the EEPROM initialization data and is not | ||
| 181 | * writable. | ||
| 182 | */ | ||
| 183 | return reg <= lpc18xx_regmap_config.max_register - | ||
| 184 | LPC18XX_EEPROM_PAGE_SIZE; | ||
| 185 | } | ||
| 186 | |||
| 187 | static bool lpc18xx_eeprom_readable_reg(struct device *dev, unsigned int reg) | ||
| 188 | { | ||
| 189 | return reg <= lpc18xx_regmap_config.max_register; | ||
| 190 | } | ||
| 191 | |||
| 192 | static struct nvmem_config lpc18xx_nvmem_config = { | ||
| 193 | .name = "lpc18xx-eeprom", | ||
| 194 | .owner = THIS_MODULE, | ||
| 195 | }; | ||
| 196 | |||
| 197 | static int lpc18xx_eeprom_probe(struct platform_device *pdev) | ||
| 198 | { | ||
| 199 | struct lpc18xx_eeprom_dev *eeprom; | ||
| 200 | struct device *dev = &pdev->dev; | ||
| 201 | struct reset_control *rst; | ||
| 202 | unsigned long clk_rate; | ||
| 203 | struct regmap *regmap; | ||
| 204 | struct resource *res; | ||
| 205 | int ret; | ||
| 206 | |||
| 207 | eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL); | ||
| 208 | if (!eeprom) | ||
| 209 | return -ENOMEM; | ||
| 210 | |||
| 211 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); | ||
| 212 | eeprom->reg_base = devm_ioremap_resource(dev, res); | ||
| 213 | if (IS_ERR(eeprom->reg_base)) | ||
| 214 | return PTR_ERR(eeprom->reg_base); | ||
| 215 | |||
| 216 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); | ||
| 217 | eeprom->mem_base = devm_ioremap_resource(dev, res); | ||
| 218 | if (IS_ERR(eeprom->mem_base)) | ||
| 219 | return PTR_ERR(eeprom->mem_base); | ||
| 220 | |||
| 221 | eeprom->clk = devm_clk_get(&pdev->dev, "eeprom"); | ||
| 222 | if (IS_ERR(eeprom->clk)) { | ||
| 223 | dev_err(&pdev->dev, "failed to get eeprom clock\n"); | ||
| 224 | return PTR_ERR(eeprom->clk); | ||
| 225 | } | ||
| 226 | |||
| 227 | ret = clk_prepare_enable(eeprom->clk); | ||
| 228 | if (ret < 0) { | ||
| 229 | dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret); | ||
| 230 | return ret; | ||
| 231 | } | ||
| 232 | |||
| 233 | rst = devm_reset_control_get(dev, NULL); | ||
| 234 | if (IS_ERR(rst)) { | ||
| 235 | dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst)); | ||
| 236 | ret = PTR_ERR(rst); | ||
| 237 | goto err_clk; | ||
| 238 | } | ||
| 239 | |||
| 240 | ret = reset_control_assert(rst); | ||
| 241 | if (ret < 0) { | ||
| 242 | dev_err(dev, "failed to assert reset: %d\n", ret); | ||
| 243 | goto err_clk; | ||
| 244 | } | ||
| 245 | |||
| 246 | eeprom->val_bytes = lpc18xx_regmap_config.val_bits / BITS_PER_BYTE; | ||
| 247 | eeprom->reg_bytes = lpc18xx_regmap_config.reg_bits / BITS_PER_BYTE; | ||
| 248 | |||
| 249 | /* | ||
| 250 | * Clock rate is generated by dividing the system bus clock by the | ||
| 251 | * division factor, contained in the divider register (minus 1 encoded). | ||
| 252 | */ | ||
| 253 | clk_rate = clk_get_rate(eeprom->clk); | ||
| 254 | clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1; | ||
| 255 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate); | ||
| 256 | |||
| 257 | /* | ||
| 258 | * Writing a single word to the page will start the erase/program cycle | ||
| 259 | * automatically | ||
| 260 | */ | ||
| 261 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG, | ||
| 262 | LPC18XX_EEPROM_AUTOPROG_WORD); | ||
| 263 | |||
| 264 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | ||
| 265 | LPC18XX_EEPROM_PWRDWN_YES); | ||
| 266 | |||
| 267 | lpc18xx_regmap_config.max_register = resource_size(res) - 1; | ||
| 268 | lpc18xx_regmap_config.writeable_reg = lpc18xx_eeprom_writeable_reg; | ||
| 269 | lpc18xx_regmap_config.readable_reg = lpc18xx_eeprom_readable_reg; | ||
| 270 | |||
| 271 | regmap = devm_regmap_init(dev, &lpc18xx_eeprom_bus, eeprom, | ||
| 272 | &lpc18xx_regmap_config); | ||
| 273 | if (IS_ERR(regmap)) { | ||
| 274 | dev_err(dev, "regmap init failed: %ld\n", PTR_ERR(regmap)); | ||
| 275 | ret = PTR_ERR(regmap); | ||
| 276 | goto err_clk; | ||
| 277 | } | ||
| 278 | |||
| 279 | lpc18xx_nvmem_config.dev = dev; | ||
| 280 | |||
| 281 | eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config); | ||
| 282 | if (IS_ERR(eeprom->nvmem)) { | ||
| 283 | ret = PTR_ERR(eeprom->nvmem); | ||
| 284 | goto err_clk; | ||
| 285 | } | ||
| 286 | |||
| 287 | platform_set_drvdata(pdev, eeprom); | ||
| 288 | |||
| 289 | return 0; | ||
| 290 | |||
| 291 | err_clk: | ||
| 292 | clk_disable_unprepare(eeprom->clk); | ||
| 293 | |||
| 294 | return ret; | ||
| 295 | } | ||
| 296 | |||
| 297 | static int lpc18xx_eeprom_remove(struct platform_device *pdev) | ||
| 298 | { | ||
| 299 | struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev); | ||
| 300 | int ret; | ||
| 301 | |||
| 302 | ret = nvmem_unregister(eeprom->nvmem); | ||
| 303 | if (ret < 0) | ||
| 304 | return ret; | ||
| 305 | |||
| 306 | clk_disable_unprepare(eeprom->clk); | ||
| 307 | |||
| 308 | return 0; | ||
| 309 | } | ||
| 310 | |||
| 311 | static const struct of_device_id lpc18xx_eeprom_of_match[] = { | ||
| 312 | { .compatible = "nxp,lpc1857-eeprom" }, | ||
| 313 | { }, | ||
| 314 | }; | ||
| 315 | MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match); | ||
| 316 | |||
| 317 | static struct platform_driver lpc18xx_eeprom_driver = { | ||
| 318 | .probe = lpc18xx_eeprom_probe, | ||
| 319 | .remove = lpc18xx_eeprom_remove, | ||
| 320 | .driver = { | ||
| 321 | .name = "lpc18xx-eeprom", | ||
| 322 | .of_match_table = lpc18xx_eeprom_of_match, | ||
| 323 | }, | ||
| 324 | }; | ||
| 325 | |||
| 326 | module_platform_driver(lpc18xx_eeprom_driver); | ||
| 327 | |||
| 328 | MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>"); | ||
| 329 | MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver"); | ||
| 330 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c new file mode 100644 index 000000000000..7b35f5b630cd --- /dev/null +++ b/drivers/nvmem/mtk-efuse.c | |||
| @@ -0,0 +1,89 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2015 MediaTek Inc. | ||
| 3 | * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License version 2 as | ||
| 7 | * published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/device.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/nvmem-provider.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/regmap.h> | ||
| 20 | |||
| 21 | static struct regmap_config mtk_regmap_config = { | ||
| 22 | .reg_bits = 32, | ||
| 23 | .val_bits = 32, | ||
| 24 | .reg_stride = 4, | ||
| 25 | }; | ||
| 26 | |||
| 27 | static int mtk_efuse_probe(struct platform_device *pdev) | ||
| 28 | { | ||
| 29 | struct device *dev = &pdev->dev; | ||
| 30 | struct resource *res; | ||
| 31 | struct nvmem_device *nvmem; | ||
| 32 | struct nvmem_config *econfig; | ||
| 33 | struct regmap *regmap; | ||
| 34 | void __iomem *base; | ||
| 35 | |||
| 36 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 37 | base = devm_ioremap_resource(dev, res); | ||
| 38 | if (IS_ERR(base)) | ||
| 39 | return PTR_ERR(base); | ||
| 40 | |||
| 41 | econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL); | ||
| 42 | if (!econfig) | ||
| 43 | return -ENOMEM; | ||
| 44 | |||
| 45 | mtk_regmap_config.max_register = resource_size(res) - 1; | ||
| 46 | |||
| 47 | regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config); | ||
| 48 | if (IS_ERR(regmap)) { | ||
| 49 | dev_err(dev, "regmap init failed\n"); | ||
| 50 | return PTR_ERR(regmap); | ||
| 51 | } | ||
| 52 | |||
| 53 | econfig->dev = dev; | ||
| 54 | econfig->owner = THIS_MODULE; | ||
| 55 | nvmem = nvmem_register(econfig); | ||
| 56 | if (IS_ERR(nvmem)) | ||
| 57 | return PTR_ERR(nvmem); | ||
| 58 | |||
| 59 | platform_set_drvdata(pdev, nvmem); | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | |||
| 64 | static int mtk_efuse_remove(struct platform_device *pdev) | ||
| 65 | { | ||
| 66 | struct nvmem_device *nvmem = platform_get_drvdata(pdev); | ||
| 67 | |||
| 68 | return nvmem_unregister(nvmem); | ||
| 69 | } | ||
| 70 | |||
| 71 | static const struct of_device_id mtk_efuse_of_match[] = { | ||
| 72 | { .compatible = "mediatek,mt8173-efuse",}, | ||
| 73 | { .compatible = "mediatek,efuse",}, | ||
| 74 | {/* sentinel */}, | ||
| 75 | }; | ||
| 76 | MODULE_DEVICE_TABLE(of, mtk_efuse_of_match); | ||
| 77 | |||
| 78 | static struct platform_driver mtk_efuse_driver = { | ||
| 79 | .probe = mtk_efuse_probe, | ||
| 80 | .remove = mtk_efuse_remove, | ||
| 81 | .driver = { | ||
| 82 | .name = "mediatek,efuse", | ||
| 83 | .of_match_table = mtk_efuse_of_match, | ||
| 84 | }, | ||
| 85 | }; | ||
| 86 | module_platform_driver(mtk_efuse_driver); | ||
| 87 | MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>"); | ||
| 88 | MODULE_DESCRIPTION("Mediatek EFUSE driver"); | ||
| 89 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index afb67e7eeee4..3829e5fbf8c3 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c | |||
| @@ -21,6 +21,7 @@ static struct regmap_config qfprom_regmap_config = { | |||
| 21 | .reg_bits = 32, | 21 | .reg_bits = 32, |
| 22 | .val_bits = 8, | 22 | .val_bits = 8, |
| 23 | .reg_stride = 1, | 23 | .reg_stride = 1, |
| 24 | .val_format_endian = REGMAP_ENDIAN_LITTLE, | ||
| 24 | }; | 25 | }; |
| 25 | 26 | ||
| 26 | static struct nvmem_config econfig = { | 27 | static struct nvmem_config econfig = { |
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c index f55213424222..a009795111e9 100644 --- a/drivers/nvmem/rockchip-efuse.c +++ b/drivers/nvmem/rockchip-efuse.c | |||
| @@ -14,16 +14,16 @@ | |||
| 14 | * more details. | 14 | * more details. |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <linux/platform_device.h> | 17 | #include <linux/clk.h> |
| 18 | #include <linux/nvmem-provider.h> | 18 | #include <linux/delay.h> |
| 19 | #include <linux/slab.h> | ||
| 20 | #include <linux/regmap.h> | ||
| 21 | #include <linux/device.h> | 19 | #include <linux/device.h> |
| 22 | #include <linux/io.h> | 20 | #include <linux/io.h> |
| 23 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 24 | #include <linux/delay.h> | 22 | #include <linux/nvmem-provider.h> |
| 23 | #include <linux/slab.h> | ||
| 25 | #include <linux/of.h> | 24 | #include <linux/of.h> |
| 26 | #include <linux/clk.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/regmap.h> | ||
| 27 | 27 | ||
| 28 | #define EFUSE_A_SHIFT 6 | 28 | #define EFUSE_A_SHIFT 6 |
| 29 | #define EFUSE_A_MASK 0x3ff | 29 | #define EFUSE_A_MASK 0x3ff |
| @@ -35,10 +35,10 @@ | |||
| 35 | #define REG_EFUSE_CTRL 0x0000 | 35 | #define REG_EFUSE_CTRL 0x0000 |
| 36 | #define REG_EFUSE_DOUT 0x0004 | 36 | #define REG_EFUSE_DOUT 0x0004 |
| 37 | 37 | ||
| 38 | struct rockchip_efuse_context { | 38 | struct rockchip_efuse_chip { |
| 39 | struct device *dev; | 39 | struct device *dev; |
| 40 | void __iomem *base; | 40 | void __iomem *base; |
| 41 | struct clk *efuse_clk; | 41 | struct clk *clk; |
| 42 | }; | 42 | }; |
| 43 | 43 | ||
| 44 | static int rockchip_efuse_write(void *context, const void *data, size_t count) | 44 | static int rockchip_efuse_write(void *context, const void *data, size_t count) |
| @@ -52,34 +52,32 @@ static int rockchip_efuse_read(void *context, | |||
| 52 | void *val, size_t val_size) | 52 | void *val, size_t val_size) |
| 53 | { | 53 | { |
| 54 | unsigned int offset = *(u32 *)reg; | 54 | unsigned int offset = *(u32 *)reg; |
| 55 | struct rockchip_efuse_context *_context = context; | 55 | struct rockchip_efuse_chip *efuse = context; |
| 56 | void __iomem *base = _context->base; | ||
| 57 | struct clk *clk = _context->efuse_clk; | ||
| 58 | u8 *buf = val; | 56 | u8 *buf = val; |
| 59 | int ret; | 57 | int ret; |
| 60 | 58 | ||
| 61 | ret = clk_prepare_enable(clk); | 59 | ret = clk_prepare_enable(efuse->clk); |
| 62 | if (ret < 0) { | 60 | if (ret < 0) { |
| 63 | dev_err(_context->dev, "failed to prepare/enable efuse clk\n"); | 61 | dev_err(efuse->dev, "failed to prepare/enable efuse clk\n"); |
| 64 | return ret; | 62 | return ret; |
| 65 | } | 63 | } |
| 66 | 64 | ||
| 67 | writel(EFUSE_LOAD | EFUSE_PGENB, base + REG_EFUSE_CTRL); | 65 | writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL); |
| 68 | udelay(1); | 66 | udelay(1); |
| 69 | while (val_size) { | 67 | while (val_size) { |
| 70 | writel(readl(base + REG_EFUSE_CTRL) & | 68 | writel(readl(efuse->base + REG_EFUSE_CTRL) & |
| 71 | (~(EFUSE_A_MASK << EFUSE_A_SHIFT)), | 69 | (~(EFUSE_A_MASK << EFUSE_A_SHIFT)), |
| 72 | base + REG_EFUSE_CTRL); | 70 | efuse->base + REG_EFUSE_CTRL); |
| 73 | writel(readl(base + REG_EFUSE_CTRL) | | 71 | writel(readl(efuse->base + REG_EFUSE_CTRL) | |
| 74 | ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT), | 72 | ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT), |
| 75 | base + REG_EFUSE_CTRL); | 73 | efuse->base + REG_EFUSE_CTRL); |
| 76 | udelay(1); | 74 | udelay(1); |
| 77 | writel(readl(base + REG_EFUSE_CTRL) | | 75 | writel(readl(efuse->base + REG_EFUSE_CTRL) | |
| 78 | EFUSE_STROBE, base + REG_EFUSE_CTRL); | 76 | EFUSE_STROBE, efuse->base + REG_EFUSE_CTRL); |
| 79 | udelay(1); | 77 | udelay(1); |
| 80 | *buf++ = readb(base + REG_EFUSE_DOUT); | 78 | *buf++ = readb(efuse->base + REG_EFUSE_DOUT); |
| 81 | writel(readl(base + REG_EFUSE_CTRL) & | 79 | writel(readl(efuse->base + REG_EFUSE_CTRL) & |
| 82 | (~EFUSE_STROBE), base + REG_EFUSE_CTRL); | 80 | (~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL); |
| 83 | udelay(1); | 81 | udelay(1); |
| 84 | 82 | ||
| 85 | val_size -= 1; | 83 | val_size -= 1; |
| @@ -87,9 +85,9 @@ static int rockchip_efuse_read(void *context, | |||
| 87 | } | 85 | } |
| 88 | 86 | ||
| 89 | /* Switch to standby mode */ | 87 | /* Switch to standby mode */ |
| 90 | writel(EFUSE_PGENB | EFUSE_CSB, base + REG_EFUSE_CTRL); | 88 | writel(EFUSE_PGENB | EFUSE_CSB, efuse->base + REG_EFUSE_CTRL); |
| 91 | 89 | ||
| 92 | clk_disable_unprepare(clk); | 90 | clk_disable_unprepare(efuse->clk); |
| 93 | 91 | ||
| 94 | return 0; | 92 | return 0; |
| 95 | } | 93 | } |
| @@ -114,48 +112,44 @@ static struct nvmem_config econfig = { | |||
| 114 | }; | 112 | }; |
| 115 | 113 | ||
| 116 | static const struct of_device_id rockchip_efuse_match[] = { | 114 | static const struct of_device_id rockchip_efuse_match[] = { |
| 117 | { .compatible = "rockchip,rockchip-efuse",}, | 115 | { .compatible = "rockchip,rockchip-efuse", }, |
| 118 | { /* sentinel */}, | 116 | { /* sentinel */}, |
| 119 | }; | 117 | }; |
| 120 | MODULE_DEVICE_TABLE(of, rockchip_efuse_match); | 118 | MODULE_DEVICE_TABLE(of, rockchip_efuse_match); |
| 121 | 119 | ||
| 122 | static int rockchip_efuse_probe(struct platform_device *pdev) | 120 | static int rockchip_efuse_probe(struct platform_device *pdev) |
| 123 | { | 121 | { |
| 124 | struct device *dev = &pdev->dev; | ||
| 125 | struct resource *res; | 122 | struct resource *res; |
| 126 | struct nvmem_device *nvmem; | 123 | struct nvmem_device *nvmem; |
| 127 | struct regmap *regmap; | 124 | struct regmap *regmap; |
| 128 | void __iomem *base; | 125 | struct rockchip_efuse_chip *efuse; |
| 129 | struct clk *clk; | ||
| 130 | struct rockchip_efuse_context *context; | ||
| 131 | 126 | ||
| 132 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 127 | efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip), |
| 133 | base = devm_ioremap_resource(dev, res); | 128 | GFP_KERNEL); |
| 134 | if (IS_ERR(base)) | 129 | if (!efuse) |
| 135 | return PTR_ERR(base); | 130 | return -ENOMEM; |
| 136 | 131 | ||
| 137 | context = devm_kzalloc(dev, sizeof(struct rockchip_efuse_context), | 132 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 138 | GFP_KERNEL); | 133 | efuse->base = devm_ioremap_resource(&pdev->dev, res); |
| 139 | if (IS_ERR(context)) | 134 | if (IS_ERR(efuse->base)) |
| 140 | return PTR_ERR(context); | 135 | return PTR_ERR(efuse->base); |
| 141 | 136 | ||
| 142 | clk = devm_clk_get(dev, "pclk_efuse"); | 137 | efuse->clk = devm_clk_get(&pdev->dev, "pclk_efuse"); |
| 143 | if (IS_ERR(clk)) | 138 | if (IS_ERR(efuse->clk)) |
| 144 | return PTR_ERR(clk); | 139 | return PTR_ERR(efuse->clk); |
| 145 | 140 | ||
| 146 | context->dev = dev; | 141 | efuse->dev = &pdev->dev; |
| 147 | context->base = base; | ||
| 148 | context->efuse_clk = clk; | ||
| 149 | 142 | ||
| 150 | rockchip_efuse_regmap_config.max_register = resource_size(res) - 1; | 143 | rockchip_efuse_regmap_config.max_register = resource_size(res) - 1; |
| 151 | 144 | ||
| 152 | regmap = devm_regmap_init(dev, &rockchip_efuse_bus, | 145 | regmap = devm_regmap_init(efuse->dev, &rockchip_efuse_bus, |
| 153 | context, &rockchip_efuse_regmap_config); | 146 | efuse, &rockchip_efuse_regmap_config); |
| 154 | if (IS_ERR(regmap)) { | 147 | if (IS_ERR(regmap)) { |
| 155 | dev_err(dev, "regmap init failed\n"); | 148 | dev_err(efuse->dev, "regmap init failed\n"); |
| 156 | return PTR_ERR(regmap); | 149 | return PTR_ERR(regmap); |
| 157 | } | 150 | } |
| 158 | econfig.dev = dev; | 151 | |
| 152 | econfig.dev = efuse->dev; | ||
| 159 | nvmem = nvmem_register(&econfig); | 153 | nvmem = nvmem_register(&econfig); |
| 160 | if (IS_ERR(nvmem)) | 154 | if (IS_ERR(nvmem)) |
| 161 | return PTR_ERR(nvmem); | 155 | return PTR_ERR(nvmem); |
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index cfa3b85064dd..bc88b4084055 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c | |||
| @@ -13,10 +13,8 @@ | |||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15 | * GNU General Public License for more details. | 15 | * GNU General Public License for more details. |
| 16 | * | ||
| 17 | */ | 16 | */ |
| 18 | 17 | ||
| 19 | |||
| 20 | #include <linux/device.h> | 18 | #include <linux/device.h> |
| 21 | #include <linux/io.h> | 19 | #include <linux/io.h> |
| 22 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| @@ -27,7 +25,6 @@ | |||
| 27 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 28 | #include <linux/random.h> | 26 | #include <linux/random.h> |
| 29 | 27 | ||
| 30 | |||
| 31 | static struct nvmem_config econfig = { | 28 | static struct nvmem_config econfig = { |
| 32 | .name = "sunxi-sid", | 29 | .name = "sunxi-sid", |
| 33 | .read_only = true, | 30 | .read_only = true, |
| @@ -55,8 +52,8 @@ static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid, | |||
| 55 | } | 52 | } |
| 56 | 53 | ||
| 57 | static int sunxi_sid_read(void *context, | 54 | static int sunxi_sid_read(void *context, |
| 58 | const void *reg, size_t reg_size, | 55 | const void *reg, size_t reg_size, |
| 59 | void *val, size_t val_size) | 56 | void *val, size_t val_size) |
| 60 | { | 57 | { |
| 61 | struct sunxi_sid *sid = context; | 58 | struct sunxi_sid *sid = context; |
| 62 | unsigned int offset = *(u32 *)reg; | 59 | unsigned int offset = *(u32 *)reg; |
| @@ -130,7 +127,7 @@ static int sunxi_sid_probe(struct platform_device *pdev) | |||
| 130 | if (IS_ERR(nvmem)) | 127 | if (IS_ERR(nvmem)) |
| 131 | return PTR_ERR(nvmem); | 128 | return PTR_ERR(nvmem); |
| 132 | 129 | ||
| 133 | randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); | 130 | randomness = kzalloc(sizeof(u8) * (size), GFP_KERNEL); |
| 134 | if (!randomness) { | 131 | if (!randomness) { |
| 135 | ret = -EINVAL; | 132 | ret = -EINVAL; |
| 136 | goto err_unreg_nvmem; | 133 | goto err_unreg_nvmem; |
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 5648317d355f..39c4be41ef83 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
| @@ -154,6 +154,7 @@ static const struct of_device_id whitelist_phys[] = { | |||
| 154 | { .compatible = "marvell,88E1111", }, | 154 | { .compatible = "marvell,88E1111", }, |
| 155 | { .compatible = "marvell,88e1116", }, | 155 | { .compatible = "marvell,88e1116", }, |
| 156 | { .compatible = "marvell,88e1118", }, | 156 | { .compatible = "marvell,88e1118", }, |
| 157 | { .compatible = "marvell,88e1145", }, | ||
| 157 | { .compatible = "marvell,88e1149r", }, | 158 | { .compatible = "marvell,88e1149r", }, |
| 158 | { .compatible = "marvell,88e1310", }, | 159 | { .compatible = "marvell,88e1310", }, |
| 159 | { .compatible = "marvell,88E1510", }, | 160 | { .compatible = "marvell,88E1510", }, |
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index 5816bceddb65..a576aeeb22da 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c | |||
| @@ -64,7 +64,6 @@ | |||
| 64 | #define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) | 64 | #define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) |
| 65 | 65 | ||
| 66 | #define MAX_NUM_OB_WINDOWS 2 | 66 | #define MAX_NUM_OB_WINDOWS 2 |
| 67 | #define MAX_NUM_PAXC_PF 4 | ||
| 68 | 67 | ||
| 69 | #define IPROC_PCIE_REG_INVALID 0xffff | 68 | #define IPROC_PCIE_REG_INVALID 0xffff |
| 70 | 69 | ||
| @@ -170,20 +169,6 @@ static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie, | |||
| 170 | writel(val, pcie->base + offset + (window * 8)); | 169 | writel(val, pcie->base + offset + (window * 8)); |
| 171 | } | 170 | } |
| 172 | 171 | ||
| 173 | static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie, | ||
| 174 | unsigned int slot, | ||
| 175 | unsigned int fn) | ||
| 176 | { | ||
| 177 | if (slot > 0) | ||
| 178 | return false; | ||
| 179 | |||
| 180 | /* PAXC can only support limited number of functions */ | ||
| 181 | if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF) | ||
| 182 | return false; | ||
| 183 | |||
| 184 | return true; | ||
| 185 | } | ||
| 186 | |||
| 187 | /** | 172 | /** |
| 188 | * Note access to the configuration registers are protected at the higher layer | 173 | * Note access to the configuration registers are protected at the higher layer |
| 189 | * by 'pci_lock' in drivers/pci/access.c | 174 | * by 'pci_lock' in drivers/pci/access.c |
| @@ -199,11 +184,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, | |||
| 199 | u32 val; | 184 | u32 val; |
| 200 | u16 offset; | 185 | u16 offset; |
| 201 | 186 | ||
| 202 | if (!iproc_pcie_device_is_valid(pcie, slot, fn)) | ||
| 203 | return NULL; | ||
| 204 | |||
| 205 | /* root complex access */ | 187 | /* root complex access */ |
| 206 | if (busno == 0) { | 188 | if (busno == 0) { |
| 189 | if (slot > 0 || fn > 0) | ||
| 190 | return NULL; | ||
| 191 | |||
| 207 | iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, | 192 | iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, |
| 208 | where & CFG_IND_ADDR_MASK); | 193 | where & CFG_IND_ADDR_MASK); |
| 209 | offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); | 194 | offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); |
| @@ -213,6 +198,14 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, | |||
| 213 | return (pcie->base + offset); | 198 | return (pcie->base + offset); |
| 214 | } | 199 | } |
| 215 | 200 | ||
| 201 | /* | ||
| 202 | * PAXC is connected to an internally emulated EP within the SoC. It | ||
| 203 | * allows only one device. | ||
| 204 | */ | ||
| 205 | if (pcie->type == IPROC_PCIE_PAXC) | ||
| 206 | if (slot > 0) | ||
| 207 | return NULL; | ||
| 208 | |||
| 216 | /* EP device access */ | 209 | /* EP device access */ |
| 217 | val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | | 210 | val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | |
| 218 | (slot << CFG_ADDR_DEV_NUM_SHIFT) | | 211 | (slot << CFG_ADDR_DEV_NUM_SHIFT) | |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 0bf82a20a0fb..48d21e0edd56 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
| @@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) | |||
| 262 | rpc->rpd = dev; | 262 | rpc->rpd = dev; |
| 263 | INIT_WORK(&rpc->dpc_handler, aer_isr); | 263 | INIT_WORK(&rpc->dpc_handler, aer_isr); |
| 264 | mutex_init(&rpc->rpc_mutex); | 264 | mutex_init(&rpc->rpc_mutex); |
| 265 | init_waitqueue_head(&rpc->wait_release); | ||
| 266 | 265 | ||
| 267 | /* Use PCIe bus function to store rpc into PCIe device */ | 266 | /* Use PCIe bus function to store rpc into PCIe device */ |
| 268 | set_service_data(dev, rpc); | 267 | set_service_data(dev, rpc); |
| @@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev) | |||
| 285 | if (rpc->isr) | 284 | if (rpc->isr) |
| 286 | free_irq(dev->irq, dev); | 285 | free_irq(dev->irq, dev); |
| 287 | 286 | ||
| 288 | wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); | 287 | flush_work(&rpc->dpc_handler); |
| 289 | |||
| 290 | aer_disable_rootport(rpc); | 288 | aer_disable_rootport(rpc); |
| 291 | kfree(rpc); | 289 | kfree(rpc); |
| 292 | set_service_data(dev, NULL); | 290 | set_service_data(dev, NULL); |
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index 84420b7c9456..945c939a86c5 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h | |||
| @@ -72,7 +72,6 @@ struct aer_rpc { | |||
| 72 | * recovery on the same | 72 | * recovery on the same |
| 73 | * root port hierarchy | 73 | * root port hierarchy |
| 74 | */ | 74 | */ |
| 75 | wait_queue_head_t wait_release; | ||
| 76 | }; | 75 | }; |
| 77 | 76 | ||
| 78 | struct aer_broadcast_data { | 77 | struct aer_broadcast_data { |
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 712392504ed9..521e39c1b66d 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
| @@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work) | |||
| 811 | while (get_e_source(rpc, &e_src)) | 811 | while (get_e_source(rpc, &e_src)) |
| 812 | aer_isr_one_error(p_device, &e_src); | 812 | aer_isr_one_error(p_device, &e_src); |
| 813 | mutex_unlock(&rpc->rpc_mutex); | 813 | mutex_unlock(&rpc->rpc_mutex); |
| 814 | |||
| 815 | wake_up(&rpc->wait_release); | ||
| 816 | } | 814 | } |
| 817 | 815 | ||
| 818 | /** | 816 | /** |
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index e7e117d5dbbe..0124d17bd9fe 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig | |||
| @@ -224,6 +224,7 @@ config PHY_MT65XX_USB3 | |||
| 224 | 224 | ||
| 225 | config PHY_HI6220_USB | 225 | config PHY_HI6220_USB |
| 226 | tristate "hi6220 USB PHY support" | 226 | tristate "hi6220 USB PHY support" |
| 227 | depends on (ARCH_HISI && ARM64) || COMPILE_TEST | ||
| 227 | select GENERIC_PHY | 228 | select GENERIC_PHY |
| 228 | select MFD_SYSCON | 229 | select MFD_SYSCON |
| 229 | help | 230 | help |
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 8c7f27db6ad3..e7e574dc667a 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
| @@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit); | |||
| 275 | 275 | ||
| 276 | int phy_power_on(struct phy *phy) | 276 | int phy_power_on(struct phy *phy) |
| 277 | { | 277 | { |
| 278 | int ret; | 278 | int ret = 0; |
| 279 | 279 | ||
| 280 | if (!phy) | 280 | if (!phy) |
| 281 | return 0; | 281 | goto out; |
| 282 | 282 | ||
| 283 | if (phy->pwr) { | 283 | if (phy->pwr) { |
| 284 | ret = regulator_enable(phy->pwr); | 284 | ret = regulator_enable(phy->pwr); |
| 285 | if (ret) | 285 | if (ret) |
| 286 | return ret; | 286 | goto out; |
| 287 | } | 287 | } |
| 288 | 288 | ||
| 289 | ret = phy_pm_runtime_get_sync(phy); | 289 | ret = phy_pm_runtime_get_sync(phy); |
| 290 | if (ret < 0 && ret != -ENOTSUPP) | 290 | if (ret < 0 && ret != -ENOTSUPP) |
| 291 | return ret; | 291 | goto err_pm_sync; |
| 292 | |||
| 292 | ret = 0; /* Override possible ret == -ENOTSUPP */ | 293 | ret = 0; /* Override possible ret == -ENOTSUPP */ |
| 293 | 294 | ||
| 294 | mutex_lock(&phy->mutex); | 295 | mutex_lock(&phy->mutex); |
| @@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy) | |||
| 296 | ret = phy->ops->power_on(phy); | 297 | ret = phy->ops->power_on(phy); |
| 297 | if (ret < 0) { | 298 | if (ret < 0) { |
| 298 | dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); | 299 | dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); |
| 299 | goto out; | 300 | goto err_pwr_on; |
| 300 | } | 301 | } |
| 301 | } | 302 | } |
| 302 | ++phy->power_count; | 303 | ++phy->power_count; |
| 303 | mutex_unlock(&phy->mutex); | 304 | mutex_unlock(&phy->mutex); |
| 304 | return 0; | 305 | return 0; |
| 305 | 306 | ||
| 306 | out: | 307 | err_pwr_on: |
| 307 | mutex_unlock(&phy->mutex); | 308 | mutex_unlock(&phy->mutex); |
| 308 | phy_pm_runtime_put_sync(phy); | 309 | phy_pm_runtime_put_sync(phy); |
| 310 | err_pm_sync: | ||
| 309 | if (phy->pwr) | 311 | if (phy->pwr) |
| 310 | regulator_disable(phy->pwr); | 312 | regulator_disable(phy->pwr); |
| 311 | 313 | out: | |
| 312 | return ret; | 314 | return ret; |
| 313 | } | 315 | } |
| 314 | EXPORT_SYMBOL_GPL(phy_power_on); | 316 | EXPORT_SYMBOL_GPL(phy_power_on); |
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 4a3fc6e59f8e..840f3eae428b 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c | |||
| @@ -715,6 +715,7 @@ static int twl4030_usb_probe(struct platform_device *pdev) | |||
| 715 | pm_runtime_use_autosuspend(&pdev->dev); | 715 | pm_runtime_use_autosuspend(&pdev->dev); |
| 716 | pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); | 716 | pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); |
| 717 | pm_runtime_enable(&pdev->dev); | 717 | pm_runtime_enable(&pdev->dev); |
| 718 | pm_runtime_get_sync(&pdev->dev); | ||
| 718 | 719 | ||
| 719 | /* Our job is to use irqs and status from the power module | 720 | /* Our job is to use irqs and status from the power module |
| 720 | * to keep the transceiver disabled when nothing's connected. | 721 | * to keep the transceiver disabled when nothing's connected. |
| @@ -750,6 +751,7 @@ static int twl4030_usb_remove(struct platform_device *pdev) | |||
| 750 | struct twl4030_usb *twl = platform_get_drvdata(pdev); | 751 | struct twl4030_usb *twl = platform_get_drvdata(pdev); |
| 751 | int val; | 752 | int val; |
| 752 | 753 | ||
| 754 | usb_remove_phy(&twl->phy); | ||
| 753 | pm_runtime_get_sync(twl->dev); | 755 | pm_runtime_get_sync(twl->dev); |
| 754 | cancel_delayed_work(&twl->id_workaround_work); | 756 | cancel_delayed_work(&twl->id_workaround_work); |
| 755 | device_remove_file(twl->dev, &dev_attr_vbus); | 757 | device_remove_file(twl->dev, &dev_attr_vbus); |
| @@ -757,6 +759,13 @@ static int twl4030_usb_remove(struct platform_device *pdev) | |||
| 757 | /* set transceiver mode to power on defaults */ | 759 | /* set transceiver mode to power on defaults */ |
| 758 | twl4030_usb_set_mode(twl, -1); | 760 | twl4030_usb_set_mode(twl, -1); |
| 759 | 761 | ||
| 762 | /* idle ulpi before powering off */ | ||
| 763 | if (cable_present(twl->linkstat)) | ||
| 764 | pm_runtime_put_noidle(twl->dev); | ||
| 765 | pm_runtime_mark_last_busy(twl->dev); | ||
| 766 | pm_runtime_put_sync_suspend(twl->dev); | ||
| 767 | pm_runtime_disable(twl->dev); | ||
| 768 | |||
| 760 | /* autogate 60MHz ULPI clock, | 769 | /* autogate 60MHz ULPI clock, |
| 761 | * clear dpll clock request for i2c access, | 770 | * clear dpll clock request for i2c access, |
| 762 | * disable 32KHz | 771 | * disable 32KHz |
| @@ -771,11 +780,6 @@ static int twl4030_usb_remove(struct platform_device *pdev) | |||
| 771 | /* disable complete OTG block */ | 780 | /* disable complete OTG block */ |
| 772 | twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); | 781 | twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); |
| 773 | 782 | ||
| 774 | if (cable_present(twl->linkstat)) | ||
| 775 | pm_runtime_put_noidle(twl->dev); | ||
| 776 | pm_runtime_mark_last_busy(twl->dev); | ||
| 777 | pm_runtime_put(twl->dev); | ||
| 778 | |||
| 779 | return 0; | 783 | return 0; |
| 780 | } | 784 | } |
| 781 | 785 | ||
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 0adccbf5c83f..c11db8bceea1 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig | |||
| @@ -4,8 +4,7 @@ endif | |||
| 4 | if MIPS | 4 | if MIPS |
| 5 | source "drivers/platform/mips/Kconfig" | 5 | source "drivers/platform/mips/Kconfig" |
| 6 | endif | 6 | endif |
| 7 | if GOLDFISH | 7 | |
| 8 | source "drivers/platform/goldfish/Kconfig" | 8 | source "drivers/platform/goldfish/Kconfig" |
| 9 | endif | ||
| 10 | 9 | ||
| 11 | source "drivers/platform/chrome/Kconfig" | 10 | source "drivers/platform/chrome/Kconfig" |
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig index 635ef25cc722..50331e3e54f3 100644 --- a/drivers/platform/goldfish/Kconfig +++ b/drivers/platform/goldfish/Kconfig | |||
| @@ -1,5 +1,23 @@ | |||
| 1 | menuconfig GOLDFISH | ||
| 2 | bool "Platform support for Goldfish virtual devices" | ||
| 3 | depends on X86_32 || X86_64 || ARM || ARM64 || MIPS | ||
| 4 | ---help--- | ||
| 5 | Say Y here to get to see options for the Goldfish virtual platform. | ||
| 6 | This option alone does not add any kernel code. | ||
| 7 | |||
| 8 | Unless you are building for the Android Goldfish emulator say N here. | ||
| 9 | |||
| 10 | if GOLDFISH | ||
| 11 | |||
| 12 | config GOLDFISH_BUS | ||
| 13 | bool "Goldfish platform bus" | ||
| 14 | ---help--- | ||
| 15 | This is a virtual bus to host Goldfish Android Virtual Devices. | ||
| 16 | |||
| 1 | config GOLDFISH_PIPE | 17 | config GOLDFISH_PIPE |
| 2 | tristate "Goldfish virtual device for QEMU pipes" | 18 | tristate "Goldfish virtual device for QEMU pipes" |
| 3 | ---help--- | 19 | ---help--- |
| 4 | This is a virtual device to drive the QEMU pipe interface used by | 20 | This is a virtual device to drive the QEMU pipe interface used by |
| 5 | the Goldfish Android Virtual Device. | 21 | the Goldfish Android Virtual Device. |
| 22 | |||
| 23 | endif # GOLDFISH | ||
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile index a0022395eee9..d3487125838c 100644 --- a/drivers/platform/goldfish/Makefile +++ b/drivers/platform/goldfish/Makefile | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for Goldfish platform specific drivers | 2 | # Makefile for Goldfish platform specific drivers |
| 3 | # | 3 | # |
| 4 | obj-$(CONFIG_GOLDFISH) += pdev_bus.o | 4 | obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o |
| 5 | obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o | 5 | obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o |
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 9f6734ce1873..9973cebb4d6f 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | * Copyright (C) 2011 Google, Inc. | 2 | * Copyright (C) 2011 Google, Inc. |
| 3 | * Copyright (C) 2012 Intel, Inc. | 3 | * Copyright (C) 2012 Intel, Inc. |
| 4 | * Copyright (C) 2013 Intel, Inc. | 4 | * Copyright (C) 2013 Intel, Inc. |
| 5 | * Copyright (C) 2014 Linaro Limited | ||
| 5 | * | 6 | * |
| 6 | * This software is licensed under the terms of the GNU General Public | 7 | * This software is licensed under the terms of the GNU General Public |
| 7 | * License version 2, as published by the Free Software Foundation, and | 8 | * License version 2, as published by the Free Software Foundation, and |
| @@ -58,6 +59,8 @@ | |||
| 58 | #include <linux/io.h> | 59 | #include <linux/io.h> |
| 59 | #include <linux/goldfish.h> | 60 | #include <linux/goldfish.h> |
| 60 | #include <linux/dma-mapping.h> | 61 | #include <linux/dma-mapping.h> |
| 62 | #include <linux/mm.h> | ||
| 63 | #include <linux/acpi.h> | ||
| 61 | 64 | ||
| 62 | /* | 65 | /* |
| 63 | * IMPORTANT: The following constants must match the ones used and defined | 66 | * IMPORTANT: The following constants must match the ones used and defined |
| @@ -76,6 +79,7 @@ | |||
| 76 | #define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */ | 79 | #define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */ |
| 77 | #define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */ | 80 | #define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */ |
| 78 | #define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */ | 81 | #define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */ |
| 82 | #define PIPE_REG_VERSION 0x24 /* read: device version */ | ||
| 79 | 83 | ||
| 80 | /* list of commands for PIPE_REG_COMMAND */ | 84 | /* list of commands for PIPE_REG_COMMAND */ |
| 81 | #define CMD_OPEN 1 /* open new channel */ | 85 | #define CMD_OPEN 1 /* open new channel */ |
| @@ -91,12 +95,6 @@ | |||
| 91 | #define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */ | 95 | #define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */ |
| 92 | #define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing | 96 | #define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing |
| 93 | is possible */ | 97 | is possible */ |
| 94 | |||
| 95 | /* The following commands are related to read operations, they must be | ||
| 96 | * listed in the same order than the corresponding write ones, since we | ||
| 97 | * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset | ||
| 98 | * in goldfish_pipe_read_write() below. | ||
| 99 | */ | ||
| 100 | #define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */ | 98 | #define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */ |
| 101 | #define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading | 99 | #define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading |
| 102 | * is possible */ | 100 | * is possible */ |
| @@ -131,6 +129,7 @@ struct goldfish_pipe_dev { | |||
| 131 | unsigned char __iomem *base; | 129 | unsigned char __iomem *base; |
| 132 | struct access_params *aps; | 130 | struct access_params *aps; |
| 133 | int irq; | 131 | int irq; |
| 132 | u32 version; | ||
| 134 | }; | 133 | }; |
| 135 | 134 | ||
| 136 | static struct goldfish_pipe_dev pipe_dev[1]; | 135 | static struct goldfish_pipe_dev pipe_dev[1]; |
| @@ -263,19 +262,14 @@ static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd, | |||
| 263 | return 0; | 262 | return 0; |
| 264 | } | 263 | } |
| 265 | 264 | ||
| 266 | /* This function is used for both reading from and writing to a given | ||
| 267 | * pipe. | ||
| 268 | */ | ||
| 269 | static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, | 265 | static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, |
| 270 | size_t bufflen, int is_write) | 266 | size_t bufflen, int is_write) |
| 271 | { | 267 | { |
| 272 | unsigned long irq_flags; | 268 | unsigned long irq_flags; |
| 273 | struct goldfish_pipe *pipe = filp->private_data; | 269 | struct goldfish_pipe *pipe = filp->private_data; |
| 274 | struct goldfish_pipe_dev *dev = pipe->dev; | 270 | struct goldfish_pipe_dev *dev = pipe->dev; |
| 275 | const int cmd_offset = is_write ? 0 | ||
| 276 | : (CMD_READ_BUFFER - CMD_WRITE_BUFFER); | ||
| 277 | unsigned long address, address_end; | 271 | unsigned long address, address_end; |
| 278 | int ret = 0; | 272 | int count = 0, ret = -EINVAL; |
| 279 | 273 | ||
| 280 | /* If the emulator already closed the pipe, no need to go further */ | 274 | /* If the emulator already closed the pipe, no need to go further */ |
| 281 | if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) | 275 | if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) |
| @@ -298,79 +292,107 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, | |||
| 298 | address_end = address + bufflen; | 292 | address_end = address + bufflen; |
| 299 | 293 | ||
| 300 | while (address < address_end) { | 294 | while (address < address_end) { |
| 301 | unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE; | 295 | unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE; |
| 302 | unsigned long next = page_end < address_end ? page_end | 296 | unsigned long next = page_end < address_end ? page_end |
| 303 | : address_end; | 297 | : address_end; |
| 304 | unsigned long avail = next - address; | 298 | unsigned long avail = next - address; |
| 305 | int status, wakeBit; | 299 | int status, wakeBit; |
| 300 | struct page *page; | ||
| 301 | |||
| 302 | /* Either vaddr or paddr depending on the device version */ | ||
| 303 | unsigned long xaddr; | ||
| 304 | |||
| 305 | /* | ||
| 306 | * We grab the pages on a page-by-page basis in case user | ||
| 307 | * space gives us a potentially huge buffer but the read only | ||
| 308 | * returns a small amount, then there's no need to pin that | ||
| 309 | * much memory to the process. | ||
| 310 | */ | ||
| 311 | down_read(¤t->mm->mmap_sem); | ||
| 312 | ret = get_user_pages(current, current->mm, address, 1, | ||
| 313 | !is_write, 0, &page, NULL); | ||
| 314 | up_read(¤t->mm->mmap_sem); | ||
| 315 | if (ret < 0) | ||
| 316 | break; | ||
| 306 | 317 | ||
| 307 | /* Ensure that the corresponding page is properly mapped */ | 318 | if (dev->version) { |
| 308 | /* FIXME: this isn't safe or sufficient - use get_user_pages */ | 319 | /* Device version 1 or newer (qemu-android) expects the |
| 309 | if (is_write) { | 320 | * physical address. |
| 310 | char c; | 321 | */ |
| 311 | /* Ensure that the page is mapped and readable */ | 322 | xaddr = page_to_phys(page) | (address & ~PAGE_MASK); |
| 312 | if (__get_user(c, (char __user *)address)) { | ||
| 313 | if (!ret) | ||
| 314 | ret = -EFAULT; | ||
| 315 | break; | ||
| 316 | } | ||
| 317 | } else { | 323 | } else { |
| 318 | /* Ensure that the page is mapped and writable */ | 324 | /* Device version 0 (classic emulator) expects the |
| 319 | if (__put_user(0, (char __user *)address)) { | 325 | * virtual address. |
| 320 | if (!ret) | 326 | */ |
| 321 | ret = -EFAULT; | 327 | xaddr = address; |
| 322 | break; | ||
| 323 | } | ||
| 324 | } | 328 | } |
| 325 | 329 | ||
| 326 | /* Now, try to transfer the bytes in the current page */ | 330 | /* Now, try to transfer the bytes in the current page */ |
| 327 | spin_lock_irqsave(&dev->lock, irq_flags); | 331 | spin_lock_irqsave(&dev->lock, irq_flags); |
| 328 | if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset, | 332 | if (access_with_param(dev, |
| 329 | address, avail, pipe, &status)) { | 333 | is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER, |
| 334 | xaddr, avail, pipe, &status)) { | ||
| 330 | gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL, | 335 | gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL, |
| 331 | dev->base + PIPE_REG_CHANNEL_HIGH); | 336 | dev->base + PIPE_REG_CHANNEL_HIGH); |
| 332 | writel(avail, dev->base + PIPE_REG_SIZE); | 337 | writel(avail, dev->base + PIPE_REG_SIZE); |
| 333 | gf_write_ptr((void *)address, | 338 | gf_write_ptr((void *)xaddr, |
| 334 | dev->base + PIPE_REG_ADDRESS, | 339 | dev->base + PIPE_REG_ADDRESS, |
| 335 | dev->base + PIPE_REG_ADDRESS_HIGH); | 340 | dev->base + PIPE_REG_ADDRESS_HIGH); |
| 336 | writel(CMD_WRITE_BUFFER + cmd_offset, | 341 | writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER, |
| 337 | dev->base + PIPE_REG_COMMAND); | 342 | dev->base + PIPE_REG_COMMAND); |
| 338 | status = readl(dev->base + PIPE_REG_STATUS); | 343 | status = readl(dev->base + PIPE_REG_STATUS); |
| 339 | } | 344 | } |
| 340 | spin_unlock_irqrestore(&dev->lock, irq_flags); | 345 | spin_unlock_irqrestore(&dev->lock, irq_flags); |
| 341 | 346 | ||
| 347 | if (status > 0 && !is_write) | ||
| 348 | set_page_dirty(page); | ||
| 349 | put_page(page); | ||
| 350 | |||
| 342 | if (status > 0) { /* Correct transfer */ | 351 | if (status > 0) { /* Correct transfer */ |
| 343 | ret += status; | 352 | count += status; |
| 344 | address += status; | 353 | address += status; |
| 345 | continue; | 354 | continue; |
| 346 | } | 355 | } else if (status == 0) { /* EOF */ |
| 347 | 356 | ret = 0; | |
| 348 | if (status == 0) /* EOF */ | ||
| 349 | break; | 357 | break; |
| 350 | 358 | } else if (status < 0 && count > 0) { | |
| 351 | /* An error occured. If we already transfered stuff, just | 359 | /* |
| 352 | * return with its count. We expect the next call to return | 360 | * An error occurred and we already transferred |
| 353 | * an error code */ | 361 | * something on one of the previous pages. |
| 354 | if (ret > 0) | 362 | * Just return what we already copied and log this |
| 363 | * err. | ||
| 364 | * | ||
| 365 | * Note: This seems like an incorrect approach but | ||
| 366 | * cannot change it until we check if any user space | ||
| 367 | * ABI relies on this behavior. | ||
| 368 | */ | ||
| 369 | if (status != PIPE_ERROR_AGAIN) | ||
| 370 | pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n", | ||
| 371 | status, is_write ? "write" : "read"); | ||
| 372 | ret = 0; | ||
| 355 | break; | 373 | break; |
| 374 | } | ||
| 356 | 375 | ||
| 357 | /* If the error is not PIPE_ERROR_AGAIN, or if we are not in | 376 | /* |
| 358 | * non-blocking mode, just return the error code. | 377 | * If the error is not PIPE_ERROR_AGAIN, or if we are not in |
| 359 | */ | 378 | * non-blocking mode, just return the error code. |
| 379 | */ | ||
| 360 | if (status != PIPE_ERROR_AGAIN || | 380 | if (status != PIPE_ERROR_AGAIN || |
| 361 | (filp->f_flags & O_NONBLOCK) != 0) { | 381 | (filp->f_flags & O_NONBLOCK) != 0) { |
| 362 | ret = goldfish_pipe_error_convert(status); | 382 | ret = goldfish_pipe_error_convert(status); |
| 363 | break; | 383 | break; |
| 364 | } | 384 | } |
| 365 | 385 | ||
| 366 | /* We will have to wait until more data/space is available. | 386 | /* |
| 367 | * First, mark the pipe as waiting for a specific wake signal. | 387 | * The backend blocked the read/write, wait until the backend |
| 368 | */ | 388 | * tells us it's ready to process more data. |
| 389 | */ | ||
| 369 | wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ; | 390 | wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ; |
| 370 | set_bit(wakeBit, &pipe->flags); | 391 | set_bit(wakeBit, &pipe->flags); |
| 371 | 392 | ||
| 372 | /* Tell the emulator we're going to wait for a wake event */ | 393 | /* Tell the emulator we're going to wait for a wake event */ |
| 373 | goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset); | 394 | goldfish_cmd(pipe, |
| 395 | is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ); | ||
| 374 | 396 | ||
| 375 | /* Unlock the pipe, then wait for the wake signal */ | 397 | /* Unlock the pipe, then wait for the wake signal */ |
| 376 | mutex_unlock(&pipe->lock); | 398 | mutex_unlock(&pipe->lock); |
| @@ -388,12 +410,13 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, | |||
| 388 | /* Try to re-acquire the lock */ | 410 | /* Try to re-acquire the lock */ |
| 389 | if (mutex_lock_interruptible(&pipe->lock)) | 411 | if (mutex_lock_interruptible(&pipe->lock)) |
| 390 | return -ERESTARTSYS; | 412 | return -ERESTARTSYS; |
| 391 | |||
| 392 | /* Try the transfer again */ | ||
| 393 | continue; | ||
| 394 | } | 413 | } |
| 395 | mutex_unlock(&pipe->lock); | 414 | mutex_unlock(&pipe->lock); |
| 396 | return ret; | 415 | |
| 416 | if (ret < 0) | ||
| 417 | return ret; | ||
| 418 | else | ||
| 419 | return count; | ||
| 397 | } | 420 | } |
| 398 | 421 | ||
| 399 | static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, | 422 | static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, |
| @@ -446,10 +469,11 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id) | |||
| 446 | unsigned long irq_flags; | 469 | unsigned long irq_flags; |
| 447 | int count = 0; | 470 | int count = 0; |
| 448 | 471 | ||
| 449 | /* We're going to read from the emulator a list of (channel,flags) | 472 | /* |
| 450 | * pairs corresponding to the wake events that occured on each | 473 | * We're going to read from the emulator a list of (channel,flags) |
| 451 | * blocked pipe (i.e. channel). | 474 | * pairs corresponding to the wake events that occurred on each |
| 452 | */ | 475 | * blocked pipe (i.e. channel). |
| 476 | */ | ||
| 453 | spin_lock_irqsave(&dev->lock, irq_flags); | 477 | spin_lock_irqsave(&dev->lock, irq_flags); |
| 454 | for (;;) { | 478 | for (;;) { |
| 455 | /* First read the channel, 0 means the end of the list */ | 479 | /* First read the channel, 0 means the end of the list */ |
| @@ -600,6 +624,12 @@ static int goldfish_pipe_probe(struct platform_device *pdev) | |||
| 600 | goto error; | 624 | goto error; |
| 601 | } | 625 | } |
| 602 | setup_access_params_addr(pdev, dev); | 626 | setup_access_params_addr(pdev, dev); |
| 627 | |||
| 628 | /* Although the pipe device in the classic Android emulator does not | ||
| 629 | * recognize the 'version' register, it won't treat this as an error | ||
| 630 | * either and will simply return 0, which is fine. | ||
| 631 | */ | ||
| 632 | dev->version = readl(dev->base + PIPE_REG_VERSION); | ||
| 603 | return 0; | 633 | return 0; |
| 604 | 634 | ||
| 605 | error: | 635 | error: |
| @@ -615,11 +645,26 @@ static int goldfish_pipe_remove(struct platform_device *pdev) | |||
| 615 | return 0; | 645 | return 0; |
| 616 | } | 646 | } |
| 617 | 647 | ||
| 648 | static const struct acpi_device_id goldfish_pipe_acpi_match[] = { | ||
| 649 | { "GFSH0003", 0 }, | ||
| 650 | { }, | ||
| 651 | }; | ||
| 652 | MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match); | ||
| 653 | |||
| 654 | static const struct of_device_id goldfish_pipe_of_match[] = { | ||
| 655 | { .compatible = "google,android-pipe", }, | ||
| 656 | {}, | ||
| 657 | }; | ||
| 658 | MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match); | ||
| 659 | |||
| 618 | static struct platform_driver goldfish_pipe = { | 660 | static struct platform_driver goldfish_pipe = { |
| 619 | .probe = goldfish_pipe_probe, | 661 | .probe = goldfish_pipe_probe, |
| 620 | .remove = goldfish_pipe_remove, | 662 | .remove = goldfish_pipe_remove, |
| 621 | .driver = { | 663 | .driver = { |
| 622 | .name = "goldfish_pipe" | 664 | .name = "goldfish_pipe", |
| 665 | .owner = THIS_MODULE, | ||
| 666 | .of_match_table = goldfish_pipe_of_match, | ||
| 667 | .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match), | ||
| 623 | } | 668 | } |
| 624 | }; | 669 | }; |
| 625 | 670 | ||
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 20f0ad9bb9f3..e20f23e04c24 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c | |||
| @@ -41,8 +41,7 @@ static const struct key_entry intel_hid_keymap[] = { | |||
| 41 | { KE_KEY, 4, { KEY_HOME } }, | 41 | { KE_KEY, 4, { KEY_HOME } }, |
| 42 | { KE_KEY, 5, { KEY_END } }, | 42 | { KE_KEY, 5, { KEY_END } }, |
| 43 | { KE_KEY, 6, { KEY_PAGEUP } }, | 43 | { KE_KEY, 6, { KEY_PAGEUP } }, |
| 44 | { KE_KEY, 4, { KEY_PAGEDOWN } }, | 44 | { KE_KEY, 7, { KEY_PAGEDOWN } }, |
| 45 | { KE_KEY, 4, { KEY_HOME } }, | ||
| 46 | { KE_KEY, 8, { KEY_RFKILL } }, | 45 | { KE_KEY, 8, { KEY_RFKILL } }, |
| 47 | { KE_KEY, 9, { KEY_POWER } }, | 46 | { KE_KEY, 9, { KEY_POWER } }, |
| 48 | { KE_KEY, 11, { KEY_SLEEP } }, | 47 | { KE_KEY, 11, { KEY_SLEEP } }, |
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c index 02bc5a6343c3..aa454241489c 100644 --- a/drivers/platform/x86/intel_scu_ipcutil.c +++ b/drivers/platform/x86/intel_scu_ipcutil.c | |||
| @@ -49,7 +49,7 @@ struct scu_ipc_data { | |||
| 49 | 49 | ||
| 50 | static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) | 50 | static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) |
| 51 | { | 51 | { |
| 52 | int count = data->count; | 52 | unsigned int count = data->count; |
| 53 | 53 | ||
| 54 | if (count == 0 || count == 3 || count > 4) | 54 | if (count == 0 || count == 3 || count > 4) |
| 55 | return -EINVAL; | 55 | return -EINVAL; |
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 361358134315..93880ed6291c 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c | |||
| @@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev, | |||
| 562 | /* | 562 | /* |
| 563 | * Command Lock contention | 563 | * Command Lock contention |
| 564 | */ | 564 | */ |
| 565 | err = SCSI_DH_RETRY; | 565 | err = SCSI_DH_IMM_RETRY; |
| 566 | break; | 566 | break; |
| 567 | default: | 567 | default: |
| 568 | break; | 568 | break; |
| @@ -612,6 +612,8 @@ retry: | |||
| 612 | err = mode_select_handle_sense(sdev, h->sense); | 612 | err = mode_select_handle_sense(sdev, h->sense); |
| 613 | if (err == SCSI_DH_RETRY && retry_cnt--) | 613 | if (err == SCSI_DH_RETRY && retry_cnt--) |
| 614 | goto retry; | 614 | goto retry; |
| 615 | if (err == SCSI_DH_IMM_RETRY) | ||
| 616 | goto retry; | ||
| 615 | } | 617 | } |
| 616 | if (err == SCSI_DH_OK) { | 618 | if (err == SCSI_DH_OK) { |
| 617 | h->state = RDAC_STATE_ACTIVE; | 619 | h->state = RDAC_STATE_ACTIVE; |
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig index b67661836c9f..d1dd1616f983 100644 --- a/drivers/scsi/hisi_sas/Kconfig +++ b/drivers/scsi/hisi_sas/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config SCSI_HISI_SAS | 1 | config SCSI_HISI_SAS |
| 2 | tristate "HiSilicon SAS" | 2 | tristate "HiSilicon SAS" |
| 3 | depends on HAS_DMA | 3 | depends on HAS_DMA && HAS_IOMEM |
| 4 | depends on ARM64 || COMPILE_TEST | 4 | depends on ARM64 || COMPILE_TEST |
| 5 | select SCSI_SAS_LIBSAS | 5 | select SCSI_SAS_LIBSAS |
| 6 | select BLK_DEV_INTEGRITY | 6 | select BLK_DEV_INTEGRITY |
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 057fdeb720ac..eea24d7531cf 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | |||
| @@ -1289,13 +1289,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, | |||
| 1289 | goto out; | 1289 | goto out; |
| 1290 | } | 1290 | } |
| 1291 | 1291 | ||
| 1292 | if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) { | 1292 | if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK && |
| 1293 | if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) || | 1293 | !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) { |
| 1294 | !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) | ||
| 1295 | ts->stat = SAS_DATA_OVERRUN; | ||
| 1296 | else | ||
| 1297 | slot_err_v1_hw(hisi_hba, task, slot); | ||
| 1298 | 1294 | ||
| 1295 | slot_err_v1_hw(hisi_hba, task, slot); | ||
| 1299 | goto out; | 1296 | goto out; |
| 1300 | } | 1297 | } |
| 1301 | 1298 | ||
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 52a87657c7dd..692a7570b5e1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -2204,7 +2204,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
| 2204 | /* Clear outstanding commands array. */ | 2204 | /* Clear outstanding commands array. */ |
| 2205 | for (que = 0; que < ha->max_req_queues; que++) { | 2205 | for (que = 0; que < ha->max_req_queues; que++) { |
| 2206 | req = ha->req_q_map[que]; | 2206 | req = ha->req_q_map[que]; |
| 2207 | if (!req) | 2207 | if (!req || !test_bit(que, ha->req_qid_map)) |
| 2208 | continue; | 2208 | continue; |
| 2209 | req->out_ptr = (void *)(req->ring + req->length); | 2209 | req->out_ptr = (void *)(req->ring + req->length); |
| 2210 | *req->out_ptr = 0; | 2210 | *req->out_ptr = 0; |
| @@ -2221,7 +2221,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) | |||
| 2221 | 2221 | ||
| 2222 | for (que = 0; que < ha->max_rsp_queues; que++) { | 2222 | for (que = 0; que < ha->max_rsp_queues; que++) { |
| 2223 | rsp = ha->rsp_q_map[que]; | 2223 | rsp = ha->rsp_q_map[que]; |
| 2224 | if (!rsp) | 2224 | if (!rsp || !test_bit(que, ha->rsp_qid_map)) |
| 2225 | continue; | 2225 | continue; |
| 2226 | rsp->in_ptr = (void *)(rsp->ring + rsp->length); | 2226 | rsp->in_ptr = (void *)(rsp->ring + rsp->length); |
| 2227 | *rsp->in_ptr = 0; | 2227 | *rsp->in_ptr = 0; |
| @@ -4981,7 +4981,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) | |||
| 4981 | 4981 | ||
| 4982 | for (i = 1; i < ha->max_rsp_queues; i++) { | 4982 | for (i = 1; i < ha->max_rsp_queues; i++) { |
| 4983 | rsp = ha->rsp_q_map[i]; | 4983 | rsp = ha->rsp_q_map[i]; |
| 4984 | if (rsp) { | 4984 | if (rsp && test_bit(i, ha->rsp_qid_map)) { |
| 4985 | rsp->options &= ~BIT_0; | 4985 | rsp->options &= ~BIT_0; |
| 4986 | ret = qla25xx_init_rsp_que(base_vha, rsp); | 4986 | ret = qla25xx_init_rsp_que(base_vha, rsp); |
| 4987 | if (ret != QLA_SUCCESS) | 4987 | if (ret != QLA_SUCCESS) |
| @@ -4996,8 +4996,8 @@ qla25xx_init_queues(struct qla_hw_data *ha) | |||
| 4996 | } | 4996 | } |
| 4997 | for (i = 1; i < ha->max_req_queues; i++) { | 4997 | for (i = 1; i < ha->max_req_queues; i++) { |
| 4998 | req = ha->req_q_map[i]; | 4998 | req = ha->req_q_map[i]; |
| 4999 | if (req) { | 4999 | if (req && test_bit(i, ha->req_qid_map)) { |
| 5000 | /* Clear outstanding commands array. */ | 5000 | /* Clear outstanding commands array. */ |
| 5001 | req->options &= ~BIT_0; | 5001 | req->options &= ~BIT_0; |
| 5002 | ret = qla25xx_init_req_que(base_vha, req); | 5002 | ret = qla25xx_init_req_que(base_vha, req); |
| 5003 | if (ret != QLA_SUCCESS) | 5003 | if (ret != QLA_SUCCESS) |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d4d65eb0e9b4..4af95479a9db 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -3063,9 +3063,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
| 3063 | "MSI-X: Failed to enable support " | 3063 | "MSI-X: Failed to enable support " |
| 3064 | "-- %d/%d\n Retry with %d vectors.\n", | 3064 | "-- %d/%d\n Retry with %d vectors.\n", |
| 3065 | ha->msix_count, ret, ret); | 3065 | ha->msix_count, ret, ret); |
| 3066 | ha->msix_count = ret; | ||
| 3067 | ha->max_rsp_queues = ha->msix_count - 1; | ||
| 3066 | } | 3068 | } |
| 3067 | ha->msix_count = ret; | ||
| 3068 | ha->max_rsp_queues = ha->msix_count - 1; | ||
| 3069 | ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * | 3069 | ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * |
| 3070 | ha->msix_count, GFP_KERNEL); | 3070 | ha->msix_count, GFP_KERNEL); |
| 3071 | if (!ha->msix_entries) { | 3071 | if (!ha->msix_entries) { |
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c5dd594f6c31..cf7ba52bae66 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
| @@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) | |||
| 600 | /* Delete request queues */ | 600 | /* Delete request queues */ |
| 601 | for (cnt = 1; cnt < ha->max_req_queues; cnt++) { | 601 | for (cnt = 1; cnt < ha->max_req_queues; cnt++) { |
| 602 | req = ha->req_q_map[cnt]; | 602 | req = ha->req_q_map[cnt]; |
| 603 | if (req) { | 603 | if (req && test_bit(cnt, ha->req_qid_map)) { |
| 604 | ret = qla25xx_delete_req_que(vha, req); | 604 | ret = qla25xx_delete_req_que(vha, req); |
| 605 | if (ret != QLA_SUCCESS) { | 605 | if (ret != QLA_SUCCESS) { |
| 606 | ql_log(ql_log_warn, vha, 0x00ea, | 606 | ql_log(ql_log_warn, vha, 0x00ea, |
| @@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) | |||
| 614 | /* Delete response queues */ | 614 | /* Delete response queues */ |
| 615 | for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { | 615 | for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { |
| 616 | rsp = ha->rsp_q_map[cnt]; | 616 | rsp = ha->rsp_q_map[cnt]; |
| 617 | if (rsp) { | 617 | if (rsp && test_bit(cnt, ha->rsp_qid_map)) { |
| 618 | ret = qla25xx_delete_rsp_que(vha, rsp); | 618 | ret = qla25xx_delete_rsp_que(vha, rsp); |
| 619 | if (ret != QLA_SUCCESS) { | 619 | if (ret != QLA_SUCCESS) { |
| 620 | ql_log(ql_log_warn, vha, 0x00eb, | 620 | ql_log(ql_log_warn, vha, 0x00eb, |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f1788db43195..f6c7ce35b542 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -409,6 +409,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) | |||
| 409 | int cnt; | 409 | int cnt; |
| 410 | 410 | ||
| 411 | for (cnt = 0; cnt < ha->max_req_queues; cnt++) { | 411 | for (cnt = 0; cnt < ha->max_req_queues; cnt++) { |
| 412 | if (!test_bit(cnt, ha->req_qid_map)) | ||
| 413 | continue; | ||
| 414 | |||
| 412 | req = ha->req_q_map[cnt]; | 415 | req = ha->req_q_map[cnt]; |
| 413 | qla2x00_free_req_que(ha, req); | 416 | qla2x00_free_req_que(ha, req); |
| 414 | } | 417 | } |
| @@ -416,6 +419,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) | |||
| 416 | ha->req_q_map = NULL; | 419 | ha->req_q_map = NULL; |
| 417 | 420 | ||
| 418 | for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { | 421 | for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { |
| 422 | if (!test_bit(cnt, ha->rsp_qid_map)) | ||
| 423 | continue; | ||
| 424 | |||
| 419 | rsp = ha->rsp_q_map[cnt]; | 425 | rsp = ha->rsp_q_map[cnt]; |
| 420 | qla2x00_free_rsp_que(ha, rsp); | 426 | qla2x00_free_rsp_que(ha, rsp); |
| 421 | } | 427 | } |
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 8075a4cdb45c..ee967becd257 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
| @@ -105,7 +105,7 @@ static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); | |||
| 105 | static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, | 105 | static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, |
| 106 | int fn, void *iocb, int flags); | 106 | int fn, void *iocb, int flags); |
| 107 | static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd | 107 | static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd |
| 108 | *cmd, struct atio_from_isp *atio, int ha_locked); | 108 | *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); |
| 109 | static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, | 109 | static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, |
| 110 | struct qla_tgt_srr_imm *imm, int ha_lock); | 110 | struct qla_tgt_srr_imm *imm, int ha_lock); |
| 111 | static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, | 111 | static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, |
| @@ -1756,7 +1756,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) | |||
| 1756 | qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, | 1756 | qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, |
| 1757 | 0, 0, 0, 0, 0, 0); | 1757 | 0, 0, 0, 0, 0, 0); |
| 1758 | else { | 1758 | else { |
| 1759 | if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK) | 1759 | if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) |
| 1760 | qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, | 1760 | qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, |
| 1761 | mcmd->fc_tm_rsp, false); | 1761 | mcmd->fc_tm_rsp, false); |
| 1762 | else | 1762 | else |
| @@ -2665,7 +2665,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
| 2665 | /* no need to terminate. FW already freed exchange. */ | 2665 | /* no need to terminate. FW already freed exchange. */ |
| 2666 | qlt_abort_cmd_on_host_reset(cmd->vha, cmd); | 2666 | qlt_abort_cmd_on_host_reset(cmd->vha, cmd); |
| 2667 | else | 2667 | else |
| 2668 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); | 2668 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); |
| 2669 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2669 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 2670 | return 0; | 2670 | return 0; |
| 2671 | } | 2671 | } |
| @@ -3173,7 +3173,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, | |||
| 3173 | } | 3173 | } |
| 3174 | 3174 | ||
| 3175 | static void qlt_send_term_exchange(struct scsi_qla_host *vha, | 3175 | static void qlt_send_term_exchange(struct scsi_qla_host *vha, |
| 3176 | struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) | 3176 | struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, |
| 3177 | int ul_abort) | ||
| 3177 | { | 3178 | { |
| 3178 | unsigned long flags = 0; | 3179 | unsigned long flags = 0; |
| 3179 | int rc; | 3180 | int rc; |
| @@ -3193,8 +3194,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, | |||
| 3193 | qlt_alloc_qfull_cmd(vha, atio, 0, 0); | 3194 | qlt_alloc_qfull_cmd(vha, atio, 0, 0); |
| 3194 | 3195 | ||
| 3195 | done: | 3196 | done: |
| 3196 | if (cmd && (!cmd->aborted || | 3197 | if (cmd && !ul_abort && !cmd->aborted) { |
| 3197 | !cmd->cmd_sent_to_fw)) { | ||
| 3198 | if (cmd->sg_mapped) | 3198 | if (cmd->sg_mapped) |
| 3199 | qlt_unmap_sg(vha, cmd); | 3199 | qlt_unmap_sg(vha, cmd); |
| 3200 | vha->hw->tgt.tgt_ops->free_cmd(cmd); | 3200 | vha->hw->tgt.tgt_ops->free_cmd(cmd); |
| @@ -3253,21 +3253,38 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) | |||
| 3253 | 3253 | ||
| 3254 | } | 3254 | } |
| 3255 | 3255 | ||
| 3256 | void qlt_abort_cmd(struct qla_tgt_cmd *cmd) | 3256 | int qlt_abort_cmd(struct qla_tgt_cmd *cmd) |
| 3257 | { | 3257 | { |
| 3258 | struct qla_tgt *tgt = cmd->tgt; | 3258 | struct qla_tgt *tgt = cmd->tgt; |
| 3259 | struct scsi_qla_host *vha = tgt->vha; | 3259 | struct scsi_qla_host *vha = tgt->vha; |
| 3260 | struct se_cmd *se_cmd = &cmd->se_cmd; | 3260 | struct se_cmd *se_cmd = &cmd->se_cmd; |
| 3261 | unsigned long flags; | ||
| 3261 | 3262 | ||
| 3262 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, | 3263 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, |
| 3263 | "qla_target(%d): terminating exchange for aborted cmd=%p " | 3264 | "qla_target(%d): terminating exchange for aborted cmd=%p " |
| 3264 | "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, | 3265 | "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, |
| 3265 | se_cmd->tag); | 3266 | se_cmd->tag); |
| 3266 | 3267 | ||
| 3268 | spin_lock_irqsave(&cmd->cmd_lock, flags); | ||
| 3269 | if (cmd->aborted) { | ||
| 3270 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 3271 | /* | ||
| 3272 | * It's normal to see 2 calls in this path: | ||
| 3273 | * 1) XFER Rdy completion + CMD_T_ABORT | ||
| 3274 | * 2) TCM TMR - drain_state_list | ||
| 3275 | */ | ||
| 3276 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, | ||
| 3277 | "multiple abort. %p transport_state %x, t_state %x," | ||
| 3278 | " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state, | ||
| 3279 | cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags); | ||
| 3280 | return EIO; | ||
| 3281 | } | ||
| 3267 | cmd->aborted = 1; | 3282 | cmd->aborted = 1; |
| 3268 | cmd->cmd_flags |= BIT_6; | 3283 | cmd->cmd_flags |= BIT_6; |
| 3284 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 3269 | 3285 | ||
| 3270 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); | 3286 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1); |
| 3287 | return 0; | ||
| 3271 | } | 3288 | } |
| 3272 | EXPORT_SYMBOL(qlt_abort_cmd); | 3289 | EXPORT_SYMBOL(qlt_abort_cmd); |
| 3273 | 3290 | ||
| @@ -3282,6 +3299,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) | |||
| 3282 | 3299 | ||
| 3283 | BUG_ON(cmd->cmd_in_wq); | 3300 | BUG_ON(cmd->cmd_in_wq); |
| 3284 | 3301 | ||
| 3302 | if (cmd->sg_mapped) | ||
| 3303 | qlt_unmap_sg(cmd->vha, cmd); | ||
| 3304 | |||
| 3285 | if (!cmd->q_full) | 3305 | if (!cmd->q_full) |
| 3286 | qlt_decr_num_pend_cmds(cmd->vha); | 3306 | qlt_decr_num_pend_cmds(cmd->vha); |
| 3287 | 3307 | ||
| @@ -3399,7 +3419,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, | |||
| 3399 | term = 1; | 3419 | term = 1; |
| 3400 | 3420 | ||
| 3401 | if (term) | 3421 | if (term) |
| 3402 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); | 3422 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); |
| 3403 | 3423 | ||
| 3404 | return term; | 3424 | return term; |
| 3405 | } | 3425 | } |
| @@ -3580,12 +3600,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
| 3580 | case CTIO_PORT_LOGGED_OUT: | 3600 | case CTIO_PORT_LOGGED_OUT: |
| 3581 | case CTIO_PORT_UNAVAILABLE: | 3601 | case CTIO_PORT_UNAVAILABLE: |
| 3582 | { | 3602 | { |
| 3583 | int logged_out = (status & 0xFFFF); | 3603 | int logged_out = |
| 3604 | (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; | ||
| 3605 | |||
| 3584 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, | 3606 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, |
| 3585 | "qla_target(%d): CTIO with %s status %x " | 3607 | "qla_target(%d): CTIO with %s status %x " |
| 3586 | "received (state %x, se_cmd %p)\n", vha->vp_idx, | 3608 | "received (state %x, se_cmd %p)\n", vha->vp_idx, |
| 3587 | (logged_out == CTIO_PORT_LOGGED_OUT) ? | 3609 | logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", |
| 3588 | "PORT LOGGED OUT" : "PORT UNAVAILABLE", | ||
| 3589 | status, cmd->state, se_cmd); | 3610 | status, cmd->state, se_cmd); |
| 3590 | 3611 | ||
| 3591 | if (logged_out && cmd->sess) { | 3612 | if (logged_out && cmd->sess) { |
| @@ -3754,6 +3775,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) | |||
| 3754 | goto out_term; | 3775 | goto out_term; |
| 3755 | } | 3776 | } |
| 3756 | 3777 | ||
| 3778 | spin_lock_init(&cmd->cmd_lock); | ||
| 3757 | cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; | 3779 | cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; |
| 3758 | cmd->se_cmd.tag = atio->u.isp24.exchange_addr; | 3780 | cmd->se_cmd.tag = atio->u.isp24.exchange_addr; |
| 3759 | cmd->unpacked_lun = scsilun_to_int( | 3781 | cmd->unpacked_lun = scsilun_to_int( |
| @@ -3796,7 +3818,7 @@ out_term: | |||
| 3796 | */ | 3818 | */ |
| 3797 | cmd->cmd_flags |= BIT_2; | 3819 | cmd->cmd_flags |= BIT_2; |
| 3798 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3820 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 3799 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); | 3821 | qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0); |
| 3800 | 3822 | ||
| 3801 | qlt_decr_num_pend_cmds(vha); | 3823 | qlt_decr_num_pend_cmds(vha); |
| 3802 | percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); | 3824 | percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); |
| @@ -3918,7 +3940,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work) | |||
| 3918 | 3940 | ||
| 3919 | out_term: | 3941 | out_term: |
| 3920 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3942 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 3921 | qlt_send_term_exchange(vha, NULL, &op->atio, 1); | 3943 | qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0); |
| 3922 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 3944 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 3923 | kfree(op); | 3945 | kfree(op); |
| 3924 | 3946 | ||
| @@ -3982,7 +4004,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, | |||
| 3982 | 4004 | ||
| 3983 | cmd->cmd_in_wq = 1; | 4005 | cmd->cmd_in_wq = 1; |
| 3984 | cmd->cmd_flags |= BIT_0; | 4006 | cmd->cmd_flags |= BIT_0; |
| 3985 | cmd->se_cmd.cpuid = -1; | 4007 | cmd->se_cmd.cpuid = ha->msix_count ? |
| 4008 | ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND; | ||
| 3986 | 4009 | ||
| 3987 | spin_lock(&vha->cmd_list_lock); | 4010 | spin_lock(&vha->cmd_list_lock); |
| 3988 | list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); | 4011 | list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); |
| @@ -3990,7 +4013,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, | |||
| 3990 | 4013 | ||
| 3991 | INIT_WORK(&cmd->work, qlt_do_work); | 4014 | INIT_WORK(&cmd->work, qlt_do_work); |
| 3992 | if (ha->msix_count) { | 4015 | if (ha->msix_count) { |
| 3993 | cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid; | ||
| 3994 | if (cmd->atio.u.isp24.fcp_cmnd.rddata) | 4016 | if (cmd->atio.u.isp24.fcp_cmnd.rddata) |
| 3995 | queue_work_on(smp_processor_id(), qla_tgt_wq, | 4017 | queue_work_on(smp_processor_id(), qla_tgt_wq, |
| 3996 | &cmd->work); | 4018 | &cmd->work); |
| @@ -4771,7 +4793,7 @@ out_reject: | |||
| 4771 | dump_stack(); | 4793 | dump_stack(); |
| 4772 | } else { | 4794 | } else { |
| 4773 | cmd->cmd_flags |= BIT_9; | 4795 | cmd->cmd_flags |= BIT_9; |
| 4774 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); | 4796 | qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); |
| 4775 | } | 4797 | } |
| 4776 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 4798 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 4777 | } | 4799 | } |
| @@ -4950,7 +4972,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, | |||
| 4950 | sctio, sctio->srr_id); | 4972 | sctio, sctio->srr_id); |
| 4951 | list_del(&sctio->srr_list_entry); | 4973 | list_del(&sctio->srr_list_entry); |
| 4952 | qlt_send_term_exchange(vha, sctio->cmd, | 4974 | qlt_send_term_exchange(vha, sctio->cmd, |
| 4953 | &sctio->cmd->atio, 1); | 4975 | &sctio->cmd->atio, 1, 0); |
| 4954 | kfree(sctio); | 4976 | kfree(sctio); |
| 4955 | } | 4977 | } |
| 4956 | } | 4978 | } |
| @@ -5123,7 +5145,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, | |||
| 5123 | atio->u.isp24.fcp_hdr.s_id); | 5145 | atio->u.isp24.fcp_hdr.s_id); |
| 5124 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 5146 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
| 5125 | if (!sess) { | 5147 | if (!sess) { |
| 5126 | qlt_send_term_exchange(vha, NULL, atio, 1); | 5148 | qlt_send_term_exchange(vha, NULL, atio, 1, 0); |
| 5127 | return 0; | 5149 | return 0; |
| 5128 | } | 5150 | } |
| 5129 | /* Sending marker isn't necessary, since we called from ISR */ | 5151 | /* Sending marker isn't necessary, since we called from ISR */ |
| @@ -5406,7 +5428,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, | |||
| 5406 | #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ | 5428 | #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ |
| 5407 | qlt_send_busy(vha, atio, SAM_STAT_BUSY); | 5429 | qlt_send_busy(vha, atio, SAM_STAT_BUSY); |
| 5408 | #else | 5430 | #else |
| 5409 | qlt_send_term_exchange(vha, NULL, atio, 1); | 5431 | qlt_send_term_exchange(vha, NULL, atio, 1, 0); |
| 5410 | #endif | 5432 | #endif |
| 5411 | 5433 | ||
| 5412 | if (!ha_locked) | 5434 | if (!ha_locked) |
| @@ -5523,7 +5545,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) | |||
| 5523 | #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ | 5545 | #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ |
| 5524 | qlt_send_busy(vha, atio, 0); | 5546 | qlt_send_busy(vha, atio, 0); |
| 5525 | #else | 5547 | #else |
| 5526 | qlt_send_term_exchange(vha, NULL, atio, 1); | 5548 | qlt_send_term_exchange(vha, NULL, atio, 1, 0); |
| 5527 | #endif | 5549 | #endif |
| 5528 | } else { | 5550 | } else { |
| 5529 | if (tgt->tgt_stop) { | 5551 | if (tgt->tgt_stop) { |
| @@ -5532,7 +5554,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) | |||
| 5532 | "command to target, sending TERM " | 5554 | "command to target, sending TERM " |
| 5533 | "EXCHANGE for rsp\n"); | 5555 | "EXCHANGE for rsp\n"); |
| 5534 | qlt_send_term_exchange(vha, NULL, | 5556 | qlt_send_term_exchange(vha, NULL, |
| 5535 | atio, 1); | 5557 | atio, 1, 0); |
| 5536 | } else { | 5558 | } else { |
| 5537 | ql_dbg(ql_dbg_tgt, vha, 0xe060, | 5559 | ql_dbg(ql_dbg_tgt, vha, 0xe060, |
| 5538 | "qla_target(%d): Unable to send " | 5560 | "qla_target(%d): Unable to send " |
| @@ -5960,7 +5982,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, | |||
| 5960 | return; | 5982 | return; |
| 5961 | 5983 | ||
| 5962 | out_term: | 5984 | out_term: |
| 5963 | qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0); | 5985 | qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); |
| 5964 | if (sess) | 5986 | if (sess) |
| 5965 | ha->tgt.tgt_ops->put_sess(sess); | 5987 | ha->tgt.tgt_ops->put_sess(sess); |
| 5966 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 5988 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 71b2865ba3c8..22a6a767fe07 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
| @@ -943,6 +943,36 @@ struct qla_tgt_sess { | |||
| 943 | qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; | 943 | qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; |
| 944 | }; | 944 | }; |
| 945 | 945 | ||
| 946 | typedef enum { | ||
| 947 | /* | ||
| 948 | * BIT_0 - Atio Arrival / schedule to work | ||
| 949 | * BIT_1 - qlt_do_work | ||
| 950 | * BIT_2 - qlt_do work failed | ||
| 951 | * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending | ||
| 952 | * BIT_4 - read respond/tcm_qla2xx_queue_data_in | ||
| 953 | * BIT_5 - status respond / tcm_qla2xx_queue_status | ||
| 954 | * BIT_6 - tcm request to abort/Term exchange. | ||
| 955 | * pre_xmit_response->qlt_send_term_exchange | ||
| 956 | * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) | ||
| 957 | * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) | ||
| 958 | * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) | ||
| 959 | * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data | ||
| 960 | |||
| 961 | * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd | ||
| 962 | * BIT_13 - Bad completion - | ||
| 963 | * qlt_ctio_do_completion --> qlt_term_ctio_exchange | ||
| 964 | * BIT_14 - Back end data received/sent. | ||
| 965 | * BIT_15 - SRR prepare ctio | ||
| 966 | * BIT_16 - complete free | ||
| 967 | * BIT_17 - flush - qlt_abort_cmd_on_host_reset | ||
| 968 | * BIT_18 - completion w/abort status | ||
| 969 | * BIT_19 - completion w/unknown status | ||
| 970 | * BIT_20 - tcm_qla2xxx_free_cmd | ||
| 971 | */ | ||
| 972 | CMD_FLAG_DATA_WORK = BIT_11, | ||
| 973 | CMD_FLAG_DATA_WORK_FREE = BIT_21, | ||
| 974 | } cmd_flags_t; | ||
| 975 | |||
| 946 | struct qla_tgt_cmd { | 976 | struct qla_tgt_cmd { |
| 947 | struct se_cmd se_cmd; | 977 | struct se_cmd se_cmd; |
| 948 | struct qla_tgt_sess *sess; | 978 | struct qla_tgt_sess *sess; |
| @@ -952,6 +982,7 @@ struct qla_tgt_cmd { | |||
| 952 | /* Sense buffer that will be mapped into outgoing status */ | 982 | /* Sense buffer that will be mapped into outgoing status */ |
| 953 | unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; | 983 | unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; |
| 954 | 984 | ||
| 985 | spinlock_t cmd_lock; | ||
| 955 | /* to save extra sess dereferences */ | 986 | /* to save extra sess dereferences */ |
| 956 | unsigned int conf_compl_supported:1; | 987 | unsigned int conf_compl_supported:1; |
| 957 | unsigned int sg_mapped:1; | 988 | unsigned int sg_mapped:1; |
| @@ -986,30 +1017,8 @@ struct qla_tgt_cmd { | |||
| 986 | 1017 | ||
| 987 | uint64_t jiffies_at_alloc; | 1018 | uint64_t jiffies_at_alloc; |
| 988 | uint64_t jiffies_at_free; | 1019 | uint64_t jiffies_at_free; |
| 989 | /* BIT_0 - Atio Arrival / schedule to work | 1020 | |
| 990 | * BIT_1 - qlt_do_work | 1021 | cmd_flags_t cmd_flags; |
| 991 | * BIT_2 - qlt_do work failed | ||
| 992 | * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending | ||
| 993 | * BIT_4 - read respond/tcm_qla2xx_queue_data_in | ||
| 994 | * BIT_5 - status respond / tcm_qla2xx_queue_status | ||
| 995 | * BIT_6 - tcm request to abort/Term exchange. | ||
| 996 | * pre_xmit_response->qlt_send_term_exchange | ||
| 997 | * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) | ||
| 998 | * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) | ||
| 999 | * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) | ||
| 1000 | * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data | ||
| 1001 | * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work | ||
| 1002 | * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd | ||
| 1003 | * BIT_13 - Bad completion - | ||
| 1004 | * qlt_ctio_do_completion --> qlt_term_ctio_exchange | ||
| 1005 | * BIT_14 - Back end data received/sent. | ||
| 1006 | * BIT_15 - SRR prepare ctio | ||
| 1007 | * BIT_16 - complete free | ||
| 1008 | * BIT_17 - flush - qlt_abort_cmd_on_host_reset | ||
| 1009 | * BIT_18 - completion w/abort status | ||
| 1010 | * BIT_19 - completion w/unknown status | ||
| 1011 | */ | ||
| 1012 | uint32_t cmd_flags; | ||
| 1013 | }; | 1022 | }; |
| 1014 | 1023 | ||
| 1015 | struct qla_tgt_sess_work_param { | 1024 | struct qla_tgt_sess_work_param { |
| @@ -1148,7 +1157,7 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p) | |||
| 1148 | extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); | 1157 | extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); |
| 1149 | extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); | 1158 | extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); |
| 1150 | extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); | 1159 | extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); |
| 1151 | extern void qlt_abort_cmd(struct qla_tgt_cmd *); | 1160 | extern int qlt_abort_cmd(struct qla_tgt_cmd *); |
| 1152 | extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); | 1161 | extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); |
| 1153 | extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); | 1162 | extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); |
| 1154 | extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); | 1163 | extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); |
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index ddbe2e7ac14d..c3e622524604 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c | |||
| @@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, | |||
| 395 | if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { | 395 | if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { |
| 396 | for (i = 0; i < vha->hw->max_req_queues; i++) { | 396 | for (i = 0; i < vha->hw->max_req_queues; i++) { |
| 397 | struct req_que *req = vha->hw->req_q_map[i]; | 397 | struct req_que *req = vha->hw->req_q_map[i]; |
| 398 | |||
| 399 | if (!test_bit(i, vha->hw->req_qid_map)) | ||
| 400 | continue; | ||
| 401 | |||
| 398 | if (req || !buf) { | 402 | if (req || !buf) { |
| 399 | length = req ? | 403 | length = req ? |
| 400 | req->length : REQUEST_ENTRY_CNT_24XX; | 404 | req->length : REQUEST_ENTRY_CNT_24XX; |
| @@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, | |||
| 408 | } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { | 412 | } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { |
| 409 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { | 413 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { |
| 410 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; | 414 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; |
| 415 | |||
| 416 | if (!test_bit(i, vha->hw->rsp_qid_map)) | ||
| 417 | continue; | ||
| 418 | |||
| 411 | if (rsp || !buf) { | 419 | if (rsp || !buf) { |
| 412 | length = rsp ? | 420 | length = rsp ? |
| 413 | rsp->length : RESPONSE_ENTRY_CNT_MQ; | 421 | rsp->length : RESPONSE_ENTRY_CNT_MQ; |
| @@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, | |||
| 634 | if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { | 642 | if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { |
| 635 | for (i = 0; i < vha->hw->max_req_queues; i++) { | 643 | for (i = 0; i < vha->hw->max_req_queues; i++) { |
| 636 | struct req_que *req = vha->hw->req_q_map[i]; | 644 | struct req_que *req = vha->hw->req_q_map[i]; |
| 645 | |||
| 646 | if (!test_bit(i, vha->hw->req_qid_map)) | ||
| 647 | continue; | ||
| 648 | |||
| 637 | if (req || !buf) { | 649 | if (req || !buf) { |
| 638 | qla27xx_insert16(i, buf, len); | 650 | qla27xx_insert16(i, buf, len); |
| 639 | qla27xx_insert16(1, buf, len); | 651 | qla27xx_insert16(1, buf, len); |
| @@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, | |||
| 645 | } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { | 657 | } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { |
| 646 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { | 658 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { |
| 647 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; | 659 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; |
| 660 | |||
| 661 | if (!test_bit(i, vha->hw->rsp_qid_map)) | ||
| 662 | continue; | ||
| 663 | |||
| 648 | if (rsp || !buf) { | 664 | if (rsp || !buf) { |
| 649 | qla27xx_insert16(i, buf, len); | 665 | qla27xx_insert16(i, buf, len); |
| 650 | qla27xx_insert16(1, buf, len); | 666 | qla27xx_insert16(1, buf, len); |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index faf0a126627f..1808a01cfb7e 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -298,6 +298,10 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) | |||
| 298 | { | 298 | { |
| 299 | cmd->vha->tgt_counters.core_qla_free_cmd++; | 299 | cmd->vha->tgt_counters.core_qla_free_cmd++; |
| 300 | cmd->cmd_in_wq = 1; | 300 | cmd->cmd_in_wq = 1; |
| 301 | |||
| 302 | BUG_ON(cmd->cmd_flags & BIT_20); | ||
| 303 | cmd->cmd_flags |= BIT_20; | ||
| 304 | |||
| 301 | INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); | 305 | INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); |
| 302 | queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); | 306 | queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); |
| 303 | } | 307 | } |
| @@ -374,6 +378,20 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) | |||
| 374 | { | 378 | { |
| 375 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | 379 | struct qla_tgt_cmd *cmd = container_of(se_cmd, |
| 376 | struct qla_tgt_cmd, se_cmd); | 380 | struct qla_tgt_cmd, se_cmd); |
| 381 | |||
| 382 | if (cmd->aborted) { | ||
| 383 | /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task | ||
| 384 | * can get ahead of this cmd. tcm_qla2xxx_aborted_task | ||
| 385 | * already kick start the free. | ||
| 386 | */ | ||
| 387 | pr_debug("write_pending aborted cmd[%p] refcount %d " | ||
| 388 | "transport_state %x, t_state %x, se_cmd_flags %x\n", | ||
| 389 | cmd,cmd->se_cmd.cmd_kref.refcount.counter, | ||
| 390 | cmd->se_cmd.transport_state, | ||
| 391 | cmd->se_cmd.t_state, | ||
| 392 | cmd->se_cmd.se_cmd_flags); | ||
| 393 | return 0; | ||
| 394 | } | ||
| 377 | cmd->cmd_flags |= BIT_3; | 395 | cmd->cmd_flags |= BIT_3; |
| 378 | cmd->bufflen = se_cmd->data_length; | 396 | cmd->bufflen = se_cmd->data_length; |
| 379 | cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); | 397 | cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); |
| @@ -405,7 +423,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) | |||
| 405 | se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { | 423 | se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { |
| 406 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | 424 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
| 407 | wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, | 425 | wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, |
| 408 | 3 * HZ); | 426 | 50); |
| 409 | return 0; | 427 | return 0; |
| 410 | } | 428 | } |
| 411 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | 429 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); |
| @@ -444,6 +462,9 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, | |||
| 444 | if (bidi) | 462 | if (bidi) |
| 445 | flags |= TARGET_SCF_BIDI_OP; | 463 | flags |= TARGET_SCF_BIDI_OP; |
| 446 | 464 | ||
| 465 | if (se_cmd->cpuid != WORK_CPU_UNBOUND) | ||
| 466 | flags |= TARGET_SCF_USE_CPUID; | ||
| 467 | |||
| 447 | sess = cmd->sess; | 468 | sess = cmd->sess; |
| 448 | if (!sess) { | 469 | if (!sess) { |
| 449 | pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); | 470 | pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); |
| @@ -465,13 +486,25 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, | |||
| 465 | static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | 486 | static void tcm_qla2xxx_handle_data_work(struct work_struct *work) |
| 466 | { | 487 | { |
| 467 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | 488 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); |
| 489 | unsigned long flags; | ||
| 468 | 490 | ||
| 469 | /* | 491 | /* |
| 470 | * Ensure that the complete FCP WRITE payload has been received. | 492 | * Ensure that the complete FCP WRITE payload has been received. |
| 471 | * Otherwise return an exception via CHECK_CONDITION status. | 493 | * Otherwise return an exception via CHECK_CONDITION status. |
| 472 | */ | 494 | */ |
| 473 | cmd->cmd_in_wq = 0; | 495 | cmd->cmd_in_wq = 0; |
| 474 | cmd->cmd_flags |= BIT_11; | 496 | |
| 497 | spin_lock_irqsave(&cmd->cmd_lock, flags); | ||
| 498 | cmd->cmd_flags |= CMD_FLAG_DATA_WORK; | ||
| 499 | if (cmd->aborted) { | ||
| 500 | cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; | ||
| 501 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 502 | |||
| 503 | tcm_qla2xxx_free_cmd(cmd); | ||
| 504 | return; | ||
| 505 | } | ||
| 506 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 507 | |||
| 475 | cmd->vha->tgt_counters.qla_core_ret_ctio++; | 508 | cmd->vha->tgt_counters.qla_core_ret_ctio++; |
| 476 | if (!cmd->write_data_transferred) { | 509 | if (!cmd->write_data_transferred) { |
| 477 | /* | 510 | /* |
| @@ -546,6 +579,20 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) | |||
| 546 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | 579 | struct qla_tgt_cmd *cmd = container_of(se_cmd, |
| 547 | struct qla_tgt_cmd, se_cmd); | 580 | struct qla_tgt_cmd, se_cmd); |
| 548 | 581 | ||
| 582 | if (cmd->aborted) { | ||
| 583 | /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task | ||
| 584 | * can get ahead of this cmd. tcm_qla2xxx_aborted_task | ||
| 585 | * already kick start the free. | ||
| 586 | */ | ||
| 587 | pr_debug("queue_data_in aborted cmd[%p] refcount %d " | ||
| 588 | "transport_state %x, t_state %x, se_cmd_flags %x\n", | ||
| 589 | cmd,cmd->se_cmd.cmd_kref.refcount.counter, | ||
| 590 | cmd->se_cmd.transport_state, | ||
| 591 | cmd->se_cmd.t_state, | ||
| 592 | cmd->se_cmd.se_cmd_flags); | ||
| 593 | return 0; | ||
| 594 | } | ||
| 595 | |||
| 549 | cmd->cmd_flags |= BIT_4; | 596 | cmd->cmd_flags |= BIT_4; |
| 550 | cmd->bufflen = se_cmd->data_length; | 597 | cmd->bufflen = se_cmd->data_length; |
| 551 | cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); | 598 | cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); |
| @@ -637,11 +684,34 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) | |||
| 637 | qlt_xmit_tm_rsp(mcmd); | 684 | qlt_xmit_tm_rsp(mcmd); |
| 638 | } | 685 | } |
| 639 | 686 | ||
| 687 | |||
| 688 | #define DATA_WORK_NOT_FREE(_flags) \ | ||
| 689 | (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \ | ||
| 690 | CMD_FLAG_DATA_WORK) | ||
| 640 | static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) | 691 | static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) |
| 641 | { | 692 | { |
| 642 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | 693 | struct qla_tgt_cmd *cmd = container_of(se_cmd, |
| 643 | struct qla_tgt_cmd, se_cmd); | 694 | struct qla_tgt_cmd, se_cmd); |
| 644 | qlt_abort_cmd(cmd); | 695 | unsigned long flags; |
| 696 | |||
| 697 | if (qlt_abort_cmd(cmd)) | ||
| 698 | return; | ||
| 699 | |||
| 700 | spin_lock_irqsave(&cmd->cmd_lock, flags); | ||
| 701 | if ((cmd->state == QLA_TGT_STATE_NEW)|| | ||
| 702 | ((cmd->state == QLA_TGT_STATE_DATA_IN) && | ||
| 703 | DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) { | ||
| 704 | |||
| 705 | cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; | ||
| 706 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 707 | /* Cmd have not reached firmware. | ||
| 708 | * Use this trigger to free it. */ | ||
| 709 | tcm_qla2xxx_free_cmd(cmd); | ||
| 710 | return; | ||
| 711 | } | ||
| 712 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 713 | return; | ||
| 714 | |||
| 645 | } | 715 | } |
| 646 | 716 | ||
| 647 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, | 717 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 47b9d13f97b8..da2e068ee47d 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -205,6 +205,7 @@ static struct { | |||
| 205 | {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, | 205 | {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, |
| 206 | {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, | 206 | {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, |
| 207 | {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, | 207 | {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, |
| 208 | {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES}, | ||
| 208 | {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 209 | {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
| 209 | {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, | 210 | {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, |
| 210 | {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, | 211 | {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bb669d32ccd0..d749da765df1 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -761,7 +761,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd) | |||
| 761 | break; | 761 | break; |
| 762 | 762 | ||
| 763 | default: | 763 | default: |
| 764 | ret = BLKPREP_KILL; | 764 | ret = BLKPREP_INVALID; |
| 765 | goto out; | 765 | goto out; |
| 766 | } | 766 | } |
| 767 | 767 | ||
| @@ -839,7 +839,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) | |||
| 839 | int ret; | 839 | int ret; |
| 840 | 840 | ||
| 841 | if (sdkp->device->no_write_same) | 841 | if (sdkp->device->no_write_same) |
| 842 | return BLKPREP_KILL; | 842 | return BLKPREP_INVALID; |
| 843 | 843 | ||
| 844 | BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); | 844 | BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); |
| 845 | 845 | ||
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 55627d097873..292c04eec9ad 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | #include <scsi/scsi_devinfo.h> | 42 | #include <scsi/scsi_devinfo.h> |
| 43 | #include <scsi/scsi_dbg.h> | 43 | #include <scsi/scsi_dbg.h> |
| 44 | #include <scsi/scsi_transport_fc.h> | 44 | #include <scsi/scsi_transport_fc.h> |
| 45 | #include <scsi/scsi_transport.h> | ||
| 45 | 46 | ||
| 46 | /* | 47 | /* |
| 47 | * All wire protocol details (storage protocol between the guest and the host) | 48 | * All wire protocol details (storage protocol between the guest and the host) |
| @@ -477,19 +478,18 @@ struct hv_host_device { | |||
| 477 | struct storvsc_scan_work { | 478 | struct storvsc_scan_work { |
| 478 | struct work_struct work; | 479 | struct work_struct work; |
| 479 | struct Scsi_Host *host; | 480 | struct Scsi_Host *host; |
| 480 | uint lun; | 481 | u8 lun; |
| 482 | u8 tgt_id; | ||
| 481 | }; | 483 | }; |
| 482 | 484 | ||
| 483 | static void storvsc_device_scan(struct work_struct *work) | 485 | static void storvsc_device_scan(struct work_struct *work) |
| 484 | { | 486 | { |
| 485 | struct storvsc_scan_work *wrk; | 487 | struct storvsc_scan_work *wrk; |
| 486 | uint lun; | ||
| 487 | struct scsi_device *sdev; | 488 | struct scsi_device *sdev; |
| 488 | 489 | ||
| 489 | wrk = container_of(work, struct storvsc_scan_work, work); | 490 | wrk = container_of(work, struct storvsc_scan_work, work); |
| 490 | lun = wrk->lun; | ||
| 491 | 491 | ||
| 492 | sdev = scsi_device_lookup(wrk->host, 0, 0, lun); | 492 | sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); |
| 493 | if (!sdev) | 493 | if (!sdev) |
| 494 | goto done; | 494 | goto done; |
| 495 | scsi_rescan_device(&sdev->sdev_gendev); | 495 | scsi_rescan_device(&sdev->sdev_gendev); |
| @@ -540,7 +540,7 @@ static void storvsc_remove_lun(struct work_struct *work) | |||
| 540 | if (!scsi_host_get(wrk->host)) | 540 | if (!scsi_host_get(wrk->host)) |
| 541 | goto done; | 541 | goto done; |
| 542 | 542 | ||
| 543 | sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun); | 543 | sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); |
| 544 | 544 | ||
| 545 | if (sdev) { | 545 | if (sdev) { |
| 546 | scsi_remove_device(sdev); | 546 | scsi_remove_device(sdev); |
| @@ -940,6 +940,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, | |||
| 940 | 940 | ||
| 941 | wrk->host = host; | 941 | wrk->host = host; |
| 942 | wrk->lun = vm_srb->lun; | 942 | wrk->lun = vm_srb->lun; |
| 943 | wrk->tgt_id = vm_srb->target_id; | ||
| 943 | INIT_WORK(&wrk->work, process_err_fn); | 944 | INIT_WORK(&wrk->work, process_err_fn); |
| 944 | schedule_work(&wrk->work); | 945 | schedule_work(&wrk->work); |
| 945 | } | 946 | } |
| @@ -1770,6 +1771,11 @@ static int __init storvsc_drv_init(void) | |||
| 1770 | fc_transport_template = fc_attach_transport(&fc_transport_functions); | 1771 | fc_transport_template = fc_attach_transport(&fc_transport_functions); |
| 1771 | if (!fc_transport_template) | 1772 | if (!fc_transport_template) |
| 1772 | return -ENODEV; | 1773 | return -ENODEV; |
| 1774 | |||
| 1775 | /* | ||
| 1776 | * Install Hyper-V specific timeout handler. | ||
| 1777 | */ | ||
| 1778 | fc_transport_template->eh_timed_out = storvsc_eh_timed_out; | ||
| 1773 | #endif | 1779 | #endif |
| 1774 | 1780 | ||
| 1775 | ret = vmbus_driver_register(&storvsc_drv); | 1781 | ret = vmbus_driver_register(&storvsc_drv); |
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index be822f7a9ce6..aca282d45421 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 11 | * GNU General Public License for more details. | 11 | * GNU General Public License for more details. |
| 12 | */ | 12 | */ |
| 13 | #include <linux/bitmap.h> | ||
| 13 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 14 | #include <linux/err.h> | 15 | #include <linux/err.h> |
| 15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| @@ -47,9 +48,9 @@ | |||
| 47 | #define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1) | 48 | #define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1) |
| 48 | #define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF) | 49 | #define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF) |
| 49 | 50 | ||
| 50 | #define SPMI_MAPPING_TABLE_LEN 255 | ||
| 51 | #define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */ | 51 | #define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */ |
| 52 | #define PPID_TO_CHAN_TABLE_SZ BIT(12) /* PPID is 12bit chan is 1byte*/ | 52 | #define PMIC_ARB_MAX_PPID BIT(12) /* PPID is 12bit */ |
| 53 | #define PMIC_ARB_CHAN_VALID BIT(15) | ||
| 53 | 54 | ||
| 54 | /* Ownership Table */ | 55 | /* Ownership Table */ |
| 55 | #define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N))) | 56 | #define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N))) |
| @@ -85,9 +86,7 @@ enum pmic_arb_cmd_op_code { | |||
| 85 | }; | 86 | }; |
| 86 | 87 | ||
| 87 | /* Maximum number of support PMIC peripherals */ | 88 | /* Maximum number of support PMIC peripherals */ |
| 88 | #define PMIC_ARB_MAX_PERIPHS 256 | 89 | #define PMIC_ARB_MAX_PERIPHS 512 |
| 89 | #define PMIC_ARB_MAX_CHNL 128 | ||
| 90 | #define PMIC_ARB_PERIPH_ID_VALID (1 << 15) | ||
| 91 | #define PMIC_ARB_TIMEOUT_US 100 | 90 | #define PMIC_ARB_TIMEOUT_US 100 |
| 92 | #define PMIC_ARB_MAX_TRANS_BYTES (8) | 91 | #define PMIC_ARB_MAX_TRANS_BYTES (8) |
| 93 | 92 | ||
| @@ -125,18 +124,22 @@ struct spmi_pmic_arb_dev { | |||
| 125 | void __iomem *wr_base; | 124 | void __iomem *wr_base; |
| 126 | void __iomem *intr; | 125 | void __iomem *intr; |
| 127 | void __iomem *cnfg; | 126 | void __iomem *cnfg; |
| 127 | void __iomem *core; | ||
| 128 | resource_size_t core_size; | ||
| 128 | raw_spinlock_t lock; | 129 | raw_spinlock_t lock; |
| 129 | u8 channel; | 130 | u8 channel; |
| 130 | int irq; | 131 | int irq; |
| 131 | u8 ee; | 132 | u8 ee; |
| 132 | u8 min_apid; | 133 | u16 min_apid; |
| 133 | u8 max_apid; | 134 | u16 max_apid; |
| 134 | u32 mapping_table[SPMI_MAPPING_TABLE_LEN]; | 135 | u32 *mapping_table; |
| 136 | DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS); | ||
| 135 | struct irq_domain *domain; | 137 | struct irq_domain *domain; |
| 136 | struct spmi_controller *spmic; | 138 | struct spmi_controller *spmic; |
| 137 | u16 apid_to_ppid[256]; | 139 | u16 *apid_to_ppid; |
| 138 | const struct pmic_arb_ver_ops *ver_ops; | 140 | const struct pmic_arb_ver_ops *ver_ops; |
| 139 | u8 *ppid_to_chan; | 141 | u16 *ppid_to_chan; |
| 142 | u16 last_channel; | ||
| 140 | }; | 143 | }; |
| 141 | 144 | ||
| 142 | /** | 145 | /** |
| @@ -158,7 +161,8 @@ struct spmi_pmic_arb_dev { | |||
| 158 | */ | 161 | */ |
| 159 | struct pmic_arb_ver_ops { | 162 | struct pmic_arb_ver_ops { |
| 160 | /* spmi commands (read_cmd, write_cmd, cmd) functionality */ | 163 | /* spmi commands (read_cmd, write_cmd, cmd) functionality */ |
| 161 | u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr); | 164 | int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr, |
| 165 | u32 *offset); | ||
| 162 | u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc); | 166 | u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc); |
| 163 | int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid); | 167 | int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid); |
| 164 | /* Interrupts controller functionality (offset of PIC registers) */ | 168 | /* Interrupts controller functionality (offset of PIC registers) */ |
| @@ -212,7 +216,14 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl, | |||
| 212 | struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl); | 216 | struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl); |
| 213 | u32 status = 0; | 217 | u32 status = 0; |
| 214 | u32 timeout = PMIC_ARB_TIMEOUT_US; | 218 | u32 timeout = PMIC_ARB_TIMEOUT_US; |
| 215 | u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS; | 219 | u32 offset; |
| 220 | int rc; | ||
| 221 | |||
| 222 | rc = dev->ver_ops->offset(dev, sid, addr, &offset); | ||
| 223 | if (rc) | ||
| 224 | return rc; | ||
| 225 | |||
| 226 | offset += PMIC_ARB_STATUS; | ||
| 216 | 227 | ||
| 217 | while (timeout--) { | 228 | while (timeout--) { |
| 218 | status = readl_relaxed(base + offset); | 229 | status = readl_relaxed(base + offset); |
| @@ -257,7 +268,11 @@ pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid) | |||
| 257 | unsigned long flags; | 268 | unsigned long flags; |
| 258 | u32 cmd; | 269 | u32 cmd; |
| 259 | int rc; | 270 | int rc; |
| 260 | u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0); | 271 | u32 offset; |
| 272 | |||
| 273 | rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset); | ||
| 274 | if (rc) | ||
| 275 | return rc; | ||
| 261 | 276 | ||
| 262 | cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20); | 277 | cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20); |
| 263 | 278 | ||
| @@ -297,7 +312,11 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, | |||
| 297 | u8 bc = len - 1; | 312 | u8 bc = len - 1; |
| 298 | u32 cmd; | 313 | u32 cmd; |
| 299 | int rc; | 314 | int rc; |
| 300 | u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr); | 315 | u32 offset; |
| 316 | |||
| 317 | rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset); | ||
| 318 | if (rc) | ||
| 319 | return rc; | ||
| 301 | 320 | ||
| 302 | if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { | 321 | if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { |
| 303 | dev_err(&ctrl->dev, | 322 | dev_err(&ctrl->dev, |
| @@ -344,7 +363,11 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, | |||
| 344 | u8 bc = len - 1; | 363 | u8 bc = len - 1; |
| 345 | u32 cmd; | 364 | u32 cmd; |
| 346 | int rc; | 365 | int rc; |
| 347 | u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr); | 366 | u32 offset; |
| 367 | |||
| 368 | rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset); | ||
| 369 | if (rc) | ||
| 370 | return rc; | ||
| 348 | 371 | ||
| 349 | if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { | 372 | if (bc >= PMIC_ARB_MAX_TRANS_BYTES) { |
| 350 | dev_err(&ctrl->dev, | 373 | dev_err(&ctrl->dev, |
| @@ -614,6 +637,10 @@ static int search_mapping_table(struct spmi_pmic_arb_dev *pa, | |||
| 614 | u32 data; | 637 | u32 data; |
| 615 | 638 | ||
| 616 | for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) { | 639 | for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) { |
| 640 | if (!test_and_set_bit(index, pa->mapping_table_valid)) | ||
| 641 | mapping_table[index] = readl_relaxed(pa->cnfg + | ||
| 642 | SPMI_MAPPING_TABLE_REG(index)); | ||
| 643 | |||
| 617 | data = mapping_table[index]; | 644 | data = mapping_table[index]; |
| 618 | 645 | ||
| 619 | if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) { | 646 | if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) { |
| @@ -701,18 +728,61 @@ static int qpnpint_irq_domain_map(struct irq_domain *d, | |||
| 701 | } | 728 | } |
| 702 | 729 | ||
| 703 | /* v1 offset per ee */ | 730 | /* v1 offset per ee */ |
| 704 | static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr) | 731 | static int |
| 732 | pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset) | ||
| 705 | { | 733 | { |
| 706 | return 0x800 + 0x80 * pa->channel; | 734 | *offset = 0x800 + 0x80 * pa->channel; |
| 735 | return 0; | ||
| 707 | } | 736 | } |
| 708 | 737 | ||
| 738 | static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid) | ||
| 739 | { | ||
| 740 | u32 regval, offset; | ||
| 741 | u16 chan; | ||
| 742 | u16 id; | ||
| 743 | |||
| 744 | /* | ||
| 745 | * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid. | ||
| 746 | * ppid_to_chan is an in-memory invert of that table. | ||
| 747 | */ | ||
| 748 | for (chan = pa->last_channel; ; chan++) { | ||
| 749 | offset = PMIC_ARB_REG_CHNL(chan); | ||
| 750 | if (offset >= pa->core_size) | ||
| 751 | break; | ||
| 752 | |||
| 753 | regval = readl_relaxed(pa->core + offset); | ||
| 754 | if (!regval) | ||
| 755 | continue; | ||
| 756 | |||
| 757 | id = (regval >> 8) & PMIC_ARB_PPID_MASK; | ||
| 758 | pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID; | ||
| 759 | if (id == ppid) { | ||
| 760 | chan |= PMIC_ARB_CHAN_VALID; | ||
| 761 | break; | ||
| 762 | } | ||
| 763 | } | ||
| 764 | pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID; | ||
| 765 | |||
| 766 | return chan; | ||
| 767 | } | ||
| 768 | |||
| 769 | |||
| 709 | /* v2 offset per ppid (chan) and per ee */ | 770 | /* v2 offset per ppid (chan) and per ee */ |
| 710 | static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr) | 771 | static int |
| 772 | pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset) | ||
| 711 | { | 773 | { |
| 712 | u16 ppid = (sid << 8) | (addr >> 8); | 774 | u16 ppid = (sid << 8) | (addr >> 8); |
| 713 | u8 chan = pa->ppid_to_chan[ppid]; | 775 | u16 chan; |
| 714 | 776 | ||
| 715 | return 0x1000 * pa->ee + 0x8000 * chan; | 777 | chan = pa->ppid_to_chan[ppid]; |
| 778 | if (!(chan & PMIC_ARB_CHAN_VALID)) | ||
| 779 | chan = pmic_arb_find_chan(pa, ppid); | ||
| 780 | if (!(chan & PMIC_ARB_CHAN_VALID)) | ||
| 781 | return -ENODEV; | ||
| 782 | chan &= ~PMIC_ARB_CHAN_VALID; | ||
| 783 | |||
| 784 | *offset = 0x1000 * pa->ee + 0x8000 * chan; | ||
| 785 | return 0; | ||
| 716 | } | 786 | } |
| 717 | 787 | ||
| 718 | static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc) | 788 | static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc) |
| @@ -797,7 +867,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
| 797 | struct resource *res; | 867 | struct resource *res; |
| 798 | void __iomem *core; | 868 | void __iomem *core; |
| 799 | u32 channel, ee, hw_ver; | 869 | u32 channel, ee, hw_ver; |
| 800 | int err, i; | 870 | int err; |
| 801 | bool is_v1; | 871 | bool is_v1; |
| 802 | 872 | ||
| 803 | ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa)); | 873 | ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa)); |
| @@ -808,6 +878,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
| 808 | pa->spmic = ctrl; | 878 | pa->spmic = ctrl; |
| 809 | 879 | ||
| 810 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); | 880 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); |
| 881 | pa->core_size = resource_size(res); | ||
| 811 | core = devm_ioremap_resource(&ctrl->dev, res); | 882 | core = devm_ioremap_resource(&ctrl->dev, res); |
| 812 | if (IS_ERR(core)) { | 883 | if (IS_ERR(core)) { |
| 813 | err = PTR_ERR(core); | 884 | err = PTR_ERR(core); |
| @@ -825,10 +896,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
| 825 | pa->wr_base = core; | 896 | pa->wr_base = core; |
| 826 | pa->rd_base = core; | 897 | pa->rd_base = core; |
| 827 | } else { | 898 | } else { |
| 828 | u8 chan; | 899 | pa->core = core; |
| 829 | u16 ppid; | ||
| 830 | u32 regval; | ||
| 831 | |||
| 832 | pa->ver_ops = &pmic_arb_v2; | 900 | pa->ver_ops = &pmic_arb_v2; |
| 833 | 901 | ||
| 834 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 902 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
| @@ -847,24 +915,14 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
| 847 | goto err_put_ctrl; | 915 | goto err_put_ctrl; |
| 848 | } | 916 | } |
| 849 | 917 | ||
| 850 | pa->ppid_to_chan = devm_kzalloc(&ctrl->dev, | 918 | pa->ppid_to_chan = devm_kcalloc(&ctrl->dev, |
| 851 | PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL); | 919 | PMIC_ARB_MAX_PPID, |
| 920 | sizeof(*pa->ppid_to_chan), | ||
| 921 | GFP_KERNEL); | ||
| 852 | if (!pa->ppid_to_chan) { | 922 | if (!pa->ppid_to_chan) { |
| 853 | err = -ENOMEM; | 923 | err = -ENOMEM; |
| 854 | goto err_put_ctrl; | 924 | goto err_put_ctrl; |
| 855 | } | 925 | } |
| 856 | /* | ||
| 857 | * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid. | ||
| 858 | * ppid_to_chan is an in-memory invert of that table. | ||
| 859 | */ | ||
| 860 | for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) { | ||
| 861 | regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan)); | ||
| 862 | if (!regval) | ||
| 863 | continue; | ||
| 864 | |||
| 865 | ppid = (regval >> 8) & 0xFFF; | ||
| 866 | pa->ppid_to_chan[ppid] = chan; | ||
| 867 | } | ||
| 868 | } | 926 | } |
| 869 | 927 | ||
| 870 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr"); | 928 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr"); |
| @@ -915,9 +973,20 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) | |||
| 915 | 973 | ||
| 916 | pa->ee = ee; | 974 | pa->ee = ee; |
| 917 | 975 | ||
| 918 | for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i) | 976 | pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS, |
| 919 | pa->mapping_table[i] = readl_relaxed( | 977 | sizeof(*pa->apid_to_ppid), |
| 920 | pa->cnfg + SPMI_MAPPING_TABLE_REG(i)); | 978 | GFP_KERNEL); |
| 979 | if (!pa->apid_to_ppid) { | ||
| 980 | err = -ENOMEM; | ||
| 981 | goto err_put_ctrl; | ||
| 982 | } | ||
| 983 | |||
| 984 | pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1, | ||
| 985 | sizeof(*pa->mapping_table), GFP_KERNEL); | ||
| 986 | if (!pa->mapping_table) { | ||
| 987 | err = -ENOMEM; | ||
| 988 | goto err_put_ctrl; | ||
| 989 | } | ||
| 921 | 990 | ||
| 922 | /* Initialize max_apid/min_apid to the opposite bounds, during | 991 | /* Initialize max_apid/min_apid to the opposite bounds, during |
| 923 | * the irq domain translation, we are sure to update these */ | 992 | * the irq domain translation, we are sure to update these */ |
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c index 891dfaaf1593..364fdcdd3a06 100644 --- a/drivers/staging/goldfish/goldfish_audio.c +++ b/drivers/staging/goldfish/goldfish_audio.c | |||
| @@ -63,7 +63,7 @@ struct goldfish_audio { | |||
| 63 | #define AUDIO_READ(data, addr) (readl(data->reg_base + addr)) | 63 | #define AUDIO_READ(data, addr) (readl(data->reg_base + addr)) |
| 64 | #define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr)) | 64 | #define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr)) |
| 65 | #define AUDIO_WRITE64(data, addr, addr2, x) \ | 65 | #define AUDIO_WRITE64(data, addr, addr2, x) \ |
| 66 | (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base+addr2)) | 66 | (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base + addr2)) |
| 67 | 67 | ||
| 68 | /* | 68 | /* |
| 69 | * temporary variable used between goldfish_audio_probe() and | 69 | * temporary variable used between goldfish_audio_probe() and |
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c index 5c4f61c006e2..76d60eed1490 100644 --- a/drivers/staging/goldfish/goldfish_nand.c +++ b/drivers/staging/goldfish/goldfish_nand.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
| 28 | #include <linux/goldfish.h> | 28 | #include <linux/goldfish.h> |
| 29 | #include <asm/div64.h> | 29 | #include <asm/div64.h> |
| 30 | #include <linux/dma-mapping.h> | ||
| 30 | 31 | ||
| 31 | #include "goldfish_nand_reg.h" | 32 | #include "goldfish_nand_reg.h" |
| 32 | 33 | ||
| @@ -99,11 +100,11 @@ static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
| 99 | { | 100 | { |
| 100 | loff_t ofs = instr->addr; | 101 | loff_t ofs = instr->addr; |
| 101 | u32 len = instr->len; | 102 | u32 len = instr->len; |
| 102 | u32 rem; | 103 | s32 rem; |
| 103 | 104 | ||
| 104 | if (ofs + len > mtd->size) | 105 | if (ofs + len > mtd->size) |
| 105 | goto invalid_arg; | 106 | goto invalid_arg; |
| 106 | rem = do_div(ofs, mtd->writesize); | 107 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
| 107 | if (rem) | 108 | if (rem) |
| 108 | goto invalid_arg; | 109 | goto invalid_arg; |
| 109 | ofs *= (mtd->writesize + mtd->oobsize); | 110 | ofs *= (mtd->writesize + mtd->oobsize); |
| @@ -132,7 +133,7 @@ invalid_arg: | |||
| 132 | static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, | 133 | static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, |
| 133 | struct mtd_oob_ops *ops) | 134 | struct mtd_oob_ops *ops) |
| 134 | { | 135 | { |
| 135 | u32 rem; | 136 | s32 rem; |
| 136 | 137 | ||
| 137 | if (ofs + ops->len > mtd->size) | 138 | if (ofs + ops->len > mtd->size) |
| 138 | goto invalid_arg; | 139 | goto invalid_arg; |
| @@ -141,7 +142,7 @@ static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, | |||
| 141 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) | 142 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) |
| 142 | goto invalid_arg; | 143 | goto invalid_arg; |
| 143 | 144 | ||
| 144 | rem = do_div(ofs, mtd->writesize); | 145 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
| 145 | if (rem) | 146 | if (rem) |
| 146 | goto invalid_arg; | 147 | goto invalid_arg; |
| 147 | ofs *= (mtd->writesize + mtd->oobsize); | 148 | ofs *= (mtd->writesize + mtd->oobsize); |
| @@ -164,7 +165,7 @@ invalid_arg: | |||
| 164 | static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, | 165 | static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, |
| 165 | struct mtd_oob_ops *ops) | 166 | struct mtd_oob_ops *ops) |
| 166 | { | 167 | { |
| 167 | u32 rem; | 168 | s32 rem; |
| 168 | 169 | ||
| 169 | if (ofs + ops->len > mtd->size) | 170 | if (ofs + ops->len > mtd->size) |
| 170 | goto invalid_arg; | 171 | goto invalid_arg; |
| @@ -173,7 +174,7 @@ static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
| 173 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) | 174 | if (ops->ooblen + ops->ooboffs > mtd->oobsize) |
| 174 | goto invalid_arg; | 175 | goto invalid_arg; |
| 175 | 176 | ||
| 176 | rem = do_div(ofs, mtd->writesize); | 177 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
| 177 | if (rem) | 178 | if (rem) |
| 178 | goto invalid_arg; | 179 | goto invalid_arg; |
| 179 | ofs *= (mtd->writesize + mtd->oobsize); | 180 | ofs *= (mtd->writesize + mtd->oobsize); |
| @@ -196,12 +197,12 @@ invalid_arg: | |||
| 196 | static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len, | 197 | static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len, |
| 197 | size_t *retlen, u_char *buf) | 198 | size_t *retlen, u_char *buf) |
| 198 | { | 199 | { |
| 199 | u32 rem; | 200 | s32 rem; |
| 200 | 201 | ||
| 201 | if (from + len > mtd->size) | 202 | if (from + len > mtd->size) |
| 202 | goto invalid_arg; | 203 | goto invalid_arg; |
| 203 | 204 | ||
| 204 | rem = do_div(from, mtd->writesize); | 205 | from = div_s64_rem(from, mtd->writesize, &rem); |
| 205 | if (rem) | 206 | if (rem) |
| 206 | goto invalid_arg; | 207 | goto invalid_arg; |
| 207 | from *= (mtd->writesize + mtd->oobsize); | 208 | from *= (mtd->writesize + mtd->oobsize); |
| @@ -218,12 +219,12 @@ invalid_arg: | |||
| 218 | static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | 219 | static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len, |
| 219 | size_t *retlen, const u_char *buf) | 220 | size_t *retlen, const u_char *buf) |
| 220 | { | 221 | { |
| 221 | u32 rem; | 222 | s32 rem; |
| 222 | 223 | ||
| 223 | if (to + len > mtd->size) | 224 | if (to + len > mtd->size) |
| 224 | goto invalid_arg; | 225 | goto invalid_arg; |
| 225 | 226 | ||
| 226 | rem = do_div(to, mtd->writesize); | 227 | to = div_s64_rem(to, mtd->writesize, &rem); |
| 227 | if (rem) | 228 | if (rem) |
| 228 | goto invalid_arg; | 229 | goto invalid_arg; |
| 229 | to *= (mtd->writesize + mtd->oobsize); | 230 | to *= (mtd->writesize + mtd->oobsize); |
| @@ -239,12 +240,12 @@ invalid_arg: | |||
| 239 | 240 | ||
| 240 | static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) | 241 | static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) |
| 241 | { | 242 | { |
| 242 | u32 rem; | 243 | s32 rem; |
| 243 | 244 | ||
| 244 | if (ofs >= mtd->size) | 245 | if (ofs >= mtd->size) |
| 245 | goto invalid_arg; | 246 | goto invalid_arg; |
| 246 | 247 | ||
| 247 | rem = do_div(ofs, mtd->erasesize); | 248 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
| 248 | if (rem) | 249 | if (rem) |
| 249 | goto invalid_arg; | 250 | goto invalid_arg; |
| 250 | ofs *= mtd->erasesize / mtd->writesize; | 251 | ofs *= mtd->erasesize / mtd->writesize; |
| @@ -260,12 +261,12 @@ invalid_arg: | |||
| 260 | 261 | ||
| 261 | static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) | 262 | static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) |
| 262 | { | 263 | { |
| 263 | u32 rem; | 264 | s32 rem; |
| 264 | 265 | ||
| 265 | if (ofs >= mtd->size) | 266 | if (ofs >= mtd->size) |
| 266 | goto invalid_arg; | 267 | goto invalid_arg; |
| 267 | 268 | ||
| 268 | rem = do_div(ofs, mtd->erasesize); | 269 | ofs = div_s64_rem(ofs, mtd->writesize, &rem); |
| 269 | if (rem) | 270 | if (rem) |
| 270 | goto invalid_arg; | 271 | goto invalid_arg; |
| 271 | ofs *= mtd->erasesize / mtd->writesize; | 272 | ofs *= mtd->erasesize / mtd->writesize; |
| @@ -284,17 +285,18 @@ invalid_arg: | |||
| 284 | static int nand_setup_cmd_params(struct platform_device *pdev, | 285 | static int nand_setup_cmd_params(struct platform_device *pdev, |
| 285 | struct goldfish_nand *nand) | 286 | struct goldfish_nand *nand) |
| 286 | { | 287 | { |
| 287 | u64 paddr; | 288 | dma_addr_t dma_handle; |
| 288 | unsigned char __iomem *base = nand->base; | 289 | unsigned char __iomem *base = nand->base; |
| 289 | 290 | ||
| 290 | nand->cmd_params = devm_kzalloc(&pdev->dev, | 291 | nand->cmd_params = dmam_alloc_coherent(&pdev->dev, |
| 291 | sizeof(struct cmd_params), GFP_KERNEL); | 292 | sizeof(struct cmd_params), |
| 292 | if (!nand->cmd_params) | 293 | &dma_handle, GFP_KERNEL); |
| 294 | if (!nand->cmd_params) { | ||
| 295 | dev_err(&pdev->dev, "allocate buffer failed\n"); | ||
| 293 | return -ENOMEM; | 296 | return -ENOMEM; |
| 294 | 297 | } | |
| 295 | paddr = __pa(nand->cmd_params); | 298 | writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH); |
| 296 | writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH); | 299 | writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW); |
| 297 | writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW); | ||
| 298 | return 0; | 300 | return 0; |
| 299 | } | 301 | } |
| 300 | 302 | ||
| @@ -319,7 +321,7 @@ static int goldfish_nand_init_device(struct platform_device *pdev, | |||
| 319 | mtd->oobavail = mtd->oobsize; | 321 | mtd->oobavail = mtd->oobsize; |
| 320 | mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) / | 322 | mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) / |
| 321 | (mtd->writesize + mtd->oobsize) * mtd->writesize; | 323 | (mtd->writesize + mtd->oobsize) * mtd->writesize; |
| 322 | do_div(mtd->size, mtd->writesize + mtd->oobsize); | 324 | mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize); |
| 323 | mtd->size *= mtd->writesize; | 325 | mtd->size *= mtd->writesize; |
| 324 | dev_dbg(&pdev->dev, | 326 | dev_dbg(&pdev->dev, |
| 325 | "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n", | 327 | "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n", |
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 3327c49674d3..713c63d9681b 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
| @@ -898,7 +898,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item, | |||
| 898 | da->unmap_zeroes_data = flag; | 898 | da->unmap_zeroes_data = flag; |
| 899 | pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", | 899 | pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", |
| 900 | da->da_dev, flag); | 900 | da->da_dev, flag); |
| 901 | return 0; | 901 | return count; |
| 902 | } | 902 | } |
| 903 | 903 | ||
| 904 | /* | 904 | /* |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index cacd97a8cbd0..da457e25717a 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
| @@ -828,6 +828,50 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) | |||
| 828 | return dev; | 828 | return dev; |
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | /* | ||
| 832 | * Check if the underlying struct block_device request_queue supports | ||
| 833 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM | ||
| 834 | * in ATA and we need to set TPE=1 | ||
| 835 | */ | ||
| 836 | bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, | ||
| 837 | struct request_queue *q, int block_size) | ||
| 838 | { | ||
| 839 | if (!blk_queue_discard(q)) | ||
| 840 | return false; | ||
| 841 | |||
| 842 | attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / | ||
| 843 | block_size; | ||
| 844 | /* | ||
| 845 | * Currently hardcoded to 1 in Linux/SCSI code.. | ||
| 846 | */ | ||
| 847 | attrib->max_unmap_block_desc_count = 1; | ||
| 848 | attrib->unmap_granularity = q->limits.discard_granularity / block_size; | ||
| 849 | attrib->unmap_granularity_alignment = q->limits.discard_alignment / | ||
| 850 | block_size; | ||
| 851 | attrib->unmap_zeroes_data = q->limits.discard_zeroes_data; | ||
| 852 | return true; | ||
| 853 | } | ||
| 854 | EXPORT_SYMBOL(target_configure_unmap_from_queue); | ||
| 855 | |||
| 856 | /* | ||
| 857 | * Convert from blocksize advertised to the initiator to the 512 byte | ||
| 858 | * units unconditionally used by the Linux block layer. | ||
| 859 | */ | ||
| 860 | sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) | ||
| 861 | { | ||
| 862 | switch (dev->dev_attrib.block_size) { | ||
| 863 | case 4096: | ||
| 864 | return lb << 3; | ||
| 865 | case 2048: | ||
| 866 | return lb << 2; | ||
| 867 | case 1024: | ||
| 868 | return lb << 1; | ||
| 869 | default: | ||
| 870 | return lb; | ||
| 871 | } | ||
| 872 | } | ||
| 873 | EXPORT_SYMBOL(target_to_linux_sector); | ||
| 874 | |||
| 831 | int target_configure_device(struct se_device *dev) | 875 | int target_configure_device(struct se_device *dev) |
| 832 | { | 876 | { |
| 833 | struct se_hba *hba = dev->se_hba; | 877 | struct se_hba *hba = dev->se_hba; |
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index e3195700211a..75f0f08b2a34 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
| @@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev) | |||
| 160 | " block_device blocks: %llu logical_block_size: %d\n", | 160 | " block_device blocks: %llu logical_block_size: %d\n", |
| 161 | dev_size, div_u64(dev_size, fd_dev->fd_block_size), | 161 | dev_size, div_u64(dev_size, fd_dev->fd_block_size), |
| 162 | fd_dev->fd_block_size); | 162 | fd_dev->fd_block_size); |
| 163 | /* | 163 | |
| 164 | * Check if the underlying struct block_device request_queue supports | 164 | if (target_configure_unmap_from_queue(&dev->dev_attrib, q, |
| 165 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM | 165 | fd_dev->fd_block_size)) |
| 166 | * in ATA and we need to set TPE=1 | ||
| 167 | */ | ||
| 168 | if (blk_queue_discard(q)) { | ||
| 169 | dev->dev_attrib.max_unmap_lba_count = | ||
| 170 | q->limits.max_discard_sectors; | ||
| 171 | /* | ||
| 172 | * Currently hardcoded to 1 in Linux/SCSI code.. | ||
| 173 | */ | ||
| 174 | dev->dev_attrib.max_unmap_block_desc_count = 1; | ||
| 175 | dev->dev_attrib.unmap_granularity = | ||
| 176 | q->limits.discard_granularity >> 9; | ||
| 177 | dev->dev_attrib.unmap_granularity_alignment = | ||
| 178 | q->limits.discard_alignment; | ||
| 179 | pr_debug("IFILE: BLOCK Discard support available," | 166 | pr_debug("IFILE: BLOCK Discard support available," |
| 180 | " disabled by default\n"); | 167 | " disabled by default\n"); |
| 181 | } | ||
| 182 | /* | 168 | /* |
| 183 | * Enable write same emulation for IBLOCK and use 0xFFFF as | 169 | * Enable write same emulation for IBLOCK and use 0xFFFF as |
| 184 | * the smaller WRITE_SAME(10) only has a two-byte block count. | 170 | * the smaller WRITE_SAME(10) only has a two-byte block count. |
| @@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) | |||
| 490 | if (S_ISBLK(inode->i_mode)) { | 476 | if (S_ISBLK(inode->i_mode)) { |
| 491 | /* The backend is block device, use discard */ | 477 | /* The backend is block device, use discard */ |
| 492 | struct block_device *bdev = inode->i_bdev; | 478 | struct block_device *bdev = inode->i_bdev; |
| 479 | struct se_device *dev = cmd->se_dev; | ||
| 493 | 480 | ||
| 494 | ret = blkdev_issue_discard(bdev, lba, | 481 | ret = blkdev_issue_discard(bdev, |
| 495 | nolb, GFP_KERNEL, 0); | 482 | target_to_linux_sector(dev, lba), |
| 483 | target_to_linux_sector(dev, nolb), | ||
| 484 | GFP_KERNEL, 0); | ||
| 496 | if (ret < 0) { | 485 | if (ret < 0) { |
| 497 | pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", | 486 | pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", |
| 498 | ret); | 487 | ret); |
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 5a2899f9f50b..abe4eb997a84 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
| @@ -121,29 +121,11 @@ static int iblock_configure_device(struct se_device *dev) | |||
| 121 | dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); | 121 | dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); |
| 122 | dev->dev_attrib.hw_queue_depth = q->nr_requests; | 122 | dev->dev_attrib.hw_queue_depth = q->nr_requests; |
| 123 | 123 | ||
| 124 | /* | 124 | if (target_configure_unmap_from_queue(&dev->dev_attrib, q, |
| 125 | * Check if the underlying struct block_device request_queue supports | 125 | dev->dev_attrib.hw_block_size)) |
| 126 | * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM | ||
| 127 | * in ATA and we need to set TPE=1 | ||
| 128 | */ | ||
| 129 | if (blk_queue_discard(q)) { | ||
| 130 | dev->dev_attrib.max_unmap_lba_count = | ||
| 131 | q->limits.max_discard_sectors; | ||
| 132 | |||
| 133 | /* | ||
| 134 | * Currently hardcoded to 1 in Linux/SCSI code.. | ||
| 135 | */ | ||
| 136 | dev->dev_attrib.max_unmap_block_desc_count = 1; | ||
| 137 | dev->dev_attrib.unmap_granularity = | ||
| 138 | q->limits.discard_granularity >> 9; | ||
| 139 | dev->dev_attrib.unmap_granularity_alignment = | ||
| 140 | q->limits.discard_alignment; | ||
| 141 | dev->dev_attrib.unmap_zeroes_data = | ||
| 142 | q->limits.discard_zeroes_data; | ||
| 143 | |||
| 144 | pr_debug("IBLOCK: BLOCK Discard support available," | 126 | pr_debug("IBLOCK: BLOCK Discard support available," |
| 145 | " disabled by default\n"); | 127 | " disabled by default\n"); |
| 146 | } | 128 | |
| 147 | /* | 129 | /* |
| 148 | * Enable write same emulation for IBLOCK and use 0xFFFF as | 130 | * Enable write same emulation for IBLOCK and use 0xFFFF as |
| 149 | * the smaller WRITE_SAME(10) only has a two-byte block count. | 131 | * the smaller WRITE_SAME(10) only has a two-byte block count. |
| @@ -415,9 +397,13 @@ static sense_reason_t | |||
| 415 | iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) | 397 | iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) |
| 416 | { | 398 | { |
| 417 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; | 399 | struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; |
| 400 | struct se_device *dev = cmd->se_dev; | ||
| 418 | int ret; | 401 | int ret; |
| 419 | 402 | ||
| 420 | ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); | 403 | ret = blkdev_issue_discard(bdev, |
| 404 | target_to_linux_sector(dev, lba), | ||
| 405 | target_to_linux_sector(dev, nolb), | ||
| 406 | GFP_KERNEL, 0); | ||
| 421 | if (ret < 0) { | 407 | if (ret < 0) { |
| 422 | pr_err("blkdev_issue_discard() failed: %d\n", ret); | 408 | pr_err("blkdev_issue_discard() failed: %d\n", ret); |
| 423 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 409 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
| @@ -433,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd) | |||
| 433 | struct scatterlist *sg; | 419 | struct scatterlist *sg; |
| 434 | struct bio *bio; | 420 | struct bio *bio; |
| 435 | struct bio_list list; | 421 | struct bio_list list; |
| 436 | sector_t block_lba = cmd->t_task_lba; | 422 | struct se_device *dev = cmd->se_dev; |
| 437 | sector_t sectors = sbc_get_write_same_sectors(cmd); | 423 | sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); |
| 424 | sector_t sectors = target_to_linux_sector(dev, | ||
| 425 | sbc_get_write_same_sectors(cmd)); | ||
| 438 | 426 | ||
| 439 | if (cmd->prot_op) { | 427 | if (cmd->prot_op) { |
| 440 | pr_err("WRITE_SAME: Protection information with IBLOCK" | 428 | pr_err("WRITE_SAME: Protection information with IBLOCK" |
| @@ -648,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
| 648 | enum dma_data_direction data_direction) | 636 | enum dma_data_direction data_direction) |
| 649 | { | 637 | { |
| 650 | struct se_device *dev = cmd->se_dev; | 638 | struct se_device *dev = cmd->se_dev; |
| 639 | sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); | ||
| 651 | struct iblock_req *ibr; | 640 | struct iblock_req *ibr; |
| 652 | struct bio *bio, *bio_start; | 641 | struct bio *bio, *bio_start; |
| 653 | struct bio_list list; | 642 | struct bio_list list; |
| 654 | struct scatterlist *sg; | 643 | struct scatterlist *sg; |
| 655 | u32 sg_num = sgl_nents; | 644 | u32 sg_num = sgl_nents; |
| 656 | sector_t block_lba; | ||
| 657 | unsigned bio_cnt; | 645 | unsigned bio_cnt; |
| 658 | int rw = 0; | 646 | int rw = 0; |
| 659 | int i; | 647 | int i; |
| @@ -679,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
| 679 | rw = READ; | 667 | rw = READ; |
| 680 | } | 668 | } |
| 681 | 669 | ||
| 682 | /* | ||
| 683 | * Convert the blocksize advertised to the initiator to the 512 byte | ||
| 684 | * units unconditionally used by the Linux block layer. | ||
| 685 | */ | ||
| 686 | if (dev->dev_attrib.block_size == 4096) | ||
| 687 | block_lba = (cmd->t_task_lba << 3); | ||
| 688 | else if (dev->dev_attrib.block_size == 2048) | ||
| 689 | block_lba = (cmd->t_task_lba << 2); | ||
| 690 | else if (dev->dev_attrib.block_size == 1024) | ||
| 691 | block_lba = (cmd->t_task_lba << 1); | ||
| 692 | else if (dev->dev_attrib.block_size == 512) | ||
| 693 | block_lba = cmd->t_task_lba; | ||
| 694 | else { | ||
| 695 | pr_err("Unsupported SCSI -> BLOCK LBA conversion:" | ||
| 696 | " %u\n", dev->dev_attrib.block_size); | ||
| 697 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | ||
| 698 | } | ||
| 699 | |||
| 700 | ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); | 670 | ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); |
| 701 | if (!ibr) | 671 | if (!ibr) |
| 702 | goto fail; | 672 | goto fail; |
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index dae0750c2032..db4412fe6b8a 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
| @@ -141,7 +141,6 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int); | |||
| 141 | int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); | 141 | int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); |
| 142 | int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); | 142 | int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); |
| 143 | int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); | 143 | int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); |
| 144 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); | ||
| 145 | void transport_clear_lun_ref(struct se_lun *); | 144 | void transport_clear_lun_ref(struct se_lun *); |
| 146 | void transport_send_task_abort(struct se_cmd *); | 145 | void transport_send_task_abort(struct se_cmd *); |
| 147 | sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); | 146 | sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index fcdcb117c60d..82a663ba9800 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
| @@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr) | |||
| 68 | 68 | ||
| 69 | if (dev) { | 69 | if (dev) { |
| 70 | spin_lock_irqsave(&dev->se_tmr_lock, flags); | 70 | spin_lock_irqsave(&dev->se_tmr_lock, flags); |
| 71 | list_del(&tmr->tmr_list); | 71 | list_del_init(&tmr->tmr_list); |
| 72 | spin_unlock_irqrestore(&dev->se_tmr_lock, flags); | 72 | spin_unlock_irqrestore(&dev->se_tmr_lock, flags); |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | kfree(tmr); | 75 | kfree(tmr); |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void core_tmr_handle_tas_abort( | 78 | static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) |
| 79 | struct se_node_acl *tmr_nacl, | ||
| 80 | struct se_cmd *cmd, | ||
| 81 | int tas) | ||
| 82 | { | 79 | { |
| 83 | bool remove = true; | 80 | unsigned long flags; |
| 81 | bool remove = true, send_tas; | ||
| 84 | /* | 82 | /* |
| 85 | * TASK ABORTED status (TAS) bit support | 83 | * TASK ABORTED status (TAS) bit support |
| 86 | */ | 84 | */ |
| 87 | if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { | 85 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
| 86 | send_tas = (cmd->transport_state & CMD_T_TAS); | ||
| 87 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 88 | |||
| 89 | if (send_tas) { | ||
| 88 | remove = false; | 90 | remove = false; |
| 89 | transport_send_task_abort(cmd); | 91 | transport_send_task_abort(cmd); |
| 90 | } | 92 | } |
| @@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list, | |||
| 107 | return 1; | 109 | return 1; |
| 108 | } | 110 | } |
| 109 | 111 | ||
| 112 | static bool __target_check_io_state(struct se_cmd *se_cmd, | ||
| 113 | struct se_session *tmr_sess, int tas) | ||
| 114 | { | ||
| 115 | struct se_session *sess = se_cmd->se_sess; | ||
| 116 | |||
| 117 | assert_spin_locked(&sess->sess_cmd_lock); | ||
| 118 | WARN_ON_ONCE(!irqs_disabled()); | ||
| 119 | /* | ||
| 120 | * If command already reached CMD_T_COMPLETE state within | ||
| 121 | * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, | ||
| 122 | * this se_cmd has been passed to fabric driver and will | ||
| 123 | * not be aborted. | ||
| 124 | * | ||
| 125 | * Otherwise, obtain a local se_cmd->cmd_kref now for TMR | ||
| 126 | * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as | ||
| 127 | * long as se_cmd->cmd_kref is still active unless zero. | ||
| 128 | */ | ||
| 129 | spin_lock(&se_cmd->t_state_lock); | ||
| 130 | if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { | ||
| 131 | pr_debug("Attempted to abort io tag: %llu already complete or" | ||
| 132 | " fabric stop, skipping\n", se_cmd->tag); | ||
| 133 | spin_unlock(&se_cmd->t_state_lock); | ||
| 134 | return false; | ||
| 135 | } | ||
| 136 | if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { | ||
| 137 | pr_debug("Attempted to abort io tag: %llu already shutdown," | ||
| 138 | " skipping\n", se_cmd->tag); | ||
| 139 | spin_unlock(&se_cmd->t_state_lock); | ||
| 140 | return false; | ||
| 141 | } | ||
| 142 | se_cmd->transport_state |= CMD_T_ABORTED; | ||
| 143 | |||
| 144 | if ((tmr_sess != se_cmd->se_sess) && tas) | ||
| 145 | se_cmd->transport_state |= CMD_T_TAS; | ||
| 146 | |||
| 147 | spin_unlock(&se_cmd->t_state_lock); | ||
| 148 | |||
| 149 | return kref_get_unless_zero(&se_cmd->cmd_kref); | ||
| 150 | } | ||
| 151 | |||
| 110 | void core_tmr_abort_task( | 152 | void core_tmr_abort_task( |
| 111 | struct se_device *dev, | 153 | struct se_device *dev, |
| 112 | struct se_tmr_req *tmr, | 154 | struct se_tmr_req *tmr, |
| @@ -130,34 +172,22 @@ void core_tmr_abort_task( | |||
| 130 | if (tmr->ref_task_tag != ref_tag) | 172 | if (tmr->ref_task_tag != ref_tag) |
| 131 | continue; | 173 | continue; |
| 132 | 174 | ||
| 133 | if (!kref_get_unless_zero(&se_cmd->cmd_kref)) | ||
| 134 | continue; | ||
| 135 | |||
| 136 | printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", | 175 | printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", |
| 137 | se_cmd->se_tfo->get_fabric_name(), ref_tag); | 176 | se_cmd->se_tfo->get_fabric_name(), ref_tag); |
| 138 | 177 | ||
| 139 | spin_lock(&se_cmd->t_state_lock); | 178 | if (!__target_check_io_state(se_cmd, se_sess, 0)) { |
| 140 | if (se_cmd->transport_state & CMD_T_COMPLETE) { | ||
| 141 | printk("ABORT_TASK: ref_tag: %llu already complete," | ||
| 142 | " skipping\n", ref_tag); | ||
| 143 | spin_unlock(&se_cmd->t_state_lock); | ||
| 144 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 179 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 145 | |||
| 146 | target_put_sess_cmd(se_cmd); | 180 | target_put_sess_cmd(se_cmd); |
| 147 | |||
| 148 | goto out; | 181 | goto out; |
| 149 | } | 182 | } |
| 150 | se_cmd->transport_state |= CMD_T_ABORTED; | ||
| 151 | spin_unlock(&se_cmd->t_state_lock); | ||
| 152 | |||
| 153 | list_del_init(&se_cmd->se_cmd_list); | 183 | list_del_init(&se_cmd->se_cmd_list); |
| 154 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 184 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 155 | 185 | ||
| 156 | cancel_work_sync(&se_cmd->work); | 186 | cancel_work_sync(&se_cmd->work); |
| 157 | transport_wait_for_tasks(se_cmd); | 187 | transport_wait_for_tasks(se_cmd); |
| 158 | 188 | ||
| 159 | target_put_sess_cmd(se_cmd); | ||
| 160 | transport_cmd_finish_abort(se_cmd, true); | 189 | transport_cmd_finish_abort(se_cmd, true); |
| 190 | target_put_sess_cmd(se_cmd); | ||
| 161 | 191 | ||
| 162 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" | 192 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" |
| 163 | " ref_tag: %llu\n", ref_tag); | 193 | " ref_tag: %llu\n", ref_tag); |
| @@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list( | |||
| 178 | struct list_head *preempt_and_abort_list) | 208 | struct list_head *preempt_and_abort_list) |
| 179 | { | 209 | { |
| 180 | LIST_HEAD(drain_tmr_list); | 210 | LIST_HEAD(drain_tmr_list); |
| 211 | struct se_session *sess; | ||
| 181 | struct se_tmr_req *tmr_p, *tmr_pp; | 212 | struct se_tmr_req *tmr_p, *tmr_pp; |
| 182 | struct se_cmd *cmd; | 213 | struct se_cmd *cmd; |
| 183 | unsigned long flags; | 214 | unsigned long flags; |
| 215 | bool rc; | ||
| 184 | /* | 216 | /* |
| 185 | * Release all pending and outgoing TMRs aside from the received | 217 | * Release all pending and outgoing TMRs aside from the received |
| 186 | * LUN_RESET tmr.. | 218 | * LUN_RESET tmr.. |
| @@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list( | |||
| 206 | if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) | 238 | if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) |
| 207 | continue; | 239 | continue; |
| 208 | 240 | ||
| 241 | sess = cmd->se_sess; | ||
| 242 | if (WARN_ON_ONCE(!sess)) | ||
| 243 | continue; | ||
| 244 | |||
| 245 | spin_lock(&sess->sess_cmd_lock); | ||
| 209 | spin_lock(&cmd->t_state_lock); | 246 | spin_lock(&cmd->t_state_lock); |
| 210 | if (!(cmd->transport_state & CMD_T_ACTIVE)) { | 247 | if (!(cmd->transport_state & CMD_T_ACTIVE) || |
| 248 | (cmd->transport_state & CMD_T_FABRIC_STOP)) { | ||
| 211 | spin_unlock(&cmd->t_state_lock); | 249 | spin_unlock(&cmd->t_state_lock); |
| 250 | spin_unlock(&sess->sess_cmd_lock); | ||
| 212 | continue; | 251 | continue; |
| 213 | } | 252 | } |
| 214 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { | 253 | if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { |
| 215 | spin_unlock(&cmd->t_state_lock); | 254 | spin_unlock(&cmd->t_state_lock); |
| 255 | spin_unlock(&sess->sess_cmd_lock); | ||
| 216 | continue; | 256 | continue; |
| 217 | } | 257 | } |
| 258 | if (sess->sess_tearing_down || cmd->cmd_wait_set) { | ||
| 259 | spin_unlock(&cmd->t_state_lock); | ||
| 260 | spin_unlock(&sess->sess_cmd_lock); | ||
| 261 | continue; | ||
| 262 | } | ||
| 263 | cmd->transport_state |= CMD_T_ABORTED; | ||
| 218 | spin_unlock(&cmd->t_state_lock); | 264 | spin_unlock(&cmd->t_state_lock); |
| 219 | 265 | ||
| 266 | rc = kref_get_unless_zero(&cmd->cmd_kref); | ||
| 267 | if (!rc) { | ||
| 268 | printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n"); | ||
| 269 | spin_unlock(&sess->sess_cmd_lock); | ||
| 270 | continue; | ||
| 271 | } | ||
| 272 | spin_unlock(&sess->sess_cmd_lock); | ||
| 273 | |||
| 220 | list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); | 274 | list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); |
| 221 | } | 275 | } |
| 222 | spin_unlock_irqrestore(&dev->se_tmr_lock, flags); | 276 | spin_unlock_irqrestore(&dev->se_tmr_lock, flags); |
| @@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list( | |||
| 230 | (preempt_and_abort_list) ? "Preempt" : "", tmr_p, | 284 | (preempt_and_abort_list) ? "Preempt" : "", tmr_p, |
| 231 | tmr_p->function, tmr_p->response, cmd->t_state); | 285 | tmr_p->function, tmr_p->response, cmd->t_state); |
| 232 | 286 | ||
| 287 | cancel_work_sync(&cmd->work); | ||
| 288 | transport_wait_for_tasks(cmd); | ||
| 289 | |||
| 233 | transport_cmd_finish_abort(cmd, 1); | 290 | transport_cmd_finish_abort(cmd, 1); |
| 291 | target_put_sess_cmd(cmd); | ||
| 234 | } | 292 | } |
| 235 | } | 293 | } |
| 236 | 294 | ||
| 237 | static void core_tmr_drain_state_list( | 295 | static void core_tmr_drain_state_list( |
| 238 | struct se_device *dev, | 296 | struct se_device *dev, |
| 239 | struct se_cmd *prout_cmd, | 297 | struct se_cmd *prout_cmd, |
| 240 | struct se_node_acl *tmr_nacl, | 298 | struct se_session *tmr_sess, |
| 241 | int tas, | 299 | int tas, |
| 242 | struct list_head *preempt_and_abort_list) | 300 | struct list_head *preempt_and_abort_list) |
| 243 | { | 301 | { |
| 244 | LIST_HEAD(drain_task_list); | 302 | LIST_HEAD(drain_task_list); |
| 303 | struct se_session *sess; | ||
| 245 | struct se_cmd *cmd, *next; | 304 | struct se_cmd *cmd, *next; |
| 246 | unsigned long flags; | 305 | unsigned long flags; |
| 306 | int rc; | ||
| 247 | 307 | ||
| 248 | /* | 308 | /* |
| 249 | * Complete outstanding commands with TASK_ABORTED SAM status. | 309 | * Complete outstanding commands with TASK_ABORTED SAM status. |
| @@ -282,6 +342,16 @@ static void core_tmr_drain_state_list( | |||
| 282 | if (prout_cmd == cmd) | 342 | if (prout_cmd == cmd) |
| 283 | continue; | 343 | continue; |
| 284 | 344 | ||
| 345 | sess = cmd->se_sess; | ||
| 346 | if (WARN_ON_ONCE(!sess)) | ||
| 347 | continue; | ||
| 348 | |||
| 349 | spin_lock(&sess->sess_cmd_lock); | ||
| 350 | rc = __target_check_io_state(cmd, tmr_sess, tas); | ||
| 351 | spin_unlock(&sess->sess_cmd_lock); | ||
| 352 | if (!rc) | ||
| 353 | continue; | ||
| 354 | |||
| 285 | list_move_tail(&cmd->state_list, &drain_task_list); | 355 | list_move_tail(&cmd->state_list, &drain_task_list); |
| 286 | cmd->state_active = false; | 356 | cmd->state_active = false; |
| 287 | } | 357 | } |
| @@ -289,7 +359,7 @@ static void core_tmr_drain_state_list( | |||
| 289 | 359 | ||
| 290 | while (!list_empty(&drain_task_list)) { | 360 | while (!list_empty(&drain_task_list)) { |
| 291 | cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); | 361 | cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); |
| 292 | list_del(&cmd->state_list); | 362 | list_del_init(&cmd->state_list); |
| 293 | 363 | ||
| 294 | pr_debug("LUN_RESET: %s cmd: %p" | 364 | pr_debug("LUN_RESET: %s cmd: %p" |
| 295 | " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" | 365 | " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" |
| @@ -313,16 +383,11 @@ static void core_tmr_drain_state_list( | |||
| 313 | * loop above, but we do it down here given that | 383 | * loop above, but we do it down here given that |
| 314 | * cancel_work_sync may block. | 384 | * cancel_work_sync may block. |
| 315 | */ | 385 | */ |
| 316 | if (cmd->t_state == TRANSPORT_COMPLETE) | 386 | cancel_work_sync(&cmd->work); |
| 317 | cancel_work_sync(&cmd->work); | 387 | transport_wait_for_tasks(cmd); |
| 318 | |||
| 319 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
| 320 | target_stop_cmd(cmd, &flags); | ||
| 321 | |||
| 322 | cmd->transport_state |= CMD_T_ABORTED; | ||
| 323 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 324 | 388 | ||
| 325 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); | 389 | core_tmr_handle_tas_abort(cmd, tas); |
| 390 | target_put_sess_cmd(cmd); | ||
| 326 | } | 391 | } |
| 327 | } | 392 | } |
| 328 | 393 | ||
| @@ -334,6 +399,7 @@ int core_tmr_lun_reset( | |||
| 334 | { | 399 | { |
| 335 | struct se_node_acl *tmr_nacl = NULL; | 400 | struct se_node_acl *tmr_nacl = NULL; |
| 336 | struct se_portal_group *tmr_tpg = NULL; | 401 | struct se_portal_group *tmr_tpg = NULL; |
| 402 | struct se_session *tmr_sess = NULL; | ||
| 337 | int tas; | 403 | int tas; |
| 338 | /* | 404 | /* |
| 339 | * TASK_ABORTED status bit, this is configurable via ConfigFS | 405 | * TASK_ABORTED status bit, this is configurable via ConfigFS |
| @@ -352,8 +418,9 @@ int core_tmr_lun_reset( | |||
| 352 | * or struct se_device passthrough.. | 418 | * or struct se_device passthrough.. |
| 353 | */ | 419 | */ |
| 354 | if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { | 420 | if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { |
| 355 | tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; | 421 | tmr_sess = tmr->task_cmd->se_sess; |
| 356 | tmr_tpg = tmr->task_cmd->se_sess->se_tpg; | 422 | tmr_nacl = tmr_sess->se_node_acl; |
| 423 | tmr_tpg = tmr_sess->se_tpg; | ||
| 357 | if (tmr_nacl && tmr_tpg) { | 424 | if (tmr_nacl && tmr_tpg) { |
| 358 | pr_debug("LUN_RESET: TMR caller fabric: %s" | 425 | pr_debug("LUN_RESET: TMR caller fabric: %s" |
| 359 | " initiator port %s\n", | 426 | " initiator port %s\n", |
| @@ -366,7 +433,7 @@ int core_tmr_lun_reset( | |||
| 366 | dev->transport->name, tas); | 433 | dev->transport->name, tas); |
| 367 | 434 | ||
| 368 | core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); | 435 | core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); |
| 369 | core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, | 436 | core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, |
| 370 | preempt_and_abort_list); | 437 | preempt_and_abort_list); |
| 371 | 438 | ||
| 372 | /* | 439 | /* |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9f3608e10f25..867bc6d0a68a 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -534,9 +534,6 @@ void transport_deregister_session(struct se_session *se_sess) | |||
| 534 | } | 534 | } |
| 535 | EXPORT_SYMBOL(transport_deregister_session); | 535 | EXPORT_SYMBOL(transport_deregister_session); |
| 536 | 536 | ||
| 537 | /* | ||
| 538 | * Called with cmd->t_state_lock held. | ||
| 539 | */ | ||
| 540 | static void target_remove_from_state_list(struct se_cmd *cmd) | 537 | static void target_remove_from_state_list(struct se_cmd *cmd) |
| 541 | { | 538 | { |
| 542 | struct se_device *dev = cmd->se_dev; | 539 | struct se_device *dev = cmd->se_dev; |
| @@ -561,10 +558,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, | |||
| 561 | { | 558 | { |
| 562 | unsigned long flags; | 559 | unsigned long flags; |
| 563 | 560 | ||
| 564 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
| 565 | if (write_pending) | ||
| 566 | cmd->t_state = TRANSPORT_WRITE_PENDING; | ||
| 567 | |||
| 568 | if (remove_from_lists) { | 561 | if (remove_from_lists) { |
| 569 | target_remove_from_state_list(cmd); | 562 | target_remove_from_state_list(cmd); |
| 570 | 563 | ||
| @@ -574,6 +567,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, | |||
| 574 | cmd->se_lun = NULL; | 567 | cmd->se_lun = NULL; |
| 575 | } | 568 | } |
| 576 | 569 | ||
| 570 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
| 571 | if (write_pending) | ||
| 572 | cmd->t_state = TRANSPORT_WRITE_PENDING; | ||
| 573 | |||
| 577 | /* | 574 | /* |
| 578 | * Determine if frontend context caller is requesting the stopping of | 575 | * Determine if frontend context caller is requesting the stopping of |
| 579 | * this command for frontend exceptions. | 576 | * this command for frontend exceptions. |
| @@ -627,6 +624,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) | |||
| 627 | 624 | ||
| 628 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 625 | void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) |
| 629 | { | 626 | { |
| 627 | bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); | ||
| 628 | |||
| 630 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) | 629 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) |
| 631 | transport_lun_remove_cmd(cmd); | 630 | transport_lun_remove_cmd(cmd); |
| 632 | /* | 631 | /* |
| @@ -638,7 +637,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | |||
| 638 | 637 | ||
| 639 | if (transport_cmd_check_stop_to_fabric(cmd)) | 638 | if (transport_cmd_check_stop_to_fabric(cmd)) |
| 640 | return; | 639 | return; |
| 641 | if (remove) | 640 | if (remove && ack_kref) |
| 642 | transport_put_cmd(cmd); | 641 | transport_put_cmd(cmd); |
| 643 | } | 642 | } |
| 644 | 643 | ||
| @@ -694,19 +693,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) | |||
| 694 | } | 693 | } |
| 695 | 694 | ||
| 696 | /* | 695 | /* |
| 697 | * See if we are waiting to complete for an exception condition. | ||
| 698 | */ | ||
| 699 | if (cmd->transport_state & CMD_T_REQUEST_STOP) { | ||
| 700 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 701 | complete(&cmd->task_stop_comp); | ||
| 702 | return; | ||
| 703 | } | ||
| 704 | |||
| 705 | /* | ||
| 706 | * Check for case where an explicit ABORT_TASK has been received | 696 | * Check for case where an explicit ABORT_TASK has been received |
| 707 | * and transport_wait_for_tasks() will be waiting for completion.. | 697 | * and transport_wait_for_tasks() will be waiting for completion.. |
| 708 | */ | 698 | */ |
| 709 | if (cmd->transport_state & CMD_T_ABORTED && | 699 | if (cmd->transport_state & CMD_T_ABORTED || |
| 710 | cmd->transport_state & CMD_T_STOP) { | 700 | cmd->transport_state & CMD_T_STOP) { |
| 711 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 701 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
| 712 | complete_all(&cmd->t_transport_stop_comp); | 702 | complete_all(&cmd->t_transport_stop_comp); |
| @@ -721,10 +711,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) | |||
| 721 | cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); | 711 | cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); |
| 722 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 712 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
| 723 | 713 | ||
| 724 | if (cmd->cpuid == -1) | 714 | if (cmd->se_cmd_flags & SCF_USE_CPUID) |
| 725 | queue_work(target_completion_wq, &cmd->work); | ||
| 726 | else | ||
| 727 | queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); | 715 | queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); |
| 716 | else | ||
| 717 | queue_work(target_completion_wq, &cmd->work); | ||
| 728 | } | 718 | } |
| 729 | EXPORT_SYMBOL(target_complete_cmd); | 719 | EXPORT_SYMBOL(target_complete_cmd); |
| 730 | 720 | ||
| @@ -1203,7 +1193,6 @@ void transport_init_se_cmd( | |||
| 1203 | INIT_LIST_HEAD(&cmd->state_list); | 1193 | INIT_LIST_HEAD(&cmd->state_list); |
| 1204 | init_completion(&cmd->t_transport_stop_comp); | 1194 | init_completion(&cmd->t_transport_stop_comp); |
| 1205 | init_completion(&cmd->cmd_wait_comp); | 1195 | init_completion(&cmd->cmd_wait_comp); |
| 1206 | init_completion(&cmd->task_stop_comp); | ||
| 1207 | spin_lock_init(&cmd->t_state_lock); | 1196 | spin_lock_init(&cmd->t_state_lock); |
| 1208 | kref_init(&cmd->cmd_kref); | 1197 | kref_init(&cmd->cmd_kref); |
| 1209 | cmd->transport_state = CMD_T_DEV_ACTIVE; | 1198 | cmd->transport_state = CMD_T_DEV_ACTIVE; |
| @@ -1437,6 +1426,12 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess | |||
| 1437 | */ | 1426 | */ |
| 1438 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, | 1427 | transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, |
| 1439 | data_length, data_dir, task_attr, sense); | 1428 | data_length, data_dir, task_attr, sense); |
| 1429 | |||
| 1430 | if (flags & TARGET_SCF_USE_CPUID) | ||
| 1431 | se_cmd->se_cmd_flags |= SCF_USE_CPUID; | ||
| 1432 | else | ||
| 1433 | se_cmd->cpuid = WORK_CPU_UNBOUND; | ||
| 1434 | |||
| 1440 | if (flags & TARGET_SCF_UNKNOWN_SIZE) | 1435 | if (flags & TARGET_SCF_UNKNOWN_SIZE) |
| 1441 | se_cmd->unknown_data_length = 1; | 1436 | se_cmd->unknown_data_length = 1; |
| 1442 | /* | 1437 | /* |
| @@ -1635,33 +1630,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, | |||
| 1635 | EXPORT_SYMBOL(target_submit_tmr); | 1630 | EXPORT_SYMBOL(target_submit_tmr); |
| 1636 | 1631 | ||
| 1637 | /* | 1632 | /* |
| 1638 | * If the cmd is active, request it to be stopped and sleep until it | ||
| 1639 | * has completed. | ||
| 1640 | */ | ||
| 1641 | bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) | ||
| 1642 | __releases(&cmd->t_state_lock) | ||
| 1643 | __acquires(&cmd->t_state_lock) | ||
| 1644 | { | ||
| 1645 | bool was_active = false; | ||
| 1646 | |||
| 1647 | if (cmd->transport_state & CMD_T_BUSY) { | ||
| 1648 | cmd->transport_state |= CMD_T_REQUEST_STOP; | ||
| 1649 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); | ||
| 1650 | |||
| 1651 | pr_debug("cmd %p waiting to complete\n", cmd); | ||
| 1652 | wait_for_completion(&cmd->task_stop_comp); | ||
| 1653 | pr_debug("cmd %p stopped successfully\n", cmd); | ||
| 1654 | |||
| 1655 | spin_lock_irqsave(&cmd->t_state_lock, *flags); | ||
| 1656 | cmd->transport_state &= ~CMD_T_REQUEST_STOP; | ||
| 1657 | cmd->transport_state &= ~CMD_T_BUSY; | ||
| 1658 | was_active = true; | ||
| 1659 | } | ||
| 1660 | |||
| 1661 | return was_active; | ||
| 1662 | } | ||
| 1663 | |||
| 1664 | /* | ||
| 1665 | * Handle SAM-esque emulation for generic transport request failures. | 1633 | * Handle SAM-esque emulation for generic transport request failures. |
| 1666 | */ | 1634 | */ |
| 1667 | void transport_generic_request_failure(struct se_cmd *cmd, | 1635 | void transport_generic_request_failure(struct se_cmd *cmd, |
| @@ -1859,19 +1827,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd) | |||
| 1859 | return true; | 1827 | return true; |
| 1860 | } | 1828 | } |
| 1861 | 1829 | ||
| 1830 | static int __transport_check_aborted_status(struct se_cmd *, int); | ||
| 1831 | |||
| 1862 | void target_execute_cmd(struct se_cmd *cmd) | 1832 | void target_execute_cmd(struct se_cmd *cmd) |
| 1863 | { | 1833 | { |
| 1864 | /* | 1834 | /* |
| 1865 | * If the received CDB has aleady been aborted stop processing it here. | ||
| 1866 | */ | ||
| 1867 | if (transport_check_aborted_status(cmd, 1)) | ||
| 1868 | return; | ||
| 1869 | |||
| 1870 | /* | ||
| 1871 | * Determine if frontend context caller is requesting the stopping of | 1835 | * Determine if frontend context caller is requesting the stopping of |
| 1872 | * this command for frontend exceptions. | 1836 | * this command for frontend exceptions. |
| 1837 | * | ||
| 1838 | * If the received CDB has aleady been aborted stop processing it here. | ||
| 1873 | */ | 1839 | */ |
| 1874 | spin_lock_irq(&cmd->t_state_lock); | 1840 | spin_lock_irq(&cmd->t_state_lock); |
| 1841 | if (__transport_check_aborted_status(cmd, 1)) { | ||
| 1842 | spin_unlock_irq(&cmd->t_state_lock); | ||
| 1843 | return; | ||
| 1844 | } | ||
| 1875 | if (cmd->transport_state & CMD_T_STOP) { | 1845 | if (cmd->transport_state & CMD_T_STOP) { |
| 1876 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", | 1846 | pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", |
| 1877 | __func__, __LINE__, cmd->tag); | 1847 | __func__, __LINE__, cmd->tag); |
| @@ -2222,20 +2192,14 @@ static inline void transport_free_pages(struct se_cmd *cmd) | |||
| 2222 | } | 2192 | } |
| 2223 | 2193 | ||
| 2224 | /** | 2194 | /** |
| 2225 | * transport_release_cmd - free a command | 2195 | * transport_put_cmd - release a reference to a command |
| 2226 | * @cmd: command to free | 2196 | * @cmd: command to release |
| 2227 | * | 2197 | * |
| 2228 | * This routine unconditionally frees a command, and reference counting | 2198 | * This routine releases our reference to the command and frees it if possible. |
| 2229 | * or list removal must be done in the caller. | ||
| 2230 | */ | 2199 | */ |
| 2231 | static int transport_release_cmd(struct se_cmd *cmd) | 2200 | static int transport_put_cmd(struct se_cmd *cmd) |
| 2232 | { | 2201 | { |
| 2233 | BUG_ON(!cmd->se_tfo); | 2202 | BUG_ON(!cmd->se_tfo); |
| 2234 | |||
| 2235 | if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) | ||
| 2236 | core_tmr_release_req(cmd->se_tmr_req); | ||
| 2237 | if (cmd->t_task_cdb != cmd->__t_task_cdb) | ||
| 2238 | kfree(cmd->t_task_cdb); | ||
| 2239 | /* | 2203 | /* |
| 2240 | * If this cmd has been setup with target_get_sess_cmd(), drop | 2204 | * If this cmd has been setup with target_get_sess_cmd(), drop |
| 2241 | * the kref and call ->release_cmd() in kref callback. | 2205 | * the kref and call ->release_cmd() in kref callback. |
| @@ -2243,18 +2207,6 @@ static int transport_release_cmd(struct se_cmd *cmd) | |||
| 2243 | return target_put_sess_cmd(cmd); | 2207 | return target_put_sess_cmd(cmd); |
| 2244 | } | 2208 | } |
| 2245 | 2209 | ||
| 2246 | /** | ||
| 2247 | * transport_put_cmd - release a reference to a command | ||
| 2248 | * @cmd: command to release | ||
| 2249 | * | ||
| 2250 | * This routine releases our reference to the command and frees it if possible. | ||
| 2251 | */ | ||
| 2252 | static int transport_put_cmd(struct se_cmd *cmd) | ||
| 2253 | { | ||
| 2254 | transport_free_pages(cmd); | ||
| 2255 | return transport_release_cmd(cmd); | ||
| 2256 | } | ||
| 2257 | |||
| 2258 | void *transport_kmap_data_sg(struct se_cmd *cmd) | 2210 | void *transport_kmap_data_sg(struct se_cmd *cmd) |
| 2259 | { | 2211 | { |
| 2260 | struct scatterlist *sg = cmd->t_data_sg; | 2212 | struct scatterlist *sg = cmd->t_data_sg; |
| @@ -2450,34 +2402,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd) | |||
| 2450 | } | 2402 | } |
| 2451 | } | 2403 | } |
| 2452 | 2404 | ||
| 2453 | int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | 2405 | static bool |
| 2406 | __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, | ||
| 2407 | unsigned long *flags); | ||
| 2408 | |||
| 2409 | static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) | ||
| 2454 | { | 2410 | { |
| 2455 | unsigned long flags; | 2411 | unsigned long flags; |
| 2412 | |||
| 2413 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
| 2414 | __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); | ||
| 2415 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 2416 | } | ||
| 2417 | |||
| 2418 | int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | ||
| 2419 | { | ||
| 2456 | int ret = 0; | 2420 | int ret = 0; |
| 2421 | bool aborted = false, tas = false; | ||
| 2457 | 2422 | ||
| 2458 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { | 2423 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { |
| 2459 | if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) | 2424 | if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) |
| 2460 | transport_wait_for_tasks(cmd); | 2425 | target_wait_free_cmd(cmd, &aborted, &tas); |
| 2461 | 2426 | ||
| 2462 | ret = transport_release_cmd(cmd); | 2427 | if (!aborted || tas) |
| 2428 | ret = transport_put_cmd(cmd); | ||
| 2463 | } else { | 2429 | } else { |
| 2464 | if (wait_for_tasks) | 2430 | if (wait_for_tasks) |
| 2465 | transport_wait_for_tasks(cmd); | 2431 | target_wait_free_cmd(cmd, &aborted, &tas); |
| 2466 | /* | 2432 | /* |
| 2467 | * Handle WRITE failure case where transport_generic_new_cmd() | 2433 | * Handle WRITE failure case where transport_generic_new_cmd() |
| 2468 | * has already added se_cmd to state_list, but fabric has | 2434 | * has already added se_cmd to state_list, but fabric has |
| 2469 | * failed command before I/O submission. | 2435 | * failed command before I/O submission. |
| 2470 | */ | 2436 | */ |
| 2471 | if (cmd->state_active) { | 2437 | if (cmd->state_active) |
| 2472 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
| 2473 | target_remove_from_state_list(cmd); | 2438 | target_remove_from_state_list(cmd); |
| 2474 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 2475 | } | ||
| 2476 | 2439 | ||
| 2477 | if (cmd->se_lun) | 2440 | if (cmd->se_lun) |
| 2478 | transport_lun_remove_cmd(cmd); | 2441 | transport_lun_remove_cmd(cmd); |
| 2479 | 2442 | ||
| 2480 | ret = transport_put_cmd(cmd); | 2443 | if (!aborted || tas) |
| 2444 | ret = transport_put_cmd(cmd); | ||
| 2445 | } | ||
| 2446 | /* | ||
| 2447 | * If the task has been internally aborted due to TMR ABORT_TASK | ||
| 2448 | * or LUN_RESET, target_core_tmr.c is responsible for performing | ||
| 2449 | * the remaining calls to target_put_sess_cmd(), and not the | ||
| 2450 | * callers of this function. | ||
| 2451 | */ | ||
| 2452 | if (aborted) { | ||
| 2453 | pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); | ||
| 2454 | wait_for_completion(&cmd->cmd_wait_comp); | ||
| 2455 | cmd->se_tfo->release_cmd(cmd); | ||
| 2456 | ret = 1; | ||
| 2481 | } | 2457 | } |
| 2482 | return ret; | 2458 | return ret; |
| 2483 | } | 2459 | } |
| @@ -2517,26 +2493,46 @@ out: | |||
| 2517 | } | 2493 | } |
| 2518 | EXPORT_SYMBOL(target_get_sess_cmd); | 2494 | EXPORT_SYMBOL(target_get_sess_cmd); |
| 2519 | 2495 | ||
| 2496 | static void target_free_cmd_mem(struct se_cmd *cmd) | ||
| 2497 | { | ||
| 2498 | transport_free_pages(cmd); | ||
| 2499 | |||
| 2500 | if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) | ||
| 2501 | core_tmr_release_req(cmd->se_tmr_req); | ||
| 2502 | if (cmd->t_task_cdb != cmd->__t_task_cdb) | ||
| 2503 | kfree(cmd->t_task_cdb); | ||
| 2504 | } | ||
| 2505 | |||
| 2520 | static void target_release_cmd_kref(struct kref *kref) | 2506 | static void target_release_cmd_kref(struct kref *kref) |
| 2521 | { | 2507 | { |
| 2522 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); | 2508 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); |
| 2523 | struct se_session *se_sess = se_cmd->se_sess; | 2509 | struct se_session *se_sess = se_cmd->se_sess; |
| 2524 | unsigned long flags; | 2510 | unsigned long flags; |
| 2511 | bool fabric_stop; | ||
| 2525 | 2512 | ||
| 2526 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2513 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
| 2527 | if (list_empty(&se_cmd->se_cmd_list)) { | 2514 | if (list_empty(&se_cmd->se_cmd_list)) { |
| 2528 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2515 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 2516 | target_free_cmd_mem(se_cmd); | ||
| 2529 | se_cmd->se_tfo->release_cmd(se_cmd); | 2517 | se_cmd->se_tfo->release_cmd(se_cmd); |
| 2530 | return; | 2518 | return; |
| 2531 | } | 2519 | } |
| 2532 | if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { | 2520 | |
| 2521 | spin_lock(&se_cmd->t_state_lock); | ||
| 2522 | fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); | ||
| 2523 | spin_unlock(&se_cmd->t_state_lock); | ||
| 2524 | |||
| 2525 | if (se_cmd->cmd_wait_set || fabric_stop) { | ||
| 2526 | list_del_init(&se_cmd->se_cmd_list); | ||
| 2533 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2527 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 2528 | target_free_cmd_mem(se_cmd); | ||
| 2534 | complete(&se_cmd->cmd_wait_comp); | 2529 | complete(&se_cmd->cmd_wait_comp); |
| 2535 | return; | 2530 | return; |
| 2536 | } | 2531 | } |
| 2537 | list_del(&se_cmd->se_cmd_list); | 2532 | list_del_init(&se_cmd->se_cmd_list); |
| 2538 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2533 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 2539 | 2534 | ||
| 2535 | target_free_cmd_mem(se_cmd); | ||
| 2540 | se_cmd->se_tfo->release_cmd(se_cmd); | 2536 | se_cmd->se_tfo->release_cmd(se_cmd); |
| 2541 | } | 2537 | } |
| 2542 | 2538 | ||
| @@ -2548,6 +2544,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) | |||
| 2548 | struct se_session *se_sess = se_cmd->se_sess; | 2544 | struct se_session *se_sess = se_cmd->se_sess; |
| 2549 | 2545 | ||
| 2550 | if (!se_sess) { | 2546 | if (!se_sess) { |
| 2547 | target_free_cmd_mem(se_cmd); | ||
| 2551 | se_cmd->se_tfo->release_cmd(se_cmd); | 2548 | se_cmd->se_tfo->release_cmd(se_cmd); |
| 2552 | return 1; | 2549 | return 1; |
| 2553 | } | 2550 | } |
| @@ -2564,6 +2561,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) | |||
| 2564 | { | 2561 | { |
| 2565 | struct se_cmd *se_cmd; | 2562 | struct se_cmd *se_cmd; |
| 2566 | unsigned long flags; | 2563 | unsigned long flags; |
| 2564 | int rc; | ||
| 2567 | 2565 | ||
| 2568 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2566 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
| 2569 | if (se_sess->sess_tearing_down) { | 2567 | if (se_sess->sess_tearing_down) { |
| @@ -2573,8 +2571,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) | |||
| 2573 | se_sess->sess_tearing_down = 1; | 2571 | se_sess->sess_tearing_down = 1; |
| 2574 | list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); | 2572 | list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); |
| 2575 | 2573 | ||
| 2576 | list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) | 2574 | list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { |
| 2577 | se_cmd->cmd_wait_set = 1; | 2575 | rc = kref_get_unless_zero(&se_cmd->cmd_kref); |
| 2576 | if (rc) { | ||
| 2577 | se_cmd->cmd_wait_set = 1; | ||
| 2578 | spin_lock(&se_cmd->t_state_lock); | ||
| 2579 | se_cmd->transport_state |= CMD_T_FABRIC_STOP; | ||
| 2580 | spin_unlock(&se_cmd->t_state_lock); | ||
| 2581 | } | ||
| 2582 | } | ||
| 2578 | 2583 | ||
| 2579 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2584 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
| 2580 | } | 2585 | } |
| @@ -2587,15 +2592,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) | |||
| 2587 | { | 2592 | { |
| 2588 | struct se_cmd *se_cmd, *tmp_cmd; | 2593 | struct se_cmd *se_cmd, *tmp_cmd; |
| 2589 | unsigned long flags; | 2594 | unsigned long flags; |
| 2595 | bool tas; | ||
| 2590 | 2596 | ||
| 2591 | list_for_each_entry_safe(se_cmd, tmp_cmd, | 2597 | list_for_each_entry_safe(se_cmd, tmp_cmd, |
| 2592 | &se_sess->sess_wait_list, se_cmd_list) { | 2598 | &se_sess->sess_wait_list, se_cmd_list) { |
| 2593 | list_del(&se_cmd->se_cmd_list); | 2599 | list_del_init(&se_cmd->se_cmd_list); |
| 2594 | 2600 | ||
| 2595 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" | 2601 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" |
| 2596 | " %d\n", se_cmd, se_cmd->t_state, | 2602 | " %d\n", se_cmd, se_cmd->t_state, |
| 2597 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | 2603 | se_cmd->se_tfo->get_cmd_state(se_cmd)); |
| 2598 | 2604 | ||
| 2605 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); | ||
| 2606 | tas = (se_cmd->transport_state & CMD_T_TAS); | ||
| 2607 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | ||
| 2608 | |||
| 2609 | if (!target_put_sess_cmd(se_cmd)) { | ||
| 2610 | if (tas) | ||
| 2611 | target_put_sess_cmd(se_cmd); | ||
| 2612 | } | ||
| 2613 | |||
| 2599 | wait_for_completion(&se_cmd->cmd_wait_comp); | 2614 | wait_for_completion(&se_cmd->cmd_wait_comp); |
| 2600 | pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" | 2615 | pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" |
| 2601 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | 2616 | " fabric state: %d\n", se_cmd, se_cmd->t_state, |
| @@ -2617,53 +2632,75 @@ void transport_clear_lun_ref(struct se_lun *lun) | |||
| 2617 | wait_for_completion(&lun->lun_ref_comp); | 2632 | wait_for_completion(&lun->lun_ref_comp); |
| 2618 | } | 2633 | } |
| 2619 | 2634 | ||
| 2620 | /** | 2635 | static bool |
| 2621 | * transport_wait_for_tasks - wait for completion to occur | 2636 | __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, |
| 2622 | * @cmd: command to wait | 2637 | bool *aborted, bool *tas, unsigned long *flags) |
| 2623 | * | 2638 | __releases(&cmd->t_state_lock) |
| 2624 | * Called from frontend fabric context to wait for storage engine | 2639 | __acquires(&cmd->t_state_lock) |
| 2625 | * to pause and/or release frontend generated struct se_cmd. | ||
| 2626 | */ | ||
| 2627 | bool transport_wait_for_tasks(struct se_cmd *cmd) | ||
| 2628 | { | 2640 | { |
| 2629 | unsigned long flags; | ||
| 2630 | 2641 | ||
| 2631 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2642 | assert_spin_locked(&cmd->t_state_lock); |
| 2643 | WARN_ON_ONCE(!irqs_disabled()); | ||
| 2644 | |||
| 2645 | if (fabric_stop) | ||
| 2646 | cmd->transport_state |= CMD_T_FABRIC_STOP; | ||
| 2647 | |||
| 2648 | if (cmd->transport_state & CMD_T_ABORTED) | ||
| 2649 | *aborted = true; | ||
| 2650 | |||
| 2651 | if (cmd->transport_state & CMD_T_TAS) | ||
| 2652 | *tas = true; | ||
| 2653 | |||
| 2632 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && | 2654 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && |
| 2633 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { | 2655 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) |
| 2634 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 2635 | return false; | 2656 | return false; |
| 2636 | } | ||
| 2637 | 2657 | ||
| 2638 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && | 2658 | if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && |
| 2639 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { | 2659 | !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) |
| 2640 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 2641 | return false; | 2660 | return false; |
| 2642 | } | ||
| 2643 | 2661 | ||
| 2644 | if (!(cmd->transport_state & CMD_T_ACTIVE)) { | 2662 | if (!(cmd->transport_state & CMD_T_ACTIVE)) |
| 2645 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2663 | return false; |
| 2664 | |||
| 2665 | if (fabric_stop && *aborted) | ||
| 2646 | return false; | 2666 | return false; |
| 2647 | } | ||
| 2648 | 2667 | ||
| 2649 | cmd->transport_state |= CMD_T_STOP; | 2668 | cmd->transport_state |= CMD_T_STOP; |
| 2650 | 2669 | ||
| 2651 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", | 2670 | pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," |
| 2652 | cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | 2671 | " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, |
| 2672 | cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); | ||
| 2653 | 2673 | ||
| 2654 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2674 | spin_unlock_irqrestore(&cmd->t_state_lock, *flags); |
| 2655 | 2675 | ||
| 2656 | wait_for_completion(&cmd->t_transport_stop_comp); | 2676 | wait_for_completion(&cmd->t_transport_stop_comp); |
| 2657 | 2677 | ||
| 2658 | spin_lock_irqsave(&cmd->t_state_lock, flags); | 2678 | spin_lock_irqsave(&cmd->t_state_lock, *flags); |
| 2659 | cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); | 2679 | cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); |
| 2660 | 2680 | ||
| 2661 | pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", | 2681 | pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" |
| 2662 | cmd->tag); | 2682 | "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); |
| 2663 | 2683 | ||
| 2684 | return true; | ||
| 2685 | } | ||
| 2686 | |||
| 2687 | /** | ||
| 2688 | * transport_wait_for_tasks - wait for completion to occur | ||
| 2689 | * @cmd: command to wait | ||
| 2690 | * | ||
| 2691 | * Called from frontend fabric context to wait for storage engine | ||
| 2692 | * to pause and/or release frontend generated struct se_cmd. | ||
| 2693 | */ | ||
| 2694 | bool transport_wait_for_tasks(struct se_cmd *cmd) | ||
| 2695 | { | ||
| 2696 | unsigned long flags; | ||
| 2697 | bool ret, aborted = false, tas = false; | ||
| 2698 | |||
| 2699 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
| 2700 | ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); | ||
| 2664 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2701 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
| 2665 | 2702 | ||
| 2666 | return true; | 2703 | return ret; |
| 2667 | } | 2704 | } |
| 2668 | EXPORT_SYMBOL(transport_wait_for_tasks); | 2705 | EXPORT_SYMBOL(transport_wait_for_tasks); |
| 2669 | 2706 | ||
| @@ -2845,28 +2882,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, | |||
| 2845 | } | 2882 | } |
| 2846 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); | 2883 | EXPORT_SYMBOL(transport_send_check_condition_and_sense); |
| 2847 | 2884 | ||
| 2848 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | 2885 | static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) |
| 2886 | __releases(&cmd->t_state_lock) | ||
| 2887 | __acquires(&cmd->t_state_lock) | ||
| 2849 | { | 2888 | { |
| 2889 | assert_spin_locked(&cmd->t_state_lock); | ||
| 2890 | WARN_ON_ONCE(!irqs_disabled()); | ||
| 2891 | |||
| 2850 | if (!(cmd->transport_state & CMD_T_ABORTED)) | 2892 | if (!(cmd->transport_state & CMD_T_ABORTED)) |
| 2851 | return 0; | 2893 | return 0; |
| 2852 | |||
| 2853 | /* | 2894 | /* |
| 2854 | * If cmd has been aborted but either no status is to be sent or it has | 2895 | * If cmd has been aborted but either no status is to be sent or it has |
| 2855 | * already been sent, just return | 2896 | * already been sent, just return |
| 2856 | */ | 2897 | */ |
| 2857 | if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) | 2898 | if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { |
| 2899 | if (send_status) | ||
| 2900 | cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; | ||
| 2858 | return 1; | 2901 | return 1; |
| 2902 | } | ||
| 2859 | 2903 | ||
| 2860 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", | 2904 | pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" |
| 2861 | cmd->t_task_cdb[0], cmd->tag); | 2905 | " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag); |
| 2862 | 2906 | ||
| 2863 | cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; | 2907 | cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; |
| 2864 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 2908 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
| 2865 | trace_target_cmd_complete(cmd); | 2909 | trace_target_cmd_complete(cmd); |
| 2910 | |||
| 2911 | spin_unlock_irq(&cmd->t_state_lock); | ||
| 2866 | cmd->se_tfo->queue_status(cmd); | 2912 | cmd->se_tfo->queue_status(cmd); |
| 2913 | spin_lock_irq(&cmd->t_state_lock); | ||
| 2867 | 2914 | ||
| 2868 | return 1; | 2915 | return 1; |
| 2869 | } | 2916 | } |
| 2917 | |||
| 2918 | int transport_check_aborted_status(struct se_cmd *cmd, int send_status) | ||
| 2919 | { | ||
| 2920 | int ret; | ||
| 2921 | |||
| 2922 | spin_lock_irq(&cmd->t_state_lock); | ||
| 2923 | ret = __transport_check_aborted_status(cmd, send_status); | ||
| 2924 | spin_unlock_irq(&cmd->t_state_lock); | ||
| 2925 | |||
| 2926 | return ret; | ||
| 2927 | } | ||
| 2870 | EXPORT_SYMBOL(transport_check_aborted_status); | 2928 | EXPORT_SYMBOL(transport_check_aborted_status); |
| 2871 | 2929 | ||
| 2872 | void transport_send_task_abort(struct se_cmd *cmd) | 2930 | void transport_send_task_abort(struct se_cmd *cmd) |
| @@ -2888,11 +2946,17 @@ void transport_send_task_abort(struct se_cmd *cmd) | |||
| 2888 | */ | 2946 | */ |
| 2889 | if (cmd->data_direction == DMA_TO_DEVICE) { | 2947 | if (cmd->data_direction == DMA_TO_DEVICE) { |
| 2890 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { | 2948 | if (cmd->se_tfo->write_pending_status(cmd) != 0) { |
| 2891 | cmd->transport_state |= CMD_T_ABORTED; | 2949 | spin_lock_irqsave(&cmd->t_state_lock, flags); |
| 2950 | if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { | ||
| 2951 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 2952 | goto send_abort; | ||
| 2953 | } | ||
| 2892 | cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; | 2954 | cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; |
| 2955 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 2893 | return; | 2956 | return; |
| 2894 | } | 2957 | } |
| 2895 | } | 2958 | } |
| 2959 | send_abort: | ||
| 2896 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; | 2960 | cmd->scsi_status = SAM_STAT_TASK_ABORTED; |
| 2897 | 2961 | ||
| 2898 | transport_lun_remove_cmd(cmd); | 2962 | transport_lun_remove_cmd(cmd); |
| @@ -2909,8 +2973,17 @@ static void target_tmr_work(struct work_struct *work) | |||
| 2909 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); | 2973 | struct se_cmd *cmd = container_of(work, struct se_cmd, work); |
| 2910 | struct se_device *dev = cmd->se_dev; | 2974 | struct se_device *dev = cmd->se_dev; |
| 2911 | struct se_tmr_req *tmr = cmd->se_tmr_req; | 2975 | struct se_tmr_req *tmr = cmd->se_tmr_req; |
| 2976 | unsigned long flags; | ||
| 2912 | int ret; | 2977 | int ret; |
| 2913 | 2978 | ||
| 2979 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
| 2980 | if (cmd->transport_state & CMD_T_ABORTED) { | ||
| 2981 | tmr->response = TMR_FUNCTION_REJECTED; | ||
| 2982 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 2983 | goto check_stop; | ||
| 2984 | } | ||
| 2985 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 2986 | |||
| 2914 | switch (tmr->function) { | 2987 | switch (tmr->function) { |
| 2915 | case TMR_ABORT_TASK: | 2988 | case TMR_ABORT_TASK: |
| 2916 | core_tmr_abort_task(dev, tmr, cmd->se_sess); | 2989 | core_tmr_abort_task(dev, tmr, cmd->se_sess); |
| @@ -2943,9 +3016,17 @@ static void target_tmr_work(struct work_struct *work) | |||
| 2943 | break; | 3016 | break; |
| 2944 | } | 3017 | } |
| 2945 | 3018 | ||
| 3019 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
| 3020 | if (cmd->transport_state & CMD_T_ABORTED) { | ||
| 3021 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 3022 | goto check_stop; | ||
| 3023 | } | ||
| 2946 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; | 3024 | cmd->t_state = TRANSPORT_ISTATE_PROCESSING; |
| 3025 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
| 3026 | |||
| 2947 | cmd->se_tfo->queue_tm_rsp(cmd); | 3027 | cmd->se_tfo->queue_tm_rsp(cmd); |
| 2948 | 3028 | ||
| 3029 | check_stop: | ||
| 2949 | transport_cmd_check_stop_to_fabric(cmd); | 3030 | transport_cmd_check_stop_to_fabric(cmd); |
| 2950 | } | 3031 | } |
| 2951 | 3032 | ||
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index dd600e5ead71..94f5154ac788 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -903,7 +903,7 @@ static int tcmu_configure_device(struct se_device *dev) | |||
| 903 | info->version = __stringify(TCMU_MAILBOX_VERSION); | 903 | info->version = __stringify(TCMU_MAILBOX_VERSION); |
| 904 | 904 | ||
| 905 | info->mem[0].name = "tcm-user command & data buffer"; | 905 | info->mem[0].name = "tcm-user command & data buffer"; |
| 906 | info->mem[0].addr = (phys_addr_t) udev->mb_addr; | 906 | info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; |
| 907 | info->mem[0].size = TCMU_RING_SIZE; | 907 | info->mem[0].size = TCMU_RING_SIZE; |
| 908 | info->mem[0].memtype = UIO_MEM_VIRTUAL; | 908 | info->mem[0].memtype = UIO_MEM_VIRTUAL; |
| 909 | 909 | ||
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 8cc4ac64a91c..7c92c09be213 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
| @@ -195,7 +195,7 @@ config IMX_THERMAL | |||
| 195 | passive trip is crossed. | 195 | passive trip is crossed. |
| 196 | 196 | ||
| 197 | config SPEAR_THERMAL | 197 | config SPEAR_THERMAL |
| 198 | bool "SPEAr thermal sensor driver" | 198 | tristate "SPEAr thermal sensor driver" |
| 199 | depends on PLAT_SPEAR || COMPILE_TEST | 199 | depends on PLAT_SPEAR || COMPILE_TEST |
| 200 | depends on OF | 200 | depends on OF |
| 201 | help | 201 | help |
| @@ -237,8 +237,8 @@ config DOVE_THERMAL | |||
| 237 | framework. | 237 | framework. |
| 238 | 238 | ||
| 239 | config DB8500_THERMAL | 239 | config DB8500_THERMAL |
| 240 | bool "DB8500 thermal management" | 240 | tristate "DB8500 thermal management" |
| 241 | depends on ARCH_U8500 | 241 | depends on MFD_DB8500_PRCMU |
| 242 | default y | 242 | default y |
| 243 | help | 243 | help |
| 244 | Adds DB8500 thermal management implementation according to the thermal | 244 | Adds DB8500 thermal management implementation according to the thermal |
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index e3fbc5a5d88f..6ceac4f2d4b2 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
| @@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device, | |||
| 377 | * get_load() - get load for a cpu since last updated | 377 | * get_load() - get load for a cpu since last updated |
| 378 | * @cpufreq_device: &struct cpufreq_cooling_device for this cpu | 378 | * @cpufreq_device: &struct cpufreq_cooling_device for this cpu |
| 379 | * @cpu: cpu number | 379 | * @cpu: cpu number |
| 380 | * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus | ||
| 380 | * | 381 | * |
| 381 | * Return: The average load of cpu @cpu in percentage since this | 382 | * Return: The average load of cpu @cpu in percentage since this |
| 382 | * function was last called. | 383 | * function was last called. |
| 383 | */ | 384 | */ |
| 384 | static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu) | 385 | static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu, |
| 386 | int cpu_idx) | ||
| 385 | { | 387 | { |
| 386 | u32 load; | 388 | u32 load; |
| 387 | u64 now, now_idle, delta_time, delta_idle; | 389 | u64 now, now_idle, delta_time, delta_idle; |
| 388 | 390 | ||
| 389 | now_idle = get_cpu_idle_time(cpu, &now, 0); | 391 | now_idle = get_cpu_idle_time(cpu, &now, 0); |
| 390 | delta_idle = now_idle - cpufreq_device->time_in_idle[cpu]; | 392 | delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx]; |
| 391 | delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu]; | 393 | delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx]; |
| 392 | 394 | ||
| 393 | if (delta_time <= delta_idle) | 395 | if (delta_time <= delta_idle) |
| 394 | load = 0; | 396 | load = 0; |
| 395 | else | 397 | else |
| 396 | load = div64_u64(100 * (delta_time - delta_idle), delta_time); | 398 | load = div64_u64(100 * (delta_time - delta_idle), delta_time); |
| 397 | 399 | ||
| 398 | cpufreq_device->time_in_idle[cpu] = now_idle; | 400 | cpufreq_device->time_in_idle[cpu_idx] = now_idle; |
| 399 | cpufreq_device->time_in_idle_timestamp[cpu] = now; | 401 | cpufreq_device->time_in_idle_timestamp[cpu_idx] = now; |
| 400 | 402 | ||
| 401 | return load; | 403 | return load; |
| 402 | } | 404 | } |
| @@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, | |||
| 598 | u32 load; | 600 | u32 load; |
| 599 | 601 | ||
| 600 | if (cpu_online(cpu)) | 602 | if (cpu_online(cpu)) |
| 601 | load = get_load(cpufreq_device, cpu); | 603 | load = get_load(cpufreq_device, cpu, i); |
| 602 | else | 604 | else |
| 603 | load = 0; | 605 | load = 0; |
| 604 | 606 | ||
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index be4eedcb839a..9043f8f91852 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
| @@ -475,14 +475,10 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, | |||
| 475 | 475 | ||
| 476 | sensor_np = of_node_get(dev->of_node); | 476 | sensor_np = of_node_get(dev->of_node); |
| 477 | 477 | ||
| 478 | for_each_child_of_node(np, child) { | 478 | for_each_available_child_of_node(np, child) { |
| 479 | struct of_phandle_args sensor_specs; | 479 | struct of_phandle_args sensor_specs; |
| 480 | int ret, id; | 480 | int ret, id; |
| 481 | 481 | ||
| 482 | /* Check whether child is enabled or not */ | ||
| 483 | if (!of_device_is_available(child)) | ||
| 484 | continue; | ||
| 485 | |||
| 486 | /* For now, thermal framework supports only 1 sensor per zone */ | 482 | /* For now, thermal framework supports only 1 sensor per zone */ |
| 487 | ret = of_parse_phandle_with_args(child, "thermal-sensors", | 483 | ret = of_parse_phandle_with_args(child, "thermal-sensors", |
| 488 | "#thermal-sensor-cells", | 484 | "#thermal-sensor-cells", |
| @@ -881,16 +877,12 @@ int __init of_parse_thermal_zones(void) | |||
| 881 | return 0; /* Run successfully on systems without thermal DT */ | 877 | return 0; /* Run successfully on systems without thermal DT */ |
| 882 | } | 878 | } |
| 883 | 879 | ||
| 884 | for_each_child_of_node(np, child) { | 880 | for_each_available_child_of_node(np, child) { |
| 885 | struct thermal_zone_device *zone; | 881 | struct thermal_zone_device *zone; |
| 886 | struct thermal_zone_params *tzp; | 882 | struct thermal_zone_params *tzp; |
| 887 | int i, mask = 0; | 883 | int i, mask = 0; |
| 888 | u32 prop; | 884 | u32 prop; |
| 889 | 885 | ||
| 890 | /* Check whether child is enabled or not */ | ||
| 891 | if (!of_device_is_available(child)) | ||
| 892 | continue; | ||
| 893 | |||
| 894 | tz = thermal_of_build_thermal_zone(child); | 886 | tz = thermal_of_build_thermal_zone(child); |
| 895 | if (IS_ERR(tz)) { | 887 | if (IS_ERR(tz)) { |
| 896 | pr_err("failed to build thermal zone %s: %ld\n", | 888 | pr_err("failed to build thermal zone %s: %ld\n", |
| @@ -968,13 +960,9 @@ void of_thermal_destroy_zones(void) | |||
| 968 | return; | 960 | return; |
| 969 | } | 961 | } |
| 970 | 962 | ||
| 971 | for_each_child_of_node(np, child) { | 963 | for_each_available_child_of_node(np, child) { |
| 972 | struct thermal_zone_device *zone; | 964 | struct thermal_zone_device *zone; |
| 973 | 965 | ||
| 974 | /* Check whether child is enabled or not */ | ||
| 975 | if (!of_device_is_available(child)) | ||
| 976 | continue; | ||
| 977 | |||
| 978 | zone = thermal_zone_get_zone_by_name(child->name); | 966 | zone = thermal_zone_get_zone_by_name(child->name); |
| 979 | if (IS_ERR(zone)) | 967 | if (IS_ERR(zone)) |
| 980 | continue; | 968 | continue; |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 44b9c485157d..0e735acea33a 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
| 24 | #include <linux/io.h> | 24 | #include <linux/io.h> |
| 25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 26 | #include <linux/of_device.h> | ||
| 26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 27 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
| 28 | #include <linux/reboot.h> | 29 | #include <linux/reboot.h> |
| @@ -75,8 +76,10 @@ struct rcar_thermal_priv { | |||
| 75 | #define rcar_has_irq_support(priv) ((priv)->common->base) | 76 | #define rcar_has_irq_support(priv) ((priv)->common->base) |
| 76 | #define rcar_id_to_shift(priv) ((priv)->id * 8) | 77 | #define rcar_id_to_shift(priv) ((priv)->id * 8) |
| 77 | 78 | ||
| 79 | #define USE_OF_THERMAL 1 | ||
| 78 | static const struct of_device_id rcar_thermal_dt_ids[] = { | 80 | static const struct of_device_id rcar_thermal_dt_ids[] = { |
| 79 | { .compatible = "renesas,rcar-thermal", }, | 81 | { .compatible = "renesas,rcar-thermal", }, |
| 82 | { .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL }, | ||
| 80 | {}, | 83 | {}, |
| 81 | }; | 84 | }; |
| 82 | MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids); | 85 | MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids); |
| @@ -200,9 +203,9 @@ err_out_unlock: | |||
| 200 | return ret; | 203 | return ret; |
| 201 | } | 204 | } |
| 202 | 205 | ||
| 203 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) | 206 | static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv, |
| 207 | int *temp) | ||
| 204 | { | 208 | { |
| 205 | struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); | ||
| 206 | int tmp; | 209 | int tmp; |
| 207 | int ret; | 210 | int ret; |
| 208 | 211 | ||
| @@ -226,6 +229,20 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) | |||
| 226 | return 0; | 229 | return 0; |
| 227 | } | 230 | } |
| 228 | 231 | ||
| 232 | static int rcar_thermal_of_get_temp(void *data, int *temp) | ||
| 233 | { | ||
| 234 | struct rcar_thermal_priv *priv = data; | ||
| 235 | |||
| 236 | return rcar_thermal_get_current_temp(priv, temp); | ||
| 237 | } | ||
| 238 | |||
| 239 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) | ||
| 240 | { | ||
| 241 | struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); | ||
| 242 | |||
| 243 | return rcar_thermal_get_current_temp(priv, temp); | ||
| 244 | } | ||
| 245 | |||
| 229 | static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone, | 246 | static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone, |
| 230 | int trip, enum thermal_trip_type *type) | 247 | int trip, enum thermal_trip_type *type) |
| 231 | { | 248 | { |
| @@ -282,6 +299,10 @@ static int rcar_thermal_notify(struct thermal_zone_device *zone, | |||
| 282 | return 0; | 299 | return 0; |
| 283 | } | 300 | } |
| 284 | 301 | ||
| 302 | static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = { | ||
| 303 | .get_temp = rcar_thermal_of_get_temp, | ||
| 304 | }; | ||
| 305 | |||
| 285 | static struct thermal_zone_device_ops rcar_thermal_zone_ops = { | 306 | static struct thermal_zone_device_ops rcar_thermal_zone_ops = { |
| 286 | .get_temp = rcar_thermal_get_temp, | 307 | .get_temp = rcar_thermal_get_temp, |
| 287 | .get_trip_type = rcar_thermal_get_trip_type, | 308 | .get_trip_type = rcar_thermal_get_trip_type, |
| @@ -318,14 +339,20 @@ static void rcar_thermal_work(struct work_struct *work) | |||
| 318 | 339 | ||
| 319 | priv = container_of(work, struct rcar_thermal_priv, work.work); | 340 | priv = container_of(work, struct rcar_thermal_priv, work.work); |
| 320 | 341 | ||
| 321 | rcar_thermal_get_temp(priv->zone, &cctemp); | 342 | ret = rcar_thermal_get_current_temp(priv, &cctemp); |
| 343 | if (ret < 0) | ||
| 344 | return; | ||
| 345 | |||
| 322 | ret = rcar_thermal_update_temp(priv); | 346 | ret = rcar_thermal_update_temp(priv); |
| 323 | if (ret < 0) | 347 | if (ret < 0) |
| 324 | return; | 348 | return; |
| 325 | 349 | ||
| 326 | rcar_thermal_irq_enable(priv); | 350 | rcar_thermal_irq_enable(priv); |
| 327 | 351 | ||
| 328 | rcar_thermal_get_temp(priv->zone, &nctemp); | 352 | ret = rcar_thermal_get_current_temp(priv, &nctemp); |
| 353 | if (ret < 0) | ||
| 354 | return; | ||
| 355 | |||
| 329 | if (nctemp != cctemp) | 356 | if (nctemp != cctemp) |
| 330 | thermal_zone_device_update(priv->zone); | 357 | thermal_zone_device_update(priv->zone); |
| 331 | } | 358 | } |
| @@ -403,6 +430,8 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
| 403 | struct rcar_thermal_priv *priv; | 430 | struct rcar_thermal_priv *priv; |
| 404 | struct device *dev = &pdev->dev; | 431 | struct device *dev = &pdev->dev; |
| 405 | struct resource *res, *irq; | 432 | struct resource *res, *irq; |
| 433 | const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev); | ||
| 434 | unsigned long of_data = (unsigned long)of_id->data; | ||
| 406 | int mres = 0; | 435 | int mres = 0; |
| 407 | int i; | 436 | int i; |
| 408 | int ret = -ENODEV; | 437 | int ret = -ENODEV; |
| @@ -463,7 +492,13 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
| 463 | if (ret < 0) | 492 | if (ret < 0) |
| 464 | goto error_unregister; | 493 | goto error_unregister; |
| 465 | 494 | ||
| 466 | priv->zone = thermal_zone_device_register("rcar_thermal", | 495 | if (of_data == USE_OF_THERMAL) |
| 496 | priv->zone = thermal_zone_of_sensor_register( | ||
| 497 | dev, i, priv, | ||
| 498 | &rcar_thermal_zone_of_ops); | ||
| 499 | else | ||
| 500 | priv->zone = thermal_zone_device_register( | ||
| 501 | "rcar_thermal", | ||
| 467 | 1, 0, priv, | 502 | 1, 0, priv, |
| 468 | &rcar_thermal_zone_ops, NULL, 0, | 503 | &rcar_thermal_zone_ops, NULL, 0, |
| 469 | idle); | 504 | idle); |
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c index 534dd9136662..81b35aace9de 100644 --- a/drivers/thermal/spear_thermal.c +++ b/drivers/thermal/spear_thermal.c | |||
| @@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = { | |||
| 54 | .get_temp = thermal_get_temp, | 54 | .get_temp = thermal_get_temp, |
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | #ifdef CONFIG_PM | 57 | static int __maybe_unused spear_thermal_suspend(struct device *dev) |
| 58 | static int spear_thermal_suspend(struct device *dev) | ||
| 59 | { | 58 | { |
| 60 | struct platform_device *pdev = to_platform_device(dev); | 59 | struct platform_device *pdev = to_platform_device(dev); |
| 61 | struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); | 60 | struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); |
| @@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev) | |||
| 72 | return 0; | 71 | return 0; |
| 73 | } | 72 | } |
| 74 | 73 | ||
| 75 | static int spear_thermal_resume(struct device *dev) | 74 | static int __maybe_unused spear_thermal_resume(struct device *dev) |
| 76 | { | 75 | { |
| 77 | struct platform_device *pdev = to_platform_device(dev); | 76 | struct platform_device *pdev = to_platform_device(dev); |
| 78 | struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); | 77 | struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); |
| @@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev) | |||
| 94 | 93 | ||
| 95 | return 0; | 94 | return 0; |
| 96 | } | 95 | } |
| 97 | #endif | ||
| 98 | 96 | ||
| 99 | static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend, | 97 | static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend, |
| 100 | spear_thermal_resume); | 98 | spear_thermal_resume); |
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index b3110040164a..2348fa613707 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
| @@ -681,7 +681,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) | |||
| 681 | /* this is called once with whichever end is closed last */ | 681 | /* this is called once with whichever end is closed last */ |
| 682 | static void pty_unix98_shutdown(struct tty_struct *tty) | 682 | static void pty_unix98_shutdown(struct tty_struct *tty) |
| 683 | { | 683 | { |
| 684 | devpts_kill_index(tty->driver_data, tty->index); | 684 | struct inode *ptmx_inode; |
| 685 | |||
| 686 | if (tty->driver->subtype == PTY_TYPE_MASTER) | ||
| 687 | ptmx_inode = tty->driver_data; | ||
| 688 | else | ||
| 689 | ptmx_inode = tty->link->driver_data; | ||
| 690 | devpts_kill_index(ptmx_inode, tty->index); | ||
| 691 | devpts_del_ref(ptmx_inode); | ||
| 685 | } | 692 | } |
| 686 | 693 | ||
| 687 | static const struct tty_operations ptm_unix98_ops = { | 694 | static const struct tty_operations ptm_unix98_ops = { |
| @@ -773,6 +780,18 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 773 | set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ | 780 | set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ |
| 774 | tty->driver_data = inode; | 781 | tty->driver_data = inode; |
| 775 | 782 | ||
| 783 | /* | ||
| 784 | * In the case where all references to ptmx inode are dropped and we | ||
| 785 | * still have /dev/tty opened pointing to the master/slave pair (ptmx | ||
| 786 | * is closed/released before /dev/tty), we must make sure that the inode | ||
| 787 | * is still valid when we call the final pty_unix98_shutdown, thus we | ||
| 788 | * hold an additional reference to the ptmx inode. For the same /dev/tty | ||
| 789 | * last close case, we also need to make sure the super_block isn't | ||
| 790 | * destroyed (devpts instance unmounted), before /dev/tty is closed and | ||
| 791 | * on its release devpts_kill_index is called. | ||
| 792 | */ | ||
| 793 | devpts_add_ref(inode); | ||
| 794 | |||
| 776 | tty_add_file(tty, filp); | 795 | tty_add_file(tty, filp); |
| 777 | 796 | ||
| 778 | slave_inode = devpts_pty_new(inode, | 797 | slave_inode = devpts_pty_new(inode, |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index e71ec78fc11e..7cd6f9a90542 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
| @@ -1941,6 +1941,7 @@ pci_wch_ch38x_setup(struct serial_private *priv, | |||
| 1941 | #define PCIE_VENDOR_ID_WCH 0x1c00 | 1941 | #define PCIE_VENDOR_ID_WCH 0x1c00 |
| 1942 | #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 | 1942 | #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 |
| 1943 | #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 | 1943 | #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 |
| 1944 | #define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253 | ||
| 1944 | 1945 | ||
| 1945 | #define PCI_VENDOR_ID_PERICOM 0x12D8 | 1946 | #define PCI_VENDOR_ID_PERICOM 0x12D8 |
| 1946 | #define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 | 1947 | #define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 |
| @@ -2637,6 +2638,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
| 2637 | .subdevice = PCI_ANY_ID, | 2638 | .subdevice = PCI_ANY_ID, |
| 2638 | .setup = pci_wch_ch353_setup, | 2639 | .setup = pci_wch_ch353_setup, |
| 2639 | }, | 2640 | }, |
| 2641 | /* WCH CH382 2S card (16850 clone) */ | ||
| 2642 | { | ||
| 2643 | .vendor = PCIE_VENDOR_ID_WCH, | ||
| 2644 | .device = PCIE_DEVICE_ID_WCH_CH382_2S, | ||
| 2645 | .subvendor = PCI_ANY_ID, | ||
| 2646 | .subdevice = PCI_ANY_ID, | ||
| 2647 | .setup = pci_wch_ch38x_setup, | ||
| 2648 | }, | ||
| 2640 | /* WCH CH382 2S1P card (16850 clone) */ | 2649 | /* WCH CH382 2S1P card (16850 clone) */ |
| 2641 | { | 2650 | { |
| 2642 | .vendor = PCIE_VENDOR_ID_WCH, | 2651 | .vendor = PCIE_VENDOR_ID_WCH, |
| @@ -2955,6 +2964,7 @@ enum pci_board_num_t { | |||
| 2955 | pbn_fintek_4, | 2964 | pbn_fintek_4, |
| 2956 | pbn_fintek_8, | 2965 | pbn_fintek_8, |
| 2957 | pbn_fintek_12, | 2966 | pbn_fintek_12, |
| 2967 | pbn_wch382_2, | ||
| 2958 | pbn_wch384_4, | 2968 | pbn_wch384_4, |
| 2959 | pbn_pericom_PI7C9X7951, | 2969 | pbn_pericom_PI7C9X7951, |
| 2960 | pbn_pericom_PI7C9X7952, | 2970 | pbn_pericom_PI7C9X7952, |
| @@ -3775,6 +3785,13 @@ static struct pciserial_board pci_boards[] = { | |||
| 3775 | .base_baud = 115200, | 3785 | .base_baud = 115200, |
| 3776 | .first_offset = 0x40, | 3786 | .first_offset = 0x40, |
| 3777 | }, | 3787 | }, |
| 3788 | [pbn_wch382_2] = { | ||
| 3789 | .flags = FL_BASE0, | ||
| 3790 | .num_ports = 2, | ||
| 3791 | .base_baud = 115200, | ||
| 3792 | .uart_offset = 8, | ||
| 3793 | .first_offset = 0xC0, | ||
| 3794 | }, | ||
| 3778 | [pbn_wch384_4] = { | 3795 | [pbn_wch384_4] = { |
| 3779 | .flags = FL_BASE0, | 3796 | .flags = FL_BASE0, |
| 3780 | .num_ports = 4, | 3797 | .num_ports = 4, |
| @@ -5574,6 +5591,10 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
| 5574 | PCI_ANY_ID, PCI_ANY_ID, | 5591 | PCI_ANY_ID, PCI_ANY_ID, |
| 5575 | 0, 0, pbn_b0_bt_2_115200 }, | 5592 | 0, 0, pbn_b0_bt_2_115200 }, |
| 5576 | 5593 | ||
| 5594 | { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S, | ||
| 5595 | PCI_ANY_ID, PCI_ANY_ID, | ||
| 5596 | 0, 0, pbn_wch382_2 }, | ||
| 5597 | |||
| 5577 | { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, | 5598 | { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, |
| 5578 | PCI_ANY_ID, PCI_ANY_ID, | 5599 | PCI_ANY_ID, PCI_ANY_ID, |
| 5579 | 0, 0, pbn_wch384_4 }, | 5600 | 0, 0, pbn_wch384_4 }, |
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index b645f9228ed7..fa49eb1e2fa2 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c | |||
| @@ -1165,7 +1165,7 @@ serial_omap_type(struct uart_port *port) | |||
| 1165 | 1165 | ||
| 1166 | #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | 1166 | #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) |
| 1167 | 1167 | ||
| 1168 | static void wait_for_xmitr(struct uart_omap_port *up) | 1168 | static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up) |
| 1169 | { | 1169 | { |
| 1170 | unsigned int status, tmout = 10000; | 1170 | unsigned int status, tmout = 10000; |
| 1171 | 1171 | ||
| @@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up) | |||
| 1343 | 1343 | ||
| 1344 | /* Enable or disable the rs485 support */ | 1344 | /* Enable or disable the rs485 support */ |
| 1345 | static int | 1345 | static int |
| 1346 | serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) | 1346 | serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485) |
| 1347 | { | 1347 | { |
| 1348 | struct uart_omap_port *up = to_uart_omap_port(port); | 1348 | struct uart_omap_port *up = to_uart_omap_port(port); |
| 1349 | unsigned int mode; | 1349 | unsigned int mode; |
| @@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) | |||
| 1356 | up->ier = 0; | 1356 | up->ier = 0; |
| 1357 | serial_out(up, UART_IER, 0); | 1357 | serial_out(up, UART_IER, 0); |
| 1358 | 1358 | ||
| 1359 | /* Clamp the delays to [0, 100ms] */ | ||
| 1360 | rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U); | ||
| 1361 | rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U); | ||
| 1362 | |||
| 1359 | /* store new config */ | 1363 | /* store new config */ |
| 1360 | port->rs485 = *rs485conf; | 1364 | port->rs485 = *rs485; |
| 1361 | 1365 | ||
| 1362 | /* | 1366 | /* |
| 1363 | * Just as a precaution, only allow rs485 | 1367 | * Just as a precaution, only allow rs485 |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 5cec01c75691..a7eacef1bd22 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -2066,13 +2066,12 @@ retry_open: | |||
| 2066 | if (tty) { | 2066 | if (tty) { |
| 2067 | mutex_unlock(&tty_mutex); | 2067 | mutex_unlock(&tty_mutex); |
| 2068 | retval = tty_lock_interruptible(tty); | 2068 | retval = tty_lock_interruptible(tty); |
| 2069 | tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */ | ||
| 2069 | if (retval) { | 2070 | if (retval) { |
| 2070 | if (retval == -EINTR) | 2071 | if (retval == -EINTR) |
| 2071 | retval = -ERESTARTSYS; | 2072 | retval = -ERESTARTSYS; |
| 2072 | goto err_unref; | 2073 | goto err_unref; |
| 2073 | } | 2074 | } |
| 2074 | /* safe to drop the kref from tty_driver_lookup_tty() */ | ||
| 2075 | tty_kref_put(tty); | ||
| 2076 | retval = tty_reopen(tty); | 2075 | retval = tty_reopen(tty); |
| 2077 | if (retval < 0) { | 2076 | if (retval < 0) { |
| 2078 | tty_unlock(tty); | 2077 | tty_unlock(tty); |
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c index d2f3c4cd697f..dfa9ec03fa8e 100644 --- a/drivers/tty/tty_mutex.c +++ b/drivers/tty/tty_mutex.c | |||
| @@ -21,10 +21,15 @@ EXPORT_SYMBOL(tty_lock); | |||
| 21 | 21 | ||
| 22 | int tty_lock_interruptible(struct tty_struct *tty) | 22 | int tty_lock_interruptible(struct tty_struct *tty) |
| 23 | { | 23 | { |
| 24 | int ret; | ||
| 25 | |||
| 24 | if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty)) | 26 | if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty)) |
| 25 | return -EIO; | 27 | return -EIO; |
| 26 | tty_kref_get(tty); | 28 | tty_kref_get(tty); |
| 27 | return mutex_lock_interruptible(&tty->legacy_mutex); | 29 | ret = mutex_lock_interruptible(&tty->legacy_mutex); |
| 30 | if (ret) | ||
| 31 | tty_kref_put(tty); | ||
| 32 | return ret; | ||
| 28 | } | 33 | } |
| 29 | 34 | ||
| 30 | void __lockfunc tty_unlock(struct tty_struct *tty) | 35 | void __lockfunc tty_unlock(struct tty_struct *tty) |
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index 0081725c6b5b..6b2a06d09f2b 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c | |||
| @@ -152,7 +152,7 @@ static void lcdc_write(unsigned int val, unsigned int addr) | |||
| 152 | 152 | ||
| 153 | struct da8xx_fb_par { | 153 | struct da8xx_fb_par { |
| 154 | struct device *dev; | 154 | struct device *dev; |
| 155 | resource_size_t p_palette_base; | 155 | dma_addr_t p_palette_base; |
| 156 | unsigned char *v_palette_base; | 156 | unsigned char *v_palette_base; |
| 157 | dma_addr_t vram_phys; | 157 | dma_addr_t vram_phys; |
| 158 | unsigned long vram_size; | 158 | unsigned long vram_size; |
| @@ -1428,7 +1428,7 @@ static int fb_probe(struct platform_device *device) | |||
| 1428 | 1428 | ||
| 1429 | par->vram_virt = dma_alloc_coherent(NULL, | 1429 | par->vram_virt = dma_alloc_coherent(NULL, |
| 1430 | par->vram_size, | 1430 | par->vram_size, |
| 1431 | (resource_size_t *) &par->vram_phys, | 1431 | &par->vram_phys, |
| 1432 | GFP_KERNEL | GFP_DMA); | 1432 | GFP_KERNEL | GFP_DMA); |
| 1433 | if (!par->vram_virt) { | 1433 | if (!par->vram_virt) { |
| 1434 | dev_err(&device->dev, | 1434 | dev_err(&device->dev, |
| @@ -1448,7 +1448,7 @@ static int fb_probe(struct platform_device *device) | |||
| 1448 | 1448 | ||
| 1449 | /* allocate palette buffer */ | 1449 | /* allocate palette buffer */ |
| 1450 | par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, | 1450 | par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, |
| 1451 | (resource_size_t *)&par->p_palette_base, | 1451 | &par->p_palette_base, |
| 1452 | GFP_KERNEL | GFP_DMA); | 1452 | GFP_KERNEL | GFP_DMA); |
| 1453 | if (!par->v_palette_base) { | 1453 | if (!par->v_palette_base) { |
| 1454 | dev_err(&device->dev, | 1454 | dev_err(&device->dev, |
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c index 95873f26e39c..de2f3e793786 100644 --- a/drivers/video/fbdev/exynos/s6e8ax0.c +++ b/drivers/video/fbdev/exynos/s6e8ax0.c | |||
| @@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev) | |||
| 829 | return 0; | 829 | return 0; |
| 830 | } | 830 | } |
| 831 | 831 | ||
| 832 | #ifdef CONFIG_PM | 832 | static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev) |
| 833 | static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev) | ||
| 834 | { | 833 | { |
| 835 | struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); | 834 | struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); |
| 836 | 835 | ||
| @@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev) | |||
| 843 | return 0; | 842 | return 0; |
| 844 | } | 843 | } |
| 845 | 844 | ||
| 846 | static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) | 845 | static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) |
| 847 | { | 846 | { |
| 848 | struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); | 847 | struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); |
| 849 | 848 | ||
| @@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) | |||
| 855 | 854 | ||
| 856 | return 0; | 855 | return 0; |
| 857 | } | 856 | } |
| 858 | #else | ||
| 859 | #define s6e8ax0_suspend NULL | ||
| 860 | #define s6e8ax0_resume NULL | ||
| 861 | #endif | ||
| 862 | 857 | ||
| 863 | static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { | 858 | static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { |
| 864 | .name = "s6e8ax0", | 859 | .name = "s6e8ax0", |
| @@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { | |||
| 867 | .power_on = s6e8ax0_power_on, | 862 | .power_on = s6e8ax0_power_on, |
| 868 | .set_sequence = s6e8ax0_set_sequence, | 863 | .set_sequence = s6e8ax0_set_sequence, |
| 869 | .probe = s6e8ax0_probe, | 864 | .probe = s6e8ax0_probe, |
| 870 | .suspend = s6e8ax0_suspend, | 865 | .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL, |
| 871 | .resume = s6e8ax0_resume, | 866 | .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL, |
| 872 | }; | 867 | }; |
| 873 | 868 | ||
| 874 | static int s6e8ax0_init(void) | 869 | static int s6e8ax0_init(void) |
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index cee88603efc9..bb2f1e866020 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c | |||
| @@ -902,6 +902,21 @@ static int imxfb_probe(struct platform_device *pdev) | |||
| 902 | goto failed_getclock; | 902 | goto failed_getclock; |
| 903 | } | 903 | } |
| 904 | 904 | ||
| 905 | /* | ||
| 906 | * The LCDC controller does not have an enable bit. The | ||
| 907 | * controller starts directly when the clocks are enabled. | ||
| 908 | * If the clocks are enabled when the controller is not yet | ||
| 909 | * programmed with proper register values (enabled at the | ||
| 910 | * bootloader, for example) then it just goes into some undefined | ||
| 911 | * state. | ||
| 912 | * To avoid this issue, let's enable and disable LCDC IPG clock | ||
| 913 | * so that we force some kind of 'reset' to the LCDC block. | ||
| 914 | */ | ||
| 915 | ret = clk_prepare_enable(fbi->clk_ipg); | ||
| 916 | if (ret) | ||
| 917 | goto failed_getclock; | ||
| 918 | clk_disable_unprepare(fbi->clk_ipg); | ||
| 919 | |||
| 905 | fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); | 920 | fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); |
| 906 | if (IS_ERR(fbi->clk_ahb)) { | 921 | if (IS_ERR(fbi->clk_ahb)) { |
| 907 | ret = PTR_ERR(fbi->clk_ahb); | 922 | ret = PTR_ERR(fbi->clk_ahb); |
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index de54a4748065..b6f83d5df9fd 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c | |||
| @@ -503,8 +503,7 @@ static int mmphw_probe(struct platform_device *pdev) | |||
| 503 | ctrl->reg_base = devm_ioremap_nocache(ctrl->dev, | 503 | ctrl->reg_base = devm_ioremap_nocache(ctrl->dev, |
| 504 | res->start, resource_size(res)); | 504 | res->start, resource_size(res)); |
| 505 | if (ctrl->reg_base == NULL) { | 505 | if (ctrl->reg_base == NULL) { |
| 506 | dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__, | 506 | dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res); |
| 507 | res->start, res->end); | ||
| 508 | ret = -ENOMEM; | 507 | ret = -ENOMEM; |
| 509 | goto failed; | 508 | goto failed; |
| 510 | } | 509 | } |
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c index c9293aea8ec3..a970edc2a6f8 100644 --- a/drivers/video/fbdev/ocfb.c +++ b/drivers/video/fbdev/ocfb.c | |||
| @@ -123,11 +123,11 @@ static int ocfb_setupfb(struct ocfb_dev *fbdev) | |||
| 123 | 123 | ||
| 124 | /* Horizontal timings */ | 124 | /* Horizontal timings */ |
| 125 | ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 | | 125 | ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 | |
| 126 | (var->right_margin - 1) << 16 | (var->xres - 1)); | 126 | (var->left_margin - 1) << 16 | (var->xres - 1)); |
| 127 | 127 | ||
| 128 | /* Vertical timings */ | 128 | /* Vertical timings */ |
| 129 | ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 | | 129 | ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 | |
| 130 | (var->lower_margin - 1) << 16 | (var->yres - 1)); | 130 | (var->upper_margin - 1) << 16 | (var->yres - 1)); |
| 131 | 131 | ||
| 132 | /* Total length of frame */ | 132 | /* Total length of frame */ |
| 133 | hlen = var->left_margin + var->right_margin + var->hsync_len + | 133 | hlen = var->left_margin + var->right_margin + var->hsync_len + |
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index b79a74a98a23..5fbeab38889e 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c | |||
| @@ -202,7 +202,7 @@ static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge) | |||
| 202 | bridge = ca91cx42_bridge->driver_priv; | 202 | bridge = ca91cx42_bridge->driver_priv; |
| 203 | 203 | ||
| 204 | /* Need pdev */ | 204 | /* Need pdev */ |
| 205 | pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev); | 205 | pdev = to_pci_dev(ca91cx42_bridge->parent); |
| 206 | 206 | ||
| 207 | INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers); | 207 | INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers); |
| 208 | 208 | ||
| @@ -293,8 +293,7 @@ static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, | |||
| 293 | iowrite32(tmp, bridge->base + LINT_EN); | 293 | iowrite32(tmp, bridge->base + LINT_EN); |
| 294 | 294 | ||
| 295 | if ((state == 0) && (sync != 0)) { | 295 | if ((state == 0) && (sync != 0)) { |
| 296 | pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, | 296 | pdev = to_pci_dev(ca91cx42_bridge->parent); |
| 297 | dev); | ||
| 298 | 297 | ||
| 299 | synchronize_irq(pdev->irq); | 298 | synchronize_irq(pdev->irq); |
| 300 | } | 299 | } |
| @@ -518,7 +517,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image, | |||
| 518 | dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n"); | 517 | dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n"); |
| 519 | return -EINVAL; | 518 | return -EINVAL; |
| 520 | } | 519 | } |
| 521 | pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev); | 520 | pdev = to_pci_dev(ca91cx42_bridge->parent); |
| 522 | 521 | ||
| 523 | existing_size = (unsigned long long)(image->bus_resource.end - | 522 | existing_size = (unsigned long long)(image->bus_resource.end - |
| 524 | image->bus_resource.start); | 523 | image->bus_resource.start); |
| @@ -1519,7 +1518,7 @@ static void *ca91cx42_alloc_consistent(struct device *parent, size_t size, | |||
| 1519 | struct pci_dev *pdev; | 1518 | struct pci_dev *pdev; |
| 1520 | 1519 | ||
| 1521 | /* Find pci_dev container of dev */ | 1520 | /* Find pci_dev container of dev */ |
| 1522 | pdev = container_of(parent, struct pci_dev, dev); | 1521 | pdev = to_pci_dev(parent); |
| 1523 | 1522 | ||
| 1524 | return pci_alloc_consistent(pdev, size, dma); | 1523 | return pci_alloc_consistent(pdev, size, dma); |
| 1525 | } | 1524 | } |
| @@ -1530,7 +1529,7 @@ static void ca91cx42_free_consistent(struct device *parent, size_t size, | |||
| 1530 | struct pci_dev *pdev; | 1529 | struct pci_dev *pdev; |
| 1531 | 1530 | ||
| 1532 | /* Find pci_dev container of dev */ | 1531 | /* Find pci_dev container of dev */ |
| 1533 | pdev = container_of(parent, struct pci_dev, dev); | 1532 | pdev = to_pci_dev(parent); |
| 1534 | 1533 | ||
| 1535 | pci_free_consistent(pdev, size, vaddr, dma); | 1534 | pci_free_consistent(pdev, size, vaddr, dma); |
| 1536 | } | 1535 | } |
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index 0e2f43bccf1f..a2eec97d5064 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c | |||
| @@ -618,7 +618,6 @@ static u8 omap_w1_read_byte(void *_hdq) | |||
| 618 | 618 | ||
| 619 | hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS, | 619 | hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS, |
| 620 | ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); | 620 | ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); |
| 621 | hdq_data->hdq_usecount = 0; | ||
| 622 | 621 | ||
| 623 | /* Write followed by a read, release the module */ | 622 | /* Write followed by a read, release the module */ |
| 624 | if (hdq_data->init_trans) { | 623 | if (hdq_data->init_trans) { |
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index c9a7ff67d395..89a784751738 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c | |||
| @@ -1147,7 +1147,6 @@ int w1_process(void *data) | |||
| 1147 | jremain = 1; | 1147 | jremain = 1; |
| 1148 | } | 1148 | } |
| 1149 | 1149 | ||
| 1150 | try_to_freeze(); | ||
| 1151 | __set_current_state(TASK_INTERRUPTIBLE); | 1150 | __set_current_state(TASK_INTERRUPTIBLE); |
| 1152 | 1151 | ||
| 1153 | /* hold list_mutex until after interruptible to prevent loosing | 1152 | /* hold list_mutex until after interruptible to prevent loosing |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 0f6d8515ba4f..86c2392bd968 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
| @@ -1214,6 +1214,21 @@ config SBC_EPX_C3_WATCHDOG | |||
| 1214 | To compile this driver as a module, choose M here: the | 1214 | To compile this driver as a module, choose M here: the |
| 1215 | module will be called sbc_epx_c3. | 1215 | module will be called sbc_epx_c3. |
| 1216 | 1216 | ||
| 1217 | config INTEL_MEI_WDT | ||
| 1218 | tristate "Intel MEI iAMT Watchdog" | ||
| 1219 | depends on INTEL_MEI && X86 | ||
| 1220 | select WATCHDOG_CORE | ||
| 1221 | ---help--- | ||
| 1222 | A device driver for the Intel MEI iAMT watchdog. | ||
| 1223 | |||
| 1224 | The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog. | ||
| 1225 | Whenever the OS hangs or crashes, iAMT will send an event | ||
| 1226 | to any subscriber to this event. The watchdog doesn't reset the | ||
| 1227 | the platform. | ||
| 1228 | |||
| 1229 | To compile this driver as a module, choose M here: | ||
| 1230 | the module will be called mei_wdt. | ||
| 1231 | |||
| 1217 | # M32R Architecture | 1232 | # M32R Architecture |
| 1218 | 1233 | ||
| 1219 | # M68K Architecture | 1234 | # M68K Architecture |
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index f566753256ab..efc4f788e0f2 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile | |||
| @@ -126,6 +126,7 @@ obj-$(CONFIG_MACHZ_WDT) += machzwd.o | |||
| 126 | obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o | 126 | obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o |
| 127 | obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o | 127 | obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o |
| 128 | obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o | 128 | obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o |
| 129 | obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o | ||
| 129 | 130 | ||
| 130 | # M32R Architecture | 131 | # M32R Architecture |
| 131 | 132 | ||
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c new file mode 100644 index 000000000000..630bd189f167 --- /dev/null +++ b/drivers/watchdog/mei_wdt.c | |||
| @@ -0,0 +1,724 @@ | |||
| 1 | /* | ||
| 2 | * Intel Management Engine Interface (Intel MEI) Linux driver | ||
| 3 | * Copyright (c) 2015, Intel Corporation. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms and conditions of the GNU General Public License, | ||
| 7 | * version 2, as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/slab.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/debugfs.h> | ||
| 19 | #include <linux/completion.h> | ||
| 20 | #include <linux/watchdog.h> | ||
| 21 | |||
| 22 | #include <linux/uuid.h> | ||
| 23 | #include <linux/mei_cl_bus.h> | ||
| 24 | |||
| 25 | /* | ||
| 26 | * iAMT Watchdog Device | ||
| 27 | */ | ||
| 28 | #define INTEL_AMT_WATCHDOG_ID "iamt_wdt" | ||
| 29 | |||
| 30 | #define MEI_WDT_DEFAULT_TIMEOUT 120 /* seconds */ | ||
| 31 | #define MEI_WDT_MIN_TIMEOUT 120 /* seconds */ | ||
| 32 | #define MEI_WDT_MAX_TIMEOUT 65535 /* seconds */ | ||
| 33 | |||
| 34 | /* Commands */ | ||
| 35 | #define MEI_MANAGEMENT_CONTROL 0x02 | ||
| 36 | |||
| 37 | /* MEI Management Control version number */ | ||
| 38 | #define MEI_MC_VERSION_NUMBER 0x10 | ||
| 39 | |||
| 40 | /* Sub Commands */ | ||
| 41 | #define MEI_MC_START_WD_TIMER_REQ 0x13 | ||
| 42 | #define MEI_MC_START_WD_TIMER_RES 0x83 | ||
| 43 | #define MEI_WDT_STATUS_SUCCESS 0 | ||
| 44 | #define MEI_WDT_WDSTATE_NOT_REQUIRED 0x1 | ||
| 45 | #define MEI_MC_STOP_WD_TIMER_REQ 0x14 | ||
| 46 | |||
| 47 | /** | ||
| 48 | * enum mei_wdt_state - internal watchdog state | ||
| 49 | * | ||
| 50 | * @MEI_WDT_PROBE: wd in probing stage | ||
| 51 | * @MEI_WDT_IDLE: wd is idle and not opened | ||
| 52 | * @MEI_WDT_START: wd was opened, start was called | ||
| 53 | * @MEI_WDT_RUNNING: wd is expecting keep alive pings | ||
| 54 | * @MEI_WDT_STOPPING: wd is stopping and will move to IDLE | ||
| 55 | * @MEI_WDT_NOT_REQUIRED: wd device is not required | ||
| 56 | */ | ||
| 57 | enum mei_wdt_state { | ||
| 58 | MEI_WDT_PROBE, | ||
| 59 | MEI_WDT_IDLE, | ||
| 60 | MEI_WDT_START, | ||
| 61 | MEI_WDT_RUNNING, | ||
| 62 | MEI_WDT_STOPPING, | ||
| 63 | MEI_WDT_NOT_REQUIRED, | ||
| 64 | }; | ||
| 65 | |||
| 66 | static const char *mei_wdt_state_str(enum mei_wdt_state state) | ||
| 67 | { | ||
| 68 | switch (state) { | ||
| 69 | case MEI_WDT_PROBE: | ||
| 70 | return "PROBE"; | ||
| 71 | case MEI_WDT_IDLE: | ||
| 72 | return "IDLE"; | ||
| 73 | case MEI_WDT_START: | ||
| 74 | return "START"; | ||
| 75 | case MEI_WDT_RUNNING: | ||
| 76 | return "RUNNING"; | ||
| 77 | case MEI_WDT_STOPPING: | ||
| 78 | return "STOPPING"; | ||
| 79 | case MEI_WDT_NOT_REQUIRED: | ||
| 80 | return "NOT_REQUIRED"; | ||
| 81 | default: | ||
| 82 | return "unknown"; | ||
| 83 | } | ||
| 84 | } | ||
| 85 | |||
| 86 | /** | ||
| 87 | * struct mei_wdt - mei watchdog driver | ||
| 88 | * @wdd: watchdog device | ||
| 89 | * | ||
| 90 | * @cldev: mei watchdog client device | ||
| 91 | * @state: watchdog internal state | ||
| 92 | * @resp_required: ping required response | ||
| 93 | * @response: ping response completion | ||
| 94 | * @unregister: unregister worker | ||
| 95 | * @reg_lock: watchdog device registration lock | ||
| 96 | * @timeout: watchdog current timeout | ||
| 97 | * | ||
| 98 | * @dbgfs_dir: debugfs dir entry | ||
| 99 | */ | ||
| 100 | struct mei_wdt { | ||
| 101 | struct watchdog_device wdd; | ||
| 102 | |||
| 103 | struct mei_cl_device *cldev; | ||
| 104 | enum mei_wdt_state state; | ||
| 105 | bool resp_required; | ||
| 106 | struct completion response; | ||
| 107 | struct work_struct unregister; | ||
| 108 | struct mutex reg_lock; | ||
| 109 | u16 timeout; | ||
| 110 | |||
| 111 | #if IS_ENABLED(CONFIG_DEBUG_FS) | ||
| 112 | struct dentry *dbgfs_dir; | ||
| 113 | #endif /* CONFIG_DEBUG_FS */ | ||
| 114 | }; | ||
| 115 | |||
| 116 | /* | ||
| 117 | * struct mei_mc_hdr - Management Control Command Header | ||
| 118 | * | ||
| 119 | * @command: Management Control (0x2) | ||
| 120 | * @bytecount: Number of bytes in the message beyond this byte | ||
| 121 | * @subcommand: Management Control Subcommand | ||
| 122 | * @versionnumber: Management Control Version (0x10) | ||
| 123 | */ | ||
| 124 | struct mei_mc_hdr { | ||
| 125 | u8 command; | ||
| 126 | u8 bytecount; | ||
| 127 | u8 subcommand; | ||
| 128 | u8 versionnumber; | ||
| 129 | }; | ||
| 130 | |||
| 131 | /** | ||
| 132 | * struct mei_wdt_start_request watchdog start/ping | ||
| 133 | * | ||
| 134 | * @hdr: Management Control Command Header | ||
| 135 | * @timeout: timeout value | ||
| 136 | * @reserved: reserved (legacy) | ||
| 137 | */ | ||
| 138 | struct mei_wdt_start_request { | ||
| 139 | struct mei_mc_hdr hdr; | ||
| 140 | u16 timeout; | ||
| 141 | u8 reserved[17]; | ||
| 142 | } __packed; | ||
| 143 | |||
| 144 | /** | ||
| 145 | * struct mei_wdt_start_response watchdog start/ping response | ||
| 146 | * | ||
| 147 | * @hdr: Management Control Command Header | ||
| 148 | * @status: operation status | ||
| 149 | * @wdstate: watchdog status bit mask | ||
| 150 | */ | ||
| 151 | struct mei_wdt_start_response { | ||
| 152 | struct mei_mc_hdr hdr; | ||
| 153 | u8 status; | ||
| 154 | u8 wdstate; | ||
| 155 | } __packed; | ||
| 156 | |||
| 157 | /** | ||
| 158 | * struct mei_wdt_stop_request - watchdog stop | ||
| 159 | * | ||
| 160 | * @hdr: Management Control Command Header | ||
| 161 | */ | ||
| 162 | struct mei_wdt_stop_request { | ||
| 163 | struct mei_mc_hdr hdr; | ||
| 164 | } __packed; | ||
| 165 | |||
| 166 | /** | ||
| 167 | * mei_wdt_ping - send wd start/ping command | ||
| 168 | * | ||
| 169 | * @wdt: mei watchdog device | ||
| 170 | * | ||
| 171 | * Return: 0 on success, | ||
| 172 | * negative errno code on failure | ||
| 173 | */ | ||
| 174 | static int mei_wdt_ping(struct mei_wdt *wdt) | ||
| 175 | { | ||
| 176 | struct mei_wdt_start_request req; | ||
| 177 | const size_t req_len = sizeof(req); | ||
| 178 | int ret; | ||
| 179 | |||
| 180 | memset(&req, 0, req_len); | ||
| 181 | req.hdr.command = MEI_MANAGEMENT_CONTROL; | ||
| 182 | req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand); | ||
| 183 | req.hdr.subcommand = MEI_MC_START_WD_TIMER_REQ; | ||
| 184 | req.hdr.versionnumber = MEI_MC_VERSION_NUMBER; | ||
| 185 | req.timeout = wdt->timeout; | ||
| 186 | |||
| 187 | ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len); | ||
| 188 | if (ret < 0) | ||
| 189 | return ret; | ||
| 190 | |||
| 191 | return 0; | ||
| 192 | } | ||
| 193 | |||
| 194 | /** | ||
| 195 | * mei_wdt_stop - send wd stop command | ||
| 196 | * | ||
| 197 | * @wdt: mei watchdog device | ||
| 198 | * | ||
| 199 | * Return: 0 on success, | ||
| 200 | * negative errno code on failure | ||
| 201 | */ | ||
| 202 | static int mei_wdt_stop(struct mei_wdt *wdt) | ||
| 203 | { | ||
| 204 | struct mei_wdt_stop_request req; | ||
| 205 | const size_t req_len = sizeof(req); | ||
| 206 | int ret; | ||
| 207 | |||
| 208 | memset(&req, 0, req_len); | ||
| 209 | req.hdr.command = MEI_MANAGEMENT_CONTROL; | ||
| 210 | req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand); | ||
| 211 | req.hdr.subcommand = MEI_MC_STOP_WD_TIMER_REQ; | ||
| 212 | req.hdr.versionnumber = MEI_MC_VERSION_NUMBER; | ||
| 213 | |||
| 214 | ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len); | ||
| 215 | if (ret < 0) | ||
| 216 | return ret; | ||
| 217 | |||
| 218 | return 0; | ||
| 219 | } | ||
| 220 | |||
| 221 | /** | ||
| 222 | * mei_wdt_ops_start - wd start command from the watchdog core. | ||
| 223 | * | ||
| 224 | * @wdd: watchdog device | ||
| 225 | * | ||
| 226 | * Return: 0 on success or -ENODEV; | ||
| 227 | */ | ||
| 228 | static int mei_wdt_ops_start(struct watchdog_device *wdd) | ||
| 229 | { | ||
| 230 | struct mei_wdt *wdt = watchdog_get_drvdata(wdd); | ||
| 231 | |||
| 232 | wdt->state = MEI_WDT_START; | ||
| 233 | wdd->timeout = wdt->timeout; | ||
| 234 | return 0; | ||
| 235 | } | ||
| 236 | |||
| 237 | /** | ||
| 238 | * mei_wdt_ops_stop - wd stop command from the watchdog core. | ||
| 239 | * | ||
| 240 | * @wdd: watchdog device | ||
| 241 | * | ||
| 242 | * Return: 0 if success, negative errno code for failure | ||
| 243 | */ | ||
| 244 | static int mei_wdt_ops_stop(struct watchdog_device *wdd) | ||
| 245 | { | ||
| 246 | struct mei_wdt *wdt = watchdog_get_drvdata(wdd); | ||
| 247 | int ret; | ||
| 248 | |||
| 249 | if (wdt->state != MEI_WDT_RUNNING) | ||
| 250 | return 0; | ||
| 251 | |||
| 252 | wdt->state = MEI_WDT_STOPPING; | ||
| 253 | |||
| 254 | ret = mei_wdt_stop(wdt); | ||
| 255 | if (ret) | ||
| 256 | return ret; | ||
| 257 | |||
| 258 | wdt->state = MEI_WDT_IDLE; | ||
| 259 | |||
| 260 | return 0; | ||
| 261 | } | ||
| 262 | |||
| 263 | /** | ||
| 264 | * mei_wdt_ops_ping - wd ping command from the watchdog core. | ||
| 265 | * | ||
| 266 | * @wdd: watchdog device | ||
| 267 | * | ||
| 268 | * Return: 0 if success, negative errno code on failure | ||
| 269 | */ | ||
| 270 | static int mei_wdt_ops_ping(struct watchdog_device *wdd) | ||
| 271 | { | ||
| 272 | struct mei_wdt *wdt = watchdog_get_drvdata(wdd); | ||
| 273 | int ret; | ||
| 274 | |||
| 275 | if (wdt->state != MEI_WDT_START && wdt->state != MEI_WDT_RUNNING) | ||
| 276 | return 0; | ||
| 277 | |||
| 278 | if (wdt->resp_required) | ||
| 279 | init_completion(&wdt->response); | ||
| 280 | |||
| 281 | wdt->state = MEI_WDT_RUNNING; | ||
| 282 | ret = mei_wdt_ping(wdt); | ||
| 283 | if (ret) | ||
| 284 | return ret; | ||
| 285 | |||
| 286 | if (wdt->resp_required) | ||
| 287 | ret = wait_for_completion_killable(&wdt->response); | ||
| 288 | |||
| 289 | return ret; | ||
| 290 | } | ||
| 291 | |||
| 292 | /** | ||
| 293 | * mei_wdt_ops_set_timeout - wd set timeout command from the watchdog core. | ||
| 294 | * | ||
| 295 | * @wdd: watchdog device | ||
| 296 | * @timeout: timeout value to set | ||
| 297 | * | ||
| 298 | * Return: 0 if success, negative errno code for failure | ||
| 299 | */ | ||
| 300 | static int mei_wdt_ops_set_timeout(struct watchdog_device *wdd, | ||
| 301 | unsigned int timeout) | ||
| 302 | { | ||
| 303 | |||
| 304 | struct mei_wdt *wdt = watchdog_get_drvdata(wdd); | ||
| 305 | |||
| 306 | /* valid value is already checked by the caller */ | ||
| 307 | wdt->timeout = timeout; | ||
| 308 | wdd->timeout = timeout; | ||
| 309 | |||
| 310 | return 0; | ||
| 311 | } | ||
| 312 | |||
/* Operations dispatched by the watchdog core into this driver */
static const struct watchdog_ops wd_ops = {
	.owner = THIS_MODULE,
	.start = mei_wdt_ops_start,
	.stop = mei_wdt_ops_stop,
	.ping = mei_wdt_ops_ping,
	.set_timeout = mei_wdt_ops_set_timeout,
};
| 320 | |||
/* not const as the firmware_version field needs to be filled in at
 * probe time from the enumerated MEI client version
 */
static struct watchdog_info wd_info = {
	.identity = INTEL_AMT_WATCHDOG_ID,
	.options = WDIOF_KEEPALIVEPING |
		   WDIOF_SETTIMEOUT |
		   WDIOF_ALARMONLY,
};
| 328 | |||
| 329 | /** | ||
| 330 | * __mei_wdt_is_registered - check if wdt is registered | ||
| 331 | * | ||
| 332 | * @wdt: mei watchdog device | ||
| 333 | * | ||
| 334 | * Return: true if the wdt is registered with the watchdog subsystem | ||
| 335 | * Locking: should be called under wdt->reg_lock | ||
| 336 | */ | ||
| 337 | static inline bool __mei_wdt_is_registered(struct mei_wdt *wdt) | ||
| 338 | { | ||
| 339 | return !!watchdog_get_drvdata(&wdt->wdd); | ||
| 340 | } | ||
| 341 | |||
/**
 * mei_wdt_unregister - unregister from the watchdog subsystem
 *
 * @wdt: mei watchdog device
 *
 * Safe to call when not registered; serialized against concurrent
 * register/unregister via wdt->reg_lock.
 */
static void mei_wdt_unregister(struct mei_wdt *wdt)
{
	mutex_lock(&wdt->reg_lock);

	if (__mei_wdt_is_registered(wdt)) {
		watchdog_unregister_device(&wdt->wdd);
		/* clearing drvdata marks the device as unregistered */
		watchdog_set_drvdata(&wdt->wdd, NULL);
		/* wipe wdd so a later mei_wdt_register() starts clean */
		memset(&wdt->wdd, 0, sizeof(wdt->wdd));
	}

	mutex_unlock(&wdt->reg_lock);
}
| 359 | |||
/**
 * mei_wdt_register - register with the watchdog subsystem
 *
 * @wdt: mei watchdog device
 *
 * Idempotent: returns 0 without side effects when already registered.
 * Serialized via wdt->reg_lock.
 *
 * Return: 0 if success, negative errno code for failure
 */
static int mei_wdt_register(struct mei_wdt *wdt)
{
	struct device *dev;
	int ret;

	if (!wdt || !wdt->cldev)
		return -EINVAL;

	dev = &wdt->cldev->dev;

	mutex_lock(&wdt->reg_lock);

	/* registration can be requested from several paths (probe,
	 * rx callback, notification); succeed silently if already done
	 */
	if (__mei_wdt_is_registered(wdt)) {
		ret = 0;
		goto out;
	}

	wdt->wdd.info = &wd_info;
	wdt->wdd.ops = &wd_ops;
	wdt->wdd.parent = dev;
	wdt->wdd.timeout = MEI_WDT_DEFAULT_TIMEOUT;
	wdt->wdd.min_timeout = MEI_WDT_MIN_TIMEOUT;
	wdt->wdd.max_timeout = MEI_WDT_MAX_TIMEOUT;

	watchdog_set_drvdata(&wdt->wdd, wdt);
	ret = watchdog_register_device(&wdt->wdd);
	if (ret) {
		dev_err(dev, "unable to register watchdog device = %d.\n", ret);
		/* drvdata doubles as the "registered" flag; clear on failure */
		watchdog_set_drvdata(&wdt->wdd, NULL);
	}

	/* NOTE(review): state becomes MEI_WDT_IDLE even when registration
	 * failed — presumably intentional so the FW flow can continue;
	 * confirm against the state machine users.
	 */
	wdt->state = MEI_WDT_IDLE;

out:
	mutex_unlock(&wdt->reg_lock);
	return ret;
}
| 404 | |||
/**
 * mei_wdt_unregister_work - deferred unregistration worker
 *
 * @work: the unregister work item embedded in struct mei_wdt
 *
 * Unregistration is run from a worker because doing it directly in
 * the rx callback would deadlock on the watchdog core mutex (see the
 * comment in mei_wdt_event_rx()).
 */
static void mei_wdt_unregister_work(struct work_struct *work)
{
	struct mei_wdt *wdt = container_of(work, struct mei_wdt, unregister);

	mei_wdt_unregister(wdt);
}
| 411 | |||
/**
 * mei_wdt_event_rx - callback for data receive
 *
 * @cldev: bus device
 *
 * Parses the firmware response and advances the driver state machine:
 * in RUNNING state a "not required" wdstate schedules deferred
 * unregistration; in PROBE state the response decides whether the
 * watchdog device gets registered at all.
 */
static void mei_wdt_event_rx(struct mei_cl_device *cldev)
{
	struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
	struct mei_wdt_start_response res;
	const size_t res_len = sizeof(res);
	int ret;

	ret = mei_cldev_recv(wdt->cldev, (u8 *)&res, res_len);
	if (ret < 0) {
		dev_err(&cldev->dev, "failure in recv %d\n", ret);
		return;
	}

	/* Empty response can be sent on stop */
	if (ret == 0)
		return;

	/* must have at least a full management-control header */
	if (ret < sizeof(struct mei_mc_hdr)) {
		dev_err(&cldev->dev, "recv small data %d\n", ret);
		return;
	}

	if (res.hdr.command != MEI_MANAGEMENT_CONTROL ||
	    res.hdr.versionnumber != MEI_MC_VERSION_NUMBER) {
		dev_err(&cldev->dev, "wrong command received\n");
		return;
	}

	if (res.hdr.subcommand != MEI_MC_START_WD_TIMER_RES) {
		dev_warn(&cldev->dev, "unsupported command %d :%s[%d]\n",
			 res.hdr.subcommand,
			 mei_wdt_state_str(wdt->state),
			 wdt->state);
		return;
	}

	/* Run the unregistration in a worker as this can be
	 * run only after ping completion, otherwise the flow will
	 * deadlock on watchdog core mutex.
	 */
	if (wdt->state == MEI_WDT_RUNNING) {
		if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
			wdt->state = MEI_WDT_NOT_REQUIRED;
			schedule_work(&wdt->unregister);
		}
		/* a ping waiter may be blocked on the completion */
		goto out;
	}

	if (wdt->state == MEI_WDT_PROBE) {
		if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
			wdt->state = MEI_WDT_NOT_REQUIRED;
		} else {
			/* stop the watchdog and register watchdog device */
			mei_wdt_stop(wdt);
			mei_wdt_register(wdt);
		}
		return;
	}

	dev_warn(&cldev->dev, "not in correct state %s[%d]\n",
		 mei_wdt_state_str(wdt->state), wdt->state);

	/* falls through: complete the response even on a state mismatch
	 * so a waiter in mei_wdt_ops_ping() is not left blocked
	 */
out:
	if (!completion_done(&wdt->response))
		complete(&wdt->response);
}
| 483 | |||
| 484 | /* | ||
| 485 | * mei_wdt_notify_event - callback for event notification | ||
| 486 | * | ||
| 487 | * @cldev: bus device | ||
| 488 | */ | ||
| 489 | static void mei_wdt_notify_event(struct mei_cl_device *cldev) | ||
| 490 | { | ||
| 491 | struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev); | ||
| 492 | |||
| 493 | if (wdt->state != MEI_WDT_NOT_REQUIRED) | ||
| 494 | return; | ||
| 495 | |||
| 496 | mei_wdt_register(wdt); | ||
| 497 | } | ||
| 498 | |||
| 499 | /** | ||
| 500 | * mei_wdt_event - callback for event receive | ||
| 501 | * | ||
| 502 | * @cldev: bus device | ||
| 503 | * @events: event mask | ||
| 504 | * @context: callback context | ||
| 505 | */ | ||
| 506 | static void mei_wdt_event(struct mei_cl_device *cldev, | ||
| 507 | u32 events, void *context) | ||
| 508 | { | ||
| 509 | if (events & BIT(MEI_CL_EVENT_RX)) | ||
| 510 | mei_wdt_event_rx(cldev); | ||
| 511 | |||
| 512 | if (events & BIT(MEI_CL_EVENT_NOTIF)) | ||
| 513 | mei_wdt_notify_event(cldev); | ||
| 514 | } | ||
| 515 | |||
| 516 | #if IS_ENABLED(CONFIG_DEBUG_FS) | ||
| 517 | |||
| 518 | static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf, | ||
| 519 | size_t cnt, loff_t *ppos) | ||
| 520 | { | ||
| 521 | struct mei_wdt *wdt = file->private_data; | ||
| 522 | const size_t bufsz = 32; | ||
| 523 | char buf[32]; | ||
| 524 | ssize_t pos; | ||
| 525 | |||
| 526 | mutex_lock(&wdt->reg_lock); | ||
| 527 | pos = scnprintf(buf, bufsz, "%s\n", | ||
| 528 | __mei_wdt_is_registered(wdt) ? "activated" : "deactivated"); | ||
| 529 | mutex_unlock(&wdt->reg_lock); | ||
| 530 | |||
| 531 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); | ||
| 532 | } | ||
| 533 | |||
/* file operations for the debugfs "activation" file */
static const struct file_operations dbgfs_fops_activation = {
	.open = simple_open,
	.read = mei_dbgfs_read_activation,
	.llseek = generic_file_llseek,
};
| 539 | |||
| 540 | static ssize_t mei_dbgfs_read_state(struct file *file, char __user *ubuf, | ||
| 541 | size_t cnt, loff_t *ppos) | ||
| 542 | { | ||
| 543 | struct mei_wdt *wdt = file->private_data; | ||
| 544 | const size_t bufsz = 32; | ||
| 545 | char buf[bufsz]; | ||
| 546 | ssize_t pos; | ||
| 547 | |||
| 548 | pos = scnprintf(buf, bufsz, "state: %s\n", | ||
| 549 | mei_wdt_state_str(wdt->state)); | ||
| 550 | |||
| 551 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); | ||
| 552 | } | ||
| 553 | |||
/* file operations for the debugfs "state" file */
static const struct file_operations dbgfs_fops_state = {
	.open = simple_open,
	.read = mei_dbgfs_read_state,
	.llseek = generic_file_llseek,
};
| 559 | |||
/* remove the driver's debugfs directory and everything under it */
static void dbgfs_unregister(struct mei_wdt *wdt)
{
	debugfs_remove_recursive(wdt->dbgfs_dir);
	wdt->dbgfs_dir = NULL;
}
| 565 | |||
| 566 | static int dbgfs_register(struct mei_wdt *wdt) | ||
| 567 | { | ||
| 568 | struct dentry *dir, *f; | ||
| 569 | |||
| 570 | dir = debugfs_create_dir(KBUILD_MODNAME, NULL); | ||
| 571 | if (!dir) | ||
| 572 | return -ENOMEM; | ||
| 573 | |||
| 574 | wdt->dbgfs_dir = dir; | ||
| 575 | f = debugfs_create_file("state", S_IRUSR, dir, wdt, &dbgfs_fops_state); | ||
| 576 | if (!f) | ||
| 577 | goto err; | ||
| 578 | |||
| 579 | f = debugfs_create_file("activation", S_IRUSR, | ||
| 580 | dir, wdt, &dbgfs_fops_activation); | ||
| 581 | if (!f) | ||
| 582 | goto err; | ||
| 583 | |||
| 584 | return 0; | ||
| 585 | err: | ||
| 586 | dbgfs_unregister(wdt); | ||
| 587 | return -ENODEV; | ||
| 588 | } | ||
| 589 | |||
| 590 | #else | ||
| 591 | |||
/* no-op debugfs stubs for kernels built without CONFIG_DEBUG_FS */
static inline void dbgfs_unregister(struct mei_wdt *wdt) {}

static inline int dbgfs_register(struct mei_wdt *wdt)
{
	return 0;
}
| 598 | #endif /* CONFIG_DEBUG_FS */ | ||
| 599 | |||
/**
 * mei_wdt_probe - MEI client device probe
 *
 * @cldev: bus device
 * @id: matched device id entry
 *
 * Allocates the driver context, enables the MEI client, registers the
 * event callback and either pings the firmware (new protocol that
 * sends a response) or registers the watchdog device directly
 * (legacy protocol).
 *
 * Return: 0 on success, negative errno code on failure
 */
static int mei_wdt_probe(struct mei_cl_device *cldev,
			 const struct mei_cl_device_id *id)
{
	struct mei_wdt *wdt;
	int ret;

	wdt = kzalloc(sizeof(struct mei_wdt), GFP_KERNEL);
	if (!wdt)
		return -ENOMEM;

	wdt->timeout = MEI_WDT_DEFAULT_TIMEOUT;
	wdt->state = MEI_WDT_PROBE;
	wdt->cldev = cldev;
	/* clients newer than version 1 respond to start/ping messages */
	wdt->resp_required = mei_cldev_ver(cldev) > 0x1;
	mutex_init(&wdt->reg_lock);
	init_completion(&wdt->response);
	INIT_WORK(&wdt->unregister, mei_wdt_unregister_work);

	mei_cldev_set_drvdata(cldev, wdt);

	ret = mei_cldev_enable(cldev);
	if (ret < 0) {
		dev_err(&cldev->dev, "Could not enable cl device\n");
		goto err_out;
	}

	ret = mei_cldev_register_event_cb(wdt->cldev,
					  BIT(MEI_CL_EVENT_RX) |
					  BIT(MEI_CL_EVENT_NOTIF),
					  mei_wdt_event, NULL);

	/* on legacy devices notification is not supported
	 * this doesn't fail the registration for RX event
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&cldev->dev, "Could not register event ret=%d\n", ret);
		goto err_disable;
	}

	/* wd_info is deliberately non-const so this can be filled in */
	wd_info.firmware_version = mei_cldev_ver(cldev);

	/* with a responding client, registration is deferred to the rx
	 * callback triggered by this ping; legacy clients register now
	 */
	if (wdt->resp_required)
		ret = mei_wdt_ping(wdt);
	else
		ret = mei_wdt_register(wdt);

	if (ret)
		goto err_disable;

	/* debugfs failure is not fatal */
	if (dbgfs_register(wdt))
		dev_warn(&cldev->dev, "cannot register debugfs\n");

	return 0;

err_disable:
	mei_cldev_disable(cldev);

err_out:
	kfree(wdt);

	return ret;
}
| 662 | |||
/**
 * mei_wdt_remove - MEI client device remove
 *
 * @cldev: bus device
 *
 * Tears down in order: release any ping waiter, flush the deferred
 * unregister worker, unregister the watchdog device, disable the MEI
 * client, remove debugfs and free the context.
 *
 * Return: always 0
 */
static int mei_wdt_remove(struct mei_cl_device *cldev)
{
	struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);

	/* Free the caller in case of fw initiated or unexpected reset */
	if (!completion_done(&wdt->response))
		complete(&wdt->response);

	/* make sure the async unregister worker is not running anymore */
	cancel_work_sync(&wdt->unregister);

	mei_wdt_unregister(wdt);

	mei_cldev_disable(cldev);

	dbgfs_unregister(wdt);

	kfree(wdt);

	return 0;
}
| 683 | |||
/* UUID of the MEI watchdog client this driver binds to */
#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
			    0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)

static struct mei_cl_device_id mei_wdt_tbl[] = {
	{ .uuid = MEI_UUID_WD, .version = MEI_CL_VERSION_ANY },
	/* required last entry */
	{ }
};
MODULE_DEVICE_TABLE(mei, mei_wdt_tbl);
| 693 | |||
/* MEI client bus driver glue */
static struct mei_cl_driver mei_wdt_driver = {
	.id_table = mei_wdt_tbl,
	.name = KBUILD_MODNAME,

	.probe = mei_wdt_probe,
	.remove = mei_wdt_remove,
};
| 701 | |||
| 702 | static int __init mei_wdt_init(void) | ||
| 703 | { | ||
| 704 | int ret; | ||
| 705 | |||
| 706 | ret = mei_cldev_driver_register(&mei_wdt_driver); | ||
| 707 | if (ret) { | ||
| 708 | pr_err(KBUILD_MODNAME ": module registration failed\n"); | ||
| 709 | return ret; | ||
| 710 | } | ||
| 711 | return 0; | ||
| 712 | } | ||
| 713 | |||
/* module exit point: unregister the driver from the MEI client bus */
static void __exit mei_wdt_exit(void)
{
	mei_cldev_driver_unregister(&mei_wdt_driver);
}
| 718 | |||
| 719 | module_init(mei_wdt_init); | ||
| 720 | module_exit(mei_wdt_exit); | ||
| 721 | |||
| 722 | MODULE_AUTHOR("Intel Corporation"); | ||
| 723 | MODULE_LICENSE("GPL"); | ||
| 724 | MODULE_DESCRIPTION("Device driver for Intel MEI iAMT watchdog"); | ||
