| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-19 13:58:45 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-19 13:58:45 -0400 |
| commit | d9351ea14ddca708d3cb384f828af4bf82fcc772 (patch) | |
| tree | 90c5fe9067f1005ce512c63b2e664a670af72b4f | |
| parent | 39feaa3ff4453594297574e116a55bd6d5371f37 (diff) | |
| parent | fb4e0592654adb31bc6f3a738d6499b816a655d6 (diff) | |
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull IRQ chip updates from Ingo Molnar:
"A late irqchips update:
- New TI INTR/INTA set of drivers
- Rewrite of the stm32mp1-exti driver as a platform driver
- Update the IOMMU MSI mapping API to be RT friendly
- A number of cleanups and other low impact fixes"
* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
iommu/dma-iommu: Remove iommu_dma_map_msi_msg()
irqchip/gic-v3-mbi: Don't map the MSI page in mbi_compose_m{b, s}i_msg()
irqchip/ls-scfg-msi: Don't map the MSI page in ls_scfg_msi_compose_msg()
irqchip/gic-v3-its: Don't map the MSI page in its_irq_compose_msi_msg()
irqchip/gicv2m: Don't map the MSI page in gicv2m_compose_msi_msg()
iommu/dma-iommu: Split iommu_dma_map_msi_msg() in two parts
genirq/msi: Add a new field in msi_desc to store an IOMMU cookie
arm64: arch_k3: Enable interrupt controller drivers
irqchip/ti-sci-inta: Add msi domain support
soc: ti: Add MSI domain bus support for Interrupt Aggregator
irqchip/ti-sci-inta: Add support for Interrupt Aggregator driver
dt-bindings: irqchip: Introduce TISCI Interrupt Aggregator bindings
irqchip/ti-sci-intr: Add support for Interrupt Router driver
dt-bindings: irqchip: Introduce TISCI Interrupt router bindings
gpio: thunderx: Use the default parent apis for {request,release}_resources
genirq: Introduce irq_chip_{request,release}_resource_parent() apis
firmware: ti_sci: Add helper apis to manage resources
firmware: ti_sci: Add RM mapping table for am654
firmware: ti_sci: Add support for IRQ management
firmware: ti_sci: Add support for RM core ops
...
38 files changed, 2511 insertions, 229 deletions
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt b/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt index b56a02c10ae6..6f0cd31c1520 100644 --- a/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt +++ b/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt | |||
| @@ -24,7 +24,8 @@ relationship between the TI-SCI parent node to the child node. | |||
| 24 | 24 | ||
| 25 | Required properties: | 25 | Required properties: |
| 26 | ------------------- | 26 | ------------------- |
| 27 | - compatible: should be "ti,k2g-sci" | 27 | - compatible: should be "ti,k2g-sci" for TI 66AK2G SoC |
| 28 | should be "ti,am654-sci" for TI AM654 SoC | ||
| 28 | - mbox-names: | 29 | - mbox-names: |
| 29 | "rx" - Mailbox corresponding to receive path | 30 | "rx" - Mailbox corresponding to receive path |
| 30 | "tx" - Mailbox corresponding to transmit path | 31 | "tx" - Mailbox corresponding to transmit path |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt new file mode 100644 index 000000000000..7841cb099e13 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt | |||
| @@ -0,0 +1,66 @@ | |||
| 1 | Texas Instruments K3 Interrupt Aggregator | ||
| 2 | ========================================= | ||
| 3 | |||
| 4 | The Interrupt Aggregator (INTA) provides a centralized machine | ||
| 5 | which handles the termination of system events so that they can | ||
| 6 | be coherently processed by the host(s) in the system. A maximum | ||
| 7 | of 64 events can be mapped to a single interrupt. | ||
| 8 | |||
| 9 | |||
| 10 | Interrupt Aggregator | ||
| 11 | +-----------------------------------------+ | ||
| 12 | | Intmap VINT | | ||
| 13 | | +--------------+ +------------+ | | ||
| 14 | m ------>| | vint | bit | | 0 |.....|63| vint0 | | ||
| 15 | . | +--------------+ +------------+ | +------+ | ||
| 16 | . | . . | | HOST | | ||
| 17 | Globalevents ------>| . . |------>| IRQ | | ||
| 18 | . | . . | | CTRL | | ||
| 19 | . | . . | +------+ | ||
| 20 | n ------>| +--------------+ +------------+ | | ||
| 21 | | | vint | bit | | 0 |.....|63| vintx | | ||
| 22 | | +--------------+ +------------+ | | ||
| 23 | | | | ||
| 24 | +-----------------------------------------+ | ||
| 25 | |||
| 26 | Configuration of these Intmap registers that map global events to vint is done | ||
| 27 | by a system controller (like the Device Memory and Security Controller on K3 | ||
| 28 | AM654 SoC). The driver should request the system controller for the range | ||
| 29 | of global events and vints assigned to the requesting host. Management | ||
| 30 | of these requested resources is handled by the driver, which requests the | ||
| 31 | system controller to map a specific global event to a (vint, bit) pair. | ||
| 32 | |||
| 33 | Communication between the host processor running an OS and the system | ||
| 34 | controller happens through a protocol called TI System Control Interface | ||
| 35 | (TISCI protocol). For more details refer: | ||
| 36 | Documentation/devicetree/bindings/arm/keystone/ti,sci.txt | ||
| 37 | |||
| 38 | TISCI Interrupt Aggregator Node: | ||
| 39 | ------------------------------- | ||
| 40 | - compatible: Must be "ti,sci-inta". | ||
| 41 | - reg: Should contain registers location and length. | ||
| 42 | - interrupt-controller: Identifies the node as an interrupt controller | ||
| 43 | - msi-controller: Identifies the node as an MSI controller. | ||
| 44 | - interrupt-parent: phandle of irq parent. | ||
| 45 | - ti,sci: Phandle to TI-SCI compatible System controller node. | ||
| 46 | - ti,sci-dev-id: TISCI device ID of the Interrupt Aggregator. | ||
| 47 | - ti,sci-rm-range-vint: Array of TISCI subtype ids representing the range of | ||
| 48 | vints (INTA outputs) within this INTA that are | ||
| 49 | assigned to the requesting host context. | ||
| 50 | - ti,sci-rm-range-global-event: Array of TISCI subtype ids representing the | ||
| 51 | range of global events reaching this IA that are | ||
| 52 | assigned to the requesting host context. | ||
| 53 | |||
| 54 | Example: | ||
| 55 | -------- | ||
| 56 | main_udmass_inta: interrupt-controller@33d00000 { | ||
| 57 | compatible = "ti,sci-inta"; | ||
| 58 | reg = <0x0 0x33d00000 0x0 0x100000>; | ||
| 59 | interrupt-controller; | ||
| 60 | msi-controller; | ||
| 61 | interrupt-parent = <&main_navss_intr>; | ||
| 62 | ti,sci = <&dmsc>; | ||
| 63 | ti,sci-dev-id = <179>; | ||
| 64 | ti,sci-rm-range-vint = <0x0>; | ||
| 65 | ti,sci-rm-range-global-event = <0x1>; | ||
| 66 | }; | ||
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt new file mode 100644 index 000000000000..1a8718f8855d --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | Texas Instruments K3 Interrupt Router | ||
| 2 | ===================================== | ||
| 3 | |||
| 4 | The Interrupt Router (INTR) module provides a mechanism to mux M | ||
| 5 | interrupt inputs to N interrupt outputs, where any of the M inputs can be | ||
| 6 | routed to any of the N outputs. An Interrupt Router handles either edge | ||
| 7 | triggered or level triggered interrupts; the trigger type is fixed in hardware. | ||
| 8 | |||
| 9 | Interrupt Router | ||
| 10 | +----------------------+ | ||
| 11 | | Inputs Outputs | | ||
| 12 | +-------+ | +------+ +-----+ | | ||
| 13 | | GPIO |----------->| | irq0 | | 0 | | Host IRQ | ||
| 14 | +-------+ | +------+ +-----+ | controller | ||
| 15 | | . . | +-------+ | ||
| 16 | +-------+ | . . |----->| IRQ | | ||
| 17 | | INTA |----------->| . . | +-------+ | ||
| 18 | +-------+ | . +-----+ | | ||
| 19 | | +------+ | N | | | ||
| 20 | | | irqM | +-----+ | | ||
| 21 | | +------+ | | ||
| 22 | | | | ||
| 23 | +----------------------+ | ||
| 24 | |||
| 25 | There is one register per output (MUXCNTL_N) that controls the selection. | ||
| 26 | Configuration of these MUXCNTL_N registers is done by a system controller | ||
| 27 | (like the Device Memory and Security Controller on K3 AM654 SoC). The system | ||
| 28 | controller keeps track of the used and unused registers within the Router. | ||
| 29 | The driver should request the system controller for the range of GIC IRQs | ||
| 30 | assigned to the requesting host. It is the driver's responsibility to keep | ||
| 31 | track of Host IRQs. | ||
| 32 | |||
| 33 | Communication between the host processor running an OS and the system | ||
| 34 | controller happens through a protocol called TI System Control Interface | ||
| 35 | (TISCI protocol). For more details refer: | ||
| 36 | Documentation/devicetree/bindings/arm/keystone/ti,sci.txt | ||
| 37 | |||
| 38 | TISCI Interrupt Router Node: | ||
| 39 | ---------------------------- | ||
| 40 | Required Properties: | ||
| 41 | - compatible: Must be "ti,sci-intr". | ||
| 42 | - ti,intr-trigger-type: Should be one of the following: | ||
| 43 | 1: If intr supports edge triggered interrupts. | ||
| 44 | 4: If intr supports level triggered interrupts. | ||
| 45 | - interrupt-controller: Identifies the node as an interrupt controller | ||
| 46 | - #interrupt-cells: Specifies the number of cells needed to encode an | ||
| 47 | interrupt source. The value should be 2. | ||
| 48 | First cell should contain the TISCI device ID of the source | ||
| 49 | Second cell should contain the interrupt source offset | ||
| 50 | within the device. | ||
| 51 | - ti,sci: Phandle to TI-SCI compatible System controller node. | ||
| 52 | - ti,sci-dst-id: TISCI device ID of the destination IRQ controller. | ||
| 53 | - ti,sci-rm-range-girq: Array of TISCI subtype ids representing the host irqs | ||
| 54 | assigned to this interrupt router. Each subtype id | ||
| 55 | corresponds to a range of host irqs. | ||
| 56 | |||
| 57 | For more details on TISCI IRQ resource management refer: | ||
| 58 | http://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html | ||
| 59 | |||
| 60 | Example: | ||
| 61 | -------- | ||
| 62 | The following example demonstrates both the interrupt router node and the | ||
| 63 | consumer node (main gpio) on the AM654 SoC: | ||
| 64 | |||
| 65 | main_intr: interrupt-controller0 { | ||
| 66 | compatible = "ti,sci-intr"; | ||
| 67 | ti,intr-trigger-type = <1>; | ||
| 68 | interrupt-controller; | ||
| 69 | interrupt-parent = <&gic500>; | ||
| 70 | #interrupt-cells = <2>; | ||
| 71 | ti,sci = <&dmsc>; | ||
| 72 | ti,sci-dst-id = <56>; | ||
| 73 | ti,sci-rm-range-girq = <0x1>; | ||
| 74 | }; | ||
| 75 | |||
| 76 | main_gpio0: gpio@600000 { | ||
| 77 | ... | ||
| 78 | interrupt-parent = <&main_intr>; | ||
| 79 | interrupts = <57 256>, <57 257>, <57 258>, | ||
| 80 | <57 259>, <57 260>, <57 261>; | ||
| 81 | ... | ||
| 82 | }; | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 9cc6767e1b12..4f4dd9413da7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -15547,6 +15547,12 @@ F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt | |||
| 15547 | F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt | 15547 | F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt |
| 15548 | F: drivers/clk/keystone/sci-clk.c | 15548 | F: drivers/clk/keystone/sci-clk.c |
| 15549 | F: drivers/reset/reset-ti-sci.c | 15549 | F: drivers/reset/reset-ti-sci.c |
| 15550 | F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt | ||
| 15551 | F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt | ||
| 15552 | F: drivers/irqchip/irq-ti-sci-intr.c | ||
| 15553 | F: drivers/irqchip/irq-ti-sci-inta.c | ||
| 15554 | F: include/linux/soc/ti/ti_sci_inta_msi.h | ||
| 15555 | F: drivers/soc/ti/ti_sci_inta_msi.c | ||
| 15550 | 15556 | ||
| 15551 | Texas Instruments ASoC drivers | 15557 | Texas Instruments ASoC drivers |
| 15552 | M: Peter Ujfalusi <peter.ujfalusi@ti.com> | 15558 | M: Peter Ujfalusi <peter.ujfalusi@ti.com> |
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 1001410c4782..42eca656faa8 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms | |||
| @@ -87,6 +87,11 @@ config ARCH_EXYNOS | |||
| 87 | config ARCH_K3 | 87 | config ARCH_K3 |
| 88 | bool "Texas Instruments Inc. K3 multicore SoC architecture" | 88 | bool "Texas Instruments Inc. K3 multicore SoC architecture" |
| 89 | select PM_GENERIC_DOMAINS if PM | 89 | select PM_GENERIC_DOMAINS if PM |
| 90 | select MAILBOX | ||
| 91 | select TI_MESSAGE_MANAGER | ||
| 92 | select TI_SCI_PROTOCOL | ||
| 93 | select TI_SCI_INTR_IRQCHIP | ||
| 94 | select TI_SCI_INTA_IRQCHIP | ||
| 90 | help | 95 | help |
| 91 | This enables support for Texas Instruments' K3 multicore SoC | 96 | This enables support for Texas Instruments' K3 multicore SoC |
| 92 | architecture. | 97 | architecture. |
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 3fbbb61012c4..ef93406ace1b 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c | |||
| @@ -65,18 +65,36 @@ struct ti_sci_xfers_info { | |||
| 65 | }; | 65 | }; |
| 66 | 66 | ||
| 67 | /** | 67 | /** |
| 68 | * struct ti_sci_rm_type_map - Structure mapping TISCI device IDs to | ||
| 69 | * resource management (RM) types. | ||
| 70 | * @dev_id: TISCI device ID | ||
| 71 | * @type: Corresponding id as identified by TISCI RM. | ||
| 72 | * | ||
| 73 | * Note: This is used only as a workaround for using the RM range APIs | ||
| 74 | * on the AM654 SoC. For future SoCs, dev_id will be used as the type | ||
| 75 | * for the RM range APIs. In order to maintain ABI backward compatibility, | ||
| 76 | * the type is not being changed for the AM654 SoC. | ||
| 77 | */ | ||
| 78 | struct ti_sci_rm_type_map { | ||
| 79 | u32 dev_id; | ||
| 80 | u16 type; | ||
| 81 | }; | ||
| 82 | |||
| 83 | /** | ||
| 68 | * struct ti_sci_desc - Description of SoC integration | 84 | * struct ti_sci_desc - Description of SoC integration |
| 69 | * @default_host_id: Host identifier representing the compute entity | 85 | * @default_host_id: Host identifier representing the compute entity |
| 70 | * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) | 86 | * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) |
| 71 | * @max_msgs: Maximum number of messages that can be pending | 87 | * @max_msgs: Maximum number of messages that can be pending |
| 72 | * simultaneously in the system | 88 | * simultaneously in the system |
| 73 | * @max_msg_size: Maximum size of data per message that can be handled. | 89 | * @max_msg_size: Maximum size of data per message that can be handled. |
| 90 | * @rm_type_map: RM resource type mapping structure. | ||
| 74 | */ | 91 | */ |
| 75 | struct ti_sci_desc { | 92 | struct ti_sci_desc { |
| 76 | u8 default_host_id; | 93 | u8 default_host_id; |
| 77 | int max_rx_timeout_ms; | 94 | int max_rx_timeout_ms; |
| 78 | int max_msgs; | 95 | int max_msgs; |
| 79 | int max_msg_size; | 96 | int max_msg_size; |
| 97 | struct ti_sci_rm_type_map *rm_type_map; | ||
| 80 | }; | 98 | }; |
| 81 | 99 | ||
| 82 | /** | 100 | /** |
| @@ -1600,6 +1618,392 @@ fail: | |||
| 1600 | return ret; | 1618 | return ret; |
| 1601 | } | 1619 | } |
| 1602 | 1620 | ||
| 1621 | static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id, | ||
| 1622 | u16 *type) | ||
| 1623 | { | ||
| 1624 | struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map; | ||
| 1625 | bool found = false; | ||
| 1626 | int i; | ||
| 1627 | |||
| 1628 | /* If map is not provided then assume dev_id is used as type */ | ||
| 1629 | if (!rm_type_map) { | ||
| 1630 | *type = dev_id; | ||
| 1631 | return 0; | ||
| 1632 | } | ||
| 1633 | |||
| 1634 | for (i = 0; rm_type_map[i].dev_id; i++) { | ||
| 1635 | if (rm_type_map[i].dev_id == dev_id) { | ||
| 1636 | *type = rm_type_map[i].type; | ||
| 1637 | found = true; | ||
| 1638 | break; | ||
| 1639 | } | ||
| 1640 | } | ||
| 1641 | |||
| 1642 | if (!found) | ||
| 1643 | return -EINVAL; | ||
| 1644 | |||
| 1645 | return 0; | ||
| 1646 | } | ||
| 1647 | |||
| 1648 | /** | ||
| 1649 | * ti_sci_get_resource_range - Helper to get a range of resources assigned | ||
| 1650 | * to a host. Resource is uniquely identified by | ||
| 1651 | * type and subtype. | ||
| 1652 | * @handle: Pointer to TISCI handle. | ||
| 1653 | * @dev_id: TISCI device ID. | ||
| 1654 | * @subtype: Resource assignment subtype that is being requested | ||
| 1655 | * from the given device. | ||
| 1656 | * @s_host: Host processor ID to which the resources are allocated | ||
| 1657 | * @range_start: Start index of the resource range | ||
| 1658 | * @range_num: Number of resources in the range | ||
| 1659 | * | ||
| 1660 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1661 | */ | ||
| 1662 | static int ti_sci_get_resource_range(const struct ti_sci_handle *handle, | ||
| 1663 | u32 dev_id, u8 subtype, u8 s_host, | ||
| 1664 | u16 *range_start, u16 *range_num) | ||
| 1665 | { | ||
| 1666 | struct ti_sci_msg_resp_get_resource_range *resp; | ||
| 1667 | struct ti_sci_msg_req_get_resource_range *req; | ||
| 1668 | struct ti_sci_xfer *xfer; | ||
| 1669 | struct ti_sci_info *info; | ||
| 1670 | struct device *dev; | ||
| 1671 | u16 type; | ||
| 1672 | int ret = 0; | ||
| 1673 | |||
| 1674 | if (IS_ERR(handle)) | ||
| 1675 | return PTR_ERR(handle); | ||
| 1676 | if (!handle) | ||
| 1677 | return -EINVAL; | ||
| 1678 | |||
| 1679 | info = handle_to_ti_sci_info(handle); | ||
| 1680 | dev = info->dev; | ||
| 1681 | |||
| 1682 | xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE, | ||
| 1683 | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, | ||
| 1684 | sizeof(*req), sizeof(*resp)); | ||
| 1685 | if (IS_ERR(xfer)) { | ||
| 1686 | ret = PTR_ERR(xfer); | ||
| 1687 | dev_err(dev, "Message alloc failed(%d)\n", ret); | ||
| 1688 | return ret; | ||
| 1689 | } | ||
| 1690 | |||
| 1691 | ret = ti_sci_get_resource_type(info, dev_id, &type); | ||
| 1692 | if (ret) { | ||
| 1693 | dev_err(dev, "rm type lookup failed for %u\n", dev_id); | ||
| 1694 | goto fail; | ||
| 1695 | } | ||
| 1696 | |||
| 1697 | req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf; | ||
| 1698 | req->secondary_host = s_host; | ||
| 1699 | req->type = type & MSG_RM_RESOURCE_TYPE_MASK; | ||
| 1700 | req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK; | ||
| 1701 | |||
| 1702 | ret = ti_sci_do_xfer(info, xfer); | ||
| 1703 | if (ret) { | ||
| 1704 | dev_err(dev, "Mbox send fail %d\n", ret); | ||
| 1705 | goto fail; | ||
| 1706 | } | ||
| 1707 | |||
| 1708 | resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf; | ||
| 1709 | |||
| 1710 | if (!ti_sci_is_response_ack(resp)) { | ||
| 1711 | ret = -ENODEV; | ||
| 1712 | } else if (!resp->range_start && !resp->range_num) { | ||
| 1713 | ret = -ENODEV; | ||
| 1714 | } else { | ||
| 1715 | *range_start = resp->range_start; | ||
| 1716 | *range_num = resp->range_num; | ||
| 1717 | } | ||
| 1718 | |||
| 1719 | fail: | ||
| 1720 | ti_sci_put_one_xfer(&info->minfo, xfer); | ||
| 1721 | |||
| 1722 | return ret; | ||
| 1723 | } | ||
| 1724 | |||
| 1725 | /** | ||
| 1726 | * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the | ||
| 1727 | * host that is the same as the TI-SCI interface host. | ||
| 1728 | * @handle: Pointer to TISCI handle. | ||
| 1729 | * @dev_id: TISCI device ID. | ||
| 1730 | * @subtype: Resource assignment subtype that is being requested | ||
| 1731 | * from the given device. | ||
| 1732 | * @range_start: Start index of the resource range | ||
| 1733 | * @range_num: Number of resources in the range | ||
| 1734 | * | ||
| 1735 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1736 | */ | ||
| 1737 | static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle, | ||
| 1738 | u32 dev_id, u8 subtype, | ||
| 1739 | u16 *range_start, u16 *range_num) | ||
| 1740 | { | ||
| 1741 | return ti_sci_get_resource_range(handle, dev_id, subtype, | ||
| 1742 | TI_SCI_IRQ_SECONDARY_HOST_INVALID, | ||
| 1743 | range_start, range_num); | ||
| 1744 | } | ||
| 1745 | |||
| 1746 | /** | ||
| 1747 | * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources | ||
| 1748 | * assigned to a specified host. | ||
| 1749 | * @handle: Pointer to TISCI handle. | ||
| 1750 | * @dev_id: TISCI device ID. | ||
| 1751 | * @subtype: Resource assignment subtype that is being requested | ||
| 1752 | * from the given device. | ||
| 1753 | * @s_host: Host processor ID to which the resources are allocated | ||
| 1754 | * @range_start: Start index of the resource range | ||
| 1755 | * @range_num: Number of resources in the range | ||
| 1756 | * | ||
| 1757 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1758 | */ | ||
| 1759 | static | ||
| 1760 | int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle, | ||
| 1761 | u32 dev_id, u8 subtype, u8 s_host, | ||
| 1762 | u16 *range_start, u16 *range_num) | ||
| 1763 | { | ||
| 1764 | return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, | ||
| 1765 | range_start, range_num); | ||
| 1766 | } | ||
| 1767 | |||
| 1768 | /** | ||
| 1769 | * ti_sci_manage_irq() - Helper api to configure/release the irq route between | ||
| 1770 | * the requested source and destination | ||
| 1771 | * @handle: Pointer to TISCI handle. | ||
| 1772 | * @valid_params: Bit fields defining the validity of certain params | ||
| 1773 | * @src_id: Device ID of the IRQ source | ||
| 1774 | * @src_index: IRQ source index within the source device | ||
| 1775 | * @dst_id: Device ID of the IRQ destination | ||
| 1776 | * @dst_host_irq: IRQ number of the destination device | ||
| 1777 | * @ia_id: Device ID of the IA, if the IRQ flows through this IA | ||
| 1778 | * @vint: Virtual interrupt to be used within the IA | ||
| 1779 | * @global_event: Global event number to be used for the requesting event | ||
| 1780 | * @vint_status_bit: Virtual interrupt status bit to be used for the event | ||
| 1781 | * @s_host: Secondary host ID for which the irq/event is being | ||
| 1782 | * requested. | ||
| 1783 | * @type: Request type irq set or release. | ||
| 1784 | * | ||
| 1785 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1786 | */ | ||
| 1787 | static int ti_sci_manage_irq(const struct ti_sci_handle *handle, | ||
| 1788 | u32 valid_params, u16 src_id, u16 src_index, | ||
| 1789 | u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint, | ||
| 1790 | u16 global_event, u8 vint_status_bit, u8 s_host, | ||
| 1791 | u16 type) | ||
| 1792 | { | ||
| 1793 | struct ti_sci_msg_req_manage_irq *req; | ||
| 1794 | struct ti_sci_msg_hdr *resp; | ||
| 1795 | struct ti_sci_xfer *xfer; | ||
| 1796 | struct ti_sci_info *info; | ||
| 1797 | struct device *dev; | ||
| 1798 | int ret = 0; | ||
| 1799 | |||
| 1800 | if (IS_ERR(handle)) | ||
| 1801 | return PTR_ERR(handle); | ||
| 1802 | if (!handle) | ||
| 1803 | return -EINVAL; | ||
| 1804 | |||
| 1805 | info = handle_to_ti_sci_info(handle); | ||
| 1806 | dev = info->dev; | ||
| 1807 | |||
| 1808 | xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, | ||
| 1809 | sizeof(*req), sizeof(*resp)); | ||
| 1810 | if (IS_ERR(xfer)) { | ||
| 1811 | ret = PTR_ERR(xfer); | ||
| 1812 | dev_err(dev, "Message alloc failed(%d)\n", ret); | ||
| 1813 | return ret; | ||
| 1814 | } | ||
| 1815 | req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf; | ||
| 1816 | req->valid_params = valid_params; | ||
| 1817 | req->src_id = src_id; | ||
| 1818 | req->src_index = src_index; | ||
| 1819 | req->dst_id = dst_id; | ||
| 1820 | req->dst_host_irq = dst_host_irq; | ||
| 1821 | req->ia_id = ia_id; | ||
| 1822 | req->vint = vint; | ||
| 1823 | req->global_event = global_event; | ||
| 1824 | req->vint_status_bit = vint_status_bit; | ||
| 1825 | req->secondary_host = s_host; | ||
| 1826 | |||
| 1827 | ret = ti_sci_do_xfer(info, xfer); | ||
| 1828 | if (ret) { | ||
| 1829 | dev_err(dev, "Mbox send fail %d\n", ret); | ||
| 1830 | goto fail; | ||
| 1831 | } | ||
| 1832 | |||
| 1833 | resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; | ||
| 1834 | |||
| 1835 | ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; | ||
| 1836 | |||
| 1837 | fail: | ||
| 1838 | ti_sci_put_one_xfer(&info->minfo, xfer); | ||
| 1839 | |||
| 1840 | return ret; | ||
| 1841 | } | ||
| 1842 | |||
| 1843 | /** | ||
| 1844 | * ti_sci_set_irq() - Helper api to configure the irq route between the | ||
| 1845 | * requested source and destination | ||
| 1846 | * @handle: Pointer to TISCI handle. | ||
| 1847 | * @valid_params: Bit fields defining the validity of certain params | ||
| 1848 | * @src_id: Device ID of the IRQ source | ||
| 1849 | * @src_index: IRQ source index within the source device | ||
| 1850 | * @dst_id: Device ID of the IRQ destination | ||
| 1851 | * @dst_host_irq: IRQ number of the destination device | ||
| 1852 | * @ia_id: Device ID of the IA, if the IRQ flows through this IA | ||
| 1853 | * @vint: Virtual interrupt to be used within the IA | ||
| 1854 | * @global_event: Global event number to be used for the requesting event | ||
| 1855 | * @vint_status_bit: Virtual interrupt status bit to be used for the event | ||
| 1856 | * @s_host: Secondary host ID for which the irq/event is being | ||
| 1857 | * requested. | ||
| 1858 | * | ||
| 1859 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1860 | */ | ||
| 1861 | static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params, | ||
| 1862 | u16 src_id, u16 src_index, u16 dst_id, | ||
| 1863 | u16 dst_host_irq, u16 ia_id, u16 vint, | ||
| 1864 | u16 global_event, u8 vint_status_bit, u8 s_host) | ||
| 1865 | { | ||
| 1866 | pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n", | ||
| 1867 | __func__, valid_params, src_id, src_index, | ||
| 1868 | dst_id, dst_host_irq, ia_id, vint, global_event, | ||
| 1869 | vint_status_bit); | ||
| 1870 | |||
| 1871 | return ti_sci_manage_irq(handle, valid_params, src_id, src_index, | ||
| 1872 | dst_id, dst_host_irq, ia_id, vint, | ||
| 1873 | global_event, vint_status_bit, s_host, | ||
| 1874 | TI_SCI_MSG_SET_IRQ); | ||
| 1875 | } | ||
| 1876 | |||
| 1877 | /** | ||
| 1878 | * ti_sci_free_irq() - Helper api to free the irq route between the | ||
| 1879 | * requested source and destination | ||
| 1880 | * @handle: Pointer to TISCI handle. | ||
| 1881 | * @valid_params: Bit fields defining the validity of certain params | ||
| 1882 | * @src_id: Device ID of the IRQ source | ||
| 1883 | * @src_index: IRQ source index within the source device | ||
| 1884 | * @dst_id: Device ID of the IRQ destination | ||
| 1885 | * @dst_host_irq: IRQ number of the destination device | ||
| 1886 | * @ia_id: Device ID of the IA, if the IRQ flows through this IA | ||
| 1887 | * @vint: Virtual interrupt to be used within the IA | ||
| 1888 | * @global_event: Global event number to be used for the requesting event | ||
| 1889 | * @vint_status_bit: Virtual interrupt status bit to be used for the event | ||
| 1890 | * @s_host: Secondary host ID for which the irq/event is being | ||
| 1891 | * requested. | ||
| 1892 | * | ||
| 1893 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1894 | */ | ||
| 1895 | static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params, | ||
| 1896 | u16 src_id, u16 src_index, u16 dst_id, | ||
| 1897 | u16 dst_host_irq, u16 ia_id, u16 vint, | ||
| 1898 | u16 global_event, u8 vint_status_bit, u8 s_host) | ||
| 1899 | { | ||
| 1900 | pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n", | ||
| 1901 | __func__, valid_params, src_id, src_index, | ||
| 1902 | dst_id, dst_host_irq, ia_id, vint, global_event, | ||
| 1903 | vint_status_bit); | ||
| 1904 | |||
| 1905 | return ti_sci_manage_irq(handle, valid_params, src_id, src_index, | ||
| 1906 | dst_id, dst_host_irq, ia_id, vint, | ||
| 1907 | global_event, vint_status_bit, s_host, | ||
| 1908 | TI_SCI_MSG_FREE_IRQ); | ||
| 1909 | } | ||
| 1910 | |||
| 1911 | /** | ||
| 1912 | * ti_sci_cmd_set_irq() - Configure a host irq route between the requested | ||
| 1913 | * source and destination. | ||
| 1914 | * @handle: Pointer to TISCI handle. | ||
| 1915 | * @src_id: Device ID of the IRQ source | ||
| 1916 | * @src_index: IRQ source index within the source device | ||
| 1917 | * @dst_id: Device ID of the IRQ destination | ||
| 1918 | * @dst_host_irq: IRQ number of the destination device | ||
| 1919 | * @vint_irq: Boolean specifying if this interrupt belongs to | ||
| 1920 | * Interrupt Aggregator. | ||
| 1921 | * | ||
| 1922 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1923 | */ | ||
| 1924 | static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id, | ||
| 1925 | u16 src_index, u16 dst_id, u16 dst_host_irq) | ||
| 1926 | { | ||
| 1927 | u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; | ||
| 1928 | |||
| 1929 | return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id, | ||
| 1930 | dst_host_irq, 0, 0, 0, 0, 0); | ||
| 1931 | } | ||
| 1932 | |||
| 1933 | /** | ||
| 1934 | * ti_sci_cmd_set_event_map() - Configure an event based irq route between the | ||
| 1935 | * requested source and Interrupt Aggregator. | ||
| 1936 | * @handle: Pointer to TISCI handle. | ||
| 1937 | * @src_id: Device ID of the IRQ source | ||
| 1938 | * @src_index: IRQ source index within the source device | ||
| 1939 | * @ia_id: Device ID of the IA, if the IRQ flows through this IA | ||
| 1940 | * @vint: Virtual interrupt to be used within the IA | ||
| 1941 | * @global_event: Global event number to be used for the requesting event | ||
| 1942 | * @vint_status_bit: Virtual interrupt status bit to be used for the event | ||
| 1943 | * | ||
| 1944 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1945 | */ | ||
| 1946 | static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle, | ||
| 1947 | u16 src_id, u16 src_index, u16 ia_id, | ||
| 1948 | u16 vint, u16 global_event, | ||
| 1949 | u8 vint_status_bit) | ||
| 1950 | { | ||
| 1951 | u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID | | ||
| 1952 | MSG_FLAG_GLB_EVNT_VALID | | ||
| 1953 | MSG_FLAG_VINT_STS_BIT_VALID; | ||
| 1954 | |||
| 1955 | return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0, | ||
| 1956 | ia_id, vint, global_event, vint_status_bit, 0); | ||
| 1957 | } | ||
| 1958 | |||
| 1959 | /** | ||
| 1960 | * ti_sci_cmd_free_irq() - Free a host irq route between the requested | ||
| 1961 | * source and destination. | ||
| 1962 | * @handle: Pointer to TISCI handle. | ||
| 1963 | * @src_id: Device ID of the IRQ source | ||
| 1964 | * @src_index: IRQ source index within the source device | ||
| 1965 | * @dst_id: Device ID of the IRQ destination | ||
| 1966 | * @dst_host_irq: IRQ number of the destination device | ||
| 1967 | * @vint_irq: Boolean specifying if this interrupt belongs to | ||
| 1968 | * Interrupt Aggregator. | ||
| 1969 | * | ||
| 1970 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1971 | */ | ||
| 1972 | static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id, | ||
| 1973 | u16 src_index, u16 dst_id, u16 dst_host_irq) | ||
| 1974 | { | ||
| 1975 | u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID; | ||
| 1976 | |||
| 1977 | return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id, | ||
| 1978 | dst_host_irq, 0, 0, 0, 0, 0); | ||
| 1979 | } | ||
| 1980 | |||
| 1981 | /** | ||
| 1982 | * ti_sci_cmd_free_event_map() - Free an event map between the requested source | ||
| 1983 | * and Interrupt Aggregator. | ||
| 1984 | * @handle: Pointer to TISCI handle. | ||
| 1985 | * @src_id: Device ID of the IRQ source | ||
| 1986 | * @src_index: IRQ source index within the source device | ||
| 1987 | * @ia_id: Device ID of the IA, if the IRQ flows through this IA | ||
| 1988 | * @vint: Virtual interrupt to be used within the IA | ||
| 1989 | * @global_event: Global event number to be used for the requesting event | ||
| 1990 | * @vint_status_bit: Virtual interrupt status bit to be used for the event | ||
| 1991 | * | ||
| 1992 | * Return: 0 if all went fine, else return appropriate error. | ||
| 1993 | */ | ||
| 1994 | static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle, | ||
| 1995 | u16 src_id, u16 src_index, u16 ia_id, | ||
| 1996 | u16 vint, u16 global_event, | ||
| 1997 | u8 vint_status_bit) | ||
| 1998 | { | ||
| 1999 | u32 valid_params = MSG_FLAG_IA_ID_VALID | | ||
| 2000 | MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID | | ||
| 2001 | MSG_FLAG_VINT_STS_BIT_VALID; | ||
| 2002 | |||
| 2003 | return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0, | ||
| 2004 | ia_id, vint, global_event, vint_status_bit, 0); | ||
| 2005 | } | ||
| 2006 | |||
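For reference, a minimal sketch (not part of this diff) of how a client such as the new TI-SCI INTR irqchip might drive these IRQ ops once ti_sci_setup_ops() below plugs them into the handle; the wrapper name and parameters are illustrative assumptions:

    /* Illustrative client-side use of the new rm_irq_ops (names are assumed) */
    static int example_route_direct_irq(const struct ti_sci_handle *handle,
                                        u16 src_dev, u16 src_index,
                                        u16 gic_dev_id, u16 gic_irq)
    {
            const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;

            /* Direct device -> GIC route, no Interrupt Aggregator involved */
            return iops->set_irq(handle, src_dev, src_index,
                                 gic_dev_id, gic_irq);
    }

    /*
     * An event routed through an Interrupt Aggregator would instead use
     * iops->set_event_map(handle, src_dev, src_index, ia_id, vint,
     *                     global_event, vint_status_bit);
     * and iops->free_event_map() on teardown.
     */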
| 1603 | /* | 2007 | /* |
| 1604 | * ti_sci_setup_ops() - Setup the operations structures | 2008 | * ti_sci_setup_ops() - Setup the operations structures |
| 1605 | * @info: pointer to TISCI pointer | 2009 | * @info: pointer to TISCI pointer |
| @@ -1610,6 +2014,8 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) | |||
| 1610 | struct ti_sci_core_ops *core_ops = &ops->core_ops; | 2014 | struct ti_sci_core_ops *core_ops = &ops->core_ops; |
| 1611 | struct ti_sci_dev_ops *dops = &ops->dev_ops; | 2015 | struct ti_sci_dev_ops *dops = &ops->dev_ops; |
| 1612 | struct ti_sci_clk_ops *cops = &ops->clk_ops; | 2016 | struct ti_sci_clk_ops *cops = &ops->clk_ops; |
| 2017 | struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops; | ||
| 2018 | struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops; | ||
| 1613 | 2019 | ||
| 1614 | core_ops->reboot_device = ti_sci_cmd_core_reboot; | 2020 | core_ops->reboot_device = ti_sci_cmd_core_reboot; |
| 1615 | 2021 | ||
| @@ -1640,6 +2046,15 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) | |||
| 1640 | cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq; | 2046 | cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq; |
| 1641 | cops->set_freq = ti_sci_cmd_clk_set_freq; | 2047 | cops->set_freq = ti_sci_cmd_clk_set_freq; |
| 1642 | cops->get_freq = ti_sci_cmd_clk_get_freq; | 2048 | cops->get_freq = ti_sci_cmd_clk_get_freq; |
| 2049 | |||
| 2050 | rm_core_ops->get_range = ti_sci_cmd_get_resource_range; | ||
| 2051 | rm_core_ops->get_range_from_shost = | ||
| 2052 | ti_sci_cmd_get_resource_range_from_shost; | ||
| 2053 | |||
| 2054 | iops->set_irq = ti_sci_cmd_set_irq; | ||
| 2055 | iops->set_event_map = ti_sci_cmd_set_event_map; | ||
| 2056 | iops->free_irq = ti_sci_cmd_free_irq; | ||
| 2057 | iops->free_event_map = ti_sci_cmd_free_event_map; | ||
| 1643 | } | 2058 | } |
| 1644 | 2059 | ||
| 1645 | /** | 2060 | /** |
| @@ -1764,6 +2179,219 @@ const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) | |||
| 1764 | } | 2179 | } |
| 1765 | EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle); | 2180 | EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle); |
| 1766 | 2181 | ||
| 2182 | /** | ||
| 2183 | * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle | ||
| 2184 | * @np: device node | ||
| 2185 | * @property: property name containing phandle on TISCI node | ||
| 2186 | * | ||
| 2187 | * NOTE: The function does not track individual clients of the framework | ||
| 2188 | * and is expected to be maintained by caller of TI SCI protocol library. | ||
| 2189 | * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle | ||
| 2190 | * Return: pointer to handle if successful, else: | ||
| 2191 | * -EPROBE_DEFER if the instance is not ready | ||
| 2192 | * -ENODEV if the required node handler is missing | ||
| 2193 | * -EINVAL if invalid conditions are encountered. | ||
| 2194 | */ | ||
| 2195 | const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, | ||
| 2196 | const char *property) | ||
| 2197 | { | ||
| 2198 | struct ti_sci_handle *handle = NULL; | ||
| 2199 | struct device_node *ti_sci_np; | ||
| 2200 | struct ti_sci_info *info; | ||
| 2201 | struct list_head *p; | ||
| 2202 | |||
| 2203 | if (!np) { | ||
| 2204 | pr_err("I need a device pointer\n"); | ||
| 2205 | return ERR_PTR(-EINVAL); | ||
| 2206 | } | ||
| 2207 | |||
| 2208 | ti_sci_np = of_parse_phandle(np, property, 0); | ||
| 2209 | if (!ti_sci_np) | ||
| 2210 | return ERR_PTR(-ENODEV); | ||
| 2211 | |||
| 2212 | mutex_lock(&ti_sci_list_mutex); | ||
| 2213 | list_for_each(p, &ti_sci_list) { | ||
| 2214 | info = list_entry(p, struct ti_sci_info, node); | ||
| 2215 | if (ti_sci_np == info->dev->of_node) { | ||
| 2216 | handle = &info->handle; | ||
| 2217 | info->users++; | ||
| 2218 | break; | ||
| 2219 | } | ||
| 2220 | } | ||
| 2221 | mutex_unlock(&ti_sci_list_mutex); | ||
| 2222 | of_node_put(ti_sci_np); | ||
| 2223 | |||
| 2224 | if (!handle) | ||
| 2225 | return ERR_PTR(-EPROBE_DEFER); | ||
| 2226 | |||
| 2227 | return handle; | ||
| 2228 | } | ||
| 2229 | EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle); | ||
| 2230 | |||
| 2231 | /** | ||
| 2232 | * devm_ti_sci_get_by_phandle() - Managed get handle using phandle | ||
| 2233 | * @dev: Device pointer requesting TISCI handle | ||
| 2234 | * @property: property name containing phandle on TISCI node | ||
| 2235 | * | ||
| 2236 | * NOTE: This releases the handle once the device resources are | ||
| 2237 | * no longer needed. MUST NOT BE released with ti_sci_put_handle. | ||
| 2238 | * The function does not track individual clients of the framework | ||
| 2239 | * and is expected to be maintained by caller of TI SCI protocol library. | ||
| 2240 | * | ||
| 2241 | * Return: pointer to handle if successful, else a corresponding error pointer. | ||
| 2242 | */ | ||
| 2243 | const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, | ||
| 2244 | const char *property) | ||
| 2245 | { | ||
| 2246 | const struct ti_sci_handle *handle; | ||
| 2247 | const struct ti_sci_handle **ptr; | ||
| 2248 | |||
| 2249 | ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); | ||
| 2250 | if (!ptr) | ||
| 2251 | return ERR_PTR(-ENOMEM); | ||
| 2252 | handle = ti_sci_get_by_phandle(dev_of_node(dev), property); | ||
| 2253 | |||
| 2254 | if (!IS_ERR(handle)) { | ||
| 2255 | *ptr = handle; | ||
| 2256 | devres_add(dev, ptr); | ||
| 2257 | } else { | ||
| 2258 | devres_free(ptr); | ||
| 2259 | } | ||
| 2260 | |||
| 2261 | return handle; | ||
| 2262 | } | ||
| 2263 | EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle); | ||
| 2264 | |||
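A minimal sketch of the probe-time pattern the managed getter above enables for a client that references the controller through a "ti,sci" phandle (as in the binding examples earlier in this series); the probe function name is a placeholder:

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/soc/ti/ti_sci_protocol.h>

    static int example_probe(struct platform_device *pdev)
    {
            const struct ti_sci_handle *handle;

            /* Defers until the TI-SCI firmware driver has registered */
            handle = devm_ti_sci_get_by_phandle(&pdev->dev, "ti,sci");
            if (IS_ERR(handle))
                    return PTR_ERR(handle);

            /* handle->ops.* is now usable; devres drops the reference
             * automatically, so ti_sci_put_handle() must not be called. */
            return 0;
    }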
| 2265 | /** | ||
| 2266 | * ti_sci_get_free_resource() - Get a free resource from TISCI resource. | ||
| 2267 | * @res: Pointer to the TISCI resource | ||
| 2268 | * | ||
| 2269 | * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL. | ||
| 2270 | */ | ||
| 2271 | u16 ti_sci_get_free_resource(struct ti_sci_resource *res) | ||
| 2272 | { | ||
| 2273 | unsigned long flags; | ||
| 2274 | u16 set, free_bit; | ||
| 2275 | |||
| 2276 | raw_spin_lock_irqsave(&res->lock, flags); | ||
| 2277 | for (set = 0; set < res->sets; set++) { | ||
| 2278 | free_bit = find_first_zero_bit(res->desc[set].res_map, | ||
| 2279 | res->desc[set].num); | ||
| 2280 | if (free_bit != res->desc[set].num) { | ||
| 2281 | set_bit(free_bit, res->desc[set].res_map); | ||
| 2282 | raw_spin_unlock_irqrestore(&res->lock, flags); | ||
| 2283 | return res->desc[set].start + free_bit; | ||
| 2284 | } | ||
| 2285 | } | ||
| 2286 | raw_spin_unlock_irqrestore(&res->lock, flags); | ||
| 2287 | |||
| 2288 | return TI_SCI_RESOURCE_NULL; | ||
| 2289 | } | ||
| 2290 | EXPORT_SYMBOL_GPL(ti_sci_get_free_resource); | ||
| 2291 | |||
| 2292 | /** | ||
| 2293 | * ti_sci_release_resource() - Release a resource from TISCI resource. | ||
| 2294 | * @res: Pointer to the TISCI resource | ||
| 2295 | * @id: Resource id to be released. | ||
| 2296 | */ | ||
| 2297 | void ti_sci_release_resource(struct ti_sci_resource *res, u16 id) | ||
| 2298 | { | ||
| 2299 | unsigned long flags; | ||
| 2300 | u16 set; | ||
| 2301 | |||
| 2302 | raw_spin_lock_irqsave(&res->lock, flags); | ||
| 2303 | for (set = 0; set < res->sets; set++) { | ||
| 2304 | if (res->desc[set].start <= id && | ||
| 2305 | (res->desc[set].num + res->desc[set].start) > id) | ||
| 2306 | clear_bit(id - res->desc[set].start, | ||
| 2307 | res->desc[set].res_map); | ||
| 2308 | } | ||
| 2309 | raw_spin_unlock_irqrestore(&res->lock, flags); | ||
| 2310 | } | ||
| 2311 | EXPORT_SYMBOL_GPL(ti_sci_release_resource); | ||
| 2312 | |||
| 2313 | /** | ||
| 2314 | * ti_sci_get_num_resources() - Get the number of resources in TISCI resource | ||
| 2315 | * @res: Pointer to the TISCI resource | ||
| 2316 | * | ||
| 2317 | * Return: Total number of available resources. | ||
| 2318 | */ | ||
| 2319 | u32 ti_sci_get_num_resources(struct ti_sci_resource *res) | ||
| 2320 | { | ||
| 2321 | u32 set, count = 0; | ||
| 2322 | |||
| 2323 | for (set = 0; set < res->sets; set++) | ||
| 2324 | count += res->desc[set].num; | ||
| 2325 | |||
| 2326 | return count; | ||
| 2327 | } | ||
| 2328 | EXPORT_SYMBOL_GPL(ti_sci_get_num_resources); | ||
| 2329 | |||
| 2330 | /** | ||
| 2331 | * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device | ||
| 2332 | * @handle: TISCI handle | ||
| 2333 | * @dev: Device pointer to which the resource is assigned | ||
| 2334 | * @dev_id: TISCI device id to which the resource is assigned | ||
| 2335 | * @of_prop: property name by which the resources are represented | ||
| 2336 | * | ||
| 2337 | * Return: Pointer to ti_sci_resource if all went well else appropriate | ||
| 2338 | * error pointer. | ||
| 2339 | */ | ||
| 2340 | struct ti_sci_resource * | ||
| 2341 | devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, | ||
| 2342 | struct device *dev, u32 dev_id, char *of_prop) | ||
| 2343 | { | ||
| 2344 | struct ti_sci_resource *res; | ||
| 2345 | u32 resource_subtype; | ||
| 2346 | int i, ret; | ||
| 2347 | |||
| 2348 | res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); | ||
| 2349 | if (!res) | ||
| 2350 | return ERR_PTR(-ENOMEM); | ||
| 2351 | |||
| 2352 | res->sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop, | ||
| 2353 | sizeof(u32)); | ||
| 2354 | if (res->sets < 0) { | ||
| 2355 | dev_err(dev, "%s resource type ids not available\n", of_prop); | ||
| 2356 | return ERR_PTR(res->sets); | ||
| 2357 | } | ||
| 2358 | |||
| 2359 | res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc), | ||
| 2360 | GFP_KERNEL); | ||
| 2361 | if (!res->desc) | ||
| 2362 | return ERR_PTR(-ENOMEM); | ||
| 2363 | |||
| 2364 | for (i = 0; i < res->sets; i++) { | ||
| 2365 | ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i, | ||
| 2366 | &resource_subtype); | ||
| 2367 | if (ret) | ||
| 2368 | return ERR_PTR(-EINVAL); | ||
| 2369 | |||
| 2370 | ret = handle->ops.rm_core_ops.get_range(handle, dev_id, | ||
| 2371 | resource_subtype, | ||
| 2372 | &res->desc[i].start, | ||
| 2373 | &res->desc[i].num); | ||
| 2374 | if (ret) { | ||
| 2375 | dev_err(dev, "dev = %d subtype %d not allocated for this host\n", | ||
| 2376 | dev_id, resource_subtype); | ||
| 2377 | return ERR_PTR(ret); | ||
| 2378 | } | ||
| 2379 | |||
| 2380 | dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n", | ||
| 2381 | dev_id, resource_subtype, res->desc[i].start, | ||
| 2382 | res->desc[i].num); | ||
| 2383 | |||
| 2384 | res->desc[i].res_map = | ||
| 2385 | devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) * | ||
| 2386 | sizeof(*res->desc[i].res_map), GFP_KERNEL); | ||
| 2387 | if (!res->desc[i].res_map) | ||
| 2388 | return ERR_PTR(-ENOMEM); | ||
| 2389 | } | ||
| 2390 | raw_spin_lock_init(&res->lock); | ||
| 2391 | |||
| 2392 | return res; | ||
| 2393 | } | ||
| 2394 | |||
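A minimal sketch of how the resource helpers above are meant to be combined, roughly as the new INTR/INTA irqchip drivers do; the device ID and property name mirror the ti,sci-intr binding example earlier and are illustrative only:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/soc/ti/ti_sci_protocol.h>

    static int example_alloc_host_irq(const struct ti_sci_handle *handle,
                                      struct device *dev)
    {
            struct ti_sci_resource *girq_res;
            u16 girq;

            /* Ranges assigned to this host, read from "ti,sci-rm-range-girq" */
            girq_res = devm_ti_sci_get_of_resource(handle, dev, 56,
                                                   "ti,sci-rm-range-girq");
            if (IS_ERR(girq_res))
                    return PTR_ERR(girq_res);

            girq = ti_sci_get_free_resource(girq_res);
            if (girq == TI_SCI_RESOURCE_NULL)
                    return -EINVAL;

            /* ... program the route using girq, then on teardown: */
            ti_sci_release_resource(girq_res, girq);
            return 0;
    }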
| 1767 | static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, | 2395 | static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, |
| 1768 | void *cmd) | 2396 | void *cmd) |
| 1769 | { | 2397 | { |
| @@ -1784,10 +2412,33 @@ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { | |||
| 1784 | /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ | 2412 | /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ |
| 1785 | .max_msgs = 20, | 2413 | .max_msgs = 20, |
| 1786 | .max_msg_size = 64, | 2414 | .max_msg_size = 64, |
| 2415 | .rm_type_map = NULL, | ||
| 2416 | }; | ||
| 2417 | |||
| 2418 | static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = { | ||
| 2419 | {.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */ | ||
| 2420 | {.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */ | ||
| 2421 | {.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */ | ||
| 2422 | {.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */ | ||
| 2423 | {.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */ | ||
| 2424 | {.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */ | ||
| 2425 | {.dev_id = 0, .type = 0x000}, /* end of table */ | ||
| 2426 | }; | ||
| 2427 | |||
| 2428 | /* Description for AM654 */ | ||
| 2429 | static const struct ti_sci_desc ti_sci_pmmc_am654_desc = { | ||
| 2430 | .default_host_id = 12, | ||
| 2431 | /* Conservative duration */ | ||
| 2432 | .max_rx_timeout_ms = 10000, | ||
| 2433 | /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ | ||
| 2434 | .max_msgs = 20, | ||
| 2435 | .max_msg_size = 60, | ||
| 2436 | .rm_type_map = ti_sci_am654_rm_type_map, | ||
| 1787 | }; | 2437 | }; |
| 1788 | 2438 | ||
| 1789 | static const struct of_device_id ti_sci_of_match[] = { | 2439 | static const struct of_device_id ti_sci_of_match[] = { |
| 1790 | {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc}, | 2440 | {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc}, |
| 2441 | {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc}, | ||
| 1791 | { /* Sentinel */ }, | 2442 | { /* Sentinel */ }, |
| 1792 | }; | 2443 | }; |
| 1793 | MODULE_DEVICE_TABLE(of, ti_sci_of_match); | 2444 | MODULE_DEVICE_TABLE(of, ti_sci_of_match); |
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index 12bf316b68df..4983827151bf 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h | |||
| @@ -35,6 +35,13 @@ | |||
| 35 | #define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d | 35 | #define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d |
| 36 | #define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e | 36 | #define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e |
| 37 | 37 | ||
| 38 | /* Resource Management Requests */ | ||
| 39 | #define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500 | ||
| 40 | |||
| 41 | /* IRQ requests */ | ||
| 42 | #define TI_SCI_MSG_SET_IRQ 0x1000 | ||
| 43 | #define TI_SCI_MSG_FREE_IRQ 0x1001 | ||
| 44 | |||
| 38 | /** | 45 | /** |
| 39 | * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses | 46 | * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses |
| 40 | * @type: Type of messages: One of TI_SCI_MSG* values | 47 | * @type: Type of messages: One of TI_SCI_MSG* values |
| @@ -461,4 +468,99 @@ struct ti_sci_msg_resp_get_clock_freq { | |||
| 461 | u64 freq_hz; | 468 | u64 freq_hz; |
| 462 | } __packed; | 469 | } __packed; |
| 463 | 470 | ||
| 471 | #define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff | ||
| 472 | |||
| 473 | /** | ||
| 474 | * struct ti_sci_msg_req_get_resource_range - Request to get a host's assigned | ||
| 475 | * range of resources. | ||
| 476 | * @hdr: Generic Header | ||
| 477 | * @type: Unique resource assignment type | ||
| 478 | * @subtype: Resource assignment subtype within the resource type. | ||
| 479 | * @secondary_host: Host processing entity to which the resources are | ||
| 480 | * allocated. This is required only when the destination | ||
| 481 | * host id is different from the ti sci interface host id, | ||
| 482 | * else TI_SCI_IRQ_SECONDARY_HOST_INVALID can be passed. | ||
| 483 | * | ||
| 484 | * Request type is TI_SCI_MSG_GET_RESOURCE_RANGE. The response carries the | ||
| 485 | * requested resource range (struct ti_sci_msg_resp_get_resource_range). | ||
| 486 | */ | ||
| 487 | struct ti_sci_msg_req_get_resource_range { | ||
| 488 | struct ti_sci_msg_hdr hdr; | ||
| 489 | #define MSG_RM_RESOURCE_TYPE_MASK GENMASK(9, 0) | ||
| 490 | #define MSG_RM_RESOURCE_SUBTYPE_MASK GENMASK(5, 0) | ||
| 491 | u16 type; | ||
| 492 | u8 subtype; | ||
| 493 | u8 secondary_host; | ||
| 494 | } __packed; | ||
| 495 | |||
| 496 | /** | ||
| 497 | * struct ti_sci_msg_resp_get_resource_range - Response to resource get range. | ||
| 498 | * @hdr: Generic Header | ||
| 499 | * @range_start: Start index of the resource range. | ||
| 500 | * @range_num: Number of resources in the range. | ||
| 501 | * | ||
| 502 | * Response to request TI_SCI_MSG_GET_RESOURCE_RANGE. | ||
| 503 | */ | ||
| 504 | struct ti_sci_msg_resp_get_resource_range { | ||
| 505 | struct ti_sci_msg_hdr hdr; | ||
| 506 | u16 range_start; | ||
| 507 | u16 range_num; | ||
| 508 | } __packed; | ||
| 509 | |||
| 510 | /** | ||
| 511 | * struct ti_sci_msg_req_manage_irq - Request to configure/release the route | ||
| 512 | * between the dev and the host. | ||
| 513 | * @hdr: Generic Header | ||
| 514 | * @valid_params: Bit fields defining the validity of interrupt source | ||
| 515 | * parameters. If a bit is not set, then corresponding | ||
| 516 | * field is not valid and will not be used for route set. | ||
| 517 | * Bit field definitions: | ||
| 518 | * 0 - Valid bit for @dst_id | ||
| 519 | * 1 - Valid bit for @dst_host_irq | ||
| 520 | * 2 - Valid bit for @ia_id | ||
| 521 | * 3 - Valid bit for @vint | ||
| 522 | * 4 - Valid bit for @global_event | ||
| 523 | * 5 - Valid bit for @vint_status_bit_index | ||
| 524 | * 31 - Valid bit for @secondary_host | ||
| 525 | * @src_id: IRQ source peripheral ID. | ||
| 526 | * @src_index: IRQ source index within the peripheral | ||
| 527 | * @dst_id: IRQ Destination ID. Based on the architecture it can be | ||
| 528 | * IRQ controller or host processor ID. | ||
| 529 | * @dst_host_irq: IRQ number of the destination host IRQ controller | ||
| 530 | * @ia_id: Device ID of the interrupt aggregator in which the | ||
| 531 | * vint resides. | ||
| 532 | * @vint: Virtual interrupt number if the interrupt route | ||
| 533 | * is through an interrupt aggregator. | ||
| 534 | * @global_event: Global event that is to be mapped to interrupt | ||
| 535 | * aggregator virtual interrupt status bit. | ||
| 536 | * @vint_status_bit: Virtual interrupt status bit if the interrupt route | ||
| 537 | * utilizes an interrupt aggregator status bit. | ||
| 538 | * @secondary_host: Host ID of the IRQ destination computing entity. This is | ||
| 539 | * required only when destination host id is different | ||
| 540 | * from ti sci interface host id. | ||
| 541 | * | ||
| 542 | * Request type is TI_SCI_MSG_SET_IRQ or TI_SCI_MSG_FREE_IRQ. | ||
| 543 | * Response is generic ACK / NACK message. | ||
| 544 | */ | ||
| 545 | struct ti_sci_msg_req_manage_irq { | ||
| 546 | struct ti_sci_msg_hdr hdr; | ||
| 547 | #define MSG_FLAG_DST_ID_VALID TI_SCI_MSG_FLAG(0) | ||
| 548 | #define MSG_FLAG_DST_HOST_IRQ_VALID TI_SCI_MSG_FLAG(1) | ||
| 549 | #define MSG_FLAG_IA_ID_VALID TI_SCI_MSG_FLAG(2) | ||
| 550 | #define MSG_FLAG_VINT_VALID TI_SCI_MSG_FLAG(3) | ||
| 551 | #define MSG_FLAG_GLB_EVNT_VALID TI_SCI_MSG_FLAG(4) | ||
| 552 | #define MSG_FLAG_VINT_STS_BIT_VALID TI_SCI_MSG_FLAG(5) | ||
| 553 | #define MSG_FLAG_SHOST_VALID TI_SCI_MSG_FLAG(31) | ||
| 554 | u32 valid_params; | ||
| 555 | u16 src_id; | ||
| 556 | u16 src_index; | ||
| 557 | u16 dst_id; | ||
| 558 | u16 dst_host_irq; | ||
| 559 | u16 ia_id; | ||
| 560 | u16 vint; | ||
| 561 | u16 global_event; | ||
| 562 | u8 vint_status_bit; | ||
| 563 | u8 secondary_host; | ||
| 564 | } __packed; | ||
| 565 | |||
| 464 | #endif /* __TI_SCI_H */ | 566 | #endif /* __TI_SCI_H */ |
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c index 1306722faa5a..715371b5102a 100644 --- a/drivers/gpio/gpio-thunderx.c +++ b/drivers/gpio/gpio-thunderx.c | |||
| @@ -363,22 +363,16 @@ static int thunderx_gpio_irq_request_resources(struct irq_data *data) | |||
| 363 | { | 363 | { |
| 364 | struct thunderx_line *txline = irq_data_get_irq_chip_data(data); | 364 | struct thunderx_line *txline = irq_data_get_irq_chip_data(data); |
| 365 | struct thunderx_gpio *txgpio = txline->txgpio; | 365 | struct thunderx_gpio *txgpio = txline->txgpio; |
| 366 | struct irq_data *parent_data = data->parent_data; | ||
| 367 | int r; | 366 | int r; |
| 368 | 367 | ||
| 369 | r = gpiochip_lock_as_irq(&txgpio->chip, txline->line); | 368 | r = gpiochip_lock_as_irq(&txgpio->chip, txline->line); |
| 370 | if (r) | 369 | if (r) |
| 371 | return r; | 370 | return r; |
| 372 | 371 | ||
| 373 | if (parent_data && parent_data->chip->irq_request_resources) { | 372 | r = irq_chip_request_resources_parent(data); |
| 374 | r = parent_data->chip->irq_request_resources(parent_data); | 373 | if (r) |
| 375 | if (r) | 374 | gpiochip_unlock_as_irq(&txgpio->chip, txline->line); |
| 376 | goto error; | ||
| 377 | } | ||
| 378 | 375 | ||
| 379 | return 0; | ||
| 380 | error: | ||
| 381 | gpiochip_unlock_as_irq(&txgpio->chip, txline->line); | ||
| 382 | return r; | 376 | return r; |
| 383 | } | 377 | } |
| 384 | 378 | ||
| @@ -386,10 +380,8 @@ static void thunderx_gpio_irq_release_resources(struct irq_data *data) | |||
| 386 | { | 380 | { |
| 387 | struct thunderx_line *txline = irq_data_get_irq_chip_data(data); | 381 | struct thunderx_line *txline = irq_data_get_irq_chip_data(data); |
| 388 | struct thunderx_gpio *txgpio = txline->txgpio; | 382 | struct thunderx_gpio *txgpio = txline->txgpio; |
| 389 | struct irq_data *parent_data = data->parent_data; | ||
| 390 | 383 | ||
| 391 | if (parent_data && parent_data->chip->irq_release_resources) | 384 | irq_chip_release_resources_parent(data); |
| 392 | parent_data->chip->irq_release_resources(parent_data); | ||
| 393 | 385 | ||
| 394 | gpiochip_unlock_as_irq(&txgpio->chip, txline->line); | 386 | gpiochip_unlock_as_irq(&txgpio->chip, txline->line); |
| 395 | } | 387 | } |
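The two parent helpers used above are introduced elsewhere in this series (kernel/irq/chip.c, not part of this excerpt). A sketch of their likely shape, inferred from the open-coded parent handling that was just removed; treat it as an approximation rather than the exact upstream code:

    #include <linux/errno.h>
    #include <linux/irq.h>

    int irq_chip_request_resources_parent(struct irq_data *data)
    {
            data = data->parent_data;

            if (data->chip->irq_request_resources)
                    return data->chip->irq_request_resources(data);

            /* Assumed behaviour when the parent chip has no callback */
            return -ENOSYS;
    }

    void irq_chip_release_resources_parent(struct irq_data *data)
    {
            data = data->parent_data;
            if (data->chip->irq_release_resources)
                    data->chip->irq_release_resources(data);
    }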
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 15b831113ded..e559e43c8ac2 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
| @@ -94,6 +94,7 @@ config IOMMU_DMA | |||
| 94 | bool | 94 | bool |
| 95 | select IOMMU_API | 95 | select IOMMU_API |
| 96 | select IOMMU_IOVA | 96 | select IOMMU_IOVA |
| 97 | select IRQ_MSI_IOMMU | ||
| 97 | select NEED_SG_DMA_LENGTH | 98 | select NEED_SG_DMA_LENGTH |
| 98 | 99 | ||
| 99 | config FSL_PAMU | 100 | config FSL_PAMU |
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 5e898047c390..129c4badf9ae 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
| @@ -907,17 +907,18 @@ out_free_page: | |||
| 907 | return NULL; | 907 | return NULL; |
| 908 | } | 908 | } |
| 909 | 909 | ||
| 910 | void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) | 910 | int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) |
| 911 | { | 911 | { |
| 912 | struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq)); | 912 | struct device *dev = msi_desc_to_dev(desc); |
| 913 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); | 913 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
| 914 | struct iommu_dma_cookie *cookie; | 914 | struct iommu_dma_cookie *cookie; |
| 915 | struct iommu_dma_msi_page *msi_page; | 915 | struct iommu_dma_msi_page *msi_page; |
| 916 | phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo; | ||
| 917 | unsigned long flags; | 916 | unsigned long flags; |
| 918 | 917 | ||
| 919 | if (!domain || !domain->iova_cookie) | 918 | if (!domain || !domain->iova_cookie) { |
| 920 | return; | 919 | desc->iommu_cookie = NULL; |
| 920 | return 0; | ||
| 921 | } | ||
| 921 | 922 | ||
| 922 | cookie = domain->iova_cookie; | 923 | cookie = domain->iova_cookie; |
| 923 | 924 | ||
| @@ -930,19 +931,26 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) | |||
| 930 | msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); | 931 | msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); |
| 931 | spin_unlock_irqrestore(&cookie->msi_lock, flags); | 932 | spin_unlock_irqrestore(&cookie->msi_lock, flags); |
| 932 | 933 | ||
| 933 | if (WARN_ON(!msi_page)) { | 934 | msi_desc_set_iommu_cookie(desc, msi_page); |
| 934 | /* | 935 | |
| 935 | * We're called from a void callback, so the best we can do is | 936 | if (!msi_page) |
| 936 | * 'fail' by filling the message with obviously bogus values. | 937 | return -ENOMEM; |
| 937 | * Since we got this far due to an IOMMU being present, it's | 938 | return 0; |
| 938 | * not like the existing address would have worked anyway... | 939 | } |
| 939 | */ | 940 | |
| 940 | msg->address_hi = ~0U; | 941 | void iommu_dma_compose_msi_msg(struct msi_desc *desc, |
| 941 | msg->address_lo = ~0U; | 942 | struct msi_msg *msg) |
| 942 | msg->data = ~0U; | 943 | { |
| 943 | } else { | 944 | struct device *dev = msi_desc_to_dev(desc); |
| 944 | msg->address_hi = upper_32_bits(msi_page->iova); | 945 | const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
| 945 | msg->address_lo &= cookie_msi_granule(cookie) - 1; | 946 | const struct iommu_dma_msi_page *msi_page; |
| 946 | msg->address_lo += lower_32_bits(msi_page->iova); | 947 | |
| 947 | } | 948 | msi_page = msi_desc_get_iommu_cookie(desc); |
| 949 | |||
| 950 | if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) | ||
| 951 | return; | ||
| 952 | |||
| 953 | msg->address_hi = upper_32_bits(msi_page->iova); | ||
| 954 | msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; | ||
| 955 | msg->address_lo += lower_32_bits(msi_page->iova); | ||
| 948 | } | 956 | } |
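The dma-iommu.c hunk above splits the old iommu_dma_map_msi_msg() into two halves: iommu_dma_prepare_msi(), which may sleep, maps the doorbell page and caches it in the msi_desc, while iommu_dma_compose_msi_msg() only rewrites the address from that cached cookie and is safe in atomic context, which is what makes the path RT friendly. A minimal sketch of how an MSI irqchip driver consumes the pair, mirroring the gicv2m/MBI/ITS changes further down; my_msi_domain_alloc(), my_alloc_hwirqs(), my_compose_msi_msg() and DOORBELL_PA are illustrative placeholders, not symbols from this series:

	static int my_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				       unsigned int nr_irqs, void *args)
	{
		msi_alloc_info_t *info = args;
		int err;

		/* Sleepable path: map the doorbell and stash it in the msi_desc. */
		err = iommu_dma_prepare_msi(info->desc, DOORBELL_PA);
		if (err)
			return err;

		return my_alloc_hwirqs(domain, virq, nr_irqs, info);
	}

	static void my_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
	{
		msg->address_hi = upper_32_bits(DOORBELL_PA);
		msg->address_lo = lower_32_bits(DOORBELL_PA);
		msg->data = d->hwirq;

		/* Atomic path: only patches the address using the cached cookie. */
		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
	}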
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index cf7984991062..1c1f3f66dfd3 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig | |||
| @@ -6,7 +6,6 @@ config IRQCHIP | |||
| 6 | 6 | ||
| 7 | config ARM_GIC | 7 | config ARM_GIC |
| 8 | bool | 8 | bool |
| 9 | select IRQ_DOMAIN | ||
| 10 | select IRQ_DOMAIN_HIERARCHY | 9 | select IRQ_DOMAIN_HIERARCHY |
| 11 | select GENERIC_IRQ_MULTI_HANDLER | 10 | select GENERIC_IRQ_MULTI_HANDLER |
| 12 | select GENERIC_IRQ_EFFECTIVE_AFF_MASK | 11 | select GENERIC_IRQ_EFFECTIVE_AFF_MASK |
| @@ -33,7 +32,6 @@ config GIC_NON_BANKED | |||
| 33 | 32 | ||
| 34 | config ARM_GIC_V3 | 33 | config ARM_GIC_V3 |
| 35 | bool | 34 | bool |
| 36 | select IRQ_DOMAIN | ||
| 37 | select GENERIC_IRQ_MULTI_HANDLER | 35 | select GENERIC_IRQ_MULTI_HANDLER |
| 38 | select IRQ_DOMAIN_HIERARCHY | 36 | select IRQ_DOMAIN_HIERARCHY |
| 39 | select PARTITION_PERCPU | 37 | select PARTITION_PERCPU |
| @@ -59,7 +57,6 @@ config ARM_GIC_V3_ITS_FSL_MC | |||
| 59 | 57 | ||
| 60 | config ARM_NVIC | 58 | config ARM_NVIC |
| 61 | bool | 59 | bool |
| 62 | select IRQ_DOMAIN | ||
| 63 | select IRQ_DOMAIN_HIERARCHY | 60 | select IRQ_DOMAIN_HIERARCHY |
| 64 | select GENERIC_IRQ_CHIP | 61 | select GENERIC_IRQ_CHIP |
| 65 | 62 | ||
| @@ -358,7 +355,6 @@ config STM32_EXTI | |||
| 358 | config QCOM_IRQ_COMBINER | 355 | config QCOM_IRQ_COMBINER |
| 359 | bool "QCOM IRQ combiner support" | 356 | bool "QCOM IRQ combiner support" |
| 360 | depends on ARCH_QCOM && ACPI | 357 | depends on ARCH_QCOM && ACPI |
| 361 | select IRQ_DOMAIN | ||
| 362 | select IRQ_DOMAIN_HIERARCHY | 358 | select IRQ_DOMAIN_HIERARCHY |
| 363 | help | 359 | help |
| 364 | Say yes here to add support for the IRQ combiner devices embedded | 360 | Say yes here to add support for the IRQ combiner devices embedded |
| @@ -375,7 +371,6 @@ config IRQ_UNIPHIER_AIDET | |||
| 375 | config MESON_IRQ_GPIO | 371 | config MESON_IRQ_GPIO |
| 376 | bool "Meson GPIO Interrupt Multiplexer" | 372 | bool "Meson GPIO Interrupt Multiplexer" |
| 377 | depends on ARCH_MESON | 373 | depends on ARCH_MESON |
| 378 | select IRQ_DOMAIN | ||
| 379 | select IRQ_DOMAIN_HIERARCHY | 374 | select IRQ_DOMAIN_HIERARCHY |
| 380 | help | 375 | help |
| 381 | Support Meson SoC Family GPIO Interrupt Multiplexer | 376 | Support Meson SoC Family GPIO Interrupt Multiplexer |
| @@ -391,7 +386,6 @@ config GOLDFISH_PIC | |||
| 391 | config QCOM_PDC | 386 | config QCOM_PDC |
| 392 | bool "QCOM PDC" | 387 | bool "QCOM PDC" |
| 393 | depends on ARCH_QCOM | 388 | depends on ARCH_QCOM |
| 394 | select IRQ_DOMAIN | ||
| 395 | select IRQ_DOMAIN_HIERARCHY | 389 | select IRQ_DOMAIN_HIERARCHY |
| 396 | help | 390 | help |
| 397 | Power Domain Controller driver to manage and configure wakeup | 391 | Power Domain Controller driver to manage and configure wakeup |
| @@ -431,6 +425,27 @@ config LS1X_IRQ | |||
| 431 | help | 425 | help |
| 432 | Support for the Loongson-1 platform Interrupt Controller. | 426 | Support for the Loongson-1 platform Interrupt Controller. |
| 433 | 427 | ||
| 428 | config TI_SCI_INTR_IRQCHIP | ||
| 429 | bool | ||
| 430 | depends on TI_SCI_PROTOCOL | ||
| 431 | select IRQ_DOMAIN_HIERARCHY | ||
| 432 | help | ||
| 433 | This enables the irqchip driver support for K3 Interrupt router | ||
| 434 | over the TI System Control Interface, available on some newer TI SoCs. | ||
| 435 | If you wish to use interrupt router irq resources managed by the | ||
| 436 | TI System Controller, say Y here. Otherwise, say N. | ||
| 437 | |||
| 438 | config TI_SCI_INTA_IRQCHIP | ||
| 439 | bool | ||
| 440 | depends on TI_SCI_PROTOCOL | ||
| 441 | select IRQ_DOMAIN_HIERARCHY | ||
| 442 | select TI_SCI_INTA_MSI_DOMAIN | ||
| 443 | help | ||
| 444 | This enables the irqchip driver support for K3 Interrupt aggregator | ||
| 445 | over the TI System Control Interface, available on some newer TI SoCs. | ||
| 446 | If you wish to use interrupt aggregator irq resources managed by the | ||
| 447 | TI System Controller, say Y here. Otherwise, say N. | ||
| 448 | |||
| 434 | endmenu | 449 | endmenu |
| 435 | 450 | ||
| 436 | config SIFIVE_PLIC | 451 | config SIFIVE_PLIC |
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index f8c66e958a64..606a003a0000 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile | |||
| @@ -98,3 +98,5 @@ obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o | |||
| 98 | obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o | 98 | obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o |
| 99 | obj-$(CONFIG_MADERA_IRQ) += irq-madera.o | 99 | obj-$(CONFIG_MADERA_IRQ) += irq-madera.o |
| 100 | obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o | 100 | obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o |
| 101 | obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o | ||
| 102 | obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o | ||
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 0f6e30e9009d..0acebac1920b 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c | |||
| @@ -343,6 +343,9 @@ int __init bcm7038_l1_of_init(struct device_node *dn, | |||
| 343 | goto out_unmap; | 343 | goto out_unmap; |
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n", | ||
| 347 | dn, IRQS_PER_WORD * intc->n_words); | ||
| 348 | |||
| 346 | return 0; | 349 | return 0; |
| 347 | 350 | ||
| 348 | out_unmap: | 351 | out_unmap: |
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 8968e5e93fcb..541bdca9f4af 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c | |||
| @@ -318,6 +318,9 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, | |||
| 318 | } | 318 | } |
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | pr_info("registered %s intc (%pOF, parent IRQ(s): %d)\n", | ||
| 322 | intc_name, dn, data->num_parent_irqs); | ||
| 323 | |||
| 321 | return 0; | 324 | return 0; |
| 322 | 325 | ||
| 323 | out_free_domain: | 326 | out_free_domain: |
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 5e4ca139e4ea..a0642b59befa 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c | |||
| @@ -264,6 +264,8 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, | |||
| 264 | ct->chip.irq_set_wake = irq_gc_set_wake; | 264 | ct->chip.irq_set_wake = irq_gc_set_wake; |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | pr_info("registered L2 intc (%pOF, parent irq: %d)\n", np, parent_irq); | ||
| 268 | |||
| 267 | return 0; | 269 | return 0; |
| 268 | 270 | ||
| 269 | out_free_domain: | 271 | out_free_domain: |
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c index ecafd295c31c..c4aac0977d8a 100644 --- a/drivers/irqchip/irq-gic-pm.c +++ b/drivers/irqchip/irq-gic-pm.c | |||
| @@ -19,7 +19,6 @@ | |||
| 19 | #include <linux/of_irq.h> | 19 | #include <linux/of_irq.h> |
| 20 | #include <linux/irqchip/arm-gic.h> | 20 | #include <linux/irqchip/arm-gic.h> |
| 21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
| 22 | #include <linux/pm_clock.h> | ||
| 23 | #include <linux/pm_runtime.h> | 22 | #include <linux/pm_runtime.h> |
| 24 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
| 25 | 24 | ||
| @@ -28,17 +27,27 @@ struct gic_clk_data { | |||
| 28 | const char *const *clocks; | 27 | const char *const *clocks; |
| 29 | }; | 28 | }; |
| 30 | 29 | ||
| 30 | struct gic_chip_pm { | ||
| 31 | struct gic_chip_data *chip_data; | ||
| 32 | const struct gic_clk_data *clk_data; | ||
| 33 | struct clk_bulk_data *clks; | ||
| 34 | }; | ||
| 35 | |||
| 31 | static int gic_runtime_resume(struct device *dev) | 36 | static int gic_runtime_resume(struct device *dev) |
| 32 | { | 37 | { |
| 33 | struct gic_chip_data *gic = dev_get_drvdata(dev); | 38 | struct gic_chip_pm *chip_pm = dev_get_drvdata(dev); |
| 39 | struct gic_chip_data *gic = chip_pm->chip_data; | ||
| 40 | const struct gic_clk_data *data = chip_pm->clk_data; | ||
| 34 | int ret; | 41 | int ret; |
| 35 | 42 | ||
| 36 | ret = pm_clk_resume(dev); | 43 | ret = clk_bulk_prepare_enable(data->num_clocks, chip_pm->clks); |
| 37 | if (ret) | 44 | if (ret) { |
| 45 | dev_err(dev, "clk_enable failed: %d\n", ret); | ||
| 38 | return ret; | 46 | return ret; |
| 47 | } | ||
| 39 | 48 | ||
| 40 | /* | 49 | /* |
| 41 | * On the very first resume, the pointer to the driver data | 50 | * On the very first resume, the pointer to chip_pm->chip_data |
| 42 | * will be NULL and this is intentional, because we do not | 51 | * will be NULL and this is intentional, because we do not |
| 43 | * want to restore the GIC on the very first resume. So if | 52 | * want to restore the GIC on the very first resume. So if |
| 44 | * the pointer is not valid just return. | 53 | * the pointer is not valid just return. |
| @@ -54,35 +63,14 @@ static int gic_runtime_resume(struct device *dev) | |||
| 54 | 63 | ||
| 55 | static int gic_runtime_suspend(struct device *dev) | 64 | static int gic_runtime_suspend(struct device *dev) |
| 56 | { | 65 | { |
| 57 | struct gic_chip_data *gic = dev_get_drvdata(dev); | 66 | struct gic_chip_pm *chip_pm = dev_get_drvdata(dev); |
| 67 | struct gic_chip_data *gic = chip_pm->chip_data; | ||
| 68 | const struct gic_clk_data *data = chip_pm->clk_data; | ||
| 58 | 69 | ||
| 59 | gic_dist_save(gic); | 70 | gic_dist_save(gic); |
| 60 | gic_cpu_save(gic); | 71 | gic_cpu_save(gic); |
| 61 | 72 | ||
| 62 | return pm_clk_suspend(dev); | 73 | clk_bulk_disable_unprepare(data->num_clocks, chip_pm->clks); |
| 63 | } | ||
| 64 | |||
| 65 | static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data) | ||
| 66 | { | ||
| 67 | unsigned int i; | ||
| 68 | int ret; | ||
| 69 | |||
| 70 | if (!dev || !data) | ||
| 71 | return -EINVAL; | ||
| 72 | |||
| 73 | ret = pm_clk_create(dev); | ||
| 74 | if (ret) | ||
| 75 | return ret; | ||
| 76 | |||
| 77 | for (i = 0; i < data->num_clocks; i++) { | ||
| 78 | ret = of_pm_clk_add_clk(dev, data->clocks[i]); | ||
| 79 | if (ret) { | ||
| 80 | dev_err(dev, "failed to add clock %s\n", | ||
| 81 | data->clocks[i]); | ||
| 82 | pm_clk_destroy(dev); | ||
| 83 | return ret; | ||
| 84 | } | ||
| 85 | } | ||
| 86 | 74 | ||
| 87 | return 0; | 75 | return 0; |
| 88 | } | 76 | } |
| @@ -91,8 +79,8 @@ static int gic_probe(struct platform_device *pdev) | |||
| 91 | { | 79 | { |
| 92 | struct device *dev = &pdev->dev; | 80 | struct device *dev = &pdev->dev; |
| 93 | const struct gic_clk_data *data; | 81 | const struct gic_clk_data *data; |
| 94 | struct gic_chip_data *gic; | 82 | struct gic_chip_pm *chip_pm; |
| 95 | int ret, irq; | 83 | int ret, irq, i; |
| 96 | 84 | ||
| 97 | data = of_device_get_match_data(&pdev->dev); | 85 | data = of_device_get_match_data(&pdev->dev); |
| 98 | if (!data) { | 86 | if (!data) { |
| @@ -100,28 +88,41 @@ static int gic_probe(struct platform_device *pdev) | |||
| 100 | return -ENODEV; | 88 | return -ENODEV; |
| 101 | } | 89 | } |
| 102 | 90 | ||
| 91 | chip_pm = devm_kzalloc(dev, sizeof(*chip_pm), GFP_KERNEL); | ||
| 92 | if (!chip_pm) | ||
| 93 | return -ENOMEM; | ||
| 94 | |||
| 103 | irq = irq_of_parse_and_map(dev->of_node, 0); | 95 | irq = irq_of_parse_and_map(dev->of_node, 0); |
| 104 | if (!irq) { | 96 | if (!irq) { |
| 105 | dev_err(dev, "no parent interrupt found!\n"); | 97 | dev_err(dev, "no parent interrupt found!\n"); |
| 106 | return -EINVAL; | 98 | return -EINVAL; |
| 107 | } | 99 | } |
| 108 | 100 | ||
| 109 | ret = gic_get_clocks(dev, data); | 101 | chip_pm->clks = devm_kcalloc(dev, data->num_clocks, |
| 102 | sizeof(*chip_pm->clks), GFP_KERNEL); | ||
| 103 | if (!chip_pm->clks) | ||
| 104 | return -ENOMEM; | ||
| 105 | |||
| 106 | for (i = 0; i < data->num_clocks; i++) | ||
| 107 | chip_pm->clks[i].id = data->clocks[i]; | ||
| 108 | |||
| 109 | ret = devm_clk_bulk_get(dev, data->num_clocks, chip_pm->clks); | ||
| 110 | if (ret) | 110 | if (ret) |
| 111 | goto irq_dispose; | 111 | goto irq_dispose; |
| 112 | 112 | ||
| 113 | chip_pm->clk_data = data; | ||
| 114 | dev_set_drvdata(dev, chip_pm); | ||
| 115 | |||
| 113 | pm_runtime_enable(dev); | 116 | pm_runtime_enable(dev); |
| 114 | 117 | ||
| 115 | ret = pm_runtime_get_sync(dev); | 118 | ret = pm_runtime_get_sync(dev); |
| 116 | if (ret < 0) | 119 | if (ret < 0) |
| 117 | goto rpm_disable; | 120 | goto rpm_disable; |
| 118 | 121 | ||
| 119 | ret = gic_of_init_child(dev, &gic, irq); | 122 | ret = gic_of_init_child(dev, &chip_pm->chip_data, irq); |
| 120 | if (ret) | 123 | if (ret) |
| 121 | goto rpm_put; | 124 | goto rpm_put; |
| 122 | 125 | ||
| 123 | platform_set_drvdata(pdev, gic); | ||
| 124 | |||
| 125 | pm_runtime_put(dev); | 126 | pm_runtime_put(dev); |
| 126 | 127 | ||
| 127 | dev_info(dev, "GIC IRQ controller registered\n"); | 128 | dev_info(dev, "GIC IRQ controller registered\n"); |
| @@ -132,7 +133,6 @@ rpm_put: | |||
| 132 | pm_runtime_put_sync(dev); | 133 | pm_runtime_put_sync(dev); |
| 133 | rpm_disable: | 134 | rpm_disable: |
| 134 | pm_runtime_disable(dev); | 135 | pm_runtime_disable(dev); |
| 135 | pm_clk_destroy(dev); | ||
| 136 | irq_dispose: | 136 | irq_dispose: |
| 137 | irq_dispose_mapping(irq); | 137 | irq_dispose_mapping(irq); |
| 138 | 138 | ||
| @@ -142,6 +142,8 @@ irq_dispose: | |||
| 142 | static const struct dev_pm_ops gic_pm_ops = { | 142 | static const struct dev_pm_ops gic_pm_ops = { |
| 143 | SET_RUNTIME_PM_OPS(gic_runtime_suspend, | 143 | SET_RUNTIME_PM_OPS(gic_runtime_suspend, |
| 144 | gic_runtime_resume, NULL) | 144 | gic_runtime_resume, NULL) |
| 145 | SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | ||
| 146 | pm_runtime_force_resume) | ||
| 145 | }; | 147 | }; |
| 146 | 148 | ||
| 147 | static const char * const gic400_clocks[] = { | 149 | static const char * const gic400_clocks[] = { |
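The irq-gic-pm.c changes above replace the pm_clk_*() helpers with the clk_bulk API: the clock names from gic_clk_data are resolved once at probe time with devm_clk_bulk_get() and then toggled directly from the runtime PM callbacks. A condensed sketch of that pattern, with struct my_pm and the my_*() names as placeholders:

	struct my_pm {
		struct clk_bulk_data *clks;
		int num_clks;
	};

	static int my_runtime_resume(struct device *dev)
	{
		struct my_pm *pm = dev_get_drvdata(dev);

		/* Enables every clock in the array, unwinding on failure. */
		return clk_bulk_prepare_enable(pm->num_clks, pm->clks);
	}

	static int my_runtime_suspend(struct device *dev)
	{
		struct my_pm *pm = dev_get_drvdata(dev);

		clk_bulk_disable_unprepare(pm->num_clks, pm->clks);
		return 0;
	}

At probe time each clks[i].id is pointed at an entry of the name table (gic400_clocks[] here) before the single devm_clk_bulk_get() call, exactly as the hunk above does.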
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index de14e06fd9ec..3c77ab676e54 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c | |||
| @@ -110,7 +110,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 110 | if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) | 110 | if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET) |
| 111 | msg->data -= v2m->spi_offset; | 111 | msg->data -= v2m->spi_offset; |
| 112 | 112 | ||
| 113 | iommu_dma_map_msi_msg(data->irq, msg); | 113 | iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg); |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | static struct irq_chip gicv2m_irq_chip = { | 116 | static struct irq_chip gicv2m_irq_chip = { |
| @@ -167,6 +167,7 @@ static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq, | |||
| 167 | static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | 167 | static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
| 168 | unsigned int nr_irqs, void *args) | 168 | unsigned int nr_irqs, void *args) |
| 169 | { | 169 | { |
| 170 | msi_alloc_info_t *info = args; | ||
| 170 | struct v2m_data *v2m = NULL, *tmp; | 171 | struct v2m_data *v2m = NULL, *tmp; |
| 171 | int hwirq, offset, i, err = 0; | 172 | int hwirq, offset, i, err = 0; |
| 172 | 173 | ||
| @@ -186,6 +187,11 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 186 | 187 | ||
| 187 | hwirq = v2m->spi_start + offset; | 188 | hwirq = v2m->spi_start + offset; |
| 188 | 189 | ||
| 190 | err = iommu_dma_prepare_msi(info->desc, | ||
| 191 | v2m->res.start + V2M_MSI_SETSPI_NS); | ||
| 192 | if (err) | ||
| 193 | return err; | ||
| 194 | |||
| 189 | for (i = 0; i < nr_irqs; i++) { | 195 | for (i = 0; i < nr_irqs; i++) { |
| 190 | err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i); | 196 | err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i); |
| 191 | if (err) | 197 | if (err) |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 128ac893d7e4..cfb9b4e5f914 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
| 27 | #include <linux/irqdomain.h> | 27 | #include <linux/irqdomain.h> |
| 28 | #include <linux/list.h> | 28 | #include <linux/list.h> |
| 29 | #include <linux/list_sort.h> | ||
| 30 | #include <linux/log2.h> | 29 | #include <linux/log2.h> |
| 31 | #include <linux/memblock.h> | 30 | #include <linux/memblock.h> |
| 32 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
| @@ -1179,7 +1178,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) | |||
| 1179 | msg->address_hi = upper_32_bits(addr); | 1178 | msg->address_hi = upper_32_bits(addr); |
| 1180 | msg->data = its_get_event_id(d); | 1179 | msg->data = its_get_event_id(d); |
| 1181 | 1180 | ||
| 1182 | iommu_dma_map_msi_msg(d->irq, msg); | 1181 | iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); |
| 1183 | } | 1182 | } |
| 1184 | 1183 | ||
| 1185 | static int its_irq_set_irqchip_state(struct irq_data *d, | 1184 | static int its_irq_set_irqchip_state(struct irq_data *d, |
| @@ -1465,9 +1464,8 @@ static struct lpi_range *mk_lpi_range(u32 base, u32 span) | |||
| 1465 | { | 1464 | { |
| 1466 | struct lpi_range *range; | 1465 | struct lpi_range *range; |
| 1467 | 1466 | ||
| 1468 | range = kzalloc(sizeof(*range), GFP_KERNEL); | 1467 | range = kmalloc(sizeof(*range), GFP_KERNEL); |
| 1469 | if (range) { | 1468 | if (range) { |
| 1470 | INIT_LIST_HEAD(&range->entry); | ||
| 1471 | range->base_id = base; | 1469 | range->base_id = base; |
| 1472 | range->span = span; | 1470 | range->span = span; |
| 1473 | } | 1471 | } |
| @@ -1475,31 +1473,6 @@ static struct lpi_range *mk_lpi_range(u32 base, u32 span) | |||
| 1475 | return range; | 1473 | return range; |
| 1476 | } | 1474 | } |
| 1477 | 1475 | ||
| 1478 | static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b) | ||
| 1479 | { | ||
| 1480 | struct lpi_range *ra, *rb; | ||
| 1481 | |||
| 1482 | ra = container_of(a, struct lpi_range, entry); | ||
| 1483 | rb = container_of(b, struct lpi_range, entry); | ||
| 1484 | |||
| 1485 | return ra->base_id - rb->base_id; | ||
| 1486 | } | ||
| 1487 | |||
| 1488 | static void merge_lpi_ranges(void) | ||
| 1489 | { | ||
| 1490 | struct lpi_range *range, *tmp; | ||
| 1491 | |||
| 1492 | list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { | ||
| 1493 | if (!list_is_last(&range->entry, &lpi_range_list) && | ||
| 1494 | (tmp->base_id == (range->base_id + range->span))) { | ||
| 1495 | tmp->base_id = range->base_id; | ||
| 1496 | tmp->span += range->span; | ||
| 1497 | list_del(&range->entry); | ||
| 1498 | kfree(range); | ||
| 1499 | } | ||
| 1500 | } | ||
| 1501 | } | ||
| 1502 | |||
| 1503 | static int alloc_lpi_range(u32 nr_lpis, u32 *base) | 1476 | static int alloc_lpi_range(u32 nr_lpis, u32 *base) |
| 1504 | { | 1477 | { |
| 1505 | struct lpi_range *range, *tmp; | 1478 | struct lpi_range *range, *tmp; |
| @@ -1529,25 +1502,49 @@ static int alloc_lpi_range(u32 nr_lpis, u32 *base) | |||
| 1529 | return err; | 1502 | return err; |
| 1530 | } | 1503 | } |
| 1531 | 1504 | ||
| 1505 | static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b) | ||
| 1506 | { | ||
| 1507 | if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list) | ||
| 1508 | return; | ||
| 1509 | if (a->base_id + a->span != b->base_id) | ||
| 1510 | return; | ||
| 1511 | b->base_id = a->base_id; | ||
| 1512 | b->span += a->span; | ||
| 1513 | list_del(&a->entry); | ||
| 1514 | kfree(a); | ||
| 1515 | } | ||
| 1516 | |||
| 1532 | static int free_lpi_range(u32 base, u32 nr_lpis) | 1517 | static int free_lpi_range(u32 base, u32 nr_lpis) |
| 1533 | { | 1518 | { |
| 1534 | struct lpi_range *new; | 1519 | struct lpi_range *new, *old; |
| 1535 | int err = 0; | 1520 | |
| 1521 | new = mk_lpi_range(base, nr_lpis); | ||
| 1522 | if (!new) | ||
| 1523 | return -ENOMEM; | ||
| 1536 | 1524 | ||
| 1537 | mutex_lock(&lpi_range_lock); | 1525 | mutex_lock(&lpi_range_lock); |
| 1538 | 1526 | ||
| 1539 | new = mk_lpi_range(base, nr_lpis); | 1527 | list_for_each_entry_reverse(old, &lpi_range_list, entry) { |
| 1540 | if (!new) { | 1528 | if (old->base_id < base) |
| 1541 | err = -ENOMEM; | 1529 | break; |
| 1542 | goto out; | ||
| 1543 | } | 1530 | } |
| 1531 | /* | ||
| 1532 | * old is the last element with ->base_id smaller than base, | ||
| 1533 | * so new goes right after it. If there are no elements with | ||
| 1534 | * ->base_id smaller than base, &old->entry ends up pointing | ||
| 1535 | * at the head of the list, and inserting new at the start of | ||
| 1536 | * the list is the right thing to do in that case as well. | ||
| 1537 | */ | ||
| 1538 | list_add(&new->entry, &old->entry); | ||
| 1539 | /* | ||
| 1540 | * Now check if we can merge with the preceding and/or | ||
| 1541 | * following ranges. | ||
| 1542 | */ | ||
| 1543 | merge_lpi_ranges(old, new); | ||
| 1544 | merge_lpi_ranges(new, list_next_entry(new, entry)); | ||
| 1544 | 1545 | ||
| 1545 | list_add(&new->entry, &lpi_range_list); | ||
| 1546 | list_sort(NULL, &lpi_range_list, lpi_range_cmp); | ||
| 1547 | merge_lpi_ranges(); | ||
| 1548 | out: | ||
| 1549 | mutex_unlock(&lpi_range_lock); | 1546 | mutex_unlock(&lpi_range_lock); |
| 1550 | return err; | 1547 | return 0; |
| 1551 | } | 1548 | } |
| 1552 | 1549 | ||
| 1553 | static int __init its_lpi_init(u32 id_bits) | 1550 | static int __init its_lpi_init(u32 id_bits) |
| @@ -2487,7 +2484,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | |||
| 2487 | int err = 0; | 2484 | int err = 0; |
| 2488 | 2485 | ||
| 2489 | /* | 2486 | /* |
| 2490 | * We ignore "dev" entierely, and rely on the dev_id that has | 2487 | * We ignore "dev" entirely, and rely on the dev_id that has |
| 2491 | * been passed via the scratchpad. This limits this domain's | 2488 | * been passed via the scratchpad. This limits this domain's |
| 2492 | * usefulness to upper layers that definitely know that they | 2489 | * usefulness to upper layers that definitely know that they |
| 2493 | * are built on top of the ITS. | 2490 | * are built on top of the ITS. |
| @@ -2566,6 +2563,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 2566 | { | 2563 | { |
| 2567 | msi_alloc_info_t *info = args; | 2564 | msi_alloc_info_t *info = args; |
| 2568 | struct its_device *its_dev = info->scratchpad[0].ptr; | 2565 | struct its_device *its_dev = info->scratchpad[0].ptr; |
| 2566 | struct its_node *its = its_dev->its; | ||
| 2569 | irq_hw_number_t hwirq; | 2567 | irq_hw_number_t hwirq; |
| 2570 | int err; | 2568 | int err; |
| 2571 | int i; | 2569 | int i; |
| @@ -2574,6 +2572,10 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 2574 | if (err) | 2572 | if (err) |
| 2575 | return err; | 2573 | return err; |
| 2576 | 2574 | ||
| 2575 | err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); | ||
| 2576 | if (err) | ||
| 2577 | return err; | ||
| 2578 | |||
| 2577 | for (i = 0; i < nr_irqs; i++) { | 2579 | for (i = 0; i < nr_irqs; i++) { |
| 2578 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); | 2580 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); |
| 2579 | if (err) | 2581 | if (err) |
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c index fbfa7ff6deb1..563a9b366294 100644 --- a/drivers/irqchip/irq-gic-v3-mbi.c +++ b/drivers/irqchip/irq-gic-v3-mbi.c | |||
| @@ -84,6 +84,7 @@ static void mbi_free_msi(struct mbi_range *mbi, unsigned int hwirq, | |||
| 84 | static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | 84 | static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
| 85 | unsigned int nr_irqs, void *args) | 85 | unsigned int nr_irqs, void *args) |
| 86 | { | 86 | { |
| 87 | msi_alloc_info_t *info = args; | ||
| 87 | struct mbi_range *mbi = NULL; | 88 | struct mbi_range *mbi = NULL; |
| 88 | int hwirq, offset, i, err = 0; | 89 | int hwirq, offset, i, err = 0; |
| 89 | 90 | ||
| @@ -104,6 +105,11 @@ static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 104 | 105 | ||
| 105 | hwirq = mbi->spi_start + offset; | 106 | hwirq = mbi->spi_start + offset; |
| 106 | 107 | ||
| 108 | err = iommu_dma_prepare_msi(info->desc, | ||
| 109 | mbi_phys_base + GICD_SETSPI_NSR); | ||
| 110 | if (err) | ||
| 111 | return err; | ||
| 112 | |||
| 107 | for (i = 0; i < nr_irqs; i++) { | 113 | for (i = 0; i < nr_irqs; i++) { |
| 108 | err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i); | 114 | err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i); |
| 109 | if (err) | 115 | if (err) |
| @@ -142,7 +148,7 @@ static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 142 | msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR); | 148 | msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR); |
| 143 | msg[0].data = data->parent_data->hwirq; | 149 | msg[0].data = data->parent_data->hwirq; |
| 144 | 150 | ||
| 145 | iommu_dma_map_msi_msg(data->irq, msg); | 151 | iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg); |
| 146 | } | 152 | } |
| 147 | 153 | ||
| 148 | #ifdef CONFIG_PCI_MSI | 154 | #ifdef CONFIG_PCI_MSI |
| @@ -202,7 +208,7 @@ static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 202 | msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR); | 208 | msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR); |
| 203 | msg[1].data = data->parent_data->hwirq; | 209 | msg[1].data = data->parent_data->hwirq; |
| 204 | 210 | ||
| 205 | iommu_dma_map_msi_msg(data->irq, &msg[1]); | 211 | iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]); |
| 206 | } | 212 | } |
| 207 | 213 | ||
| 208 | /* Platform-MSI specific irqchip */ | 214 | /* Platform-MSI specific irqchip */ |
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c index 88df3d00052c..290531ec3d61 100644 --- a/drivers/irqchip/irq-imx-irqsteer.c +++ b/drivers/irqchip/irq-imx-irqsteer.c | |||
| @@ -144,7 +144,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev) | |||
| 144 | { | 144 | { |
| 145 | struct device_node *np = pdev->dev.of_node; | 145 | struct device_node *np = pdev->dev.of_node; |
| 146 | struct irqsteer_data *data; | 146 | struct irqsteer_data *data; |
| 147 | struct resource *res; | ||
| 148 | u32 irqs_num; | 147 | u32 irqs_num; |
| 149 | int i, ret; | 148 | int i, ret; |
| 150 | 149 | ||
| @@ -152,8 +151,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev) | |||
| 152 | if (!data) | 151 | if (!data) |
| 153 | return -ENOMEM; | 152 | return -ENOMEM; |
| 154 | 153 | ||
| 155 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 154 | data->regs = devm_platform_ioremap_resource(pdev, 0); |
| 156 | data->regs = devm_ioremap_resource(&pdev->dev, res); | ||
| 157 | if (IS_ERR(data->regs)) { | 155 | if (IS_ERR(data->regs)) { |
| 158 | dev_err(&pdev->dev, "failed to initialize reg\n"); | 156 | dev_err(&pdev->dev, "failed to initialize reg\n"); |
| 159 | return PTR_ERR(data->regs); | 157 | return PTR_ERR(data->regs); |
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c index c671b3212010..669d29105772 100644 --- a/drivers/irqchip/irq-ls-scfg-msi.c +++ b/drivers/irqchip/irq-ls-scfg-msi.c | |||
| @@ -100,7 +100,7 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 100 | msg->data |= cpumask_first(mask); | 100 | msg->data |= cpumask_first(mask); |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | iommu_dma_map_msi_msg(data->irq, msg); | 103 | iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg); |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, | 106 | static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, |
| @@ -141,6 +141,7 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain, | |||
| 141 | unsigned int nr_irqs, | 141 | unsigned int nr_irqs, |
| 142 | void *args) | 142 | void *args) |
| 143 | { | 143 | { |
| 144 | msi_alloc_info_t *info = args; | ||
| 144 | struct ls_scfg_msi *msi_data = domain->host_data; | 145 | struct ls_scfg_msi *msi_data = domain->host_data; |
| 145 | int pos, err = 0; | 146 | int pos, err = 0; |
| 146 | 147 | ||
| @@ -157,6 +158,10 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain, | |||
| 157 | if (err) | 158 | if (err) |
| 158 | return err; | 159 | return err; |
| 159 | 160 | ||
| 161 | err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr); | ||
| 162 | if (err) | ||
| 163 | return err; | ||
| 164 | |||
| 160 | irq_domain_set_info(domain, virq, pos, | 165 | irq_domain_set_info(domain, virq, pos, |
| 161 | &ls_scfg_msi_parent_chip, msi_data, | 166 | &ls_scfg_msi_parent_chip, msi_data, |
| 162 | handle_simple_irq, NULL, NULL); | 167 | handle_simple_irq, NULL, NULL); |
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 8c039525703f..04c05a18600c 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c | |||
| @@ -389,10 +389,8 @@ static int intc_irqpin_probe(struct platform_device *pdev) | |||
| 389 | int k; | 389 | int k; |
| 390 | 390 | ||
| 391 | p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); | 391 | p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); |
| 392 | if (!p) { | 392 | if (!p) |
| 393 | dev_err(dev, "failed to allocate driver data\n"); | ||
| 394 | return -ENOMEM; | 393 | return -ENOMEM; |
| 395 | } | ||
| 396 | 394 | ||
| 397 | /* deal with driver instance configuration */ | 395 | /* deal with driver instance configuration */ |
| 398 | of_property_read_u32(dev->of_node, "sense-bitfield-width", | 396 | of_property_read_u32(dev->of_node, "sense-bitfield-width", |
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 7bd1d4cb2e19..e00f2fa27f00 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c | |||
| @@ -14,8 +14,10 @@ | |||
| 14 | #include <linux/irqchip.h> | 14 | #include <linux/irqchip.h> |
| 15 | #include <linux/irqchip/chained_irq.h> | 15 | #include <linux/irqchip/chained_irq.h> |
| 16 | #include <linux/irqdomain.h> | 16 | #include <linux/irqdomain.h> |
| 17 | #include <linux/module.h> | ||
| 17 | #include <linux/of_address.h> | 18 | #include <linux/of_address.h> |
| 18 | #include <linux/of_irq.h> | 19 | #include <linux/of_irq.h> |
| 20 | #include <linux/of_platform.h> | ||
| 19 | #include <linux/syscore_ops.h> | 21 | #include <linux/syscore_ops.h> |
| 20 | 22 | ||
| 21 | #include <dt-bindings/interrupt-controller/arm-gic.h> | 23 | #include <dt-bindings/interrupt-controller/arm-gic.h> |
| @@ -37,12 +39,6 @@ struct stm32_exti_bank { | |||
| 37 | 39 | ||
| 38 | #define UNDEF_REG ~0 | 40 | #define UNDEF_REG ~0 |
| 39 | 41 | ||
| 40 | enum stm32_exti_hwspinlock { | ||
| 41 | HWSPINLOCK_UNKNOWN, | ||
| 42 | HWSPINLOCK_NONE, | ||
| 43 | HWSPINLOCK_READY, | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct stm32_desc_irq { | 42 | struct stm32_desc_irq { |
| 47 | u32 exti; | 43 | u32 exti; |
| 48 | u32 irq_parent; | 44 | u32 irq_parent; |
| @@ -69,8 +65,6 @@ struct stm32_exti_host_data { | |||
| 69 | void __iomem *base; | 65 | void __iomem *base; |
| 70 | struct stm32_exti_chip_data *chips_data; | 66 | struct stm32_exti_chip_data *chips_data; |
| 71 | const struct stm32_exti_drv_data *drv_data; | 67 | const struct stm32_exti_drv_data *drv_data; |
| 72 | struct device_node *node; | ||
| 73 | enum stm32_exti_hwspinlock hwlock_state; | ||
| 74 | struct hwspinlock *hwlock; | 68 | struct hwspinlock *hwlock; |
| 75 | }; | 69 | }; |
| 76 | 70 | ||
| @@ -285,49 +279,27 @@ static int stm32_exti_set_type(struct irq_data *d, | |||
| 285 | 279 | ||
| 286 | static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data) | 280 | static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data) |
| 287 | { | 281 | { |
| 288 | struct stm32_exti_host_data *host_data = chip_data->host_data; | 282 | int ret, timeout = 0; |
| 289 | struct hwspinlock *hwlock; | ||
| 290 | int id, ret = 0, timeout = 0; | ||
| 291 | |||
| 292 | /* first time, check for hwspinlock availability */ | ||
| 293 | if (unlikely(host_data->hwlock_state == HWSPINLOCK_UNKNOWN)) { | ||
| 294 | id = of_hwspin_lock_get_id(host_data->node, 0); | ||
| 295 | if (id >= 0) { | ||
| 296 | hwlock = hwspin_lock_request_specific(id); | ||
| 297 | if (hwlock) { | ||
| 298 | /* found valid hwspinlock */ | ||
| 299 | host_data->hwlock_state = HWSPINLOCK_READY; | ||
| 300 | host_data->hwlock = hwlock; | ||
| 301 | pr_debug("%s hwspinlock = %d\n", __func__, id); | ||
| 302 | } else { | ||
| 303 | host_data->hwlock_state = HWSPINLOCK_NONE; | ||
| 304 | } | ||
| 305 | } else if (id != -EPROBE_DEFER) { | ||
| 306 | host_data->hwlock_state = HWSPINLOCK_NONE; | ||
| 307 | } else { | ||
| 308 | /* hwspinlock driver shall be ready at that stage */ | ||
| 309 | ret = -EPROBE_DEFER; | ||
| 310 | } | ||
| 311 | } | ||
| 312 | 283 | ||
| 313 | if (likely(host_data->hwlock_state == HWSPINLOCK_READY)) { | 284 | if (!chip_data->host_data->hwlock) |
| 314 | /* | 285 | return 0; |
| 315 | * Use the x_raw API since we are under spin_lock protection. | 286 | |
| 316 | * Do not use the x_timeout API because we are under irq_disable | 287 | /* |
| 317 | * mode (see __setup_irq()) | 288 | * Use the x_raw API since we are under spin_lock protection. |
| 318 | */ | 289 | * Do not use the x_timeout API because we are under irq_disable |
| 319 | do { | 290 | * mode (see __setup_irq()) |
| 320 | ret = hwspin_trylock_raw(host_data->hwlock); | 291 | */ |
| 321 | if (!ret) | 292 | do { |
| 322 | return 0; | 293 | ret = hwspin_trylock_raw(chip_data->host_data->hwlock); |
| 323 | 294 | if (!ret) | |
| 324 | udelay(HWSPNLCK_RETRY_DELAY); | 295 | return 0; |
| 325 | timeout += HWSPNLCK_RETRY_DELAY; | 296 | |
| 326 | } while (timeout < HWSPNLCK_TIMEOUT); | 297 | udelay(HWSPNLCK_RETRY_DELAY); |
| 327 | 298 | timeout += HWSPNLCK_RETRY_DELAY; | |
| 328 | if (ret == -EBUSY) | 299 | } while (timeout < HWSPNLCK_TIMEOUT); |
| 329 | ret = -ETIMEDOUT; | 300 | |
| 330 | } | 301 | if (ret == -EBUSY) |
| 302 | ret = -ETIMEDOUT; | ||
| 331 | 303 | ||
| 332 | if (ret) | 304 | if (ret) |
| 333 | pr_err("%s can't get hwspinlock (%d)\n", __func__, ret); | 305 | pr_err("%s can't get hwspinlock (%d)\n", __func__, ret); |
| @@ -337,7 +309,7 @@ static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data) | |||
| 337 | 309 | ||
| 338 | static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data) | 310 | static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data) |
| 339 | { | 311 | { |
| 340 | if (likely(chip_data->host_data->hwlock_state == HWSPINLOCK_READY)) | 312 | if (chip_data->host_data->hwlock) |
| 341 | hwspin_unlock_raw(chip_data->host_data->hwlock); | 313 | hwspin_unlock_raw(chip_data->host_data->hwlock); |
| 342 | } | 314 | } |
| 343 | 315 | ||
| @@ -586,8 +558,7 @@ static int stm32_exti_h_set_affinity(struct irq_data *d, | |||
| 586 | return -EINVAL; | 558 | return -EINVAL; |
| 587 | } | 559 | } |
| 588 | 560 | ||
| 589 | #ifdef CONFIG_PM | 561 | static int __maybe_unused stm32_exti_h_suspend(void) |
| 590 | static int stm32_exti_h_suspend(void) | ||
| 591 | { | 562 | { |
| 592 | struct stm32_exti_chip_data *chip_data; | 563 | struct stm32_exti_chip_data *chip_data; |
| 593 | int i; | 564 | int i; |
| @@ -602,7 +573,7 @@ static int stm32_exti_h_suspend(void) | |||
| 602 | return 0; | 573 | return 0; |
| 603 | } | 574 | } |
| 604 | 575 | ||
| 605 | static void stm32_exti_h_resume(void) | 576 | static void __maybe_unused stm32_exti_h_resume(void) |
| 606 | { | 577 | { |
| 607 | struct stm32_exti_chip_data *chip_data; | 578 | struct stm32_exti_chip_data *chip_data; |
| 608 | int i; | 579 | int i; |
| @@ -616,17 +587,22 @@ static void stm32_exti_h_resume(void) | |||
| 616 | } | 587 | } |
| 617 | 588 | ||
| 618 | static struct syscore_ops stm32_exti_h_syscore_ops = { | 589 | static struct syscore_ops stm32_exti_h_syscore_ops = { |
| 590 | #ifdef CONFIG_PM_SLEEP | ||
| 619 | .suspend = stm32_exti_h_suspend, | 591 | .suspend = stm32_exti_h_suspend, |
| 620 | .resume = stm32_exti_h_resume, | 592 | .resume = stm32_exti_h_resume, |
| 593 | #endif | ||
| 621 | }; | 594 | }; |
| 622 | 595 | ||
| 623 | static void stm32_exti_h_syscore_init(void) | 596 | static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data) |
| 624 | { | 597 | { |
| 598 | stm32_host_data = host_data; | ||
| 625 | register_syscore_ops(&stm32_exti_h_syscore_ops); | 599 | register_syscore_ops(&stm32_exti_h_syscore_ops); |
| 626 | } | 600 | } |
| 627 | #else | 601 | |
| 628 | static inline void stm32_exti_h_syscore_init(void) {} | 602 | static void stm32_exti_h_syscore_deinit(void) |
| 629 | #endif | 603 | { |
| 604 | unregister_syscore_ops(&stm32_exti_h_syscore_ops); | ||
| 605 | } | ||
| 630 | 606 | ||
| 631 | static struct irq_chip stm32_exti_h_chip = { | 607 | static struct irq_chip stm32_exti_h_chip = { |
| 632 | .name = "stm32-exti-h", | 608 | .name = "stm32-exti-h", |
| @@ -683,8 +659,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd, | |||
| 683 | return NULL; | 659 | return NULL; |
| 684 | 660 | ||
| 685 | host_data->drv_data = dd; | 661 | host_data->drv_data = dd; |
| 686 | host_data->node = node; | ||
| 687 | host_data->hwlock_state = HWSPINLOCK_UNKNOWN; | ||
| 688 | host_data->chips_data = kcalloc(dd->bank_nr, | 662 | host_data->chips_data = kcalloc(dd->bank_nr, |
| 689 | sizeof(struct stm32_exti_chip_data), | 663 | sizeof(struct stm32_exti_chip_data), |
| 690 | GFP_KERNEL); | 664 | GFP_KERNEL); |
| @@ -711,7 +685,8 @@ free_host_data: | |||
| 711 | 685 | ||
| 712 | static struct | 686 | static struct |
| 713 | stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, | 687 | stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, |
| 714 | u32 bank_idx) | 688 | u32 bank_idx, |
| 689 | struct device_node *node) | ||
| 715 | { | 690 | { |
| 716 | const struct stm32_exti_bank *stm32_bank; | 691 | const struct stm32_exti_bank *stm32_bank; |
| 717 | struct stm32_exti_chip_data *chip_data; | 692 | struct stm32_exti_chip_data *chip_data; |
| @@ -731,7 +706,7 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data, | |||
| 731 | writel_relaxed(0, base + stm32_bank->imr_ofst); | 706 | writel_relaxed(0, base + stm32_bank->imr_ofst); |
| 732 | writel_relaxed(0, base + stm32_bank->emr_ofst); | 707 | writel_relaxed(0, base + stm32_bank->emr_ofst); |
| 733 | 708 | ||
| 734 | pr_info("%pOF: bank%d\n", h_data->node, bank_idx); | 709 | pr_info("%pOF: bank%d\n", node, bank_idx); |
| 735 | 710 | ||
| 736 | return chip_data; | 711 | return chip_data; |
| 737 | } | 712 | } |
| @@ -771,7 +746,7 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data, | |||
| 771 | struct stm32_exti_chip_data *chip_data; | 746 | struct stm32_exti_chip_data *chip_data; |
| 772 | 747 | ||
| 773 | stm32_bank = drv_data->exti_banks[i]; | 748 | stm32_bank = drv_data->exti_banks[i]; |
| 774 | chip_data = stm32_exti_chip_init(host_data, i); | 749 | chip_data = stm32_exti_chip_init(host_data, i, node); |
| 775 | 750 | ||
| 776 | gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK); | 751 | gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK); |
| 777 | 752 | ||
| @@ -815,50 +790,130 @@ static const struct irq_domain_ops stm32_exti_h_domain_ops = { | |||
| 815 | .xlate = irq_domain_xlate_twocell, | 790 | .xlate = irq_domain_xlate_twocell, |
| 816 | }; | 791 | }; |
| 817 | 792 | ||
| 818 | static int | 793 | static void stm32_exti_remove_irq(void *data) |
| 819 | __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data, | 794 | { |
| 820 | struct device_node *node, | 795 | struct irq_domain *domain = data; |
| 821 | struct device_node *parent) | 796 | |
| 797 | irq_domain_remove(domain); | ||
| 798 | } | ||
| 799 | |||
| 800 | static int stm32_exti_remove(struct platform_device *pdev) | ||
| 801 | { | ||
| 802 | stm32_exti_h_syscore_deinit(); | ||
| 803 | return 0; | ||
| 804 | } | ||
| 805 | |||
| 806 | static int stm32_exti_probe(struct platform_device *pdev) | ||
| 822 | { | 807 | { |
| 808 | int ret, i; | ||
| 809 | struct device *dev = &pdev->dev; | ||
| 810 | struct device_node *np = dev->of_node; | ||
| 823 | struct irq_domain *parent_domain, *domain; | 811 | struct irq_domain *parent_domain, *domain; |
| 824 | struct stm32_exti_host_data *host_data; | 812 | struct stm32_exti_host_data *host_data; |
| 825 | int ret, i; | 813 | const struct stm32_exti_drv_data *drv_data; |
| 814 | struct resource *res; | ||
| 826 | 815 | ||
| 827 | parent_domain = irq_find_host(parent); | 816 | host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL); |
| 828 | if (!parent_domain) { | 817 | if (!host_data) |
| 829 | pr_err("interrupt-parent not found\n"); | 818 | return -ENOMEM; |
| 830 | return -EINVAL; | 819 | |
| 820 | /* check for optional hwspinlock which may be not available yet */ | ||
| 821 | ret = of_hwspin_lock_get_id(np, 0); | ||
| 822 | if (ret == -EPROBE_DEFER) | ||
| 823 | /* hwspinlock framework not yet ready */ | ||
| 824 | return ret; | ||
| 825 | |||
| 826 | if (ret >= 0) { | ||
| 827 | host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret); | ||
| 828 | if (!host_data->hwlock) { | ||
| 829 | dev_err(dev, "Failed to request hwspinlock\n"); | ||
| 830 | return -EINVAL; | ||
| 831 | } | ||
| 832 | } else if (ret != -ENOENT) { | ||
| 833 | /* note: ENOENT is a valid case (means 'no hwspinlock') */ | ||
| 834 | dev_err(dev, "Failed to get hwspinlock\n"); | ||
| 835 | return ret; | ||
| 831 | } | 836 | } |
| 832 | 837 | ||
| 833 | host_data = stm32_exti_host_init(drv_data, node); | 838 | /* initialize host_data */ |
| 834 | if (!host_data) | 839 | drv_data = of_device_get_match_data(dev); |
| 840 | if (!drv_data) { | ||
| 841 | dev_err(dev, "no of match data\n"); | ||
| 842 | return -ENODEV; | ||
| 843 | } | ||
| 844 | host_data->drv_data = drv_data; | ||
| 845 | |||
| 846 | host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr, | ||
| 847 | sizeof(*host_data->chips_data), | ||
| 848 | GFP_KERNEL); | ||
| 849 | if (!host_data->chips_data) | ||
| 835 | return -ENOMEM; | 850 | return -ENOMEM; |
| 836 | 851 | ||
| 852 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 853 | host_data->base = devm_ioremap_resource(dev, res); | ||
| 854 | if (IS_ERR(host_data->base)) { | ||
| 855 | dev_err(dev, "Unable to map registers\n"); | ||
| 856 | return PTR_ERR(host_data->base); | ||
| 857 | } | ||
| 858 | |||
| 837 | for (i = 0; i < drv_data->bank_nr; i++) | 859 | for (i = 0; i < drv_data->bank_nr; i++) |
| 838 | stm32_exti_chip_init(host_data, i); | 860 | stm32_exti_chip_init(host_data, i, np); |
| 861 | |||
| 862 | parent_domain = irq_find_host(of_irq_find_parent(np)); | ||
| 863 | if (!parent_domain) { | ||
| 864 | dev_err(dev, "GIC interrupt-parent not found\n"); | ||
| 865 | return -EINVAL; | ||
| 866 | } | ||
| 839 | 867 | ||
| 840 | domain = irq_domain_add_hierarchy(parent_domain, 0, | 868 | domain = irq_domain_add_hierarchy(parent_domain, 0, |
| 841 | drv_data->bank_nr * IRQS_PER_BANK, | 869 | drv_data->bank_nr * IRQS_PER_BANK, |
| 842 | node, &stm32_exti_h_domain_ops, | 870 | np, &stm32_exti_h_domain_ops, |
| 843 | host_data); | 871 | host_data); |
| 844 | 872 | ||
| 845 | if (!domain) { | 873 | if (!domain) { |
| 846 | pr_err("%pOFn: Could not register exti domain.\n", node); | 874 | dev_err(dev, "Could not register exti domain\n"); |
| 847 | ret = -ENOMEM; | 875 | return -ENOMEM; |
| 848 | goto out_unmap; | ||
| 849 | } | 876 | } |
| 850 | 877 | ||
| 851 | stm32_exti_h_syscore_init(); | 878 | ret = devm_add_action_or_reset(dev, stm32_exti_remove_irq, domain); |
| 879 | if (ret) | ||
| 880 | return ret; | ||
| 881 | |||
| 882 | stm32_exti_h_syscore_init(host_data); | ||
| 852 | 883 | ||
| 853 | return 0; | 884 | return 0; |
| 885 | } | ||
| 854 | 886 | ||
| 855 | out_unmap: | 887 | /* platform driver only for MP1 */ |
| 856 | iounmap(host_data->base); | 888 | static const struct of_device_id stm32_exti_ids[] = { |
| 857 | kfree(host_data->chips_data); | 889 | { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data}, |
| 858 | kfree(host_data); | 890 | {}, |
| 859 | return ret; | 891 | }; |
| 892 | MODULE_DEVICE_TABLE(of, stm32_exti_ids); | ||
| 893 | |||
| 894 | static struct platform_driver stm32_exti_driver = { | ||
| 895 | .probe = stm32_exti_probe, | ||
| 896 | .remove = stm32_exti_remove, | ||
| 897 | .driver = { | ||
| 898 | .name = "stm32_exti", | ||
| 899 | .of_match_table = stm32_exti_ids, | ||
| 900 | }, | ||
| 901 | }; | ||
| 902 | |||
| 903 | static int __init stm32_exti_arch_init(void) | ||
| 904 | { | ||
| 905 | return platform_driver_register(&stm32_exti_driver); | ||
| 860 | } | 906 | } |
| 861 | 907 | ||
| 908 | static void __exit stm32_exti_arch_exit(void) | ||
| 909 | { | ||
| 910 | return platform_driver_unregister(&stm32_exti_driver); | ||
| 911 | } | ||
| 912 | |||
| 913 | arch_initcall(stm32_exti_arch_init); | ||
| 914 | module_exit(stm32_exti_arch_exit); | ||
| 915 | |||
| 916 | /* no platform driver for F4 and H7 */ | ||
| 862 | static int __init stm32f4_exti_of_init(struct device_node *np, | 917 | static int __init stm32f4_exti_of_init(struct device_node *np, |
| 863 | struct device_node *parent) | 918 | struct device_node *parent) |
| 864 | { | 919 | { |
| @@ -874,11 +929,3 @@ static int __init stm32h7_exti_of_init(struct device_node *np, | |||
| 874 | } | 929 | } |
| 875 | 930 | ||
| 876 | IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init); | 931 | IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init); |
| 877 | |||
| 878 | static int __init stm32mp1_exti_of_init(struct device_node *np, | ||
| 879 | struct device_node *parent) | ||
| 880 | { | ||
| 881 | return stm32_exti_hierarchy_init(&stm32mp1_drv_data, np, parent); | ||
| 882 | } | ||
| 883 | |||
| 884 | IRQCHIP_DECLARE(stm32mp1_exti, "st,stm32mp1-exti", stm32mp1_exti_of_init); | ||
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c new file mode 100644 index 000000000000..011b60a49e3f --- /dev/null +++ b/drivers/irqchip/irq-ti-sci-inta.c | |||
| @@ -0,0 +1,615 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Texas Instruments' K3 Interrupt Aggregator irqchip driver | ||
| 4 | * | ||
| 5 | * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ | ||
| 6 | * Lokesh Vutla <lokeshvutla@ti.com> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/err.h> | ||
| 10 | #include <linux/io.h> | ||
| 11 | #include <linux/irqchip.h> | ||
| 12 | #include <linux/irqdomain.h> | ||
| 13 | #include <linux/interrupt.h> | ||
| 14 | #include <linux/msi.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/moduleparam.h> | ||
| 17 | #include <linux/of_address.h> | ||
| 18 | #include <linux/of_irq.h> | ||
| 19 | #include <linux/of_platform.h> | ||
| 20 | #include <linux/irqchip/chained_irq.h> | ||
| 21 | #include <linux/soc/ti/ti_sci_inta_msi.h> | ||
| 22 | #include <linux/soc/ti/ti_sci_protocol.h> | ||
| 23 | #include <asm-generic/msi.h> | ||
| 24 | |||
| 25 | #define TI_SCI_DEV_ID_MASK 0xffff | ||
| 26 | #define TI_SCI_DEV_ID_SHIFT 16 | ||
| 27 | #define TI_SCI_IRQ_ID_MASK 0xffff | ||
| 28 | #define TI_SCI_IRQ_ID_SHIFT 0 | ||
| 29 | #define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \ | ||
| 30 | (TI_SCI_DEV_ID_MASK)) | ||
| 31 | #define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK)) | ||
| 32 | #define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \ | ||
| 33 | TI_SCI_DEV_ID_SHIFT) | \ | ||
| 34 | ((index) & TI_SCI_IRQ_ID_MASK)) | ||
| 35 | |||
| 36 | #define MAX_EVENTS_PER_VINT 64 | ||
| 37 | #define VINT_ENABLE_SET_OFFSET 0x0 | ||
| 38 | #define VINT_ENABLE_CLR_OFFSET 0x8 | ||
| 39 | #define VINT_STATUS_OFFSET 0x18 | ||
| 40 | |||
| 41 | /** | ||
| 42 | * struct ti_sci_inta_event_desc - Description of an event coming to | ||
| 43 | * Interrupt Aggregator. This serves | ||
| 44 | * as a mapping table for global event, | ||
| 45 | * hwirq and vint bit. | ||
| 46 | * @global_event: Global event number corresponding to this event | ||
| 47 | * @hwirq: Hwirq of the incoming interrupt | ||
| 48 | * @vint_bit: Corresponding vint bit to which this event is attached. | ||
| 49 | */ | ||
| 50 | struct ti_sci_inta_event_desc { | ||
| 51 | u16 global_event; | ||
| 52 | u32 hwirq; | ||
| 53 | u8 vint_bit; | ||
| 54 | }; | ||
| 55 | |||
| 56 | /** | ||
| 57 | * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming out | ||
| 58 | * of Interrupt Aggregator. | ||
| 59 | * @domain: Pointer to IRQ domain to which this vint belongs. | ||
| 60 | * @list: List entry for the vint list | ||
| 61 | * @event_map: Bitmap to manage the allocation of events to vint. | ||
| 62 | * @events: Array of event descriptors assigned to this vint. | ||
| 63 | * @parent_virq: Linux IRQ number that gets attached to parent | ||
| 64 | * @vint_id: TISCI vint ID | ||
| 65 | */ | ||
| 66 | struct ti_sci_inta_vint_desc { | ||
| 67 | struct irq_domain *domain; | ||
| 68 | struct list_head list; | ||
| 69 | DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT); | ||
| 70 | struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT]; | ||
| 71 | unsigned int parent_virq; | ||
| 72 | u16 vint_id; | ||
| 73 | }; | ||
| 74 | |||
| 75 | /** | ||
| 76 | * struct ti_sci_inta_irq_domain - Structure representing a TISCI based | ||
| 77 | * Interrupt Aggregator IRQ domain. | ||
| 78 | * @sci: Pointer to TISCI handle | ||
| 79 | * @vint: TISCI resource pointer representing IA interrupts. | ||
| 80 | * @global_event: TISCI resource pointer representing global events. | ||
| 81 | * @vint_list: List of the vints active in the system | ||
| 82 | * @vint_mutex: Mutex to protect vint_list | ||
| 83 | * @base: Base address of the memory mapped IO registers | ||
| 84 | * @pdev: Pointer to platform device. | ||
| 85 | */ | ||
| 86 | struct ti_sci_inta_irq_domain { | ||
| 87 | const struct ti_sci_handle *sci; | ||
| 88 | struct ti_sci_resource *vint; | ||
| 89 | struct ti_sci_resource *global_event; | ||
| 90 | struct list_head vint_list; | ||
| 91 | /* Mutex to protect vint list */ | ||
| 92 | struct mutex vint_mutex; | ||
| 93 | void __iomem *base; | ||
| 94 | struct platform_device *pdev; | ||
| 95 | }; | ||
| 96 | |||
| 97 | #define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \ | ||
| 98 | events[i]) | ||
| 99 | |||
| 100 | /** | ||
| 101 | * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs | ||
| 102 | * @desc: Pointer to irq_desc corresponding to the irq | ||
| 103 | */ | ||
| 104 | static void ti_sci_inta_irq_handler(struct irq_desc *desc) | ||
| 105 | { | ||
| 106 | struct ti_sci_inta_vint_desc *vint_desc; | ||
| 107 | struct ti_sci_inta_irq_domain *inta; | ||
| 108 | struct irq_domain *domain; | ||
| 109 | unsigned int virq, bit; | ||
| 110 | unsigned long val; | ||
| 111 | |||
| 112 | vint_desc = irq_desc_get_handler_data(desc); | ||
| 113 | domain = vint_desc->domain; | ||
| 114 | inta = domain->host_data; | ||
| 115 | |||
| 116 | chained_irq_enter(irq_desc_get_chip(desc), desc); | ||
| 117 | |||
| 118 | val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 + | ||
| 119 | VINT_STATUS_OFFSET); | ||
| 120 | |||
| 121 | for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) { | ||
| 122 | virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq); | ||
| 123 | if (virq) | ||
| 124 | generic_handle_irq(virq); | ||
| 125 | } | ||
| 126 | |||
| 127 | chained_irq_exit(irq_desc_get_chip(desc), desc); | ||
| 128 | } | ||
| 129 | |||
| 130 | /** | ||
| 131 | * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt aggregator | ||
| 132 | * @domain: IRQ domain corresponding to Interrupt Aggregator | ||
| 133 | * | ||
| 134 | * Return pointer to the vint descriptor if all went well else corresponding error pointer. | ||
| 135 | */ | ||
| 136 | static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain) | ||
| 137 | { | ||
| 138 | struct ti_sci_inta_irq_domain *inta = domain->host_data; | ||
| 139 | struct ti_sci_inta_vint_desc *vint_desc; | ||
| 140 | struct irq_fwspec parent_fwspec; | ||
| 141 | unsigned int parent_virq; | ||
| 142 | u16 vint_id; | ||
| 143 | |||
| 144 | vint_id = ti_sci_get_free_resource(inta->vint); | ||
| 145 | if (vint_id == TI_SCI_RESOURCE_NULL) | ||
| 146 | return ERR_PTR(-EINVAL); | ||
| 147 | |||
| 148 | vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL); | ||
| 149 | if (!vint_desc) | ||
| 150 | return ERR_PTR(-ENOMEM); | ||
| 151 | |||
| 152 | vint_desc->domain = domain; | ||
| 153 | vint_desc->vint_id = vint_id; | ||
| 154 | INIT_LIST_HEAD(&vint_desc->list); | ||
| 155 | |||
| 156 | parent_fwspec.fwnode = of_node_to_fwnode(of_irq_find_parent(dev_of_node(&inta->pdev->dev))); | ||
| 157 | parent_fwspec.param_count = 2; | ||
| 158 | parent_fwspec.param[0] = inta->pdev->id; | ||
| 159 | parent_fwspec.param[1] = vint_desc->vint_id; | ||
| 160 | |||
| 161 | parent_virq = irq_create_fwspec_mapping(&parent_fwspec); | ||
| 162 | if (parent_virq <= 0) { | ||
| 163 | kfree(vint_desc); | ||
| 164 | return ERR_PTR(parent_virq); | ||
| 165 | } | ||
| 166 | vint_desc->parent_virq = parent_virq; | ||
| 167 | |||
| 168 | list_add_tail(&vint_desc->list, &inta->vint_list); | ||
| 169 | irq_set_chained_handler_and_data(vint_desc->parent_virq, | ||
| 170 | ti_sci_inta_irq_handler, vint_desc); | ||
| 171 | |||
| 172 | return vint_desc; | ||
| 173 | } | ||
| 174 | |||
| 175 | /** | ||
| 176 | * ti_sci_inta_alloc_event() - Attach an event to an IA vint. | ||
| 177 | * @vint_desc: Pointer to vint_desc to which the event gets attached | ||
| 178 | * @free_bit: Bit inside vint to which event gets attached | ||
| 179 | * @hwirq: hwirq of the input event | ||
| 180 | * | ||
| 181 | * Return event_desc pointer if all went ok else appropriate error value. | ||
| 182 | */ | ||
| 183 | static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc, | ||
| 184 | u16 free_bit, | ||
| 185 | u32 hwirq) | ||
| 186 | { | ||
| 187 | struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data; | ||
| 188 | struct ti_sci_inta_event_desc *event_desc; | ||
| 189 | u16 dev_id, dev_index; | ||
| 190 | int err; | ||
| 191 | |||
| 192 | dev_id = HWIRQ_TO_DEVID(hwirq); | ||
| 193 | dev_index = HWIRQ_TO_IRQID(hwirq); | ||
| 194 | |||
| 195 | event_desc = &vint_desc->events[free_bit]; | ||
| 196 | event_desc->hwirq = hwirq; | ||
| 197 | event_desc->vint_bit = free_bit; | ||
| 198 | event_desc->global_event = ti_sci_get_free_resource(inta->global_event); | ||
| 199 | if (event_desc->global_event == TI_SCI_RESOURCE_NULL) | ||
| 200 | return ERR_PTR(-EINVAL); | ||
| 201 | |||
| 202 | err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci, | ||
| 203 | dev_id, dev_index, | ||
| 204 | inta->pdev->id, | ||
| 205 | vint_desc->vint_id, | ||
| 206 | event_desc->global_event, | ||
| 207 | free_bit); | ||
| 208 | if (err) | ||
| 209 | goto free_global_event; | ||
| 210 | |||
| 211 | return event_desc; | ||
| 212 | free_global_event: | ||
| 213 | ti_sci_release_resource(inta->global_event, event_desc->global_event); | ||
| 214 | return ERR_PTR(err); | ||
| 215 | } | ||
| 216 | |||
| 217 | /** | ||
| 218 | * ti_sci_inta_alloc_irq() - Allocate an irq within INTA domain | ||
| 219 | * @domain: irq_domain pointer corresponding to INTA | ||
| 220 | * @hwirq: hwirq of the input event | ||
| 221 | * | ||
| 222 | * Note: Allocation happens in the following manner: | ||
| 223 | * - Find a free bit available in any of the vints available in the list. | ||
| 224 | * - If not found, allocate a vint from the vint pool | ||
| 225 | * - Attach the free bit to input hwirq. | ||
| 226 | * Return event_desc if all went ok else appropriate error value. | ||
| 227 | */ | ||
| 228 | static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain, | ||
| 229 | u32 hwirq) | ||
| 230 | { | ||
| 231 | struct ti_sci_inta_irq_domain *inta = domain->host_data; | ||
| 232 | struct ti_sci_inta_vint_desc *vint_desc = NULL; | ||
| 233 | struct ti_sci_inta_event_desc *event_desc; | ||
| 234 | u16 free_bit; | ||
| 235 | |||
| 236 | mutex_lock(&inta->vint_mutex); | ||
| 237 | list_for_each_entry(vint_desc, &inta->vint_list, list) { | ||
| 238 | free_bit = find_first_zero_bit(vint_desc->event_map, | ||
| 239 | MAX_EVENTS_PER_VINT); | ||
| 240 | if (free_bit != MAX_EVENTS_PER_VINT) { | ||
| 241 | set_bit(free_bit, vint_desc->event_map); | ||
| 242 | goto alloc_event; | ||
| 243 | } | ||
| 244 | } | ||
| 245 | |||
| 246 | /* No free bits available. Allocate a new vint */ | ||
| 247 | vint_desc = ti_sci_inta_alloc_parent_irq(domain); | ||
| 248 | if (IS_ERR(vint_desc)) { | ||
| 249 | mutex_unlock(&inta->vint_mutex); | ||
| 250 | return ERR_PTR(PTR_ERR(vint_desc)); | ||
| 251 | } | ||
| 252 | |||
| 253 | free_bit = find_first_zero_bit(vint_desc->event_map, | ||
| 254 | MAX_EVENTS_PER_VINT); | ||
| 255 | set_bit(free_bit, vint_desc->event_map); | ||
| 256 | |||
| 257 | alloc_event: | ||
| 258 | event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq); | ||
| 259 | if (IS_ERR(event_desc)) | ||
| 260 | clear_bit(free_bit, vint_desc->event_map); | ||
| 261 | |||
| 262 | mutex_unlock(&inta->vint_mutex); | ||
| 263 | return event_desc; | ||
| 264 | } | ||
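The allocation path above first scans vints that already have a parent IRQ for a free status bit and only asks for a new vint when every existing one is full. The stand-alone C model below mirrors that first-fit strategy with plain bitmaps; the `MAX_VINTS` limit, array sizes and the demo loop are illustrative only and not part of the driver.

```c
/*
 * User-space model of the INTA event allocation strategy: reuse a free
 * bit in an existing vint, otherwise "allocate" a fresh vint.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_EVENTS_PER_VINT 64
#define MAX_VINTS            8	/* illustrative pool size */

static uint64_t vint_map[MAX_VINTS];	/* one 64-bit event map per vint */
static int nr_vints;			/* vints handed out so far       */

static int alloc_event(int *vint, int *bit)
{
	for (int v = 0; v < nr_vints; v++) {
		for (int b = 0; b < MAX_EVENTS_PER_VINT; b++) {
			if (!(vint_map[v] & (1ULL << b))) {
				vint_map[v] |= 1ULL << b;
				*vint = v;
				*bit = b;
				return 0;
			}
		}
	}
	if (nr_vints == MAX_VINTS)	/* vint pool exhausted */
		return -1;
	*vint = nr_vints++;		/* fall back to a new vint */
	*bit = 0;
	vint_map[*vint] = 1ULL;
	return 0;
}

int main(void)
{
	int v, b;

	for (int i = 0; i < 70; i++) {
		if (alloc_event(&v, &b))
			break;
		if (i == 0 || i == 64)	/* event 64 forces a second vint */
			printf("event %d -> vint %d, bit %d\n", i, v, b);
	}
	return 0;
}
```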
| 265 | |||
| 266 | /** | ||
| 267 | * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA | ||
| 268 | * @inta: Pointer to inta domain. | ||
| 269 | * @vint_desc: Pointer to vint_desc that needs to be freed. | ||
| 270 | */ | ||
| 271 | static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta, | ||
| 272 | struct ti_sci_inta_vint_desc *vint_desc) | ||
| 273 | { | ||
| 274 | if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) { | ||
| 275 | list_del(&vint_desc->list); | ||
| 276 | ti_sci_release_resource(inta->vint, vint_desc->vint_id); | ||
| 277 | irq_dispose_mapping(vint_desc->parent_virq); | ||
| 278 | kfree(vint_desc); | ||
| 279 | } | ||
| 280 | } | ||
| 281 | |||
| 282 | /** | ||
| 283 | * ti_sci_inta_free_irq() - Free an IRQ within INTA domain | ||
| 284 | * @event_desc: Pointer to event_desc that needs to be freed. | ||
| 285 | * @hwirq: Hwirq number within INTA domain that needs to be freed | ||
| 286 | */ | ||
| 287 | static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc, | ||
| 288 | u32 hwirq) | ||
| 289 | { | ||
| 290 | struct ti_sci_inta_vint_desc *vint_desc; | ||
| 291 | struct ti_sci_inta_irq_domain *inta; | ||
| 292 | |||
| 293 | vint_desc = to_vint_desc(event_desc, event_desc->vint_bit); | ||
| 294 | inta = vint_desc->domain->host_data; | ||
| 295 | /* free event irq */ | ||
| 296 | mutex_lock(&inta->vint_mutex); | ||
| 297 | inta->sci->ops.rm_irq_ops.free_event_map(inta->sci, | ||
| 298 | HWIRQ_TO_DEVID(hwirq), | ||
| 299 | HWIRQ_TO_IRQID(hwirq), | ||
| 300 | inta->pdev->id, | ||
| 301 | vint_desc->vint_id, | ||
| 302 | event_desc->global_event, | ||
| 303 | event_desc->vint_bit); | ||
| 304 | |||
| 305 | clear_bit(event_desc->vint_bit, vint_desc->event_map); | ||
| 306 | ti_sci_release_resource(inta->global_event, event_desc->global_event); | ||
| 307 | event_desc->global_event = TI_SCI_RESOURCE_NULL; | ||
| 308 | event_desc->hwirq = 0; | ||
| 309 | |||
| 310 | ti_sci_inta_free_parent_irq(inta, vint_desc); | ||
| 311 | mutex_unlock(&inta->vint_mutex); | ||
| 312 | } | ||
| 313 | |||
| 314 | /** | ||
| 315 | * ti_sci_inta_request_resources() - Allocate resources for input irq | ||
| 316 | * @data: Pointer to corresponding irq_data | ||
| 317 | * | ||
| 318 | * Note: This is the core api where the actual allocation happens for the | ||
| 319 | * input hwirq, and it involves creating a parent irq for the vint. | ||
| 320 | * Doing that from irq_domain_ops.alloc() would deadlock, so the | ||
| 321 | * allocation is done from request_resources() instead. | ||
| 322 | * | ||
| 323 | * Return: 0 if all went well else corresponding error. | ||
| 324 | */ | ||
| 325 | static int ti_sci_inta_request_resources(struct irq_data *data) | ||
| 326 | { | ||
| 327 | struct ti_sci_inta_event_desc *event_desc; | ||
| 328 | |||
| 329 | event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq); | ||
| 330 | if (IS_ERR(event_desc)) | ||
| 331 | return PTR_ERR(event_desc); | ||
| 332 | |||
| 333 | data->chip_data = event_desc; | ||
| 334 | |||
| 335 | return 0; | ||
| 336 | } | ||
| 337 | |||
| 338 | /** | ||
| 339 | * ti_sci_inta_release_resources() - Release resources for input irq | ||
| 340 | * @data: Pointer to corresponding irq_data | ||
| 341 | * | ||
| 342 | * Note: Corresponding to request_resources(), all the unmapping and deletion | ||
| 343 | * of parent vint irqs happens in this api. | ||
| 344 | */ | ||
| 345 | static void ti_sci_inta_release_resources(struct irq_data *data) | ||
| 346 | { | ||
| 347 | struct ti_sci_inta_event_desc *event_desc; | ||
| 348 | |||
| 349 | event_desc = irq_data_get_irq_chip_data(data); | ||
| 350 | ti_sci_inta_free_irq(event_desc, data->hwirq); | ||
| 351 | } | ||
| 352 | |||
| 353 | /** | ||
| 354 | * ti_sci_inta_manage_event() - Control the event based on the offset | ||
| 355 | * @data: Pointer to corresponding irq_data | ||
| 356 | * @offset: register offset used to control the event. | ||
| 357 | */ | ||
| 358 | static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset) | ||
| 359 | { | ||
| 360 | struct ti_sci_inta_event_desc *event_desc; | ||
| 361 | struct ti_sci_inta_vint_desc *vint_desc; | ||
| 362 | struct ti_sci_inta_irq_domain *inta; | ||
| 363 | |||
| 364 | event_desc = irq_data_get_irq_chip_data(data); | ||
| 365 | vint_desc = to_vint_desc(event_desc, event_desc->vint_bit); | ||
| 366 | inta = data->domain->host_data; | ||
| 367 | |||
| 368 | writeq_relaxed(BIT(event_desc->vint_bit), | ||
| 369 | inta->base + vint_desc->vint_id * 0x1000 + offset); | ||
| 370 | } | ||
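ti_sci_inta_manage_event() addresses the per-vint register as `base + vint_id * 0x1000 + offset` and writes a single bit for the event. The host-side snippet below only reproduces that address/bit arithmetic; the base address and the enable-set offset are placeholders, not the real VINT_* register values.

```c
/* Model of the vint register addressing: each vint owns a 0x1000 window. */
#include <stdint.h>
#include <stdio.h>

#define VINT_WINDOW_SIZE 0x1000ULL

static uint64_t event_reg(uint64_t base, unsigned int vint_id, uint32_t offset)
{
	return base + (uint64_t)vint_id * VINT_WINDOW_SIZE + offset;
}

int main(void)
{
	uint64_t base = 0x33d00000ULL;	/* assumed example base address */
	unsigned int vint_id = 3, vint_bit = 5;
	uint32_t enable_set = 0x10;	/* placeholder offset */

	printf("write 0x%llx to 0x%llx\n",
	       1ULL << vint_bit,
	       (unsigned long long)event_reg(base, vint_id, enable_set));
	return 0;
}
```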
| 371 | |||
| 372 | /** | ||
| 373 | * ti_sci_inta_mask_irq() - Mask an event | ||
| 374 | * @data: Pointer to corresponding irq_data | ||
| 375 | */ | ||
| 376 | static void ti_sci_inta_mask_irq(struct irq_data *data) | ||
| 377 | { | ||
| 378 | ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET); | ||
| 379 | } | ||
| 380 | |||
| 381 | /** | ||
| 382 | * ti_sci_inta_unmask_irq() - Unmask an event | ||
| 383 | * @data: Pointer to corresponding irq_data | ||
| 384 | */ | ||
| 385 | static void ti_sci_inta_unmask_irq(struct irq_data *data) | ||
| 386 | { | ||
| 387 | ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET); | ||
| 388 | } | ||
| 389 | |||
| 390 | /** | ||
| 391 | * ti_sci_inta_ack_irq() - Ack an event | ||
| 392 | * @data: Pointer to corresponding irq_data | ||
| 393 | */ | ||
| 394 | static void ti_sci_inta_ack_irq(struct irq_data *data) | ||
| 395 | { | ||
| 396 | /* | ||
| 397 | * Do not clear the event if hardware is capable of sending | ||
| 398 | * a down event. | ||
| 399 | */ | ||
| 400 | if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH) | ||
| 401 | ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET); | ||
| 402 | } | ||
| 403 | |||
| 404 | static int ti_sci_inta_set_affinity(struct irq_data *d, | ||
| 405 | const struct cpumask *mask_val, bool force) | ||
| 406 | { | ||
| 407 | return -EINVAL; | ||
| 408 | } | ||
| 409 | |||
| 410 | /** | ||
| 411 | * ti_sci_inta_set_type() - Update the trigger type of the irq. | ||
| 412 | * @data: Pointer to corresponding irq_data | ||
| 413 | * @type: Trigger type as specified by user | ||
| 414 | * | ||
| 415 | * Note: This updates the handle_irq callback for level msi. | ||
| 416 | * | ||
| 417 | * Return 0 if all went well else appropriate error. | ||
| 418 | */ | ||
| 419 | static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type) | ||
| 420 | { | ||
| 421 | /* | ||
| 422 | * .alloc default sets handle_edge_irq. But if the user specifies | ||
| 423 | * that IRQ is level MSI, then update the handle to handle_level_irq | ||
| 424 | */ | ||
| 425 | switch (type & IRQ_TYPE_SENSE_MASK) { | ||
| 426 | case IRQF_TRIGGER_HIGH: | ||
| 427 | irq_set_handler_locked(data, handle_level_irq); | ||
| 428 | return 0; | ||
| 429 | case IRQF_TRIGGER_RISING: | ||
| 430 | return 0; | ||
| 431 | default: | ||
| 432 | return -EINVAL; | ||
| 433 | } | ||
| 434 | |||
| 435 | return -EINVAL; | ||
| 436 | } | ||
| 437 | |||
| 438 | static struct irq_chip ti_sci_inta_irq_chip = { | ||
| 439 | .name = "INTA", | ||
| 440 | .irq_ack = ti_sci_inta_ack_irq, | ||
| 441 | .irq_mask = ti_sci_inta_mask_irq, | ||
| 442 | .irq_set_type = ti_sci_inta_set_type, | ||
| 443 | .irq_unmask = ti_sci_inta_unmask_irq, | ||
| 444 | .irq_set_affinity = ti_sci_inta_set_affinity, | ||
| 445 | .irq_request_resources = ti_sci_inta_request_resources, | ||
| 446 | .irq_release_resources = ti_sci_inta_release_resources, | ||
| 447 | }; | ||
| 448 | |||
| 449 | /** | ||
| 450 | * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain | ||
| 451 | * @domain: Domain to which the irqs belong | ||
| 452 | * @virq: base linux virtual IRQ to be freed. | ||
| 453 | * @nr_irqs: Number of contiguous irqs to be freed | ||
| 454 | */ | ||
| 455 | static void ti_sci_inta_irq_domain_free(struct irq_domain *domain, | ||
| 456 | unsigned int virq, unsigned int nr_irqs) | ||
| 457 | { | ||
| 458 | struct irq_data *data = irq_domain_get_irq_data(domain, virq); | ||
| 459 | |||
| 460 | irq_domain_reset_irq_data(data); | ||
| 461 | } | ||
| 462 | |||
| 463 | /** | ||
| 464 | * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt aggregator IRQs | ||
| 465 | * @domain: Pointer to the interrupt aggregator IRQ domain | ||
| 466 | * @virq: Corresponding Linux virtual IRQ number | ||
| 467 | * @nr_irqs: Contiguous irqs to be allocated | ||
| 468 | * @data: Pointer to firmware specifier | ||
| 469 | * | ||
| 470 | * No actual allocation happens here. | ||
| 471 | * | ||
| 472 | * Return 0 if all went well else appropriate error value. | ||
| 473 | */ | ||
| 474 | static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain, | ||
| 475 | unsigned int virq, unsigned int nr_irqs, | ||
| 476 | void *data) | ||
| 477 | { | ||
| 478 | msi_alloc_info_t *arg = data; | ||
| 479 | |||
| 480 | irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip, | ||
| 481 | NULL, handle_edge_irq, NULL, NULL); | ||
| 482 | |||
| 483 | return 0; | ||
| 484 | } | ||
| 485 | |||
| 486 | static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = { | ||
| 487 | .free = ti_sci_inta_irq_domain_free, | ||
| 488 | .alloc = ti_sci_inta_irq_domain_alloc, | ||
| 489 | }; | ||
| 490 | |||
| 491 | static struct irq_chip ti_sci_inta_msi_irq_chip = { | ||
| 492 | .name = "MSI-INTA", | ||
| 493 | .flags = IRQCHIP_SUPPORTS_LEVEL_MSI, | ||
| 494 | }; | ||
| 495 | |||
| 496 | static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg, | ||
| 497 | struct msi_desc *desc) | ||
| 498 | { | ||
| 499 | struct platform_device *pdev = to_platform_device(desc->dev); | ||
| 500 | |||
| 501 | arg->desc = desc; | ||
| 502 | arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index); | ||
| 503 | } | ||
| 504 | |||
| 505 | static struct msi_domain_ops ti_sci_inta_msi_ops = { | ||
| 506 | .set_desc = ti_sci_inta_msi_set_desc, | ||
| 507 | }; | ||
| 508 | |||
| 509 | static struct msi_domain_info ti_sci_inta_msi_domain_info = { | ||
| 510 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 511 | MSI_FLAG_LEVEL_CAPABLE), | ||
| 512 | .ops = &ti_sci_inta_msi_ops, | ||
| 513 | .chip = &ti_sci_inta_msi_irq_chip, | ||
| 514 | }; | ||
| 515 | |||
| 516 | static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) | ||
| 517 | { | ||
| 518 | struct irq_domain *parent_domain, *domain, *msi_domain; | ||
| 519 | struct device_node *parent_node, *node; | ||
| 520 | struct ti_sci_inta_irq_domain *inta; | ||
| 521 | struct device *dev = &pdev->dev; | ||
| 522 | struct resource *res; | ||
| 523 | int ret; | ||
| 524 | |||
| 525 | node = dev_of_node(dev); | ||
| 526 | parent_node = of_irq_find_parent(node); | ||
| 527 | if (!parent_node) { | ||
| 528 | dev_err(dev, "Failed to get IRQ parent node\n"); | ||
| 529 | return -ENODEV; | ||
| 530 | } | ||
| 531 | |||
| 532 | parent_domain = irq_find_host(parent_node); | ||
| 533 | if (!parent_domain) | ||
| 534 | return -EPROBE_DEFER; | ||
| 535 | |||
| 536 | inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL); | ||
| 537 | if (!inta) | ||
| 538 | return -ENOMEM; | ||
| 539 | |||
| 540 | inta->pdev = pdev; | ||
| 541 | inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); | ||
| 542 | if (IS_ERR(inta->sci)) { | ||
| 543 | ret = PTR_ERR(inta->sci); | ||
| 544 | if (ret != -EPROBE_DEFER) | ||
| 545 | dev_err(dev, "ti,sci read fail %d\n", ret); | ||
| 546 | inta->sci = NULL; | ||
| 547 | return ret; | ||
| 548 | } | ||
| 549 | |||
| 550 | ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &pdev->id); | ||
| 551 | if (ret) { | ||
| 552 | dev_err(dev, "missing 'ti,sci-dev-id' property\n"); | ||
| 553 | return -EINVAL; | ||
| 554 | } | ||
| 555 | |||
| 556 | inta->vint = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id, | ||
| 557 | "ti,sci-rm-range-vint"); | ||
| 558 | if (IS_ERR(inta->vint)) { | ||
| 559 | dev_err(dev, "VINT resource allocation failed\n"); | ||
| 560 | return PTR_ERR(inta->vint); | ||
| 561 | } | ||
| 562 | |||
| 563 | inta->global_event = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id, | ||
| 564 | "ti,sci-rm-range-global-event"); | ||
| 565 | if (IS_ERR(inta->global_event)) { | ||
| 566 | dev_err(dev, "Global event resource allocation failed\n"); | ||
| 567 | return PTR_ERR(inta->global_event); | ||
| 568 | } | ||
| 569 | |||
| 570 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 571 | inta->base = devm_ioremap_resource(dev, res); | ||
| 572 | if (IS_ERR(inta->base)) | ||
| 573 | return -ENODEV; | ||
| 574 | |||
| 575 | domain = irq_domain_add_linear(dev_of_node(dev), | ||
| 576 | ti_sci_get_num_resources(inta->vint), | ||
| 577 | &ti_sci_inta_irq_domain_ops, inta); | ||
| 578 | if (!domain) { | ||
| 579 | dev_err(dev, "Failed to allocate IRQ domain\n"); | ||
| 580 | return -ENOMEM; | ||
| 581 | } | ||
| 582 | |||
| 583 | msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node), | ||
| 584 | &ti_sci_inta_msi_domain_info, | ||
| 585 | domain); | ||
| 586 | if (!msi_domain) { | ||
| 587 | irq_domain_remove(domain); | ||
| 588 | dev_err(dev, "Failed to allocate msi domain\n"); | ||
| 589 | return -ENOMEM; | ||
| 590 | } | ||
| 591 | |||
| 592 | INIT_LIST_HEAD(&inta->vint_list); | ||
| 593 | mutex_init(&inta->vint_mutex); | ||
| 594 | |||
| 595 | return 0; | ||
| 596 | } | ||
| 597 | |||
| 598 | static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = { | ||
| 599 | { .compatible = "ti,sci-inta", }, | ||
| 600 | { /* sentinel */ }, | ||
| 601 | }; | ||
| 602 | MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match); | ||
| 603 | |||
| 604 | static struct platform_driver ti_sci_inta_irq_domain_driver = { | ||
| 605 | .probe = ti_sci_inta_irq_domain_probe, | ||
| 606 | .driver = { | ||
| 607 | .name = "ti-sci-inta", | ||
| 608 | .of_match_table = ti_sci_inta_irq_domain_of_match, | ||
| 609 | }, | ||
| 610 | }; | ||
| 611 | module_platform_driver(ti_sci_inta_irq_domain_driver); | ||
| 612 | |||
| 613 | MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>"); | ||
| 614 | MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol"); | ||
| 615 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c new file mode 100644 index 000000000000..59d51a20bbd8 --- /dev/null +++ b/drivers/irqchip/irq-ti-sci-intr.c | |||
| @@ -0,0 +1,275 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Texas Instruments' K3 Interrupt Router irqchip driver | ||
| 4 | * | ||
| 5 | * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ | ||
| 6 | * Lokesh Vutla <lokeshvutla@ti.com> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/err.h> | ||
| 10 | #include <linux/module.h> | ||
| 11 | #include <linux/moduleparam.h> | ||
| 12 | #include <linux/io.h> | ||
| 13 | #include <linux/irqchip.h> | ||
| 14 | #include <linux/irqdomain.h> | ||
| 15 | #include <linux/of_platform.h> | ||
| 16 | #include <linux/of_address.h> | ||
| 17 | #include <linux/of_irq.h> | ||
| 18 | #include <linux/soc/ti/ti_sci_protocol.h> | ||
| 19 | |||
| 20 | #define TI_SCI_DEV_ID_MASK 0xffff | ||
| 21 | #define TI_SCI_DEV_ID_SHIFT 16 | ||
| 22 | #define TI_SCI_IRQ_ID_MASK 0xffff | ||
| 23 | #define TI_SCI_IRQ_ID_SHIFT 0 | ||
| 24 | #define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \ | ||
| 25 | (TI_SCI_DEV_ID_MASK)) | ||
| 26 | #define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK)) | ||
| 27 | #define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \ | ||
| 28 | TI_SCI_DEV_ID_SHIFT) | \ | ||
| 29 | ((index) & TI_SCI_IRQ_ID_MASK)) | ||
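Both TI SCI irqchip drivers pack a TISCI (device ID, interrupt index) pair into one Linux hwirq with the macros above: device ID in the upper 16 bits, index in the lower 16. The stand-alone program below round-trips an arbitrary pair through the same encoding; the device ID and index values are arbitrary examples.

```c
/* Round-trip check of the TISCI hwirq encoding. */
#include <stdint.h>
#include <stdio.h>

#define TI_SCI_DEV_ID_MASK	0xffff
#define TI_SCI_DEV_ID_SHIFT	16
#define TI_SCI_IRQ_ID_MASK	0xffff

#define TO_HWIRQ(dev, index)	((((dev) & TI_SCI_DEV_ID_MASK) << \
				  TI_SCI_DEV_ID_SHIFT) | \
				 ((index) & TI_SCI_IRQ_ID_MASK))
#define HWIRQ_TO_DEVID(hwirq)	(((hwirq) >> TI_SCI_DEV_ID_SHIFT) & \
				 TI_SCI_DEV_ID_MASK)
#define HWIRQ_TO_IRQID(hwirq)	((hwirq) & TI_SCI_IRQ_ID_MASK)

int main(void)
{
	uint32_t hwirq = TO_HWIRQ(131, 9);	/* arbitrary dev id / index */

	printf("hwirq=0x%08x dev=%u index=%u\n", hwirq,
	       HWIRQ_TO_DEVID(hwirq), HWIRQ_TO_IRQID(hwirq));
	return 0;
}
```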
| 30 | |||
| 31 | /** | ||
| 32 | * struct ti_sci_intr_irq_domain - Structure representing a TISCI based | ||
| 33 | * Interrupt Router IRQ domain. | ||
| 34 | * @sci: Pointer to TISCI handle | ||
| 35 | * @dst_irq: TISCI resource pointer representing GIC irq controller. | ||
| 36 | * @dst_id: TISCI device ID of the GIC irq controller. | ||
| 37 | * @type: Specifies the trigger type supported by this Interrupt Router | ||
| 38 | */ | ||
| 39 | struct ti_sci_intr_irq_domain { | ||
| 40 | const struct ti_sci_handle *sci; | ||
| 41 | struct ti_sci_resource *dst_irq; | ||
| 42 | u32 dst_id; | ||
| 43 | u32 type; | ||
| 44 | }; | ||
| 45 | |||
| 46 | static struct irq_chip ti_sci_intr_irq_chip = { | ||
| 47 | .name = "INTR", | ||
| 48 | .irq_eoi = irq_chip_eoi_parent, | ||
| 49 | .irq_mask = irq_chip_mask_parent, | ||
| 50 | .irq_unmask = irq_chip_unmask_parent, | ||
| 51 | .irq_set_type = irq_chip_set_type_parent, | ||
| 52 | .irq_retrigger = irq_chip_retrigger_hierarchy, | ||
| 53 | .irq_set_affinity = irq_chip_set_affinity_parent, | ||
| 54 | }; | ||
| 55 | |||
| 56 | /** | ||
| 57 | * ti_sci_intr_irq_domain_translate() - Retrieve hwirq and type from | ||
| 58 | * the IRQ firmware specifier. | ||
| 59 | * @domain: Pointer to IRQ domain | ||
| 60 | * @fwspec: Pointer to IRQ specific firmware structure | ||
| 61 | * @hwirq: IRQ number identified by hardware | ||
| 62 | * @type: IRQ type | ||
| 63 | * | ||
| 64 | * Return 0 if all went ok else appropriate error. | ||
| 65 | */ | ||
| 66 | static int ti_sci_intr_irq_domain_translate(struct irq_domain *domain, | ||
| 67 | struct irq_fwspec *fwspec, | ||
| 68 | unsigned long *hwirq, | ||
| 69 | unsigned int *type) | ||
| 70 | { | ||
| 71 | struct ti_sci_intr_irq_domain *intr = domain->host_data; | ||
| 72 | |||
| 73 | if (fwspec->param_count != 2) | ||
| 74 | return -EINVAL; | ||
| 75 | |||
| 76 | *hwirq = TO_HWIRQ(fwspec->param[0], fwspec->param[1]); | ||
| 77 | *type = intr->type; | ||
| 78 | |||
| 79 | return 0; | ||
| 80 | } | ||
| 81 | |||
| 82 | /** | ||
| 83 | * ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain. | ||
| 84 | * @domain: Domain to which the irqs belong | ||
| 85 | * @virq: Linux virtual IRQ to be freed. | ||
| 86 | * @nr_irqs: Number of contiguous irqs to be freed | ||
| 87 | */ | ||
| 88 | static void ti_sci_intr_irq_domain_free(struct irq_domain *domain, | ||
| 89 | unsigned int virq, unsigned int nr_irqs) | ||
| 90 | { | ||
| 91 | struct ti_sci_intr_irq_domain *intr = domain->host_data; | ||
| 92 | struct irq_data *data, *parent_data; | ||
| 93 | u16 dev_id, irq_index; | ||
| 94 | |||
| 95 | parent_data = irq_domain_get_irq_data(domain->parent, virq); | ||
| 96 | data = irq_domain_get_irq_data(domain, virq); | ||
| 97 | irq_index = HWIRQ_TO_IRQID(data->hwirq); | ||
| 98 | dev_id = HWIRQ_TO_DEVID(data->hwirq); | ||
| 99 | |||
| 100 | intr->sci->ops.rm_irq_ops.free_irq(intr->sci, dev_id, irq_index, | ||
| 101 | intr->dst_id, parent_data->hwirq); | ||
| 102 | ti_sci_release_resource(intr->dst_irq, parent_data->hwirq); | ||
| 103 | irq_domain_free_irqs_parent(domain, virq, 1); | ||
| 104 | irq_domain_reset_irq_data(data); | ||
| 105 | } | ||
| 106 | |||
| 107 | /** | ||
| 108 | * ti_sci_intr_alloc_gic_irq() - Allocate GIC specific IRQ | ||
| 109 | * @domain: Pointer to the interrupt router IRQ domain | ||
| 110 | * @virq: Corresponding Linux virtual IRQ number | ||
| 111 | * @hwirq: Corresponding hwirq for the IRQ within this IRQ domain | ||
| 112 | * | ||
| 113 | * Returns 0 if all went well else appropriate error value. | ||
| 114 | */ | ||
| 115 | static int ti_sci_intr_alloc_gic_irq(struct irq_domain *domain, | ||
| 116 | unsigned int virq, u32 hwirq) | ||
| 117 | { | ||
| 118 | struct ti_sci_intr_irq_domain *intr = domain->host_data; | ||
| 119 | struct irq_fwspec fwspec; | ||
| 120 | u16 dev_id, irq_index; | ||
| 121 | u16 dst_irq; | ||
| 122 | int err; | ||
| 123 | |||
| 124 | dev_id = HWIRQ_TO_DEVID(hwirq); | ||
| 125 | irq_index = HWIRQ_TO_IRQID(hwirq); | ||
| 126 | |||
| 127 | dst_irq = ti_sci_get_free_resource(intr->dst_irq); | ||
| 128 | if (dst_irq == TI_SCI_RESOURCE_NULL) | ||
| 129 | return -EINVAL; | ||
| 130 | |||
| 131 | fwspec.fwnode = domain->parent->fwnode; | ||
| 132 | fwspec.param_count = 3; | ||
| 133 | fwspec.param[0] = 0; /* SPI */ | ||
| 134 | fwspec.param[1] = dst_irq - 32; /* SPI offset */ | ||
| 135 | fwspec.param[2] = intr->type; | ||
| 136 | |||
| 137 | err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); | ||
| 138 | if (err) | ||
| 139 | goto err_irqs; | ||
| 140 | |||
| 141 | err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci, dev_id, irq_index, | ||
| 142 | intr->dst_id, dst_irq); | ||
| 143 | if (err) | ||
| 144 | goto err_msg; | ||
| 145 | |||
| 146 | return 0; | ||
| 147 | |||
| 148 | err_msg: | ||
| 149 | irq_domain_free_irqs_parent(domain, virq, 1); | ||
| 150 | err_irqs: | ||
| 151 | ti_sci_release_resource(intr->dst_irq, dst_irq); | ||
| 152 | return err; | ||
| 153 | } | ||
| 154 | |||
| 155 | /** | ||
| 156 | * ti_sci_intr_irq_domain_alloc() - Allocate Interrupt router IRQs | ||
| 157 | * @domain: Pointer to the interrupt router IRQ domain | ||
| 158 | * @virq: Corresponding Linux virtual IRQ number | ||
| 159 | * @nr_irqs: Contiguous irqs to be allocated | ||
| 160 | * @data: Pointer to firmware specifier | ||
| 161 | * | ||
| 162 | * Return 0 if all went well else appropriate error value. | ||
| 163 | */ | ||
| 164 | static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain, | ||
| 165 | unsigned int virq, unsigned int nr_irqs, | ||
| 166 | void *data) | ||
| 167 | { | ||
| 168 | struct irq_fwspec *fwspec = data; | ||
| 169 | unsigned long hwirq; | ||
| 170 | unsigned int flags; | ||
| 171 | int err; | ||
| 172 | |||
| 173 | err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags); | ||
| 174 | if (err) | ||
| 175 | return err; | ||
| 176 | |||
| 177 | err = ti_sci_intr_alloc_gic_irq(domain, virq, hwirq); | ||
| 178 | if (err) | ||
| 179 | return err; | ||
| 180 | |||
| 181 | irq_domain_set_hwirq_and_chip(domain, virq, hwirq, | ||
| 182 | &ti_sci_intr_irq_chip, NULL); | ||
| 183 | |||
| 184 | return 0; | ||
| 185 | } | ||
| 186 | |||
| 187 | static const struct irq_domain_ops ti_sci_intr_irq_domain_ops = { | ||
| 188 | .free = ti_sci_intr_irq_domain_free, | ||
| 189 | .alloc = ti_sci_intr_irq_domain_alloc, | ||
| 190 | .translate = ti_sci_intr_irq_domain_translate, | ||
| 191 | }; | ||
| 192 | |||
| 193 | static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev) | ||
| 194 | { | ||
| 195 | struct irq_domain *parent_domain, *domain; | ||
| 196 | struct ti_sci_intr_irq_domain *intr; | ||
| 197 | struct device_node *parent_node; | ||
| 198 | struct device *dev = &pdev->dev; | ||
| 199 | int ret; | ||
| 200 | |||
| 201 | parent_node = of_irq_find_parent(dev_of_node(dev)); | ||
| 202 | if (!parent_node) { | ||
| 203 | dev_err(dev, "Failed to get IRQ parent node\n"); | ||
| 204 | return -ENODEV; | ||
| 205 | } | ||
| 206 | |||
| 207 | parent_domain = irq_find_host(parent_node); | ||
| 208 | if (!parent_domain) { | ||
| 209 | dev_err(dev, "Failed to find IRQ parent domain\n"); | ||
| 210 | return -ENODEV; | ||
| 211 | } | ||
| 212 | |||
| 213 | intr = devm_kzalloc(dev, sizeof(*intr), GFP_KERNEL); | ||
| 214 | if (!intr) | ||
| 215 | return -ENOMEM; | ||
| 216 | |||
| 217 | ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type", | ||
| 218 | &intr->type); | ||
| 219 | if (ret) { | ||
| 220 | dev_err(dev, "missing ti,intr-trigger-type property\n"); | ||
| 221 | return -EINVAL; | ||
| 222 | } | ||
| 223 | |||
| 224 | intr->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci"); | ||
| 225 | if (IS_ERR(intr->sci)) { | ||
| 226 | ret = PTR_ERR(intr->sci); | ||
| 227 | if (ret != -EPROBE_DEFER) | ||
| 228 | dev_err(dev, "ti,sci read fail %d\n", ret); | ||
| 229 | intr->sci = NULL; | ||
| 230 | return ret; | ||
| 231 | } | ||
| 232 | |||
| 233 | ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dst-id", | ||
| 234 | &intr->dst_id); | ||
| 235 | if (ret) { | ||
| 236 | dev_err(dev, "missing 'ti,sci-dst-id' property\n"); | ||
| 237 | return -EINVAL; | ||
| 238 | } | ||
| 239 | |||
| 240 | intr->dst_irq = devm_ti_sci_get_of_resource(intr->sci, dev, | ||
| 241 | intr->dst_id, | ||
| 242 | "ti,sci-rm-range-girq"); | ||
| 243 | if (IS_ERR(intr->dst_irq)) { | ||
| 244 | dev_err(dev, "Destination irq resource allocation failed\n"); | ||
| 245 | return PTR_ERR(intr->dst_irq); | ||
| 246 | } | ||
| 247 | |||
| 248 | domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev), | ||
| 249 | &ti_sci_intr_irq_domain_ops, intr); | ||
| 250 | if (!domain) { | ||
| 251 | dev_err(dev, "Failed to allocate IRQ domain\n"); | ||
| 252 | return -ENOMEM; | ||
| 253 | } | ||
| 254 | |||
| 255 | return 0; | ||
| 256 | } | ||
| 257 | |||
| 258 | static const struct of_device_id ti_sci_intr_irq_domain_of_match[] = { | ||
| 259 | { .compatible = "ti,sci-intr", }, | ||
| 260 | { /* sentinel */ }, | ||
| 261 | }; | ||
| 262 | MODULE_DEVICE_TABLE(of, ti_sci_intr_irq_domain_of_match); | ||
| 263 | |||
| 264 | static struct platform_driver ti_sci_intr_irq_domain_driver = { | ||
| 265 | .probe = ti_sci_intr_irq_domain_probe, | ||
| 266 | .driver = { | ||
| 267 | .name = "ti-sci-intr", | ||
| 268 | .of_match_table = ti_sci_intr_irq_domain_of_match, | ||
| 269 | }, | ||
| 270 | }; | ||
| 271 | module_platform_driver(ti_sci_intr_irq_domain_driver); | ||
| 272 | |||
| 273 | MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>"); | ||
| 274 | MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol"); | ||
| 275 | MODULE_LICENSE("GPL v2"); | ||
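From a peripheral driver's point of view the Interrupt Router is transparent: once the device tree routes the peripheral interrupt through the "ti,sci-intr" domain, the client only uses the ordinary platform IRQ helpers and never talks to TISCI itself. The sketch below illustrates that; the compatible string, driver name and handler are hypothetical.

```c
/* Hypothetical consumer of an interrupt routed through INTR. */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	/* Acknowledge/handle the peripheral event here. */
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* Trigger type comes from the firmware node via the INTR domain. */
	return devm_request_irq(&pdev->dev, irq, demo_isr, 0,
				dev_name(&pdev->dev), pdev);
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-peripheral" },	/* hypothetical */
	{ }
};

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = "demo-peripheral",
		.of_match_table = demo_of_match,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL v2");
```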
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig index 57960e92ebe0..dbd6c60b81db 100644 --- a/drivers/soc/ti/Kconfig +++ b/drivers/soc/ti/Kconfig | |||
| @@ -74,4 +74,10 @@ config TI_SCI_PM_DOMAINS | |||
| 74 | called ti_sci_pm_domains. Note this is needed early in boot before | 74 | called ti_sci_pm_domains. Note this is needed early in boot before |
| 75 | rootfs may be available. | 75 | rootfs may be available. |
| 76 | 76 | ||
| 77 | config TI_SCI_INTA_MSI_DOMAIN | ||
| 78 | bool | ||
| 79 | select GENERIC_MSI_IRQ_DOMAIN | ||
| 80 | help | ||
| 81 | Driver to enable Interrupt Aggregator specific MSI Domain. | ||
| 82 | |||
| 77 | endif # SOC_TI | 83 | endif # SOC_TI |
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile index a22edc0b258a..b3868d392d4f 100644 --- a/drivers/soc/ti/Makefile +++ b/drivers/soc/ti/Makefile | |||
| @@ -8,3 +8,4 @@ obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o | |||
| 8 | obj-$(CONFIG_AMX3_PM) += pm33xx.o | 8 | obj-$(CONFIG_AMX3_PM) += pm33xx.o |
| 9 | obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o | 9 | obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o |
| 10 | obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o | 10 | obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o |
| 11 | obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o | ||
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c new file mode 100644 index 000000000000..0eb9462f609e --- /dev/null +++ b/drivers/soc/ti/ti_sci_inta_msi.c | |||
| @@ -0,0 +1,146 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Texas Instruments' K3 Interrupt Aggregator MSI bus | ||
| 4 | * | ||
| 5 | * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ | ||
| 6 | * Lokesh Vutla <lokeshvutla@ti.com> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/irq.h> | ||
| 10 | #include <linux/irqdomain.h> | ||
| 11 | #include <linux/msi.h> | ||
| 12 | #include <linux/of_address.h> | ||
| 13 | #include <linux/of_device.h> | ||
| 14 | #include <linux/of_irq.h> | ||
| 15 | #include <linux/soc/ti/ti_sci_inta_msi.h> | ||
| 16 | #include <linux/soc/ti/ti_sci_protocol.h> | ||
| 17 | |||
| 18 | static void ti_sci_inta_msi_write_msg(struct irq_data *data, | ||
| 19 | struct msi_msg *msg) | ||
| 20 | { | ||
| 21 | /* Nothing to do */ | ||
| 22 | } | ||
| 23 | |||
| 24 | static void ti_sci_inta_msi_compose_msi_msg(struct irq_data *data, | ||
| 25 | struct msi_msg *msg) | ||
| 26 | { | ||
| 27 | /* Nothing to do */ | ||
| 28 | } | ||
| 29 | |||
| 30 | static void ti_sci_inta_msi_update_chip_ops(struct msi_domain_info *info) | ||
| 31 | { | ||
| 32 | struct irq_chip *chip = info->chip; | ||
| 33 | |||
| 34 | if (WARN_ON(!chip)) | ||
| 35 | return; | ||
| 36 | |||
| 37 | chip->irq_request_resources = irq_chip_request_resources_parent; | ||
| 38 | chip->irq_release_resources = irq_chip_release_resources_parent; | ||
| 39 | chip->irq_compose_msi_msg = ti_sci_inta_msi_compose_msi_msg; | ||
| 40 | chip->irq_write_msi_msg = ti_sci_inta_msi_write_msg; | ||
| 41 | chip->irq_set_type = irq_chip_set_type_parent; | ||
| 42 | chip->irq_unmask = irq_chip_unmask_parent; | ||
| 43 | chip->irq_mask = irq_chip_mask_parent; | ||
| 44 | chip->irq_ack = irq_chip_ack_parent; | ||
| 45 | } | ||
| 46 | |||
| 47 | struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode, | ||
| 48 | struct msi_domain_info *info, | ||
| 49 | struct irq_domain *parent) | ||
| 50 | { | ||
| 51 | struct irq_domain *domain; | ||
| 52 | |||
| 53 | ti_sci_inta_msi_update_chip_ops(info); | ||
| 54 | |||
| 55 | domain = msi_create_irq_domain(fwnode, info, parent); | ||
| 56 | if (domain) | ||
| 57 | irq_domain_update_bus_token(domain, DOMAIN_BUS_TI_SCI_INTA_MSI); | ||
| 58 | |||
| 59 | return domain; | ||
| 60 | } | ||
| 61 | EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain); | ||
| 62 | |||
| 63 | static void ti_sci_inta_msi_free_descs(struct device *dev) | ||
| 64 | { | ||
| 65 | struct msi_desc *desc, *tmp; | ||
| 66 | |||
| 67 | list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { | ||
| 68 | list_del(&desc->list); | ||
| 69 | free_msi_entry(desc); | ||
| 70 | } | ||
| 71 | } | ||
| 72 | |||
| 73 | static int ti_sci_inta_msi_alloc_descs(struct device *dev, | ||
| 74 | struct ti_sci_resource *res) | ||
| 75 | { | ||
| 76 | struct msi_desc *msi_desc; | ||
| 77 | int set, i, count = 0; | ||
| 78 | |||
| 79 | for (set = 0; set < res->sets; set++) { | ||
| 80 | for (i = 0; i < res->desc[set].num; i++) { | ||
| 81 | msi_desc = alloc_msi_entry(dev, 1, NULL); | ||
| 82 | if (!msi_desc) { | ||
| 83 | ti_sci_inta_msi_free_descs(dev); | ||
| 84 | return -ENOMEM; | ||
| 85 | } | ||
| 86 | |||
| 87 | msi_desc->inta.dev_index = res->desc[set].start + i; | ||
| 88 | INIT_LIST_HEAD(&msi_desc->list); | ||
| 89 | list_add_tail(&msi_desc->list, dev_to_msi_list(dev)); | ||
| 90 | count++; | ||
| 91 | } | ||
| 92 | } | ||
| 93 | |||
| 94 | return count; | ||
| 95 | } | ||
| 96 | |||
| 97 | int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev, | ||
| 98 | struct ti_sci_resource *res) | ||
| 99 | { | ||
| 100 | struct platform_device *pdev = to_platform_device(dev); | ||
| 101 | struct irq_domain *msi_domain; | ||
| 102 | int ret, nvec; | ||
| 103 | |||
| 104 | msi_domain = dev_get_msi_domain(dev); | ||
| 105 | if (!msi_domain) | ||
| 106 | return -EINVAL; | ||
| 107 | |||
| 108 | if (pdev->id < 0) | ||
| 109 | return -ENODEV; | ||
| 110 | |||
| 111 | nvec = ti_sci_inta_msi_alloc_descs(dev, res); | ||
| 112 | if (nvec <= 0) | ||
| 113 | return nvec; | ||
| 114 | |||
| 115 | ret = msi_domain_alloc_irqs(msi_domain, dev, nvec); | ||
| 116 | if (ret) { | ||
| 117 | dev_err(dev, "Failed to allocate IRQs %d\n", ret); | ||
| 118 | goto cleanup; | ||
| 119 | } | ||
| 120 | |||
| 121 | return 0; | ||
| 122 | |||
| 123 | cleanup: | ||
| 124 | ti_sci_inta_msi_free_descs(&pdev->dev); | ||
| 125 | return ret; | ||
| 126 | } | ||
| 127 | EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs); | ||
| 128 | |||
| 129 | void ti_sci_inta_msi_domain_free_irqs(struct device *dev) | ||
| 130 | { | ||
| 131 | msi_domain_free_irqs(dev->msi_domain, dev); | ||
| 132 | ti_sci_inta_msi_free_descs(dev); | ||
| 133 | } | ||
| 134 | EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_free_irqs); | ||
| 135 | |||
| 136 | unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 dev_index) | ||
| 137 | { | ||
| 138 | struct msi_desc *desc; | ||
| 139 | |||
| 140 | for_each_msi_entry(desc, dev) | ||
| 141 | if (desc->inta.dev_index == dev_index) | ||
| 142 | return desc->irq; | ||
| 143 | |||
| 144 | return -ENODEV; | ||
| 145 | } | ||
| 146 | EXPORT_SYMBOL_GPL(ti_sci_inta_msi_get_virq); | ||
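A client peripheral (for example a DMA ring manager) would consume the helpers above roughly as follows: allocate one MSI per TISCI event described by its ti_sci_resource, then look up the Linux IRQ behind a given event index. This is only a sketch and assumes the device's MSI domain has already been pointed at the INTA MSI domain; the handler, function name and the use of event index `res->desc[0].start` are illustrative.

```c
/* Sketch of a hypothetical INTA MSI consumer. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>

static irqreturn_t demo_event_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup_events(struct device *dev, struct ti_sci_resource *res)
{
	int virq, ret;

	/* Creates one msi_desc per TISCI event and maps them via INTA. */
	ret = ti_sci_inta_msi_domain_alloc_irqs(dev, res);
	if (ret)
		return ret;

	/* Look up the Linux IRQ behind the first event index (example). */
	virq = ti_sci_inta_msi_get_virq(dev, res->desc[0].start);
	if (virq <= 0) {
		ti_sci_inta_msi_domain_free_irqs(dev);
		return -ENXIO;
	}

	ret = request_irq(virq, demo_event_isr, 0, dev_name(dev), dev);
	if (ret)
		ti_sci_inta_msi_domain_free_irqs(dev);
	return ret;
}
```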
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index e760dc5d1fa8..476e0c54de2d 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h | |||
| @@ -71,12 +71,25 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, | |||
| 71 | size_t size, enum dma_data_direction dir, unsigned long attrs); | 71 | size_t size, enum dma_data_direction dir, unsigned long attrs); |
| 72 | 72 | ||
| 73 | /* The DMA API isn't _quite_ the whole story, though... */ | 73 | /* The DMA API isn't _quite_ the whole story, though... */ |
| 74 | void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); | 74 | /* |
| 75 | * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device | ||
| 76 | * | ||
| 77 | * The MSI page will be stored in @desc. | ||
| 78 | * | ||
| 79 | * Return: 0 on success otherwise an error describing the failure. | ||
| 80 | */ | ||
| 81 | int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr); | ||
| 82 | |||
| 83 | /* Update the MSI message if required. */ | ||
| 84 | void iommu_dma_compose_msi_msg(struct msi_desc *desc, | ||
| 85 | struct msi_msg *msg); | ||
| 86 | |||
| 75 | void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); | 87 | void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); |
| 76 | 88 | ||
| 77 | #else | 89 | #else |
| 78 | 90 | ||
| 79 | struct iommu_domain; | 91 | struct iommu_domain; |
| 92 | struct msi_desc; | ||
| 80 | struct msi_msg; | 93 | struct msi_msg; |
| 81 | struct device; | 94 | struct device; |
| 82 | 95 | ||
| @@ -99,7 +112,14 @@ static inline void iommu_put_dma_cookie(struct iommu_domain *domain) | |||
| 99 | { | 112 | { |
| 100 | } | 113 | } |
| 101 | 114 | ||
| 102 | static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) | 115 | static inline int iommu_dma_prepare_msi(struct msi_desc *desc, |
| 116 | phys_addr_t msi_addr) | ||
| 117 | { | ||
| 118 | return 0; | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, | ||
| 122 | struct msi_msg *msg) | ||
| 103 | { | 123 | { |
| 104 | } | 124 | } |
| 105 | 125 | ||
diff --git a/include/linux/irq.h b/include/linux/irq.h index 7ae8de5ad0f2..fb301cf29148 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -625,6 +625,8 @@ extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); | |||
| 625 | extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, | 625 | extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, |
| 626 | void *vcpu_info); | 626 | void *vcpu_info); |
| 627 | extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); | 627 | extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); |
| 628 | extern int irq_chip_request_resources_parent(struct irq_data *data); | ||
| 629 | extern void irq_chip_release_resources_parent(struct irq_data *data); | ||
| 628 | #endif | 630 | #endif |
| 629 | 631 | ||
| 630 | /* Handling of unhandled and spurious interrupts: */ | 632 | /* Handling of unhandled and spurious interrupts: */ |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index c848a7cc502e..c7e3e39224c6 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -165,7 +165,7 @@ | |||
| 165 | #define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) | 165 | #define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) |
| 166 | #define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) | 166 | #define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) |
| 167 | #define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) | 167 | #define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) |
| 168 | #define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) | 168 | #define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) |
| 169 | #define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) | 169 | #define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) |
| 170 | #define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) | 170 | #define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) |
| 171 | #define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) | 171 | #define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) |
| @@ -192,7 +192,7 @@ | |||
| 192 | #define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) | 192 | #define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) |
| 193 | #define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) | 193 | #define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) |
| 194 | #define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) | 194 | #define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) |
| 195 | #define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) | 195 | #define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) |
| 196 | #define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) | 196 | #define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) |
| 197 | #define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) | 197 | #define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) |
| 198 | #define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) | 198 | #define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) |
| @@ -251,7 +251,7 @@ | |||
| 251 | #define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) | 251 | #define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) |
| 252 | #define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) | 252 | #define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) |
| 253 | #define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) | 253 | #define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) |
| 254 | #define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) | 254 | #define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) |
| 255 | #define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) | 255 | #define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) |
| 256 | #define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) | 256 | #define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) |
| 257 | #define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) | 257 | #define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) |
| @@ -277,7 +277,7 @@ | |||
| 277 | #define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) | 277 | #define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) |
| 278 | #define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) | 278 | #define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) |
| 279 | #define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) | 279 | #define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) |
| 280 | #define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) | 280 | #define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) |
| 281 | #define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) | 281 | #define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) |
| 282 | #define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) | 282 | #define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) |
| 283 | #define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) | 283 | #define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) |
| @@ -351,7 +351,7 @@ | |||
| 351 | #define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) | 351 | #define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) |
| 352 | #define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) | 352 | #define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) |
| 353 | #define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) | 353 | #define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) |
| 354 | #define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) | 354 | #define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) |
| 355 | #define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) | 355 | #define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) |
| 356 | #define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) | 356 | #define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) |
| 357 | #define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) | 357 | #define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) |
| @@ -377,7 +377,7 @@ | |||
| 377 | #define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) | 377 | #define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) |
| 378 | #define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) | 378 | #define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) |
| 379 | #define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) | 379 | #define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) |
| 380 | #define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) | 380 | #define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) |
| 381 | #define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) | 381 | #define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) |
| 382 | #define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) | 382 | #define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) |
| 383 | #define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) | 383 | #define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 61706b430907..07ec8b390161 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
| @@ -82,6 +82,7 @@ enum irq_domain_bus_token { | |||
| 82 | DOMAIN_BUS_NEXUS, | 82 | DOMAIN_BUS_NEXUS, |
| 83 | DOMAIN_BUS_IPI, | 83 | DOMAIN_BUS_IPI, |
| 84 | DOMAIN_BUS_FSL_MC_MSI, | 84 | DOMAIN_BUS_FSL_MC_MSI, |
| 85 | DOMAIN_BUS_TI_SCI_INTA_MSI, | ||
| 85 | }; | 86 | }; |
| 86 | 87 | ||
| 87 | /** | 88 | /** |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 052f04fcf953..d48e919d55ae 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
| @@ -48,6 +48,14 @@ struct fsl_mc_msi_desc { | |||
| 48 | }; | 48 | }; |
| 49 | 49 | ||
| 50 | /** | 50 | /** |
| 51 | * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data | ||
| 52 | * @dev_index: TISCI device index | ||
| 53 | */ | ||
| 54 | struct ti_sci_inta_msi_desc { | ||
| 55 | u16 dev_index; | ||
| 56 | }; | ||
| 57 | |||
| 58 | /** | ||
| 51 | * struct msi_desc - Descriptor structure for MSI based interrupts | 59 | * struct msi_desc - Descriptor structure for MSI based interrupts |
| 52 | * @list: List head for management | 60 | * @list: List head for management |
| 53 | * @irq: The base interrupt number | 61 | * @irq: The base interrupt number |
| @@ -68,6 +76,7 @@ struct fsl_mc_msi_desc { | |||
| 68 | * @mask_base: [PCI MSI-X] Mask register base address | 76 | * @mask_base: [PCI MSI-X] Mask register base address |
| 69 | * @platform: [platform] Platform device specific msi descriptor data | 77 | * @platform: [platform] Platform device specific msi descriptor data |
| 70 | * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data | 78 | * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data |
| 79 | * @inta: [INTA] TISCI based INTA specific msi descriptor data | ||
| 71 | */ | 80 | */ |
| 72 | struct msi_desc { | 81 | struct msi_desc { |
| 73 | /* Shared device/bus type independent data */ | 82 | /* Shared device/bus type independent data */ |
| @@ -77,6 +86,9 @@ struct msi_desc { | |||
| 77 | struct device *dev; | 86 | struct device *dev; |
| 78 | struct msi_msg msg; | 87 | struct msi_msg msg; |
| 79 | struct irq_affinity_desc *affinity; | 88 | struct irq_affinity_desc *affinity; |
| 89 | #ifdef CONFIG_IRQ_MSI_IOMMU | ||
| 90 | const void *iommu_cookie; | ||
| 91 | #endif | ||
| 80 | 92 | ||
| 81 | union { | 93 | union { |
| 82 | /* PCI MSI/X specific data */ | 94 | /* PCI MSI/X specific data */ |
| @@ -106,6 +118,7 @@ struct msi_desc { | |||
| 106 | */ | 118 | */ |
| 107 | struct platform_msi_desc platform; | 119 | struct platform_msi_desc platform; |
| 108 | struct fsl_mc_msi_desc fsl_mc; | 120 | struct fsl_mc_msi_desc fsl_mc; |
| 121 | struct ti_sci_inta_msi_desc inta; | ||
| 109 | }; | 122 | }; |
| 110 | }; | 123 | }; |
| 111 | 124 | ||
| @@ -119,6 +132,29 @@ struct msi_desc { | |||
| 119 | #define for_each_msi_entry_safe(desc, tmp, dev) \ | 132 | #define for_each_msi_entry_safe(desc, tmp, dev) \ |
| 120 | list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) | 133 | list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) |
| 121 | 134 | ||
| 135 | #ifdef CONFIG_IRQ_MSI_IOMMU | ||
| 136 | static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) | ||
| 137 | { | ||
| 138 | return desc->iommu_cookie; | ||
| 139 | } | ||
| 140 | |||
| 141 | static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, | ||
| 142 | const void *iommu_cookie) | ||
| 143 | { | ||
| 144 | desc->iommu_cookie = iommu_cookie; | ||
| 145 | } | ||
| 146 | #else | ||
| 147 | static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) | ||
| 148 | { | ||
| 149 | return NULL; | ||
| 150 | } | ||
| 151 | |||
| 152 | static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, | ||
| 153 | const void *iommu_cookie) | ||
| 154 | { | ||
| 155 | } | ||
| 156 | #endif | ||
| 157 | |||
| 122 | #ifdef CONFIG_PCI_MSI | 158 | #ifdef CONFIG_PCI_MSI |
| 123 | #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) | 159 | #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) |
| 124 | #define for_each_pci_msi_entry(desc, pdev) \ | 160 | #define for_each_pci_msi_entry(desc, pdev) \ |
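The iommu cookie stored in msi_desc is what lets the series split MSI mapping into an RT-friendly two-step flow: the (possibly sleeping) IOMMU work happens once from the irqdomain .activate path via iommu_dma_prepare_msi(), and the atomic .irq_compose_msi_msg path only patches the message with iommu_dma_compose_msi_msg(). The sketch below shows that shape for a hypothetical MSI controller; the doorbell address, chip and function names are made up.

```c
/* Sketch of the prepare-at-activate / compose-atomically split. */
#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>

#define DEMO_DOORBELL_PA	0x10000000UL	/* assumed doorbell address */

static int demo_msi_activate(struct irq_domain *domain, struct irq_data *d,
			     bool reserve)
{
	/* May sleep: maps the doorbell page and caches it in the msi_desc. */
	return iommu_dma_prepare_msi(irq_data_get_msi_desc(d),
				     DEMO_DOORBELL_PA);
}

static void demo_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	msg->address_hi = upper_32_bits(DEMO_DOORBELL_PA);
	msg->address_lo = lower_32_bits(DEMO_DOORBELL_PA);
	msg->data = d->hwirq;

	/* Atomic-safe: only rewrites the address using the cached mapping. */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}

static const struct irq_domain_ops demo_domain_ops = {
	.activate = demo_msi_activate,
};

static struct irq_chip demo_msi_chip = {
	.name			= "DEMO-MSI",
	.irq_compose_msi_msg	= demo_compose_msi_msg,
};
```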
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h new file mode 100644 index 000000000000..11fb5048f5f6 --- /dev/null +++ b/include/linux/soc/ti/ti_sci_inta_msi.h | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Texas Instruments' K3 TI SCI INTA MSI helper | ||
| 4 | * | ||
| 5 | * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ | ||
| 6 | * Lokesh Vutla <lokeshvutla@ti.com> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #ifndef __INCLUDE_LINUX_TI_SCI_INTA_MSI_H | ||
| 10 | #define __INCLUDE_LINUX_TI_SCI_INTA_MSI_H | ||
| 11 | |||
| 12 | #include <linux/msi.h> | ||
| 13 | #include <linux/soc/ti/ti_sci_protocol.h> | ||
| 14 | |||
| 15 | struct irq_domain | ||
| 16 | *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode, | ||
| 17 | struct msi_domain_info *info, | ||
| 18 | struct irq_domain *parent); | ||
| 19 | int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev, | ||
| 20 | struct ti_sci_resource *res); | ||
| 21 | unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 index); | ||
| 22 | void ti_sci_inta_msi_domain_free_irqs(struct device *dev); | ||
| 23 | #endif /* __INCLUDE_LINUX_TI_SCI_INTA_MSI_H */ | ||
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 18435e5c6364..568722a041bf 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h | |||
| @@ -193,14 +193,67 @@ struct ti_sci_clk_ops { | |||
| 193 | }; | 193 | }; |
| 194 | 194 | ||
| 195 | /** | 195 | /** |
| 196 | * struct ti_sci_rm_core_ops - Resource management core operations | ||
| 197 | * @get_range: Get a range of resources belonging to ti sci host. | ||
| 198 | * @get_range_from_shost: Get a range of resources belonging to | ||
| 199 | * specified host id. | ||
| 200 | * - s_host: Host processing entity to which the | ||
| 201 | * resources are allocated | ||
| 202 | * | ||
| 203 | * NOTE: for these functions, all the parameters are consolidated and defined | ||
| 204 | * as below: | ||
| 205 | * - handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle | ||
| 206 | * - dev_id: TISCI device ID. | ||
| 207 | * - subtype: Resource assignment subtype that is being requested | ||
| 208 | * from the given device. | ||
| 209 | * - range_start: Start index of the resource range | ||
| 210 | * - range_num: Number of resources in the range | ||
| 211 | */ | ||
| 212 | struct ti_sci_rm_core_ops { | ||
| 213 | int (*get_range)(const struct ti_sci_handle *handle, u32 dev_id, | ||
| 214 | u8 subtype, u16 *range_start, u16 *range_num); | ||
| 215 | int (*get_range_from_shost)(const struct ti_sci_handle *handle, | ||
| 216 | u32 dev_id, u8 subtype, u8 s_host, | ||
| 217 | u16 *range_start, u16 *range_num); | ||
| 218 | }; | ||
| 219 | |||
| 220 | /** | ||
| 221 | * struct ti_sci_rm_irq_ops: IRQ management operations | ||
| 222 | * @set_irq: Set an IRQ route between the requested source | ||
| 223 | * and destination | ||
| 224 | * @set_event_map: Set an Event based peripheral irq to Interrupt | ||
| 225 | * Aggregator. | ||
| 226 | * @free_irq: Free an IRQ route between the requested source | ||
| 227 | * and destination. | ||
| 228 | * @free_event_map: Free an event based peripheral irq to Interrupt | ||
| 229 | * Aggregator. | ||
| 230 | */ | ||
| 231 | struct ti_sci_rm_irq_ops { | ||
| 232 | int (*set_irq)(const struct ti_sci_handle *handle, u16 src_id, | ||
| 233 | u16 src_index, u16 dst_id, u16 dst_host_irq); | ||
| 234 | int (*set_event_map)(const struct ti_sci_handle *handle, u16 src_id, | ||
| 235 | u16 src_index, u16 ia_id, u16 vint, | ||
| 236 | u16 global_event, u8 vint_status_bit); | ||
| 237 | int (*free_irq)(const struct ti_sci_handle *handle, u16 src_id, | ||
| 238 | u16 src_index, u16 dst_id, u16 dst_host_irq); | ||
| 239 | int (*free_event_map)(const struct ti_sci_handle *handle, u16 src_id, | ||
| 240 | u16 src_index, u16 ia_id, u16 vint, | ||
| 241 | u16 global_event, u8 vint_status_bit); | ||
| 242 | }; | ||
| 243 | |||
| 244 | /** | ||
| 196 | * struct ti_sci_ops - Function support for TI SCI | 245 | * struct ti_sci_ops - Function support for TI SCI |
| 197 | * @dev_ops: Device specific operations | 246 | * @dev_ops: Device specific operations |
| 198 | * @clk_ops: Clock specific operations | 247 | * @clk_ops: Clock specific operations |
| 248 | * @rm_core_ops: Resource management core operations. | ||
| 249 | * @rm_irq_ops: IRQ management specific operations | ||
| 199 | */ | 250 | */ |
| 200 | struct ti_sci_ops { | 251 | struct ti_sci_ops { |
| 201 | struct ti_sci_core_ops core_ops; | 252 | struct ti_sci_core_ops core_ops; |
| 202 | struct ti_sci_dev_ops dev_ops; | 253 | struct ti_sci_dev_ops dev_ops; |
| 203 | struct ti_sci_clk_ops clk_ops; | 254 | struct ti_sci_clk_ops clk_ops; |
| 255 | struct ti_sci_rm_core_ops rm_core_ops; | ||
| 256 | struct ti_sci_rm_irq_ops rm_irq_ops; | ||
| 204 | }; | 257 | }; |
| 205 | 258 | ||
| 206 | /** | 259 | /** |
| @@ -213,10 +266,47 @@ struct ti_sci_handle { | |||
| 213 | struct ti_sci_ops ops; | 266 | struct ti_sci_ops ops; |
| 214 | }; | 267 | }; |
| 215 | 268 | ||
| 269 | #define TI_SCI_RESOURCE_NULL 0xffff | ||
| 270 | |||
| 271 | /** | ||
| 272 | * struct ti_sci_resource_desc - Description of TI SCI resource instance range. | ||
| 273 | * @start: Start index of the resource. | ||
| 274 | * @num: Number of resources. | ||
| 275 | * @res_map: Bitmap to manage the allocation of these resources. | ||
| 276 | */ | ||
| 277 | struct ti_sci_resource_desc { | ||
| 278 | u16 start; | ||
| 279 | u16 num; | ||
| 280 | unsigned long *res_map; | ||
| 281 | }; | ||
| 282 | |||
| 283 | /** | ||
| 284 | * struct ti_sci_resource - Structure representing a resource assigned | ||
| 285 | * to a device. | ||
| 286 | * @sets: Number of sets available from this resource type | ||
| 287 | * @lock: Lock to guard the res map in each set. | ||
| 288 | * @desc: Array of resource descriptors. | ||
| 289 | */ | ||
| 290 | struct ti_sci_resource { | ||
| 291 | u16 sets; | ||
| 292 | raw_spinlock_t lock; | ||
| 293 | struct ti_sci_resource_desc *desc; | ||
| 294 | }; | ||
| 295 | |||
| 216 | #if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL) | 296 | #if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL) |
| 217 | const struct ti_sci_handle *ti_sci_get_handle(struct device *dev); | 297 | const struct ti_sci_handle *ti_sci_get_handle(struct device *dev); |
| 218 | int ti_sci_put_handle(const struct ti_sci_handle *handle); | 298 | int ti_sci_put_handle(const struct ti_sci_handle *handle); |
| 219 | const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev); | 299 | const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev); |
| 300 | const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, | ||
| 301 | const char *property); | ||
| 302 | const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, | ||
| 303 | const char *property); | ||
| 304 | u16 ti_sci_get_free_resource(struct ti_sci_resource *res); | ||
| 305 | void ti_sci_release_resource(struct ti_sci_resource *res, u16 id); | ||
| 306 | u32 ti_sci_get_num_resources(struct ti_sci_resource *res); | ||
| 307 | struct ti_sci_resource * | ||
| 308 | devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, | ||
| 309 | struct device *dev, u32 dev_id, char *of_prop); | ||
| 220 | 310 | ||
| 221 | #else /* CONFIG_TI_SCI_PROTOCOL */ | 311 | #else /* CONFIG_TI_SCI_PROTOCOL */ |
| 222 | 312 | ||
| @@ -236,6 +326,40 @@ const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) | |||
| 236 | return ERR_PTR(-EINVAL); | 326 | return ERR_PTR(-EINVAL); |
| 237 | } | 327 | } |
| 238 | 328 | ||
| 329 | static inline | ||
| 330 | const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np, | ||
| 331 | const char *property) | ||
| 332 | { | ||
| 333 | return ERR_PTR(-EINVAL); | ||
| 334 | } | ||
| 335 | |||
| 336 | static inline | ||
| 337 | const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev, | ||
| 338 | const char *property) | ||
| 339 | { | ||
| 340 | return ERR_PTR(-EINVAL); | ||
| 341 | } | ||
| 342 | |||
| 343 | static inline u16 ti_sci_get_free_resource(struct ti_sci_resource *res) | ||
| 344 | { | ||
| 345 | return TI_SCI_RESOURCE_NULL; | ||
| 346 | } | ||
| 347 | |||
| 348 | static inline void ti_sci_release_resource(struct ti_sci_resource *res, u16 id) | ||
| 349 | { | ||
| 350 | } | ||
| 351 | |||
| 352 | static inline u32 ti_sci_get_num_resources(struct ti_sci_resource *res) | ||
| 353 | { | ||
| 354 | return 0; | ||
| 355 | } | ||
| 356 | |||
| 357 | static inline struct ti_sci_resource * | ||
| 358 | devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, | ||
| 359 | struct device *dev, u32 dev_id, char *of_prop) | ||
| 360 | { | ||
| 361 | return ERR_PTR(-EINVAL); | ||
| 362 | } | ||
| 239 | #endif /* CONFIG_TI_SCI_PROTOCOL */ | 363 | #endif /* CONFIG_TI_SCI_PROTOCOL */ |
| 240 | 364 | ||
| 241 | #endif /* __TISCI_PROTOCOL_H */ | 365 | #endif /* __TISCI_PROTOCOL_H */ |
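
The resource-management additions above expose a small allocator API: devm_ti_sci_get_of_resource() reads a resource range from the device tree into a struct ti_sci_resource, and ti_sci_get_free_resource()/ti_sci_release_resource() hand out and return individual indices from its bitmap, with TI_SCI_RESOURCE_NULL signalling exhaustion. A minimal consumer sketch, assuming a hypothetical probe function; the dev_id value and the property names are illustrative assumptions, not taken from the patch:

```c
/*
 * Hypothetical consumer of the TI SCI resource helpers: look up a
 * resource range described in the device tree, allocate one index
 * from it, and release it again. Error handling is abbreviated.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/ti_sci_protocol.h>

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct ti_sci_handle *sci;
	struct ti_sci_resource *res;
	u16 idx;

	/* The "ti,sci" property name is illustrative. */
	sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(sci))
		return PTR_ERR(sci);

	/* dev_id 42 and the range property name are examples only. */
	res = devm_ti_sci_get_of_resource(sci, dev, 42,
					  "ti,sci-rm-range-example");
	if (IS_ERR(res))
		return PTR_ERR(res);

	idx = ti_sci_get_free_resource(res);
	if (idx == TI_SCI_RESOURCE_NULL)
		return -ENOSPC;

	dev_info(dev, "got resource index %u of %u\n", idx,
		 ti_sci_get_num_resources(res));

	/* ... program the hardware, and on teardown: */
	ti_sci_release_resource(res, idx);
	return 0;
}
```

The per-set raw_spinlock_t documented in struct ti_sci_resource suggests the get/release helpers serialize access to the bitmap themselves, so a caller allocating from interrupt-allocation paths would not need extra locking for the map.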
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 5f3e2baefca9..8fee06625c37 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig | |||
| @@ -91,6 +91,9 @@ config GENERIC_MSI_IRQ_DOMAIN | |||
| 91 | select IRQ_DOMAIN_HIERARCHY | 91 | select IRQ_DOMAIN_HIERARCHY |
| 92 | select GENERIC_MSI_IRQ | 92 | select GENERIC_MSI_IRQ |
| 93 | 93 | ||
| 94 | config IRQ_MSI_IOMMU | ||
| 95 | bool | ||
| 96 | |||
| 94 | config HANDLE_DOMAIN_IRQ | 97 | config HANDLE_DOMAIN_IRQ |
| 95 | bool | 98 | bool |
| 96 | 99 | ||
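
IRQ_MSI_IOMMU has no prompt, so it can only be enabled when another Kconfig entry selects it, and the code it guards disappears from builds that never do. A generic sketch of that select-and-#ifdef pattern is shown below; the struct, field, and helper names are purely illustrative and not part of this patch:

```c
/*
 * Pattern sketch only: a hidden Kconfig bool such as IRQ_MSI_IOMMU is
 * selected by whichever subsystem needs the extra state, and the code
 * it guards compiles away entirely when the symbol is not set.
 * "example_desc" and "cookie" are illustrative names, not kernel API.
 */
struct example_desc {
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void *cookie;	/* extra per-descriptor state, only when selected */
#endif
	unsigned int irq;
};

static inline void example_set_cookie(struct example_desc *d, const void *c)
{
#ifdef CONFIG_IRQ_MSI_IOMMU
	d->cookie = c;
#endif
}
```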
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 51128bea3846..29d6c7d070b4 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -1459,6 +1459,33 @@ int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) | |||
| 1459 | return -ENOSYS; | 1459 | return -ENOSYS; |
| 1460 | } | 1460 | } |
| 1461 | EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); | 1461 | EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); |
| 1462 | |||
| 1463 | /** | ||
| 1464 | * irq_chip_request_resources_parent - Request resources on the parent interrupt | ||
| 1465 | * @data: Pointer to interrupt specific data | ||
| 1466 | */ | ||
| 1467 | int irq_chip_request_resources_parent(struct irq_data *data) | ||
| 1468 | { | ||
| 1469 | data = data->parent_data; | ||
| 1470 | |||
| 1471 | if (data->chip->irq_request_resources) | ||
| 1472 | return data->chip->irq_request_resources(data); | ||
| 1473 | |||
| 1474 | return -ENOSYS; | ||
| 1475 | } | ||
| 1476 | EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); | ||
| 1477 | |||
| 1478 | /** | ||
| 1479 | * irq_chip_release_resources_parent - Release resources on the parent interrupt | ||
| 1480 | * @data: Pointer to interrupt specific data | ||
| 1481 | */ | ||
| 1482 | void irq_chip_release_resources_parent(struct irq_data *data) | ||
| 1483 | { | ||
| 1484 | data = data->parent_data; | ||
| 1485 | if (data->chip->irq_release_resources) | ||
| 1486 | data->chip->irq_release_resources(data); | ||
| 1487 | } | ||
| 1488 | EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); | ||
| 1462 | #endif | 1489 | #endif |
| 1463 | 1490 | ||
| 1464 | /** | 1491 | /** |
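
irq_chip_request_resources_parent() and irq_chip_release_resources_parent() simply step to data->parent_data and invoke the parent chip's callback when one is present (the request side returns -ENOSYS otherwise). A stacked irqchip that owns no resources of its own can therefore plug them straight into its struct irq_chip; a hedged sketch with a hypothetical chip:

```c
#include <linux/irq.h>

/*
 * Hypothetical stacked irqchip: it owns no resources itself, so the
 * request/release callbacks just delegate to the parent irq_data,
 * alongside the existing *_parent helpers for mask/unmask/set_type.
 */
static struct irq_chip example_child_irq_chip = {
	.name			= "example-child",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_request_resources	= irq_chip_request_resources_parent,
	.irq_release_resources	= irq_chip_release_resources_parent,
};
```

This keeps the child chip a thin translator while still letting the parent domain reserve or validate whatever the underlying hardware needs when an interrupt is requested.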
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 9ed29e4a7dbf..a453e229f99c 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
| @@ -1297,7 +1297,7 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, | |||
| 1297 | /** | 1297 | /** |
| 1298 | * __irq_domain_alloc_irqs - Allocate IRQs from domain | 1298 | * __irq_domain_alloc_irqs - Allocate IRQs from domain |
| 1299 | * @domain: domain to allocate from | 1299 | * @domain: domain to allocate from |
| 1300 | * @irq_base: allocate specified IRQ nubmer if irq_base >= 0 | 1300 | * @irq_base: allocate specified IRQ number if irq_base >= 0 |
| 1301 | * @nr_irqs: number of IRQs to allocate | 1301 | * @nr_irqs: number of IRQs to allocate |
| 1302 | * @node: NUMA node id for memory allocation | 1302 | * @node: NUMA node id for memory allocation |
| 1303 | * @arg: domain specific argument | 1303 | * @arg: domain specific argument |
