148 files changed, 8937 insertions, 1004 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci index e4e90104d7c3..44d4b2be92fd 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci +++ b/Documentation/ABI/testing/sysfs-bus-pci | |||
@@ -301,3 +301,25 @@ Contact: Emil Velikov <emil.l.velikov@gmail.com> | |||
301 | Description: | 301 | Description: |
302 | This file contains the revision field of the PCI device. | 302 | This file contains the revision field of the PCI device. |
303 | The value comes from device config space. The file is read only. | 303 | The value comes from device config space. The file is read only. |
304 | |||
305 | What: /sys/bus/pci/devices/.../sriov_drivers_autoprobe | ||
306 | Date: April 2017 | ||
307 | Contact: Bodong Wang <bodong@mellanox.com> | ||
308 | Description: | ||
309 | This file is associated with the PF of a device that | ||
310 | supports SR-IOV. It determines whether newly-enabled VFs | ||
311 | are immediately bound to a driver. It initially contains | ||
312 | 1, which means the kernel automatically binds VFs to a | ||
313 | compatible driver immediately after they are enabled. If | ||
314 | an application writes 0 to the file before enabling VFs, | ||
315 | the kernel will not bind VFs to a driver. | ||
316 | |||
317 | A typical use case is to write 0 to this file, then enable | ||
318 | VFs, then assign the newly-created VFs to virtual machines. | ||
319 | Note that changing this file does not affect already- | ||
320 | enabled VFs. In this scenario, the user must first disable | ||
321 | the VFs, write 0 to sriov_drivers_autoprobe, then re-enable | ||
322 | the VFs. | ||
323 | |||
324 | This is similar to /sys/bus/pci/drivers_autoprobe, but | ||
325 | affects only the VFs associated with a specific PF. | ||
diff --git a/Documentation/ABI/testing/sysfs-class-switchtec b/Documentation/ABI/testing/sysfs-class-switchtec new file mode 100644 index 000000000000..48cb4c15e430 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-switchtec | |||
@@ -0,0 +1,96 @@ | |||
1 | switchtec - Microsemi Switchtec PCI Switch Management Endpoint | ||
2 | |||
3 | For details on this subsystem look at Documentation/switchtec.txt. | ||
4 | |||
5 | What: /sys/class/switchtec | ||
6 | Date: 05-Jan-2017 | ||
7 | KernelVersion: v4.11 | ||
8 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
9 | Description: The switchtec class subsystem folder. | ||
10 | Each registered switchtec driver is represented by a switchtecX | ||
11 | subfolder (X being an integer >= 0). | ||
12 | |||
13 | |||
14 | What: /sys/class/switchtec/switchtec[0-9]+/component_id | ||
15 | Date: 05-Jan-2017 | ||
16 | KernelVersion: v4.11 | ||
17 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
18 | Description: Component identifier as stored in the hardware (eg. PM8543) | ||
19 | (read only) | ||
20 | Values: arbitrary string. | ||
21 | |||
22 | |||
23 | What: /sys/class/switchtec/switchtec[0-9]+/component_revision | ||
24 | Date: 05-Jan-2017 | ||
25 | KernelVersion: v4.11 | ||
26 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
27 | Description: Component revision stored in the hardware (read only) | ||
28 | Values: integer. | ||
29 | |||
30 | |||
31 | What: /sys/class/switchtec/switchtec[0-9]+/component_vendor | ||
32 | Date: 05-Jan-2017 | ||
33 | KernelVersion: v4.11 | ||
34 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
35 | Description: Component vendor as stored in the hardware (eg. MICROSEM) | ||
36 | (read only) | ||
37 | Values: arbitrary string. | ||
38 | |||
39 | |||
40 | What: /sys/class/switchtec/switchtec[0-9]+/device_version | ||
41 | Date: 05-Jan-2017 | ||
42 | KernelVersion: v4.11 | ||
43 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
44 | Description: Device version as stored in the hardware (read only) | ||
45 | Values: integer. | ||
46 | |||
47 | |||
48 | What: /sys/class/switchtec/switchtec[0-9]+/fw_version | ||
49 | Date: 05-Jan-2017 | ||
50 | KernelVersion: v4.11 | ||
51 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
52 | Description: Currently running firmware version (read only) | ||
53 | Values: integer (in hexadecimal). | ||
54 | |||
55 | |||
56 | What: /sys/class/switchtec/switchtec[0-9]+/partition | ||
57 | Date: 05-Jan-2017 | ||
58 | KernelVersion: v4.11 | ||
59 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
60 | Description: Partition number for this device in the switch (read only) | ||
61 | Values: integer. | ||
62 | |||
63 | |||
64 | What: /sys/class/switchtec/switchtec[0-9]+/partition_count | ||
65 | Date: 05-Jan-2017 | ||
66 | KernelVersion: v4.11 | ||
67 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
68 | Description: Total number of partitions in the switch (read only) | ||
69 | Values: integer. | ||
70 | |||
71 | |||
72 | What: /sys/class/switchtec/switchtec[0-9]+/product_id | ||
73 | Date: 05-Jan-2017 | ||
74 | KernelVersion: v4.11 | ||
75 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
76 | Description: Product identifier as stored in the hardware (eg. PSX 48XG3) | ||
77 | (read only) | ||
78 | Values: arbitrary string. | ||
79 | |||
80 | |||
81 | What: /sys/class/switchtec/switchtec[0-9]+/product_revision | ||
82 | Date: 05-Jan-2017 | ||
83 | KernelVersion: v4.11 | ||
84 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
85 | Description: Product revision stored in the hardware (eg. RevB) | ||
86 | (read only) | ||
87 | Values: arbitrary string. | ||
88 | |||
89 | |||
90 | What: /sys/class/switchtec/switchtec[0-9]+/product_vendor | ||
91 | Date: 05-Jan-2017 | ||
92 | KernelVersion: v4.11 | ||
93 | Contact: Logan Gunthorpe <logang@deltatee.com> | ||
94 | Description: Product vendor as stored in the hardware (eg. MICROSEM) | ||
95 | (read only) | ||
96 | Values: arbitrary string. | ||
diff --git a/Documentation/PCI/00-INDEX b/Documentation/PCI/00-INDEX index 147231f1613e..00c9a90b6f38 100644 --- a/Documentation/PCI/00-INDEX +++ b/Documentation/PCI/00-INDEX | |||
@@ -12,3 +12,13 @@ pci.txt | |||
12 | - info on the PCI subsystem for device driver authors | 12 | - info on the PCI subsystem for device driver authors |
13 | pcieaer-howto.txt | 13 | pcieaer-howto.txt |
14 | - the PCI Express Advanced Error Reporting Driver Guide HOWTO | 14 | - the PCI Express Advanced Error Reporting Driver Guide HOWTO |
15 | endpoint/pci-endpoint.txt | ||
16 | - guide to add endpoint controller driver and endpoint function driver. | ||
17 | endpoint/pci-endpoint-cfs.txt | ||
18 | - guide to use configfs to configure the PCI endpoint function. | ||
19 | endpoint/pci-test-function.txt | ||
20 | - specification of *PCI test* function device. | ||
21 | endpoint/pci-test-howto.txt | ||
22 | - userguide for PCI endpoint test function. | ||
23 | endpoint/function/binding/ | ||
24 | - binding documentation for PCI endpoint function | ||
diff --git a/Documentation/PCI/endpoint/function/binding/pci-test.txt b/Documentation/PCI/endpoint/function/binding/pci-test.txt new file mode 100644 index 000000000000..3b68b955fb50 --- /dev/null +++ b/Documentation/PCI/endpoint/function/binding/pci-test.txt | |||
@@ -0,0 +1,17 @@ | |||
1 | PCI TEST ENDPOINT FUNCTION | ||
2 | |||
3 | name: Should be "pci_epf_test" to bind to the pci_epf_test driver. | ||
4 | |||
5 | Configurable Fields: | ||
6 | vendorid : should be 0x104c | ||
7 | deviceid : should be 0xb500 for DRA74x and 0xb501 for DRA72x | ||
8 | revid : don't care | ||
9 | progif_code : don't care | ||
10 | subclass_code : don't care | ||
11 | baseclass_code : should be 0xff | ||
12 | cache_line_size : don't care | ||
13 | subsys_vendor_id : don't care | ||
14 | subsys_id : don't care | ||
15 | interrupt_pin : Should be 1 - INTA, 2 - INTB, 3 - INTC, 4 - INTD | ||
16 | msi_interrupts : Should be 1 to 32 depending on the number of MSI interrupts | ||
17 | to test | ||
diff --git a/Documentation/PCI/endpoint/pci-endpoint-cfs.txt b/Documentation/PCI/endpoint/pci-endpoint-cfs.txt new file mode 100644 index 000000000000..d740f29960a4 --- /dev/null +++ b/Documentation/PCI/endpoint/pci-endpoint-cfs.txt | |||
@@ -0,0 +1,105 @@ | |||
1 | CONFIGURING PCI ENDPOINT USING CONFIGFS | ||
2 | Kishon Vijay Abraham I <kishon@ti.com> | ||
3 | |||
4 | The PCI Endpoint Core exposes a configfs entry (pci_ep) to configure the | ||
5 | PCI endpoint function and to bind the endpoint function | ||
6 | with the endpoint controller. (For an overview of other mechanisms to | ||
7 | configure the PCI Endpoint Function, refer to [1].) | ||
8 | |||
9 | *) Mounting configfs | ||
10 | |||
11 | The PCI Endpoint Core layer creates pci_ep directory in the mounted configfs | ||
12 | directory. configfs can be mounted using the following command. | ||
13 | |||
14 | mount -t configfs none /sys/kernel/config | ||
15 | |||
16 | *) Directory Structure | ||
17 | |||
18 | The pci_ep configfs has two directories at its root: controllers and | ||
19 | functions. Every EPC device present in the system will have an entry in | ||
20 | the *controllers* directory and every EPF driver present in the system | ||
21 | will have an entry in the *functions* directory. | ||
22 | |||
23 | /sys/kernel/config/pci_ep/ | ||
24 | .. controllers/ | ||
25 | .. functions/ | ||
26 | |||
27 | *) Creating EPF Device | ||
28 | |||
29 | Every registered EPF driver will be listed in the functions directory. The | ||
30 | entries corresponding to an EPF driver will be created by the EPF core. | ||
31 | |||
32 | /sys/kernel/config/pci_ep/functions/ | ||
33 | .. <EPF Driver1>/ | ||
34 | ... <EPF Device 11>/ | ||
35 | ... <EPF Device 21>/ | ||
36 | .. <EPF Driver2>/ | ||
37 | ... <EPF Device 12>/ | ||
38 | ... <EPF Device 22>/ | ||
39 | |||
40 | In order to create a <EPF device> of the type probed by <EPF Driver>, the | ||
41 | user has to create a directory inside <EPF DriverN>. | ||
42 | |||
43 | Every <EPF device> directory consists of the following entries that can be | ||
44 | used to configure the standard configuration header of the endpoint function. | ||
45 | (These entries are created by the framework when any new <EPF Device> is | ||
46 | created) | ||
47 | |||
48 | .. <EPF Driver1>/ | ||
49 | ... <EPF Device 11>/ | ||
50 | ... vendorid | ||
51 | ... deviceid | ||
52 | ... revid | ||
53 | ... progif_code | ||
54 | ... subclass_code | ||
55 | ... baseclass_code | ||
56 | ... cache_line_size | ||
57 | ... subsys_vendor_id | ||
58 | ... subsys_id | ||
59 | ... interrupt_pin | ||
60 | |||
61 | *) EPC Device | ||
62 | |||
63 | Every registered EPC device will be listed in the controllers directory. The | ||
64 | entries corresponding to an EPC device will be created by the EPC core. | ||
65 | |||
66 | /sys/kernel/config/pci_ep/controllers/ | ||
67 | .. <EPC Device1>/ | ||
68 | ... <Symlink EPF Device11>/ | ||
69 | ... <Symlink EPF Device12>/ | ||
70 | ... start | ||
71 | .. <EPC Device2>/ | ||
72 | ... <Symlink EPF Device21>/ | ||
73 | ... <Symlink EPF Device22>/ | ||
74 | ... start | ||
75 | |||
76 | The <EPC Device> directory will have a list of symbolic links to | ||
77 | <EPF Device>. These symbolic links should be created by the user to | ||
78 | represent the functions present in the endpoint device. | ||
79 | |||
80 | The <EPC Device> directory will also have a *start* field. Once | ||
81 | "1" is written to this field, the endpoint device will be ready to | ||
82 | establish the link with the host. This is usually done after | ||
83 | all the EPF devices are created and linked with the EPC device. | ||
84 | |||
85 | |||
86 | | controllers/ | ||
87 | | <Directory: EPC name>/ | ||
88 | | <Symbolic Link: Function> | ||
89 | | start | ||
90 | | functions/ | ||
91 | | <Directory: EPF driver>/ | ||
92 | | <Directory: EPF device>/ | ||
93 | | vendorid | ||
94 | | deviceid | ||
95 | | revid | ||
96 | | progif_code | ||
97 | | subclass_code | ||
98 | | baseclass_code | ||
99 | | cache_line_size | ||
100 | | subsys_vendor_id | ||
101 | | subsys_id | ||
102 | | interrupt_pin | ||
103 | | function | ||
104 | |||
105 | [1] -> Documentation/PCI/endpoint/pci-endpoint.txt | ||
diff --git a/Documentation/PCI/endpoint/pci-endpoint.txt b/Documentation/PCI/endpoint/pci-endpoint.txt new file mode 100644 index 000000000000..9b1d66829290 --- /dev/null +++ b/Documentation/PCI/endpoint/pci-endpoint.txt | |||
@@ -0,0 +1,215 @@ | |||
1 | PCI ENDPOINT FRAMEWORK | ||
2 | Kishon Vijay Abraham I <kishon@ti.com> | ||
3 | |||
4 | This document is a guide to using the PCI Endpoint Framework to create an | ||
5 | endpoint controller driver and an endpoint function driver, and to using the | ||
6 | configfs interface to bind the function driver to the controller driver. | ||
7 | |||
8 | 1. Introduction | ||
9 | |||
10 | Linux has a comprehensive PCI subsystem to support PCI controllers that | ||
11 | operate in Root Complex mode. The subsystem can scan the PCI bus, | ||
12 | assign memory and IRQ resources, load PCI drivers (based on | ||
13 | vendor ID and device ID), and support other services like hot-plug, power | ||
14 | management, advanced error reporting and virtual channels. | ||
15 | |||
16 | However, the PCI controller IP integrated in some SoCs is capable of operating | ||
17 | in either Root Complex mode or Endpoint mode. The PCI Endpoint Framework | ||
18 | adds endpoint mode support to Linux. This makes it possible to run Linux in an | ||
19 | EP system, which has a wide variety of use cases, from testing and | ||
20 | validation to co-processor acceleration. | ||
21 | |||
22 | 2. PCI Endpoint Core | ||
23 | |||
24 | The PCI Endpoint Core layer comprises 3 components: the Endpoint Controller | ||
25 | library, the Endpoint Function library, and the configfs layer to bind the | ||
26 | endpoint function with the endpoint controller. | ||
27 | |||
28 | 2.1 PCI Endpoint Controller(EPC) Library | ||
29 | |||
30 | The EPC library provides APIs to be used by the controller that can operate | ||
31 | in endpoint mode. It also provides APIs to be used by function driver/library | ||
32 | in order to implement a particular endpoint function. | ||
33 | |||
34 | 2.1.1 APIs for the PCI controller Driver | ||
35 | |||
36 | This section lists the APIs that the PCI Endpoint core provides to be used | ||
37 | by the PCI controller driver. | ||
38 | |||
39 | *) devm_pci_epc_create()/pci_epc_create() | ||
40 | |||
41 | The PCI controller driver should implement the following ops: | ||
42 | * write_header: ops to populate configuration space header | ||
43 | * set_bar: ops to configure the BAR | ||
44 | * clear_bar: ops to reset the BAR | ||
45 | * alloc_addr_space: ops to allocate in PCI controller address space | ||
46 | * free_addr_space: ops to free the allocated address space | ||
47 | * raise_irq: ops to raise a legacy or MSI interrupt | ||
48 | * start: ops to start the PCI link | ||
49 | * stop: ops to stop the PCI link | ||
50 | |||
51 | The PCI controller driver can then create a new EPC device by invoking | ||
52 | devm_pci_epc_create()/pci_epc_create(). | ||
53 | |||
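As a rough sketch (not taken from any in-tree driver), a controller driver might wire this up as below. The my_ep_* names are hypothetical and the ops prototypes are simplified; consult the EPC header for the exact signatures.

    #include <linux/module.h>
    #include <linux/pci-epc.h>
    #include <linux/platform_device.h>

    /* Hypothetical controller: only start/stop are stubbed here; a real
     * driver also fills in write_header, set_bar, clear_bar, raise_irq, etc. */
    static int my_ep_start(struct pci_epc *epc)
    {
            /* program the controller to start link training */
            return 0;
    }

    static void my_ep_stop(struct pci_epc *epc)
    {
            /* disable the link */
    }

    static const struct pci_epc_ops my_epc_ops = {
            .start = my_ep_start,
            .stop  = my_ep_stop,
    };

    static int my_ep_probe(struct platform_device *pdev)
    {
            struct pci_epc *epc = devm_pci_epc_create(&pdev->dev, &my_epc_ops);

            return PTR_ERR_OR_ZERO(epc);
    }
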
54 | *) devm_pci_epc_destroy()/pci_epc_destroy() | ||
55 | |||
56 | The PCI controller driver can destroy the EPC device created by either | ||
57 | devm_pci_epc_create() or pci_epc_create() using devm_pci_epc_destroy() or | ||
58 | pci_epc_destroy(). | ||
59 | |||
60 | *) pci_epc_linkup() | ||
61 | |||
62 | In order to notify all the function devices that the EPC device to which | ||
63 | they are linked has established a link with the host, the PCI controller | ||
64 | driver should invoke pci_epc_linkup(). | ||
65 | |||
66 | *) pci_epc_mem_init() | ||
67 | |||
68 | Initialize the pci_epc_mem structure used for allocating EPC addr space. | ||
69 | |||
70 | *) pci_epc_mem_exit() | ||
71 | |||
72 | Cleanup the pci_epc_mem structure allocated during pci_epc_mem_init(). | ||
73 | |||
74 | 2.1.2 APIs for the PCI Endpoint Function Driver | ||
75 | |||
76 | This section lists the APIs that the PCI Endpoint core provides to be used | ||
77 | by the PCI endpoint function driver. | ||
78 | |||
79 | *) pci_epc_write_header() | ||
80 | |||
81 | The PCI endpoint function driver should use pci_epc_write_header() to | ||
82 | write the standard configuration header to the endpoint controller. | ||
83 | |||
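For illustration, a function driver's bind callback might do something like the following. The values mirror the pci-test binding documentation, the my_* names are made up, and the call form follows the description in this document (later kernels may take additional arguments).

    #include <linux/pci-epc.h>
    #include <linux/pci-epf.h>

    /* Illustrative values; see the pci-test binding documentation. */
    static struct pci_epf_header my_header = {
            .vendorid       = 0x104c,
            .deviceid       = 0xb500,
            .baseclass_code = 0xff,
            .interrupt_pin  = 1,    /* INTA */
    };

    static int my_epf_bind(struct pci_epf *epf)
    {
            /* write the standard config header through the bound EPC */
            return pci_epc_write_header(epf->epc, &my_header);
    }
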
84 | *) pci_epc_set_bar() | ||
85 | |||
86 | The PCI endpoint function driver should use pci_epc_set_bar() to configure | ||
87 | the Base Address Register in order for the host to assign PCI addr space. | ||
88 | Register space of the function driver is usually configured | ||
89 | using this API. | ||
90 | |||
91 | *) pci_epc_clear_bar() | ||
92 | |||
93 | The PCI endpoint function driver should use pci_epc_clear_bar() to reset | ||
94 | the BAR. | ||
95 | |||
96 | *) pci_epc_raise_irq() | ||
97 | |||
98 | The PCI endpoint function driver should use pci_epc_raise_irq() to raise | ||
99 | Legacy Interrupt or MSI Interrupt. | ||
100 | |||
101 | *) pci_epc_mem_alloc_addr() | ||
102 | |||
103 | The PCI endpoint function driver should use pci_epc_mem_alloc_addr() to | ||
104 | allocate a memory address from the EPC addr space, which is required to access | ||
105 | the RC's buffer. | ||
106 | |||
107 | *) pci_epc_mem_free_addr() | ||
108 | |||
109 | The PCI endpoint function driver should use pci_epc_mem_free_addr() to | ||
110 | free the memory space allocated using pci_epc_mem_alloc_addr(). | ||
111 | |||
112 | 2.1.3 Other APIs | ||
113 | |||
114 | There are other APIs provided by the EPC library. These are used for binding | ||
115 | the EPF device with the EPC device. pci-ep-cfs.c can be used as a reference for | ||
116 | using these APIs. | ||
117 | |||
118 | *) pci_epc_get() | ||
119 | |||
120 | Get a reference to the PCI endpoint controller based on the device name of | ||
121 | the controller. | ||
122 | |||
123 | *) pci_epc_put() | ||
124 | |||
125 | Release the reference to the PCI endpoint controller obtained using | ||
126 | pci_epc_get() | ||
127 | |||
128 | *) pci_epc_add_epf() | ||
129 | |||
130 | Add a PCI endpoint function to a PCI endpoint controller. A PCIe device | ||
131 | can have up to 8 functions according to the specification. | ||
132 | |||
133 | *) pci_epc_remove_epf() | ||
134 | |||
135 | Remove the PCI endpoint function from PCI endpoint controller. | ||
136 | |||
137 | *) pci_epc_start() | ||
138 | |||
139 | The PCI endpoint function driver should invoke pci_epc_start() once it | ||
140 | has configured the endpoint function and wants to start the PCI link. | ||
141 | |||
142 | *) pci_epc_stop() | ||
143 | |||
144 | The PCI endpoint function driver should invoke pci_epc_stop() to stop | ||
145 | the PCI link. | ||
146 | |||
147 | 2.2 PCI Endpoint Function(EPF) Library | ||
148 | |||
149 | The EPF library provides APIs to be used by the function driver and the EPC | ||
150 | library to provide endpoint mode functionality. | ||
151 | |||
152 | 2.2.1 APIs for the PCI Endpoint Function Driver | ||
153 | |||
154 | This section lists the APIs that the PCI Endpoint core provides to be used | ||
155 | by the PCI endpoint function driver. | ||
156 | |||
157 | *) pci_epf_register_driver() | ||
158 | |||
159 | The PCI Endpoint Function driver should implement the following ops: | ||
160 | * bind: ops to perform when an EPC device has been bound to an EPF device | ||
161 | * unbind: ops to perform when the binding between an EPC | ||
162 | device and an EPF device has been lost | ||
163 | * linkup: ops to perform when the EPC device has established a | ||
164 | connection with a host system | ||
165 | |||
166 | The PCI Function driver can then register the PCI EPF driver by using | ||
167 | pci_epf_register_driver(). | ||
168 | |||
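A skeletal function driver registration might look like the sketch below; the pci_epf_foo names are invented for this example, and the ops and driver fields follow the description above.

    #include <linux/module.h>
    #include <linux/pci-epf.h>

    static int pci_epf_foo_probe(struct pci_epf *epf)
    {
            return 0;
    }

    static int pci_epf_foo_bind(struct pci_epf *epf)
    {
            /* configure header, BARs, etc. through epf->epc here */
            return 0;
    }

    static void pci_epf_foo_unbind(struct pci_epf *epf)
    {
    }

    static void pci_epf_foo_linkup(struct pci_epf *epf)
    {
            /* the host can now reach the function */
    }

    static struct pci_epf_ops pci_epf_foo_ops = {
            .bind   = pci_epf_foo_bind,
            .unbind = pci_epf_foo_unbind,
            .linkup = pci_epf_foo_linkup,
    };

    static const struct pci_epf_device_id pci_epf_foo_ids[] = {
            { .name = "pci_epf_foo" },  /* matched against the configfs device name */
            {},
    };

    static struct pci_epf_driver pci_epf_foo_driver = {
            .driver.name = "pci_epf_foo",
            .probe       = pci_epf_foo_probe,
            .id_table    = pci_epf_foo_ids,
            .ops         = &pci_epf_foo_ops,
    };

    static int __init pci_epf_foo_init(void)
    {
            return pci_epf_register_driver(&pci_epf_foo_driver);
    }
    module_init(pci_epf_foo_init);

    static void __exit pci_epf_foo_exit(void)
    {
            pci_epf_unregister_driver(&pci_epf_foo_driver);
    }
    module_exit(pci_epf_foo_exit);
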
169 | *) pci_epf_unregister_driver() | ||
170 | |||
171 | The PCI Function driver can unregister the PCI EPF driver by using | ||
172 | pci_epf_unregister_driver(). | ||
173 | |||
174 | *) pci_epf_alloc_space() | ||
175 | |||
176 | The PCI Function driver can allocate space for a particular BAR using | ||
177 | pci_epf_alloc_space(). | ||
178 | |||
179 | *) pci_epf_free_space() | ||
180 | |||
181 | The PCI Function driver can free the allocated space | ||
182 | (using pci_epf_alloc_space()) by invoking pci_epf_free_space(). | ||
183 | |||
184 | 2.2.2 APIs for the PCI Endpoint Controller Library | ||
185 | This section lists the APIs that the PCI Endpoint core provides to be used | ||
186 | by the PCI endpoint controller library. | ||
187 | |||
188 | *) pci_epf_linkup() | ||
189 | |||
190 | The PCI endpoint controller library invokes pci_epf_linkup() when the | ||
191 | EPC device has established the connection to the host. | ||
192 | |||
193 | 2.2.3 Other APIs | ||
194 | There are other APIs provided by the EPF library. These are used to notify | ||
195 | the function driver when the EPF device is bound to the EPC device. | ||
196 | pci-ep-cfs.c can be used as a reference for using these APIs. | ||
197 | |||
198 | *) pci_epf_create() | ||
199 | |||
200 | Create a new PCI EPF device by passing the name of the PCI EPF device. | ||
201 | This name will be used to bind the EPF device to an EPF driver. | ||
202 | |||
203 | *) pci_epf_destroy() | ||
204 | |||
205 | Destroy the created PCI EPF device. | ||
206 | |||
207 | *) pci_epf_bind() | ||
208 | |||
209 | pci_epf_bind() should be invoked when the EPF device has been bound to | ||
210 | an EPC device. | ||
211 | |||
212 | *) pci_epf_unbind() | ||
213 | |||
214 | pci_epf_unbind() should be invoked when the binding between the EPC device | ||
215 | and the EPF device is lost. | ||
diff --git a/Documentation/PCI/endpoint/pci-test-function.txt b/Documentation/PCI/endpoint/pci-test-function.txt new file mode 100644 index 000000000000..0c519c9bf94a --- /dev/null +++ b/Documentation/PCI/endpoint/pci-test-function.txt | |||
@@ -0,0 +1,66 @@ | |||
1 | PCI TEST | ||
2 | Kishon Vijay Abraham I <kishon@ti.com> | ||
3 | |||
4 | Traditionally, a PCI RC has been validated by using standard | ||
5 | PCI cards such as Ethernet, USB or SATA PCI cards. | ||
6 | However, with the addition of the EP core in the Linux kernel, it is possible | ||
7 | to configure a PCI controller that can operate in EP mode to work as | ||
8 | a test device. | ||
9 | |||
10 | The PCI endpoint test device is a virtual device (defined in software) | ||
11 | used to test the endpoint functionality and serve as a sample driver | ||
12 | for other PCI endpoint devices (to use the EP framework). | ||
13 | |||
14 | The PCI endpoint test device has the following registers: | ||
15 | |||
16 | 1) PCI_ENDPOINT_TEST_MAGIC | ||
17 | 2) PCI_ENDPOINT_TEST_COMMAND | ||
18 | 3) PCI_ENDPOINT_TEST_STATUS | ||
19 | 4) PCI_ENDPOINT_TEST_SRC_ADDR | ||
20 | 5) PCI_ENDPOINT_TEST_DST_ADDR | ||
21 | 6) PCI_ENDPOINT_TEST_SIZE | ||
22 | 7) PCI_ENDPOINT_TEST_CHECKSUM | ||
23 | |||
24 | *) PCI_ENDPOINT_TEST_MAGIC | ||
25 | |||
26 | This register will be used to test BAR0. A known pattern will be written | ||
27 | to and read back from the MAGIC register to verify BAR0. | ||
28 | |||
29 | *) PCI_ENDPOINT_TEST_COMMAND: | ||
30 | |||
31 | This register will be used by the host driver to indicate the function | ||
32 | that the endpoint device must perform. | ||
33 | |||
34 | Bitfield Description: | ||
35 | Bit 0 : raise legacy IRQ | ||
36 | Bit 1 : raise MSI IRQ | ||
37 | Bit 2 - 7 : MSI interrupt number | ||
38 | Bit 8 : read command (read data from RC buffer) | ||
39 | Bit 9 : write command (write data to RC buffer) | ||
40 | Bit 10 : copy command (copy data from one RC buffer to another | ||
41 | RC buffer) | ||
42 | |||
43 | *) PCI_ENDPOINT_TEST_STATUS | ||
44 | |||
45 | This register reflects the status of the PCI endpoint device. | ||
46 | |||
47 | Bitfield Description: | ||
48 | Bit 0 : read success | ||
49 | Bit 1 : read fail | ||
50 | Bit 2 : write success | ||
51 | Bit 3 : write fail | ||
52 | Bit 4 : copy success | ||
53 | Bit 5 : copy fail | ||
54 | Bit 6 : IRQ raised | ||
55 | Bit 7 : source address is invalid | ||
56 | Bit 8 : destination address is invalid | ||
57 | |||
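For reference, the two bit layouts above can be captured in C as below; the macro names are invented for this example and are not the identifiers used by the in-tree drivers.

    /* COMMAND register bits (illustrative names) */
    #define TEST_CMD_RAISE_LEGACY_IRQ       (1 << 0)
    #define TEST_CMD_RAISE_MSI_IRQ          (1 << 1)
    #define TEST_CMD_MSI_NUM(n)             (((n) & 0x3f) << 2)  /* bits 2-7 */
    #define TEST_CMD_READ                   (1 << 8)   /* read data from RC buffer */
    #define TEST_CMD_WRITE                  (1 << 9)   /* write data to RC buffer */
    #define TEST_CMD_COPY                   (1 << 10)  /* copy between RC buffers */

    /* STATUS register bits (illustrative names) */
    #define TEST_STATUS_READ_SUCCESS        (1 << 0)
    #define TEST_STATUS_READ_FAIL           (1 << 1)
    #define TEST_STATUS_WRITE_SUCCESS       (1 << 2)
    #define TEST_STATUS_WRITE_FAIL          (1 << 3)
    #define TEST_STATUS_COPY_SUCCESS        (1 << 4)
    #define TEST_STATUS_COPY_FAIL           (1 << 5)
    #define TEST_STATUS_IRQ_RAISED          (1 << 6)
    #define TEST_STATUS_SRC_ADDR_INVALID    (1 << 7)
    #define TEST_STATUS_DST_ADDR_INVALID    (1 << 8)
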
58 | *) PCI_ENDPOINT_TEST_SRC_ADDR | ||
59 | |||
60 | This register contains the source address (RC buffer address) for the | ||
61 | COPY/READ command. | ||
62 | |||
63 | *) PCI_ENDPOINT_TEST_DST_ADDR | ||
64 | |||
65 | This register contains the destination address (RC buffer address) for | ||
66 | the COPY/WRITE command. | ||
diff --git a/Documentation/PCI/endpoint/pci-test-howto.txt b/Documentation/PCI/endpoint/pci-test-howto.txt new file mode 100644 index 000000000000..75f48c3bb191 --- /dev/null +++ b/Documentation/PCI/endpoint/pci-test-howto.txt | |||
@@ -0,0 +1,179 @@ | |||
1 | PCI TEST USERGUIDE | ||
2 | Kishon Vijay Abraham I <kishon@ti.com> | ||
3 | |||
4 | This document is a guide to help users use the pci-epf-test function driver | ||
5 | and the pci_endpoint_test host driver for testing PCI. The list of steps to | ||
6 | be followed on the host side and the EP side is given below. | ||
7 | |||
8 | 1. Endpoint Device | ||
9 | |||
10 | 1.1 Endpoint Controller Devices | ||
11 | |||
12 | To find the list of endpoint controller devices in the system: | ||
13 | |||
14 | # ls /sys/class/pci_epc/ | ||
15 | 51000000.pcie_ep | ||
16 | |||
17 | If PCI_ENDPOINT_CONFIGFS is enabled | ||
18 | # ls /sys/kernel/config/pci_ep/controllers | ||
19 | 51000000.pcie_ep | ||
20 | |||
21 | 1.2 Endpoint Function Drivers | ||
22 | |||
23 | To find the list of endpoint function drivers in the system: | ||
24 | |||
25 | # ls /sys/bus/pci-epf/drivers | ||
26 | pci_epf_test | ||
27 | |||
28 | If PCI_ENDPOINT_CONFIGFS is enabled | ||
29 | # ls /sys/kernel/config/pci_ep/functions | ||
30 | pci_epf_test | ||
31 | |||
32 | 1.3 Creating pci-epf-test Device | ||
33 | |||
34 | A PCI endpoint function device can be created using configfs. To create the | ||
35 | pci-epf-test device, the following commands can be used: | ||
36 | |||
37 | # mount -t configfs none /sys/kernel/config | ||
38 | # cd /sys/kernel/config/pci_ep/ | ||
39 | # mkdir functions/pci_epf_test/func1 | ||
40 | |||
41 | The "mkdir func1" above creates the pci-epf-test function device that will | ||
42 | be probed by pci_epf_test driver. | ||
43 | |||
44 | The PCI endpoint framework populates the directory with the following | ||
45 | configurable fields. | ||
46 | |||
47 | # ls functions/pci_epf_test/func1 | ||
48 | baseclass_code interrupt_pin revid subsys_vendor_id | ||
49 | cache_line_size msi_interrupts subclass_code vendorid | ||
50 | deviceid progif_code subsys_id | ||
51 | |||
52 | The PCI endpoint function driver populates these entries with default values | ||
53 | when the device is bound to the driver. The pci-epf-test driver populates | ||
54 | vendorid with 0xffff and interrupt_pin with 0x0001 | ||
55 | |||
56 | # cat functions/pci_epf_test/func1/vendorid | ||
57 | 0xffff | ||
58 | # cat functions/pci_epf_test/func1/interrupt_pin | ||
59 | 0x0001 | ||
60 | |||
61 | 1.4 Configuring pci-epf-test Device | ||
62 | |||
63 | The user can configure the pci-epf-test device using its configfs entry. In order | ||
64 | to change the vendorid, deviceid and the number of MSI interrupts used by the | ||
65 | function device, the following commands can be used. | ||
66 | |||
67 | # echo 0x104c > functions/pci_epf_test/func1/vendorid | ||
68 | # echo 0xb500 > functions/pci_epf_test/func1/deviceid | ||
69 | # echo 16 > functions/pci_epf_test/func1/msi_interrupts | ||
70 | |||
71 | 1.5 Binding pci-epf-test Device to EP Controller | ||
72 | |||
73 | In order for the endpoint function device to be useful, it has to be bound to | ||
74 | a PCI endpoint controller driver. Use the configfs to bind the function | ||
75 | device to one of the controller drivers present in the system. | ||
76 | |||
77 | # ln -s functions/pci_epf_test/func1 controllers/51000000.pcie_ep/ | ||
78 | |||
79 | Once the above step is completed, the PCI endpoint is ready to establish a link | ||
80 | with the host. | ||
81 | |||
82 | 1.6 Start the Link | ||
83 | |||
84 | In order for the endpoint device to establish a link with the host, the _start_ | ||
85 | field should be populated with '1'. | ||
86 | |||
87 | # echo 1 > controllers/51000000.pcie_ep/start | ||
88 | |||
89 | 2. RootComplex Device | ||
90 | |||
91 | 2.1 lspci Output | ||
92 | |||
93 | Note that the devices listed here correspond to the values populated in 1.4 above. | ||
94 | |||
95 | 00:00.0 PCI bridge: Texas Instruments Device 8888 (rev 01) | ||
96 | 01:00.0 Unassigned class [ff00]: Texas Instruments Device b500 | ||
97 | |||
98 | 2.2 Using Endpoint Test function Device | ||
99 | |||
100 | pcitest.sh, added in tools/pci/, can be used to run all the default PCI endpoint | ||
101 | tests. Before pcitest.sh can be used, pcitest.c should be compiled using the | ||
102 | following commands. | ||
103 | |||
104 | cd <kernel-dir> | ||
105 | make headers_install ARCH=arm | ||
106 | arm-linux-gnueabihf-gcc -Iusr/include tools/pci/pcitest.c -o pcitest | ||
107 | cp pcitest <rootfs>/usr/sbin/ | ||
108 | cp tools/pci/pcitest.sh <rootfs> | ||
109 | |||
110 | 2.2.1 pcitest.sh Output | ||
111 | # ./pcitest.sh | ||
112 | BAR tests | ||
113 | |||
114 | BAR0: OKAY | ||
115 | BAR1: OKAY | ||
116 | BAR2: OKAY | ||
117 | BAR3: OKAY | ||
118 | BAR4: NOT OKAY | ||
119 | BAR5: NOT OKAY | ||
120 | |||
121 | Interrupt tests | ||
122 | |||
123 | LEGACY IRQ: NOT OKAY | ||
124 | MSI1: OKAY | ||
125 | MSI2: OKAY | ||
126 | MSI3: OKAY | ||
127 | MSI4: OKAY | ||
128 | MSI5: OKAY | ||
129 | MSI6: OKAY | ||
130 | MSI7: OKAY | ||
131 | MSI8: OKAY | ||
132 | MSI9: OKAY | ||
133 | MSI10: OKAY | ||
134 | MSI11: OKAY | ||
135 | MSI12: OKAY | ||
136 | MSI13: OKAY | ||
137 | MSI14: OKAY | ||
138 | MSI15: OKAY | ||
139 | MSI16: OKAY | ||
140 | MSI17: NOT OKAY | ||
141 | MSI18: NOT OKAY | ||
142 | MSI19: NOT OKAY | ||
143 | MSI20: NOT OKAY | ||
144 | MSI21: NOT OKAY | ||
145 | MSI22: NOT OKAY | ||
146 | MSI23: NOT OKAY | ||
147 | MSI24: NOT OKAY | ||
148 | MSI25: NOT OKAY | ||
149 | MSI26: NOT OKAY | ||
150 | MSI27: NOT OKAY | ||
151 | MSI28: NOT OKAY | ||
152 | MSI29: NOT OKAY | ||
153 | MSI30: NOT OKAY | ||
154 | MSI31: NOT OKAY | ||
155 | MSI32: NOT OKAY | ||
156 | |||
157 | Read Tests | ||
158 | |||
159 | READ ( 1 bytes): OKAY | ||
160 | READ ( 1024 bytes): OKAY | ||
161 | READ ( 1025 bytes): OKAY | ||
162 | READ (1024000 bytes): OKAY | ||
163 | READ (1024001 bytes): OKAY | ||
164 | |||
165 | Write Tests | ||
166 | |||
167 | WRITE ( 1 bytes): OKAY | ||
168 | WRITE ( 1024 bytes): OKAY | ||
169 | WRITE ( 1025 bytes): OKAY | ||
170 | WRITE (1024000 bytes): OKAY | ||
171 | WRITE (1024001 bytes): OKAY | ||
172 | |||
173 | Copy Tests | ||
174 | |||
175 | COPY ( 1 bytes): OKAY | ||
176 | COPY ( 1024 bytes): OKAY | ||
177 | COPY ( 1025 bytes): OKAY | ||
178 | COPY (1024000 bytes): OKAY | ||
179 | COPY (1024001 bytes): OKAY | ||
diff --git a/Documentation/PCI/pci-iov-howto.txt b/Documentation/PCI/pci-iov-howto.txt index 2d91ae251982..d2a84151e99c 100644 --- a/Documentation/PCI/pci-iov-howto.txt +++ b/Documentation/PCI/pci-iov-howto.txt | |||
@@ -68,6 +68,18 @@ To disable SR-IOV capability: | |||
68 | echo 0 > \ | 68 | echo 0 > \ |
69 | /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_numvfs | 69 | /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_numvfs |
70 | 70 | ||
71 | To enable auto probing of VFs by a compatible driver on the host, run the | ||
72 | command below before enabling SR-IOV capabilities. This is the | ||
73 | default behavior. | ||
74 | echo 1 > \ | ||
75 | /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_drivers_autoprobe | ||
76 | |||
77 | To disable auto probing of VFs by a compatible driver on the host, run the | ||
78 | command below before enabling SR-IOV capabilities. Updating this | ||
79 | entry will not affect VFs which are already probed. | ||
80 | echo 0 > \ | ||
81 | /sys/bus/pci/devices/<DOMAIN:BUS:DEVICE.FUNCTION>/sriov_drivers_autoprobe | ||
82 | |||
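The same sequence can also be driven from a program. A minimal userspace sketch follows; the device address 0000:03:00.0 and the VF count are placeholders.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void sysfs_write(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0)
                    return;
            if (write(fd, val, strlen(val)) < 0)
                    perror(path);
            close(fd);
    }

    int main(void)
    {
            const char *dev = "/sys/bus/pci/devices/0000:03:00.0";
            char path[256];

            /* keep newly enabled VFs unbound ... */
            snprintf(path, sizeof(path), "%s/sriov_drivers_autoprobe", dev);
            sysfs_write(path, "0");

            /* ... then enable 4 VFs */
            snprintf(path, sizeof(path), "%s/sriov_numvfs", dev);
            sysfs_write(path, "4");
            return 0;
    }
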
71 | 3.2 Usage example | 83 | 3.2 Usage example |
72 | 84 | ||
73 | Following piece of code illustrates the usage of the SR-IOV API. | 85 | Following piece of code illustrates the usage of the SR-IOV API. |
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt index 1392c705ceca..b2480dd38c11 100644 --- a/Documentation/devicetree/bindings/pci/designware-pcie.txt +++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt | |||
@@ -6,30 +6,40 @@ Required properties: | |||
6 | - reg-names: Must be "config" for the PCIe configuration space. | 6 | - reg-names: Must be "config" for the PCIe configuration space. |
7 | (The old way of getting the configuration address space from "ranges" | 7 | (The old way of getting the configuration address space from "ranges" |
8 | is deprecated and should be avoided.) | 8 | is deprecated and should be avoided.) |
9 | - num-lanes: number of lanes to use | ||
10 | RC mode: | ||
9 | - #address-cells: set to <3> | 11 | - #address-cells: set to <3> |
10 | - #size-cells: set to <2> | 12 | - #size-cells: set to <2> |
11 | - device_type: set to "pci" | 13 | - device_type: set to "pci" |
12 | - ranges: ranges for the PCI memory and I/O regions | 14 | - ranges: ranges for the PCI memory and I/O regions |
13 | - #interrupt-cells: set to <1> | 15 | - #interrupt-cells: set to <1> |
14 | - interrupt-map-mask and interrupt-map: standard PCI properties | 16 | - interrupt-map-mask and interrupt-map: standard PCI |
15 | to define the mapping of the PCIe interface to interrupt | 17 | properties to define the mapping of the PCIe interface to interrupt |
16 | numbers. | 18 | numbers. |
17 | - num-lanes: number of lanes to use | 19 | EP mode: |
20 | - num-ib-windows: number of inbound address translation | ||
21 | windows | ||
22 | - num-ob-windows: number of outbound address translation | ||
23 | windows | ||
18 | 24 | ||
19 | Optional properties: | 25 | Optional properties: |
20 | - num-viewport: number of view ports configured in hardware. If a platform | ||
21 | does not specify it, the driver assumes 2. | ||
22 | - num-lanes: number of lanes to use (this property should be specified unless | 26 | - num-lanes: number of lanes to use (this property should be specified unless |
23 | the link is brought already up in BIOS) | 27 | the link is brought already up in BIOS) |
24 | - reset-gpio: gpio pin number of power good signal | 28 | - reset-gpio: gpio pin number of power good signal |
25 | - bus-range: PCI bus numbers covered (it is recommended for new devicetrees to | ||
26 | specify this property, to keep backwards compatibility a range of 0x00-0xff | ||
27 | is assumed if not present) | ||
28 | - clocks: Must contain an entry for each entry in clock-names. | 29 | - clocks: Must contain an entry for each entry in clock-names. |
29 | See ../clocks/clock-bindings.txt for details. | 30 | See ../clocks/clock-bindings.txt for details. |
30 | - clock-names: Must include the following entries: | 31 | - clock-names: Must include the following entries: |
31 | - "pcie" | 32 | - "pcie" |
32 | - "pcie_bus" | 33 | - "pcie_bus" |
34 | RC mode: | ||
35 | - num-viewport: number of view ports configured in | ||
36 | hardware. If a platform does not specify it, the driver assumes 2. | ||
37 | - bus-range: PCI bus numbers covered (it is recommended | ||
38 | for new devicetrees to specify this property, to keep backwards | ||
39 | compatibility a range of 0x00-0xff is assumed if not present) | ||
40 | EP mode: | ||
41 | - max-functions: maximum number of functions that can be | ||
42 | configured | ||
33 | 43 | ||
34 | Example configuration: | 44 | Example configuration: |
35 | 45 | ||
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt new file mode 100644 index 000000000000..35d4a979bb7b --- /dev/null +++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt | |||
@@ -0,0 +1,129 @@ | |||
1 | Faraday Technology FTPCI100 PCI Host Bridge | ||
2 | |||
3 | This PCI bridge is found inside the Cortina Systems Gemini SoC platform and | ||
4 | is a generic IP block from Faraday Technology. It exists in two variants: | ||
5 | plain and dual PCI. The plain version embeds a cascading interrupt controller | ||
6 | into the host bridge. The dual version routes the interrupts to the host | ||
7 | chip's interrupt controller. | ||
8 | |||
9 | The host controller appears on the PCI bus with vendor ID 0x159b (Faraday | ||
10 | Technology) and product ID 0x4321. | ||
11 | |||
12 | Mandatory properties: | ||
13 | |||
14 | - compatible: ranging from specific to generic, should be one of | ||
15 | "cortina,gemini-pci", "faraday,ftpci100" | ||
16 | "cortina,gemini-pci-dual", "faraday,ftpci100-dual" | ||
17 | "faraday,ftpci100" | ||
18 | "faraday,ftpci100-dual" | ||
19 | - reg: memory base and size for the host bridge | ||
20 | - #address-cells: set to <3> | ||
21 | - #size-cells: set to <2> | ||
22 | - #interrupt-cells: set to <1> | ||
23 | - bus-range: set to <0x00 0xff> | ||
24 | - device_type, set to "pci" | ||
25 | - ranges: see pci.txt | ||
26 | - interrupt-map-mask: see pci.txt | ||
27 | - interrupt-map: see pci.txt | ||
28 | - dma-ranges: three ranges for the inbound memory region. The ranges must | ||
29 | be aligned to a 1MB boundary, and may be 1MB, 2MB, 4MB, 8MB, 16MB, 32MB, 64MB, | ||
30 | 128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as | ||
31 | pre-fetchable. | ||
32 | |||
33 | Mandatory subnodes: | ||
34 | - For "faraday,ftpci100" a node representing the interrupt-controller inside the | ||
35 | host bridge is mandatory. It has the following mandatory properties: | ||
36 | - interrupt: see interrupt-controller/interrupts.txt | ||
37 | - interrupt-parent: see interrupt-controller/interrupts.txt | ||
38 | - interrupt-controller: see interrupt-controller/interrupts.txt | ||
39 | - #address-cells: set to <0> | ||
40 | - #interrupt-cells: set to <1> | ||
41 | |||
42 | I/O space considerations: | ||
43 | |||
44 | The plain variant has 128MiB of non-prefetchable memory space, whereas the | ||
45 | "dual" variant has 64MiB. Take this into account when describing the ranges. | ||
46 | |||
47 | Interrupt map considerations: | ||
48 | |||
49 | The "dual" variant will get INT A, B, C, D from the system interrupt controller | ||
50 | and should point to respective interrupt in that controller in its | ||
51 | interrupt-map. | ||
52 | |||
53 | The only documentation of how the Faraday PCI (the non-dual variant) | ||
54 | assigns the default interrupt mapping/swizzling is the code itself, which has | ||
55 | typically been like this, doing the swizzling on the interrupt controller side | ||
56 | rather than in the interconnect: | ||
57 | |||
58 | interrupt-map-mask = <0xf800 0 0 7>; | ||
59 | interrupt-map = | ||
60 | <0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */ | ||
61 | <0x4800 0 0 2 &pci_intc 1>, | ||
62 | <0x4800 0 0 3 &pci_intc 2>, | ||
63 | <0x4800 0 0 4 &pci_intc 3>, | ||
64 | <0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */ | ||
65 | <0x5000 0 0 2 &pci_intc 2>, | ||
66 | <0x5000 0 0 3 &pci_intc 3>, | ||
67 | <0x5000 0 0 4 &pci_intc 0>, | ||
68 | <0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */ | ||
69 | <0x5800 0 0 2 &pci_intc 3>, | ||
70 | <0x5800 0 0 3 &pci_intc 0>, | ||
71 | <0x5800 0 0 4 &pci_intc 1>, | ||
72 | <0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */ | ||
73 | <0x6000 0 0 2 &pci_intc 0>, | ||
74 | <0x6000 0 0 3 &pci_intc 1>, | ||
75 | <0x6000 0 0 4 &pci_intc 2>; | ||
76 | |||
77 | Example: | ||
78 | |||
79 | pci@50000000 { | ||
80 | compatible = "cortina,gemini-pci", "faraday,ftpci100"; | ||
81 | reg = <0x50000000 0x100>; | ||
82 | interrupts = <8 IRQ_TYPE_LEVEL_HIGH>, /* PCI A */ | ||
83 | <26 IRQ_TYPE_LEVEL_HIGH>, /* PCI B */ | ||
84 | <27 IRQ_TYPE_LEVEL_HIGH>, /* PCI C */ | ||
85 | <28 IRQ_TYPE_LEVEL_HIGH>; /* PCI D */ | ||
86 | #address-cells = <3>; | ||
87 | #size-cells = <2>; | ||
88 | #interrupt-cells = <1>; | ||
89 | |||
90 | bus-range = <0x00 0xff>; | ||
91 | ranges = /* 1MiB I/O space 0x50000000-0x500fffff */ | ||
92 | <0x01000000 0 0 0x50000000 0 0x00100000>, | ||
93 | /* 128MiB non-prefetchable memory 0x58000000-0x5fffffff */ | ||
94 | <0x02000000 0 0x58000000 0x58000000 0 0x08000000>; | ||
95 | |||
96 | /* DMA ranges */ | ||
97 | dma-ranges = | ||
98 | /* 128MiB at 0x00000000-0x07ffffff */ | ||
99 | <0x02000000 0 0x00000000 0x00000000 0 0x08000000>, | ||
100 | /* 64MiB at 0x00000000-0x03ffffff */ | ||
101 | <0x02000000 0 0x00000000 0x00000000 0 0x04000000>, | ||
102 | /* 64MiB at 0x00000000-0x03ffffff */ | ||
103 | <0x02000000 0 0x00000000 0x00000000 0 0x04000000>; | ||
104 | |||
105 | interrupt-map-mask = <0xf800 0 0 7>; | ||
106 | interrupt-map = | ||
107 | <0x4800 0 0 1 &pci_intc 0>, /* Slot 9 */ | ||
108 | <0x4800 0 0 2 &pci_intc 1>, | ||
109 | <0x4800 0 0 3 &pci_intc 2>, | ||
110 | <0x4800 0 0 4 &pci_intc 3>, | ||
111 | <0x5000 0 0 1 &pci_intc 1>, /* Slot 10 */ | ||
112 | <0x5000 0 0 2 &pci_intc 2>, | ||
113 | <0x5000 0 0 3 &pci_intc 3>, | ||
114 | <0x5000 0 0 4 &pci_intc 0>, | ||
115 | <0x5800 0 0 1 &pci_intc 2>, /* Slot 11 */ | ||
116 | <0x5800 0 0 2 &pci_intc 3>, | ||
117 | <0x5800 0 0 3 &pci_intc 0>, | ||
118 | <0x5800 0 0 4 &pci_intc 1>, | ||
119 | <0x6000 0 0 1 &pci_intc 3>, /* Slot 12 */ | ||
120 | <0x6000 0 0 2 &pci_intc 0>, | ||
121 | <0x6000 0 0 3 &pci_intc 0>, | ||
122 | <0x6000 0 0 4 &pci_intc 0>; | ||
123 | pci_intc: interrupt-controller { | ||
124 | interrupt-parent = <&intcon>; | ||
125 | interrupt-controller; | ||
126 | #address-cells = <0>; | ||
127 | #interrupt-cells = <1>; | ||
128 | }; | ||
129 | }; | ||
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt index 83aeb1f5a645..e3d5680875b1 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt | |||
@@ -4,7 +4,11 @@ This PCIe host controller is based on the Synopsis Designware PCIe IP | |||
4 | and thus inherits all the common properties defined in designware-pcie.txt. | 4 | and thus inherits all the common properties defined in designware-pcie.txt. |
5 | 5 | ||
6 | Required properties: | 6 | Required properties: |
7 | - compatible: "fsl,imx6q-pcie", "fsl,imx6sx-pcie", "fsl,imx6qp-pcie" | 7 | - compatible: |
8 | - "fsl,imx6q-pcie" | ||
9 | - "fsl,imx6sx-pcie", | ||
10 | - "fsl,imx6qp-pcie" | ||
11 | - "fsl,imx7d-pcie" | ||
8 | - reg: base address and length of the PCIe controller | 12 | - reg: base address and length of the PCIe controller |
9 | - interrupts: A list of interrupt outputs of the controller. Must contain an | 13 | - interrupts: A list of interrupt outputs of the controller. Must contain an |
10 | entry for each entry in the interrupt-names property. | 14 | entry for each entry in the interrupt-names property. |
@@ -34,6 +38,14 @@ Additional required properties for imx6sx-pcie: | |||
34 | - clock names: Must include the following additional entries: | 38 | - clock names: Must include the following additional entries: |
35 | - "pcie_inbound_axi" | 39 | - "pcie_inbound_axi" |
36 | 40 | ||
41 | Additional required properties for imx7d-pcie: | ||
42 | - power-domains: Must be set to a phandle pointing to PCIE_PHY power domain | ||
43 | - resets: Must contain phandles to PCIe-related reset lines exposed by SRC | ||
44 | IP block | ||
45 | - reset-names: Must contain the following entries: | ||
46 | - "pciephy" | ||
47 | - "apps" | ||
48 | |||
37 | Example: | 49 | Example: |
38 | 50 | ||
39 | pcie@0x01000000 { | 51 | pcie@0x01000000 { |
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt index 60e25161f351..6a07c96227e0 100644 --- a/Documentation/devicetree/bindings/pci/ti-pci.txt +++ b/Documentation/devicetree/bindings/pci/ti-pci.txt | |||
@@ -1,17 +1,22 @@ | |||
1 | TI PCI Controllers | 1 | TI PCI Controllers |
2 | 2 | ||
3 | PCIe Designware Controller | 3 | PCIe Designware Controller |
4 | - compatible: Should be "ti,dra7-pcie"" | 4 | - compatible: Should be "ti,dra7-pcie" for RC |
5 | - reg : Two register ranges as listed in the reg-names property | 5 | Should be "ti,dra7-pcie-ep" for EP |
6 | - reg-names : The first entry must be "ti-conf" for the TI specific registers | ||
7 | The second entry must be "rc-dbics" for the designware pcie | ||
8 | registers | ||
9 | The third entry must be "config" for the PCIe configuration space | ||
10 | - phys : list of PHY specifiers (used by generic PHY framework) | 6 | - phys : list of PHY specifiers (used by generic PHY framework) |
11 | - phy-names : must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the | 7 | - phy-names : must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the |
12 | number of PHYs as specified in *phys* property. | 8 | number of PHYs as specified in *phys* property. |
13 | - ti,hwmods : Name of the hwmod associated to the pcie, "pcie<X>", | 9 | - ti,hwmods : Name of the hwmod associated to the pcie, "pcie<X>", |
14 | where <X> is the instance number of the pcie from the HW spec. | 10 | where <X> is the instance number of the pcie from the HW spec. |
11 | - num-lanes as specified in ../designware-pcie.txt | ||
12 | |||
13 | HOST MODE | ||
14 | ========= | ||
15 | - reg : Two register ranges as listed in the reg-names property | ||
16 | - reg-names : The first entry must be "ti-conf" for the TI specific registers | ||
17 | The second entry must be "rc-dbics" for the DesignWare PCIe | ||
18 | registers | ||
19 | The third entry must be "config" for the PCIe configuration space | ||
15 | - interrupts : Two interrupt entries must be specified. The first one is for | 20 | - interrupts : Two interrupt entries must be specified. The first one is for |
16 | main interrupt line and the second for MSI interrupt line. | 21 | main interrupt line and the second for MSI interrupt line. |
17 | - #address-cells, | 22 | - #address-cells, |
@@ -19,13 +24,36 @@ PCIe Designware Controller | |||
19 | #interrupt-cells, | 24 | #interrupt-cells, |
20 | device_type, | 25 | device_type, |
21 | ranges, | 26 | ranges, |
22 | num-lanes, | ||
23 | interrupt-map-mask, | 27 | interrupt-map-mask, |
24 | interrupt-map : as specified in ../designware-pcie.txt | 28 | interrupt-map : as specified in ../designware-pcie.txt |
25 | 29 | ||
30 | DEVICE MODE | ||
31 | =========== | ||
32 | - reg : Four register ranges as listed in the reg-names property | ||
33 | - reg-names : "ti-conf" for the TI specific registers | ||
34 | "ep_dbics" for the standard configuration registers as | ||
35 | they are locally accessed within the DIF CS space | ||
36 | "ep_dbics2" for the standard configuration registers as | ||
37 | they are locally accessed within the DIF CS2 space | ||
38 | "addr_space" used to map remote RC address space | ||
39 | - interrupts : one interrupt entry must be specified for the main interrupt. | ||
40 | - num-ib-windows : number of inbound address translation windows | ||
41 | - num-ob-windows : number of outbound address translation windows | ||
42 | - ti,syscon-unaligned-access: phandle to the syscon DT node. The 1st argument | ||
43 | should contain the register offset within syscon | ||
44 | and the 2nd argument should contain the bit field | ||
45 | for setting the bit to enable unaligned | ||
46 | access. | ||
47 | |||
26 | Optional Property: | 48 | Optional Property: |
27 | - gpios : Should be added if a gpio line is required to drive PERST# line | 49 | - gpios : Should be added if a gpio line is required to drive PERST# line |
28 | 50 | ||
51 | NOTE: Two DT nodes may be added for each PCI controller; one for host | ||
52 | mode and another for device mode. So in order for PCI to | ||
53 | work in host mode, the EP mode DT node should be disabled, and in order for PCI | ||
54 | to work in EP mode, the host mode DT node should be disabled. Host mode and EP | ||
55 | mode are mutually exclusive. | ||
56 | |||
29 | Example: | 57 | Example: |
30 | axi { | 58 | axi { |
31 | compatible = "simple-bus"; | 59 | compatible = "simple-bus"; |
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index bf34d5b3a733..e72587fe477d 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt | |||
@@ -342,8 +342,10 @@ PER-CPU MEM | |||
342 | devm_free_percpu() | 342 | devm_free_percpu() |
343 | 343 | ||
344 | PCI | 344 | PCI |
345 | pcim_enable_device() : after success, all PCI ops become managed | 345 | devm_pci_remap_cfgspace() : ioremap PCI configuration space |
346 | pcim_pin_device() : keep PCI device enabled after release | 346 | devm_pci_remap_cfg_resource() : ioremap PCI configuration space resource |
347 | pcim_enable_device() : after success, all PCI ops become managed | ||
348 | pcim_pin_device() : keep PCI device enabled after release | ||
347 | 349 | ||
348 | PHY | 350 | PHY |
349 | devm_usb_get_phy() | 351 | devm_usb_get_phy() |
diff --git a/Documentation/filesystems/sysfs-pci.txt b/Documentation/filesystems/sysfs-pci.txt index 6ea1ceda6f52..06f1d64c6f70 100644 --- a/Documentation/filesystems/sysfs-pci.txt +++ b/Documentation/filesystems/sysfs-pci.txt | |||
@@ -113,9 +113,18 @@ Supporting PCI access on new platforms | |||
113 | -------------------------------------- | 113 | -------------------------------------- |
114 | 114 | ||
115 | In order to support PCI resource mapping as described above, Linux platform | 115 | In order to support PCI resource mapping as described above, Linux platform |
116 | code must define HAVE_PCI_MMAP and provide a pci_mmap_page_range function. | 116 | code should ideally define ARCH_GENERIC_PCI_MMAP_RESOURCE and use the generic |
117 | Platforms are free to only support subsets of the mmap functionality, but | 117 | implementation of that functionality. To support the historical interface of |
118 | useful return codes should be provided. | 118 | mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP. |
119 | |||
120 | Alternatively, platforms which set HAVE_PCI_MMAP may provide their own | ||
121 | implementation of pci_mmap_page_range() instead of defining | ||
122 | ARCH_GENERIC_PCI_MMAP_RESOURCE. | ||
123 | |||
124 | Platforms which support write-combining maps of PCI resources must define | ||
125 | arch_can_pci_mmap_wc() which shall evaluate to non-zero at runtime when | ||
126 | write-combining is permitted. Platforms which support maps of I/O resources | ||
127 | define arch_can_pci_mmap_io() similarly. | ||
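
As a hypothetical example (not lifted from any particular architecture), an arch's asm/pci.h fragment might contain:

    /* use the generic resource-mmap implementation described above */
    #define ARCH_GENERIC_PCI_MMAP_RESOURCE  1

    /* only needed if the legacy /proc/bus/pci mmap() interface is wanted */
    #define HAVE_PCI_MMAP                   1

    /* this arch is assumed to always allow WC and I/O mappings; a real
     * port would gate these on the hardware's capabilities */
    #define arch_can_pci_mmap_wc()          1
    #define arch_can_pci_mmap_io()          1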
119 | 128 | ||
120 | Legacy resources are protected by the HAVE_PCI_LEGACY define. Platforms | 129 | Legacy resources are protected by the HAVE_PCI_LEGACY define. Platforms |
121 | wishing to support legacy functionality should define it and provide | 130 | wishing to support legacy functionality should define it and provide |
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index a77ead911956..eccb675a2852 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt | |||
@@ -191,6 +191,7 @@ Code Seq#(hex) Include File Comments | |||
191 | 'W' 00-1F linux/watchdog.h conflict! | 191 | 'W' 00-1F linux/watchdog.h conflict! |
192 | 'W' 00-1F linux/wanrouter.h conflict! (pre 3.9) | 192 | 'W' 00-1F linux/wanrouter.h conflict! (pre 3.9) |
193 | 'W' 00-3F sound/asound.h conflict! | 193 | 'W' 00-3F sound/asound.h conflict! |
194 | 'W' 40-5F drivers/pci/switch/switchtec.c | ||
194 | 'X' all fs/xfs/xfs_fs.h conflict! | 195 | 'X' all fs/xfs/xfs_fs.h conflict! |
195 | and fs/xfs/linux-2.6/xfs_ioctl32.h | 196 | and fs/xfs/linux-2.6/xfs_ioctl32.h |
196 | and include/linux/falloc.h | 197 | and include/linux/falloc.h |
diff --git a/Documentation/misc-devices/pci-endpoint-test.txt b/Documentation/misc-devices/pci-endpoint-test.txt new file mode 100644 index 000000000000..4ebc3594b32c --- /dev/null +++ b/Documentation/misc-devices/pci-endpoint-test.txt | |||
@@ -0,0 +1,35 @@ | |||
1 | Driver for PCI Endpoint Test Function | ||
2 | |||
3 | This driver should be used as a host side driver if the root complex is | ||
4 | connected to a configurable PCI endpoint running *pci_epf_test* function | ||
5 | driver configured according to [1]. | ||
6 | |||
7 | The "pci_endpoint_test" driver can be used to perform the following tests. | ||
8 | |||
9 | The PCI driver for the test device performs the following tests | ||
10 | *) verify the addresses programmed in the BARs | ||
11 | *) raise a legacy IRQ | ||
12 | *) raise an MSI IRQ | ||
13 | *) read data | ||
14 | *) write data | ||
15 | *) copy data | ||
16 | |||
17 | This misc driver creates /dev/pci-endpoint-test.<num> for every | ||
18 | *pci_epf_test* function connected to the root complex and "ioctls" | ||
19 | should be used to perform the above tests. | ||
20 | |||
21 | ioctl | ||
22 | ----- | ||
23 | PCITEST_BAR: Tests the BAR. The number of the BAR to be tested | ||
24 | should be passed as argument. | ||
25 | PCITEST_LEGACY_IRQ: Tests legacy IRQ | ||
26 | PCITEST_MSI: Tests message signalled interrupts. The MSI number | ||
27 | to be tested should be passed as argument. | ||
28 | PCITEST_WRITE: Perform write tests. The size of the buffer should be passed | ||
29 | as argument. | ||
30 | PCITEST_READ: Perform read tests. The size of the buffer should be passed | ||
31 | as argument. | ||
32 | PCITEST_COPY: Perform copy tests. The size of the buffer should be passed | ||
33 | as argument. | ||
34 | |||
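A stripped-down example of exercising two of these ioctls from userspace is shown below; the device node index, the UAPI header name and the assumption that a positive return value means the test passed are all illustrative, and tools/pci/pcitest.c is the reference user.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/pcitest.h>

    int main(void)
    {
            int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
            int ret;

            if (fd < 0)
                    return 1;

            ret = ioctl(fd, PCITEST_BAR, 0);          /* test BAR0 */
            printf("BAR0:  %s\n", ret > 0 ? "OKAY" : "NOT OKAY");

            ret = ioctl(fd, PCITEST_WRITE, 1024);     /* 1024-byte write test */
            printf("WRITE: %s\n", ret > 0 ? "OKAY" : "NOT OKAY");

            close(fd);
            return 0;
    }
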
35 | [1] -> Documentation/PCI/endpoint/function/binding/pci-test.txt | ||
diff --git a/Documentation/switchtec.txt b/Documentation/switchtec.txt new file mode 100644 index 000000000000..a0a9c7b3d4d5 --- /dev/null +++ b/Documentation/switchtec.txt | |||
@@ -0,0 +1,80 @@ | |||
1 | ======================== | ||
2 | Linux Switchtec Support | ||
3 | ======================== | ||
4 | |||
5 | Microsemi's "Switchtec" line of PCI switch devices is already | ||
6 | supported by the kernel with standard PCI switch drivers. However, the | ||
7 | Switchtec device advertises a special management endpoint which | ||
8 | enables some additional functionality. This includes: | ||
9 | |||
10 | * Packet and Byte Counters | ||
11 | * Firmware Upgrades | ||
12 | * Event and Error logs | ||
13 | * Querying port link status | ||
14 | * Custom user firmware commands | ||
15 | |||
16 | The switchtec kernel module implements this functionality. | ||
17 | |||
18 | |||
19 | Interface | ||
20 | ========= | ||
21 | |||
22 | The primary means of communicating with the Switchtec management firmware is | ||
23 | through the Memory-mapped Remote Procedure Call (MRPC) interface. | ||
24 | Commands are submitted to the interface with a 4-byte command | ||
25 | identifier and up to 1KB of command specific data. The firmware will | ||
26 | respond with a 4-byte return code and up to 1KB of command specific | ||
27 | data. The interface only processes a single command at a time. | ||
28 | |||
29 | |||
30 | Userspace Interface | ||
31 | =================== | ||
32 | |||
33 | The MRPC interface will be exposed to userspace through a simple char | ||
34 | device: /dev/switchtec#, one for each management endpoint in the system. | ||
35 | |||
36 | The char device has the following semantics: | ||
37 | |||
38 | * A write must consist of at least 4 bytes and no more than 1028 bytes. | ||
39 | The first four bytes will be interpreted as the command to run and | ||
40 | the remainder will be used as the input data. A write will send the | ||
41 | command to the firmware to begin processing. | ||
42 | |||
43 | * Each write must be followed by exactly one read. Any double write will | ||
44 | produce an error and any read that doesn't follow a write will | ||
45 | produce an error. | ||
46 | |||
47 | * A read will block until the firmware completes the command and return | ||
48 | the four bytes of status plus up to 1024 bytes of output data. (The | ||
49 | length will be specified by the size parameter of the read call -- | ||
50 | reading less than 4 bytes will produce an error.) | ||
51 | |||
52 | * The poll call will also be supported for userspace applications that | ||
53 | need to do other things while waiting for the command to complete. | ||
54 | |||
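A sketch of that write-then-read sequence in C follows; the command identifier and payload are placeholders, and real command codes come from the Switchtec firmware documentation.

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    int run_mrpc_command(void)
    {
            uint8_t in[4 + 4];           /* 4-byte command id + payload */
            uint8_t out[4 + 1024];       /* 4-byte status + up to 1KB of data */
            uint32_t cmd = 0x1234;       /* placeholder command identifier */
            ssize_t n;
            int fd;

            fd = open("/dev/switchtec0", O_RDWR);
            if (fd < 0)
                    return -1;

            memcpy(in, &cmd, sizeof(cmd));
            memset(in + 4, 0, sizeof(in) - 4);      /* command-specific input */

            if (write(fd, in, sizeof(in)) != (ssize_t)sizeof(in)) { /* submit */
                    close(fd);
                    return -1;
            }

            n = read(fd, out, sizeof(out));         /* blocks until completion */
            close(fd);
            return (n < 4) ? -1 : 0;        /* out[0..3] holds the return code */
    }
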
55 | The following IOCTLs are also supported by the device: | ||
56 | |||
57 | * SWITCHTEC_IOCTL_FLASH_INFO - Retrieve firmware length and number | ||
58 | of partitions in the device. | ||
59 | |||
60 | * SWITCHTEC_IOCTL_FLASH_PART_INFO - Retrieve address and length for | ||
61 | any specified partition in flash. | ||
62 | |||
63 | * SWITCHTEC_IOCTL_EVENT_SUMMARY - Read a structure of bitmaps | ||
64 | indicating all uncleared events. | ||
65 | |||
66 | * SWITCHTEC_IOCTL_EVENT_CTL - Get the current count, clear and set flags | ||
67 | for any event. This ioctl takes in a switchtec_ioctl_event_ctl struct | ||
68 | with the event_id, index and flags set (index being the partition or PFF | ||
69 | number for non-global events). It returns whether the event has | ||
70 | occurred, the number of times it has occurred, and any event-specific data. The flags | ||
71 | can be used to clear the count or enable and disable actions to | ||
72 | happen when the event occurs. | ||
73 | By using the SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL flag, | ||
74 | you can set an event to trigger a poll command to return with | ||
75 | POLLPRI. In this way, userspace can wait for events to occur. | ||
76 | |||
77 | * SWITCHTEC_IOCTL_PFF_TO_PORT and SWITCHTEC_IOCTL_PORT_TO_PFF convert | ||
78 | between PCI Function Framework number (used by the event system) | ||
79 | and the Switchtec Logic Port ID and Partition number (which are | ||
80 | more user-friendly). | ||
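As a rough sketch of waiting for an event, assuming the switchtec_ioctl_event_ctl fields match the description above (the authoritative definitions are in include/uapi/linux/switchtec_ioctl.h) and using a placeholder event_id and index:

    #include <poll.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/switchtec_ioctl.h>

    /* Arm an event for polling, then wait for poll() to return POLLPRI. */
    static int wait_for_event(int fd, unsigned int event_id)
    {
            struct switchtec_ioctl_event_ctl ctl;
            struct pollfd pfd = { .fd = fd, .events = POLLPRI };

            memset(&ctl, 0, sizeof(ctl));
            ctl.event_id = event_id;
            ctl.index = 0;                  /* partition/PFF number */
            ctl.flags = SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;

            if (ioctl(fd, SWITCHTEC_IOCTL_EVENT_CTL, &ctl) < 0)
                    return -1;

            if (poll(&pfd, 1, -1) <= 0)
                    return -1;

            return (pfd.revents & POLLPRI) ? 0 : -1;
    }

The file descriptor here is the same /dev/switchtec# device used for MRPC commands.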
diff --git a/MAINTAINERS b/MAINTAINERS index 56b1111cc9c1..a4a9e31fed72 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -9723,6 +9723,15 @@ F: include/linux/pci* | |||
9723 | F: arch/x86/pci/ | 9723 | F: arch/x86/pci/ |
9724 | F: arch/x86/kernel/quirks.c | 9724 | F: arch/x86/kernel/quirks.c |
9725 | 9725 | ||
9726 | PCI ENDPOINT SUBSYSTEM | ||
9727 | M: Kishon Vijay Abraham I <kishon@ti.com> | ||
9728 | L: linux-pci@vger.kernel.org | ||
9729 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git | ||
9730 | S: Supported | ||
9731 | F: drivers/pci/endpoint/ | ||
9732 | F: drivers/misc/pci_endpoint_test.c | ||
9733 | F: tools/pci/ | ||
9734 | |||
9726 | PCI DRIVER FOR ALTERA PCIE IP | 9735 | PCI DRIVER FOR ALTERA PCIE IP |
9727 | M: Ley Foon Tan <lftan@altera.com> | 9736 | M: Ley Foon Tan <lftan@altera.com> |
9728 | L: rfi@lists.rocketboards.org (moderated for non-subscribers) | 9737 | L: rfi@lists.rocketboards.org (moderated for non-subscribers) |
@@ -9797,6 +9806,17 @@ S: Maintained | |||
9797 | F: Documentation/devicetree/bindings/pci/aardvark-pci.txt | 9806 | F: Documentation/devicetree/bindings/pci/aardvark-pci.txt |
9798 | F: drivers/pci/host/pci-aardvark.c | 9807 | F: drivers/pci/host/pci-aardvark.c |
9799 | 9808 | ||
9809 | PCI DRIVER FOR MICROSEMI SWITCHTEC | ||
9810 | M: Kurt Schwemmer <kurt.schwemmer@microsemi.com> | ||
9811 | M: Stephen Bates <stephen.bates@microsemi.com> | ||
9812 | M: Logan Gunthorpe <logang@deltatee.com> | ||
9813 | L: linux-pci@vger.kernel.org | ||
9814 | S: Maintained | ||
9815 | F: Documentation/switchtec.txt | ||
9816 | F: Documentation/ABI/testing/sysfs-class-switchtec | ||
9817 | F: drivers/pci/switch/switchtec* | ||
9818 | F: include/uapi/linux/switchtec_ioctl.h | ||
9819 | |||
9800 | PCI DRIVER FOR NVIDIA TEGRA | 9820 | PCI DRIVER FOR NVIDIA TEGRA |
9801 | M: Thierry Reding <thierry.reding@gmail.com> | 9821 | M: Thierry Reding <thierry.reding@gmail.com> |
9802 | L: linux-tegra@vger.kernel.org | 9822 | L: linux-tegra@vger.kernel.org |
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 42871fb8340e..2cfbc531f63b 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h | |||
@@ -187,6 +187,16 @@ static inline void pci_ioremap_set_mem_type(int mem_type) {} | |||
187 | extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr); | 187 | extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr); |
188 | 188 | ||
189 | /* | 189 | /* |
190 | * PCI configuration space mapping function. | ||
191 | * | ||
192 | * The PCI specification does not allow configuration write | ||
193 | * transactions to be posted. Add an arch specific | ||
194 | * pci_remap_cfgspace() definition that is implemented | ||
195 | * through strongly ordered memory mappings. | ||
196 | */ | ||
197 | #define pci_remap_cfgspace pci_remap_cfgspace | ||
198 | void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size); | ||
199 | /* | ||
190 | * Now, pick up the machine-defined IO definitions | 200 | * Now, pick up the machine-defined IO definitions |
191 | */ | 201 | */ |
192 | #ifdef CONFIG_NEED_MACH_IO_H | 202 | #ifdef CONFIG_NEED_MACH_IO_H |
diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 057d381f4e57..396c92bcc0cf 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h | |||
@@ -29,8 +29,7 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
29 | #define PCI_DMA_BUS_IS_PHYS (1) | 29 | #define PCI_DMA_BUS_IS_PHYS (1) |
30 | 30 | ||
31 | #define HAVE_PCI_MMAP | 31 | #define HAVE_PCI_MMAP |
32 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 32 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
33 | enum pci_mmap_state mmap_state, int write_combine); | ||
34 | 33 | ||
35 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | 34 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) |
36 | { | 35 | { |
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 2f0e07735d1d..b259956365a0 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c | |||
@@ -597,25 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, | |||
597 | return start; | 597 | return start; |
598 | } | 598 | } |
599 | 599 | ||
600 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
601 | enum pci_mmap_state mmap_state, int write_combine) | ||
602 | { | ||
603 | if (mmap_state == pci_mmap_io) | ||
604 | return -EINVAL; | ||
605 | |||
606 | /* | ||
607 | * Mark this as IO | ||
608 | */ | ||
609 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
610 | |||
611 | if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
612 | vma->vm_end - vma->vm_start, | ||
613 | vma->vm_page_prot)) | ||
614 | return -EAGAIN; | ||
615 | |||
616 | return 0; | ||
617 | } | ||
618 | |||
619 | void __init pci_map_io_early(unsigned long pfn) | 600 | void __init pci_map_io_early(unsigned long pfn) |
620 | { | 601 | { |
621 | struct map_desc pci_io_desc = { | 602 | struct map_desc pci_io_desc = { |
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c index 6c679659cda5..67ebff829cf2 100644 --- a/arch/arm/mach-omap2/clockdomains7xx_data.c +++ b/arch/arm/mach-omap2/clockdomains7xx_data.c | |||
@@ -524,7 +524,7 @@ static struct clockdomain pcie_7xx_clkdm = { | |||
524 | .dep_bit = DRA7XX_PCIE_STATDEP_SHIFT, | 524 | .dep_bit = DRA7XX_PCIE_STATDEP_SHIFT, |
525 | .wkdep_srcs = pcie_wkup_sleep_deps, | 525 | .wkdep_srcs = pcie_wkup_sleep_deps, |
526 | .sleepdep_srcs = pcie_wkup_sleep_deps, | 526 | .sleepdep_srcs = pcie_wkup_sleep_deps, |
527 | .flags = CLKDM_CAN_HWSUP_SWSUP, | 527 | .flags = CLKDM_CAN_SWSUP, |
528 | }; | 528 | }; |
529 | 529 | ||
530 | static struct clockdomain atl_7xx_clkdm = { | 530 | static struct clockdomain atl_7xx_clkdm = { |
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index ff0eed23ddf1..fc91205ff46c 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -481,6 +481,13 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) | |||
481 | __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte)); | 481 | __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte)); |
482 | } | 482 | } |
483 | EXPORT_SYMBOL_GPL(pci_ioremap_io); | 483 | EXPORT_SYMBOL_GPL(pci_ioremap_io); |
484 | |||
485 | void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size) | ||
486 | { | ||
487 | return arch_ioremap_caller(res_cookie, size, MT_UNCACHED, | ||
488 | __builtin_return_address(0)); | ||
489 | } | ||
490 | EXPORT_SYMBOL_GPL(pci_remap_cfgspace); | ||
484 | #endif | 491 | #endif |
485 | 492 | ||
486 | /* | 493 | /* |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 33a45bd96860..3b8e728cc944 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -436,6 +436,18 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) | |||
436 | } | 436 | } |
437 | EXPORT_SYMBOL(ioremap_wc); | 437 | EXPORT_SYMBOL(ioremap_wc); |
438 | 438 | ||
439 | #ifdef CONFIG_PCI | ||
440 | |||
441 | #include <asm/mach/map.h> | ||
442 | |||
443 | void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size) | ||
444 | { | ||
445 | return arch_ioremap_caller(res_cookie, size, MT_UNCACHED, | ||
446 | __builtin_return_address(0)); | ||
447 | } | ||
448 | EXPORT_SYMBOL_GPL(pci_remap_cfgspace); | ||
449 | #endif | ||
450 | |||
439 | void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) | 451 | void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) |
440 | { | 452 | { |
441 | return (void *)phys_addr; | 453 | return (void *)phys_addr; |
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 0c00c87bb9dd..35b2e50f17fb 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
@@ -173,6 +173,16 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); | |||
173 | #define iounmap __iounmap | 173 | #define iounmap __iounmap |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * PCI configuration space mapping function. | ||
177 | * | ||
178 | * The PCI specification disallows posted write configuration transactions. | ||
179 | * Add an arch specific pci_remap_cfgspace() definition that is implemented | ||
180 | * through nGnRnE device memory attribute as recommended by the ARM v8 | ||
181 | * Architecture reference manual Issue A.k B2.8.2 "Device memory". | ||
182 | */ | ||
183 | #define pci_remap_cfgspace(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE)) | ||
184 | |||
185 | /* | ||
176 | * io{read,write}{16,32,64}be() macros | 186 | * io{read,write}{16,32,64}be() macros |
177 | */ | 187 | */ |
178 | #define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) | 188 | #define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) |
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index b9a7ba9ca44c..1fc19744ffe9 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h | |||
@@ -22,6 +22,8 @@ | |||
22 | */ | 22 | */ |
23 | #define PCI_DMA_BUS_IS_PHYS (0) | 23 | #define PCI_DMA_BUS_IS_PHYS (0) |
24 | 24 | ||
25 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 | ||
26 | |||
25 | extern int isa_dma_bridge_buggy; | 27 | extern int isa_dma_bridge_buggy; |
26 | 28 | ||
27 | #ifdef CONFIG_PCI | 29 | #ifdef CONFIG_PCI |
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c index 212266a2c5d9..394c2a73d5e2 100644 --- a/arch/cris/arch-v32/drivers/pci/bios.c +++ b/arch/cris/arch-v32/drivers/pci/bios.c | |||
@@ -14,28 +14,6 @@ void pcibios_set_master(struct pci_dev *dev) | |||
14 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); | 14 | pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); |
15 | } | 15 | } |
16 | 16 | ||
17 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
18 | enum pci_mmap_state mmap_state, int write_combine) | ||
19 | { | ||
20 | unsigned long prot; | ||
21 | |||
22 | /* Leave vm_pgoff as-is, the PCI space address is the physical | ||
23 | * address on this platform. | ||
24 | */ | ||
25 | prot = pgprot_val(vma->vm_page_prot); | ||
26 | vma->vm_page_prot = __pgprot(prot); | ||
27 | |||
28 | /* Write-combine setting is ignored, it is changed via the mtrr | ||
29 | * interfaces on this platform. | ||
30 | */ | ||
31 | if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
32 | vma->vm_end - vma->vm_start, | ||
33 | vma->vm_page_prot)) | ||
34 | return -EAGAIN; | ||
35 | |||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | resource_size_t | 17 | resource_size_t |
40 | pcibios_align_resource(void *data, const struct resource *res, | 18 | pcibios_align_resource(void *data, const struct resource *res, |
41 | resource_size_t size, resource_size_t align) | 19 | resource_size_t size, resource_size_t align) |
diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h index b1b289df04c7..6e505332b3e3 100644 --- a/arch/cris/include/asm/pci.h +++ b/arch/cris/include/asm/pci.h | |||
@@ -42,9 +42,7 @@ struct pci_dev; | |||
42 | #define PCI_DMA_BUS_IS_PHYS (1) | 42 | #define PCI_DMA_BUS_IS_PHYS (1) |
43 | 43 | ||
44 | #define HAVE_PCI_MMAP | 44 | #define HAVE_PCI_MMAP |
45 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 45 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
46 | enum pci_mmap_state mmap_state, int write_combine); | ||
47 | |||
48 | 46 | ||
49 | #endif /* __KERNEL__ */ | 47 | #endif /* __KERNEL__ */ |
50 | 48 | ||
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index c0835b0dc722..6459f2d46200 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h | |||
@@ -51,8 +51,9 @@ extern unsigned long ia64_max_iommu_merge_mask; | |||
51 | #define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) | 51 | #define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) |
52 | 52 | ||
53 | #define HAVE_PCI_MMAP | 53 | #define HAVE_PCI_MMAP |
54 | extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, | 54 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
55 | enum pci_mmap_state mmap_state, int write_combine); | 55 | #define arch_can_pci_mmap_wc() 1 |
56 | |||
56 | #define HAVE_PCI_LEGACY | 57 | #define HAVE_PCI_LEGACY |
57 | extern int pci_mmap_legacy_page_range(struct pci_bus *bus, | 58 | extern int pci_mmap_legacy_page_range(struct pci_bus *bus, |
58 | struct vm_area_struct *vma, | 59 | struct vm_area_struct *vma, |
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 8f6ac2f8ae4c..4068bde623dc 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c | |||
@@ -418,52 +418,6 @@ pcibios_align_resource (void *data, const struct resource *res, | |||
418 | return res->start; | 418 | return res->start; |
419 | } | 419 | } |
420 | 420 | ||
421 | int | ||
422 | pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, | ||
423 | enum pci_mmap_state mmap_state, int write_combine) | ||
424 | { | ||
425 | unsigned long size = vma->vm_end - vma->vm_start; | ||
426 | pgprot_t prot; | ||
427 | |||
428 | /* | ||
429 | * I/O space cannot be accessed via normal processor loads and | ||
430 | * stores on this platform. | ||
431 | */ | ||
432 | if (mmap_state == pci_mmap_io) | ||
433 | /* | ||
434 | * XXX we could relax this for I/O spaces for which ACPI | ||
435 | * indicates that the space is 1-to-1 mapped. But at the | ||
436 | * moment, we don't support multiple PCI address spaces and | ||
437 | * the legacy I/O space is not 1-to-1 mapped, so this is moot. | ||
438 | */ | ||
439 | return -EINVAL; | ||
440 | |||
441 | if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) | ||
442 | return -EINVAL; | ||
443 | |||
444 | prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, | ||
445 | vma->vm_page_prot); | ||
446 | |||
447 | /* | ||
448 | * If the user requested WC, the kernel uses UC or WC for this region, | ||
449 | * and the chipset supports WC, we can use WC. Otherwise, we have to | ||
450 | * use the same attribute the kernel uses. | ||
451 | */ | ||
452 | if (write_combine && | ||
453 | ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC || | ||
454 | (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) && | ||
455 | efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start)) | ||
456 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
457 | else | ||
458 | vma->vm_page_prot = prot; | ||
459 | |||
460 | if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
461 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) | ||
462 | return -EAGAIN; | ||
463 | |||
464 | return 0; | ||
465 | } | ||
466 | |||
467 | /** | 421 | /** |
468 | * ia64_pci_get_legacy_mem - generic legacy mem routine | 422 | * ia64_pci_get_legacy_mem - generic legacy mem routine |
469 | * @bus: bus to get legacy memory base address for | 423 | * @bus: bus to get legacy memory base address for |
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 2a120bb70e54..efd4983cb697 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h | |||
@@ -46,12 +46,10 @@ extern int pci_domain_nr(struct pci_bus *bus); | |||
46 | extern int pci_proc_domain(struct pci_bus *bus); | 46 | extern int pci_proc_domain(struct pci_bus *bus); |
47 | 47 | ||
48 | struct vm_area_struct; | 48 | struct vm_area_struct; |
49 | /* Map a range of PCI memory or I/O space for a device into user space */ | ||
50 | int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, | ||
51 | enum pci_mmap_state mmap_state, int write_combine); | ||
52 | 49 | ||
53 | /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ | 50 | /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ |
54 | #define HAVE_PCI_MMAP 1 | 51 | #define HAVE_PCI_MMAP 1 |
52 | #define arch_can_pci_mmap_io() 1 | ||
55 | 53 | ||
56 | extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, | 54 | extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, |
57 | size_t count); | 55 | size_t count); |
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 13bc93242c0c..404fb38d06b7 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c | |||
@@ -278,7 +278,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file, | |||
278 | * | 278 | * |
279 | * Returns a negative error code on failure, zero on success. | 279 | * Returns a negative error code on failure, zero on success. |
280 | */ | 280 | */ |
281 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 281 | int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma, |
282 | enum pci_mmap_state mmap_state, int write_combine) | 282 | enum pci_mmap_state mmap_state, int write_combine) |
283 | { | 283 | { |
284 | resource_size_t offset = | 284 | resource_size_t offset = |
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 30d1129d8624..1000c1b4c875 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h | |||
@@ -110,10 +110,7 @@ extern unsigned long PCIBIOS_MIN_MEM; | |||
110 | extern void pcibios_set_master(struct pci_dev *dev); | 110 | extern void pcibios_set_master(struct pci_dev *dev); |
111 | 111 | ||
112 | #define HAVE_PCI_MMAP | 112 | #define HAVE_PCI_MMAP |
113 | 113 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE | |
114 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
115 | enum pci_mmap_state mmap_state, int write_combine); | ||
116 | |||
117 | #define HAVE_ARCH_PCI_RESOURCE_TO_USER | 114 | #define HAVE_ARCH_PCI_RESOURCE_TO_USER |
118 | 115 | ||
119 | /* | 116 | /* |
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index f6325fa657fb..bd67ac74fe2d 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c | |||
@@ -57,27 +57,3 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, | |||
57 | *start = fixup_bigphys_addr(rsrc->start, size); | 57 | *start = fixup_bigphys_addr(rsrc->start, size); |
58 | *end = rsrc->start + size; | 58 | *end = rsrc->start + size; |
59 | } | 59 | } |
60 | |||
61 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
62 | enum pci_mmap_state mmap_state, int write_combine) | ||
63 | { | ||
64 | unsigned long prot; | ||
65 | |||
66 | /* | ||
67 | * I/O space can be accessed via normal processor loads and stores on | ||
68 | * this platform but for now we elect not to do this and portable | ||
69 | * drivers should not do this anyway. | ||
70 | */ | ||
71 | if (mmap_state == pci_mmap_io) | ||
72 | return -EINVAL; | ||
73 | |||
74 | /* | ||
75 | * Ignore write-combine; for now only return uncached mappings. | ||
76 | */ | ||
77 | prot = pgprot_val(vma->vm_page_prot); | ||
78 | prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED; | ||
79 | vma->vm_page_prot = __pgprot(prot); | ||
80 | |||
81 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
82 | vma->vm_end - vma->vm_start, vma->vm_page_prot); | ||
83 | } | ||
diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index 51159fff025a..d27654902f28 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h | |||
@@ -74,9 +74,7 @@ static inline int pci_controller_num(struct pci_dev *dev) | |||
74 | } | 74 | } |
75 | 75 | ||
76 | #define HAVE_PCI_MMAP | 76 | #define HAVE_PCI_MMAP |
77 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 77 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
78 | enum pci_mmap_state mmap_state, | ||
79 | int write_combine); | ||
80 | 78 | ||
81 | #endif /* __KERNEL__ */ | 79 | #endif /* __KERNEL__ */ |
82 | 80 | ||
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c index b7ab8378964c..e0f4617c0c7a 100644 --- a/arch/mn10300/unit-asb2305/pci-asb2305.c +++ b/arch/mn10300/unit-asb2305/pci-asb2305.c | |||
@@ -210,26 +210,3 @@ void __init pcibios_resource_survey(void) | |||
210 | pcibios_allocate_resources(0); | 210 | pcibios_allocate_resources(0); |
211 | pcibios_allocate_resources(1); | 211 | pcibios_allocate_resources(1); |
212 | } | 212 | } |
213 | |||
214 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
215 | enum pci_mmap_state mmap_state, int write_combine) | ||
216 | { | ||
217 | unsigned long prot; | ||
218 | |||
219 | /* Leave vm_pgoff as-is, the PCI space address is the physical | ||
220 | * address on this platform. | ||
221 | */ | ||
222 | vma->vm_flags |= VM_LOCKED; | ||
223 | |||
224 | prot = pgprot_val(vma->vm_page_prot); | ||
225 | prot &= ~_PAGE_CACHE; | ||
226 | vma->vm_page_prot = __pgprot(prot); | ||
227 | |||
228 | /* Write-combine setting is ignored */ | ||
229 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
230 | vma->vm_end - vma->vm_start, | ||
231 | vma->vm_page_prot)) | ||
232 | return -EAGAIN; | ||
233 | |||
234 | return 0; | ||
235 | } | ||
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index defebd956585..1de1a3f412ec 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h | |||
@@ -200,8 +200,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | |||
200 | } | 200 | } |
201 | 201 | ||
202 | #define HAVE_PCI_MMAP | 202 | #define HAVE_PCI_MMAP |
203 | 203 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE | |
204 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
205 | enum pci_mmap_state mmap_state, int write_combine); | ||
206 | 204 | ||
207 | #endif /* __ASM_PARISC_PCI_H */ | 205 | #endif /* __ASM_PARISC_PCI_H */ |
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 0903c6abd7a4..13ee3569959a 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c | |||
@@ -227,34 +227,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, | |||
227 | return start; | 227 | return start; |
228 | } | 228 | } |
229 | 229 | ||
230 | |||
231 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
232 | enum pci_mmap_state mmap_state, int write_combine) | ||
233 | { | ||
234 | unsigned long prot; | ||
235 | |||
236 | /* | ||
237 | * I/O space can be accessed via normal processor loads and stores on | ||
238 | * this platform but for now we elect not to do this and portable | ||
239 | * drivers should not do this anyway. | ||
240 | */ | ||
241 | if (mmap_state == pci_mmap_io) | ||
242 | return -EINVAL; | ||
243 | |||
244 | if (write_combine) | ||
245 | return -EINVAL; | ||
246 | |||
247 | /* | ||
248 | * Ignore write-combine; for now only return uncached mappings. | ||
249 | */ | ||
250 | prot = pgprot_val(vma->vm_page_prot); | ||
251 | prot |= _PAGE_NO_CACHE; | ||
252 | vma->vm_page_prot = __pgprot(prot); | ||
253 | |||
254 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
255 | vma->vm_end - vma->vm_start, vma->vm_page_prot); | ||
256 | } | ||
257 | |||
258 | /* | 230 | /* |
259 | * A driver is enabling the device. We make sure that all the appropriate | 231 | * A driver is enabling the device. We make sure that all the appropriate |
260 | * bits are set to allow the device to operate as the driver is expecting. | 232 | * bits are set to allow the device to operate as the driver is expecting. |
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 5011b69107a7..f90b22c722e1 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h | |||
@@ -173,6 +173,8 @@ struct machdep_calls { | |||
173 | /* Called after scan and before resource survey */ | 173 | /* Called after scan and before resource survey */ |
174 | void (*pcibios_fixup_phb)(struct pci_controller *hose); | 174 | void (*pcibios_fixup_phb)(struct pci_controller *hose); |
175 | 175 | ||
176 | resource_size_t (*pcibios_default_alignment)(void); | ||
177 | |||
176 | #ifdef CONFIG_PCI_IOV | 178 | #ifdef CONFIG_PCI_IOV |
177 | void (*pcibios_fixup_sriov)(struct pci_dev *pdev); | 179 | void (*pcibios_fixup_sriov)(struct pci_dev *pdev); |
178 | resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno); | 180 | resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno); |
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 93eded8d3843..c8975dac535f 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h | |||
@@ -77,12 +77,11 @@ extern int pci_domain_nr(struct pci_bus *bus); | |||
77 | extern int pci_proc_domain(struct pci_bus *bus); | 77 | extern int pci_proc_domain(struct pci_bus *bus); |
78 | 78 | ||
79 | struct vm_area_struct; | 79 | struct vm_area_struct; |
80 | /* Map a range of PCI memory or I/O space for a device into user space */ | ||
81 | int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, | ||
82 | enum pci_mmap_state mmap_state, int write_combine); | ||
83 | 80 | ||
84 | /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ | 81 | /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() and it does WC */ |
85 | #define HAVE_PCI_MMAP 1 | 82 | #define HAVE_PCI_MMAP 1 |
83 | #define arch_can_pci_mmap_io() 1 | ||
84 | #define arch_can_pci_mmap_wc() 1 | ||
86 | 85 | ||
87 | extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, | 86 | extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, |
88 | size_t count); | 87 | size_t count); |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index ffda24a38dda..341a7469cab8 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -233,6 +233,14 @@ void pcibios_reset_secondary_bus(struct pci_dev *dev) | |||
233 | pci_reset_secondary_bus(dev); | 233 | pci_reset_secondary_bus(dev); |
234 | } | 234 | } |
235 | 235 | ||
236 | resource_size_t pcibios_default_alignment(void) | ||
237 | { | ||
238 | if (ppc_md.pcibios_default_alignment) | ||
239 | return ppc_md.pcibios_default_alignment(); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | |||
236 | #ifdef CONFIG_PCI_IOV | 244 | #ifdef CONFIG_PCI_IOV |
237 | resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno) | 245 | resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno) |
238 | { | 246 | { |
@@ -513,7 +521,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file, | |||
513 | * | 521 | * |
514 | * Returns a negative error code on failure, zero on success. | 522 | * Returns a negative error code on failure, zero on success. |
515 | */ | 523 | */ |
516 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 524 | int pci_mmap_page_range(struct pci_dev *dev, int bar, |
525 | struct vm_area_struct *vma, | ||
517 | enum pci_mmap_state mmap_state, int write_combine) | 526 | enum pci_mmap_state mmap_state, int write_combine) |
518 | { | 527 | { |
519 | resource_size_t offset = | 528 | resource_size_t offset = |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6fdbd383f676..283caf1070c9 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -3330,6 +3330,11 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type) | |||
3330 | } | 3330 | } |
3331 | } | 3331 | } |
3332 | 3332 | ||
3333 | static resource_size_t pnv_pci_default_alignment(void) | ||
3334 | { | ||
3335 | return PAGE_SIZE; | ||
3336 | } | ||
3337 | |||
3333 | #ifdef CONFIG_PCI_IOV | 3338 | #ifdef CONFIG_PCI_IOV |
3334 | static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, | 3339 | static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, |
3335 | int resno) | 3340 | int resno) |
@@ -3863,6 +3868,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
3863 | hose->controller_ops = pnv_pci_ioda_controller_ops; | 3868 | hose->controller_ops = pnv_pci_ioda_controller_ops; |
3864 | } | 3869 | } |
3865 | 3870 | ||
3871 | ppc_md.pcibios_default_alignment = pnv_pci_default_alignment; | ||
3872 | |||
3866 | #ifdef CONFIG_PCI_IOV | 3873 | #ifdef CONFIG_PCI_IOV |
3867 | ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; | 3874 | ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; |
3868 | ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment; | 3875 | ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment; |
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index 84563e39a5b8..c99ee286b69f 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c | |||
@@ -269,27 +269,6 @@ void __ref pcibios_report_status(unsigned int status_mask, int warn) | |||
269 | } | 269 | } |
270 | } | 270 | } |
271 | 271 | ||
272 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
273 | enum pci_mmap_state mmap_state, int write_combine) | ||
274 | { | ||
275 | /* | ||
276 | * I/O space can be accessed via normal processor loads and stores on | ||
277 | * this platform but for now we elect not to do this and portable | ||
278 | * drivers should not do this anyway. | ||
279 | */ | ||
280 | if (mmap_state == pci_mmap_io) | ||
281 | return -EINVAL; | ||
282 | |||
283 | /* | ||
284 | * Ignore write-combine; for now only return uncached mappings. | ||
285 | */ | ||
286 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
287 | |||
288 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
289 | vma->vm_end - vma->vm_start, | ||
290 | vma->vm_page_prot); | ||
291 | } | ||
292 | |||
293 | #ifndef CONFIG_GENERIC_IOMAP | 272 | #ifndef CONFIG_GENERIC_IOMAP |
294 | 273 | ||
295 | void __iomem *__pci_ioport_map(struct pci_dev *dev, | 274 | void __iomem *__pci_ioport_map(struct pci_dev *dev, |
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index 644314f2b1ef..17fa69bc814d 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h | |||
@@ -66,8 +66,8 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM; | |||
66 | struct pci_dev; | 66 | struct pci_dev; |
67 | 67 | ||
68 | #define HAVE_PCI_MMAP | 68 | #define HAVE_PCI_MMAP |
69 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 69 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
70 | enum pci_mmap_state mmap_state, int write_combine); | 70 | |
71 | extern void pcibios_set_master(struct pci_dev *dev); | 71 | extern void pcibios_set_master(struct pci_dev *dev); |
72 | 72 | ||
73 | /* Dynamic DMA mapping stuff. | 73 | /* Dynamic DMA mapping stuff. |
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 2303635158f5..b957ca5527a3 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h | |||
@@ -42,13 +42,10 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
42 | /* Platform support for /proc/bus/pci/X/Y mmap()s. */ | 42 | /* Platform support for /proc/bus/pci/X/Y mmap()s. */ |
43 | 43 | ||
44 | #define HAVE_PCI_MMAP | 44 | #define HAVE_PCI_MMAP |
45 | #define arch_can_pci_mmap_io() 1 | ||
45 | #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA | 46 | #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA |
46 | #define get_pci_unmapped_area get_fb_unmapped_area | 47 | #define get_pci_unmapped_area get_fb_unmapped_area |
47 | 48 | ||
48 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
49 | enum pci_mmap_state mmap_state, | ||
50 | int write_combine); | ||
51 | |||
52 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | 49 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) |
53 | { | 50 | { |
54 | return PCI_IRQ_NONE; | 51 | return PCI_IRQ_NONE; |
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 015e55a7495d..7eceaa10836f 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -862,9 +862,9 @@ static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vm | |||
862 | * | 862 | * |
863 | * Returns a negative error code on failure, zero on success. | 863 | * Returns a negative error code on failure, zero on success. |
864 | */ | 864 | */ |
865 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 865 | int pci_mmap_page_range(struct pci_dev *dev, int bar, |
866 | enum pci_mmap_state mmap_state, | 866 | struct vm_area_struct *vma, |
867 | int write_combine) | 867 | enum pci_mmap_state mmap_state, int write_combine) |
868 | { | 868 | { |
869 | int ret; | 869 | int ret; |
870 | 870 | ||
diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h index 37e55d018de5..ac5acdf4c4d0 100644 --- a/arch/unicore32/include/asm/pci.h +++ b/arch/unicore32/include/asm/pci.h | |||
@@ -17,8 +17,7 @@ | |||
17 | #include <mach/hardware.h> /* for PCIBIOS_MIN_* */ | 17 | #include <mach/hardware.h> /* for PCIBIOS_MIN_* */ |
18 | 18 | ||
19 | #define HAVE_PCI_MMAP | 19 | #define HAVE_PCI_MMAP |
20 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 20 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
21 | enum pci_mmap_state mmap_state, int write_combine); | ||
22 | 21 | ||
23 | #endif /* __KERNEL__ */ | 22 | #endif /* __KERNEL__ */ |
24 | #endif | 23 | #endif |
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c index 62137d13c6f9..1053bca1f8aa 100644 --- a/arch/unicore32/kernel/pci.c +++ b/arch/unicore32/kernel/pci.c | |||
@@ -356,26 +356,3 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) | |||
356 | } | 356 | } |
357 | return 0; | 357 | return 0; |
358 | } | 358 | } |
359 | |||
360 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
361 | enum pci_mmap_state mmap_state, int write_combine) | ||
362 | { | ||
363 | unsigned long phys; | ||
364 | |||
365 | if (mmap_state == pci_mmap_io) | ||
366 | return -EINVAL; | ||
367 | |||
368 | phys = vma->vm_pgoff; | ||
369 | |||
370 | /* | ||
371 | * Mark this as IO | ||
372 | */ | ||
373 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
374 | |||
375 | if (remap_pfn_range(vma, vma->vm_start, phys, | ||
376 | vma->vm_end - vma->vm_start, | ||
377 | vma->vm_page_prot)) | ||
378 | return -EAGAIN; | ||
379 | |||
380 | return 0; | ||
381 | } | ||
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 1411dbed5e5e..f513cc231151 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/string.h> | 7 | #include <linux/string.h> |
8 | #include <linux/scatterlist.h> | 8 | #include <linux/scatterlist.h> |
9 | #include <asm/io.h> | 9 | #include <asm/io.h> |
10 | #include <asm/pat.h> | ||
10 | #include <asm/x86_init.h> | 11 | #include <asm/x86_init.h> |
11 | 12 | ||
12 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
@@ -102,10 +103,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); | |||
102 | 103 | ||
103 | 104 | ||
104 | #define HAVE_PCI_MMAP | 105 | #define HAVE_PCI_MMAP |
105 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 106 | #define arch_can_pci_mmap_wc() pat_enabled() |
106 | enum pci_mmap_state mmap_state, | 107 | #define ARCH_GENERIC_PCI_MMAP_RESOURCE |
107 | int write_combine); | ||
108 | |||
109 | 108 | ||
110 | #ifdef CONFIG_PCI | 109 | #ifdef CONFIG_PCI |
111 | extern void early_quirks(void); | 110 | extern void early_quirks(void); |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 6fa84d531f4f..7b4307163eac 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -406,50 +406,3 @@ void __init pcibios_resource_survey(void) | |||
406 | */ | 406 | */ |
407 | ioapic_insert_resources(); | 407 | ioapic_insert_resources(); |
408 | } | 408 | } |
409 | |||
410 | static const struct vm_operations_struct pci_mmap_ops = { | ||
411 | .access = generic_access_phys, | ||
412 | }; | ||
413 | |||
414 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
415 | enum pci_mmap_state mmap_state, int write_combine) | ||
416 | { | ||
417 | unsigned long prot; | ||
418 | |||
419 | /* I/O space cannot be accessed via normal processor loads and | ||
420 | * stores on this platform. | ||
421 | */ | ||
422 | if (mmap_state == pci_mmap_io) | ||
423 | return -EINVAL; | ||
424 | |||
425 | prot = pgprot_val(vma->vm_page_prot); | ||
426 | |||
427 | /* | ||
428 | * Return error if pat is not enabled and write_combine is requested. | ||
429 | * Caller can followup with UC MINUS request and add a WC mtrr if there | ||
430 | * is a free mtrr slot. | ||
431 | */ | ||
432 | if (!pat_enabled() && write_combine) | ||
433 | return -EINVAL; | ||
434 | |||
435 | if (pat_enabled() && write_combine) | ||
436 | prot |= cachemode2protval(_PAGE_CACHE_MODE_WC); | ||
437 | else if (pat_enabled() || boot_cpu_data.x86 > 3) | ||
438 | /* | ||
439 | * ioremap() and ioremap_nocache() defaults to UC MINUS for now. | ||
440 | * To avoid attribute conflicts, request UC MINUS here | ||
441 | * as well. | ||
442 | */ | ||
443 | prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS); | ||
444 | |||
445 | vma->vm_page_prot = __pgprot(prot); | ||
446 | |||
447 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
448 | vma->vm_end - vma->vm_start, | ||
449 | vma->vm_page_prot)) | ||
450 | return -EAGAIN; | ||
451 | |||
452 | vma->vm_ops = &pci_mmap_ops; | ||
453 | |||
454 | return 0; | ||
455 | } | ||
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index 5d6bd932ba4e..e4f366a488d3 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h | |||
@@ -46,12 +46,9 @@ struct pci_dev; | |||
46 | 46 | ||
47 | #define PCI_DMA_BUS_IS_PHYS (1) | 47 | #define PCI_DMA_BUS_IS_PHYS (1) |
48 | 48 | ||
49 | /* Map a range of PCI memory or I/O space for a device into user space */ | ||
50 | int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, | ||
51 | enum pci_mmap_state mmap_state, int write_combine); | ||
52 | |||
53 | /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ | 49 | /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ |
54 | #define HAVE_PCI_MMAP 1 | 50 | #define HAVE_PCI_MMAP 1 |
51 | #define arch_can_pci_mmap_io() 1 | ||
55 | 52 | ||
56 | #endif /* __KERNEL__ */ | 53 | #endif /* __KERNEL__ */ |
57 | 54 | ||
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index b848cc3dc913..903963ee495d 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c | |||
@@ -334,25 +334,6 @@ __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma, | |||
334 | } | 334 | } |
335 | 335 | ||
336 | /* | 336 | /* |
337 | * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci | ||
338 | * device mapping. | ||
339 | */ | ||
340 | static __inline__ void | ||
341 | __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma, | ||
342 | enum pci_mmap_state mmap_state, int write_combine) | ||
343 | { | ||
344 | int prot = pgprot_val(vma->vm_page_prot); | ||
345 | |||
346 | /* Set to write-through */ | ||
347 | prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT; | ||
348 | #if 0 | ||
349 | if (!write_combine) | ||
350 | prot |= _PAGE_WRITETHRU; | ||
351 | #endif | ||
352 | vma->vm_page_prot = __pgprot(prot); | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Perform the actual remap of the pages for a PCI device mapping, as | 337 | * Perform the actual remap of the pages for a PCI device mapping, as |
357 | * appropriate for this architecture. The region in the process to map | 338 | * appropriate for this architecture. The region in the process to map |
358 | * is described by vm_start and vm_end members of VMA, the base physical | 339 | * is described by vm_start and vm_end members of VMA, the base physical |
@@ -362,7 +343,8 @@ __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma, | |||
362 | * | 343 | * |
363 | * Returns a negative error code on failure, zero on success. | 344 | * Returns a negative error code on failure, zero on success. |
364 | */ | 345 | */ |
365 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 346 | int pci_mmap_page_range(struct pci_dev *dev, int bar, |
347 | struct vm_area_struct *vma, | ||
366 | enum pci_mmap_state mmap_state, | 348 | enum pci_mmap_state mmap_state, |
367 | int write_combine) | 349 | int write_combine) |
368 | { | 350 | { |
@@ -372,7 +354,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
372 | if (ret < 0) | 354 | if (ret < 0) |
373 | return ret; | 355 | return ret; |
374 | 356 | ||
375 | __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); | 357 | vma->vm_page_prot = pgprot_device(vma->vm_page_prot); |
376 | 358 | ||
377 | ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | 359 | ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
378 | vma->vm_end - vma->vm_start,vma->vm_page_prot); | 360 | vma->vm_end - vma->vm_start,vma->vm_page_prot); |
diff --git a/drivers/Makefile b/drivers/Makefile index 903b19199b69..edba1edc6654 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -14,7 +14,9 @@ obj-$(CONFIG_GENERIC_PHY) += phy/ | |||
14 | obj-$(CONFIG_PINCTRL) += pinctrl/ | 14 | obj-$(CONFIG_PINCTRL) += pinctrl/ |
15 | obj-$(CONFIG_GPIOLIB) += gpio/ | 15 | obj-$(CONFIG_GPIOLIB) += gpio/ |
16 | obj-y += pwm/ | 16 | obj-y += pwm/ |
17 | |||
17 | obj-$(CONFIG_PCI) += pci/ | 18 | obj-$(CONFIG_PCI) += pci/ |
19 | obj-$(CONFIG_PCI_ENDPOINT) += pci/endpoint/ | ||
18 | # PCI dwc controller drivers | 20 | # PCI dwc controller drivers |
19 | obj-y += pci/dwc/ | 21 | obj-y += pci/dwc/ |
20 | 22 | ||
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 2944353253ed..a4e8432fc2fb 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c | |||
@@ -54,6 +54,7 @@ static struct mcfg_fixup mcfg_quirks[] = { | |||
54 | 54 | ||
55 | #define QCOM_ECAM32(seg) \ | 55 | #define QCOM_ECAM32(seg) \ |
56 | { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops } | 56 | { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops } |
57 | |||
57 | QCOM_ECAM32(0), | 58 | QCOM_ECAM32(0), |
58 | QCOM_ECAM32(1), | 59 | QCOM_ECAM32(1), |
59 | QCOM_ECAM32(2), | 60 | QCOM_ECAM32(2), |
@@ -68,6 +69,7 @@ static struct mcfg_fixup mcfg_quirks[] = { | |||
68 | { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \ | 69 | { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \ |
69 | { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \ | 70 | { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \ |
70 | { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops } | 71 | { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops } |
72 | |||
71 | HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops), | 73 | HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops), |
72 | HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops), | 74 | HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops), |
73 | HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops), | 75 | HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops), |
@@ -77,6 +79,7 @@ static struct mcfg_fixup mcfg_quirks[] = { | |||
77 | 79 | ||
78 | #define THUNDER_PEM_RES(addr, node) \ | 80 | #define THUNDER_PEM_RES(addr, node) \ |
79 | DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M) | 81 | DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M) |
82 | |||
80 | #define THUNDER_PEM_QUIRK(rev, node) \ | 83 | #define THUNDER_PEM_QUIRK(rev, node) \ |
81 | { "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \ | 84 | { "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \ |
82 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \ | 85 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \ |
@@ -90,13 +93,16 @@ static struct mcfg_fixup mcfg_quirks[] = { | |||
90 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \ | 93 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \ |
91 | { "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \ | 94 | { "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \ |
92 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) } | 95 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) } |
93 | /* SoC pass2.x */ | ||
94 | THUNDER_PEM_QUIRK(1, 0), | ||
95 | THUNDER_PEM_QUIRK(1, 1), | ||
96 | 96 | ||
97 | #define THUNDER_ECAM_QUIRK(rev, seg) \ | 97 | #define THUNDER_ECAM_QUIRK(rev, seg) \ |
98 | { "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \ | 98 | { "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \ |
99 | &pci_thunder_ecam_ops } | 99 | &pci_thunder_ecam_ops } |
100 | |||
101 | /* SoC pass2.x */ | ||
102 | THUNDER_PEM_QUIRK(1, 0), | ||
103 | THUNDER_PEM_QUIRK(1, 1), | ||
104 | THUNDER_ECAM_QUIRK(1, 10), | ||
105 | |||
100 | /* SoC pass1.x */ | 106 | /* SoC pass1.x */ |
101 | THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */ | 107 | THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */ |
102 | THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */ | 108 | THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */ |
@@ -112,9 +118,11 @@ static struct mcfg_fixup mcfg_quirks[] = { | |||
112 | #define XGENE_V1_ECAM_MCFG(rev, seg) \ | 118 | #define XGENE_V1_ECAM_MCFG(rev, seg) \ |
113 | {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ | 119 | {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ |
114 | &xgene_v1_pcie_ecam_ops } | 120 | &xgene_v1_pcie_ecam_ops } |
121 | |||
115 | #define XGENE_V2_ECAM_MCFG(rev, seg) \ | 122 | #define XGENE_V2_ECAM_MCFG(rev, seg) \ |
116 | {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ | 123 | {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ |
117 | &xgene_v2_pcie_ecam_ops } | 124 | &xgene_v2_pcie_ecam_ops } |
125 | |||
118 | /* X-Gene SoC with v1 PCIe controller */ | 126 | /* X-Gene SoC with v1 PCIe controller */ |
119 | XGENE_V1_ECAM_MCFG(1, 0), | 127 | XGENE_V1_ECAM_MCFG(1, 0), |
120 | XGENE_V1_ECAM_MCFG(1, 1), | 128 | XGENE_V1_ECAM_MCFG(1, 1), |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 0f6916d2d549..39279fd630bc 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -13841,14 +13841,14 @@ static void init_chip(struct hfi1_devdata *dd) | |||
13841 | dd_dev_info(dd, "Resetting CSRs with FLR\n"); | 13841 | dd_dev_info(dd, "Resetting CSRs with FLR\n"); |
13842 | 13842 | ||
13843 | /* do the FLR, the DC reset will remain */ | 13843 | /* do the FLR, the DC reset will remain */ |
13844 | hfi1_pcie_flr(dd); | 13844 | pcie_flr(dd->pcidev); |
13845 | 13845 | ||
13846 | /* restore command and BARs */ | 13846 | /* restore command and BARs */ |
13847 | restore_pci_variables(dd); | 13847 | restore_pci_variables(dd); |
13848 | 13848 | ||
13849 | if (is_ax(dd)) { | 13849 | if (is_ax(dd)) { |
13850 | dd_dev_info(dd, "Resetting CSRs with FLR\n"); | 13850 | dd_dev_info(dd, "Resetting CSRs with FLR\n"); |
13851 | hfi1_pcie_flr(dd); | 13851 | pcie_flr(dd->pcidev); |
13852 | restore_pci_variables(dd); | 13852 | restore_pci_variables(dd); |
13853 | } | 13853 | } |
13854 | } else { | 13854 | } else { |
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index f06674317abf..14063bd30c2a 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
@@ -1825,7 +1825,6 @@ int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); | |||
1825 | void hfi1_pcie_cleanup(struct pci_dev *); | 1825 | void hfi1_pcie_cleanup(struct pci_dev *); |
1826 | int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *); | 1826 | int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *); |
1827 | void hfi1_pcie_ddcleanup(struct hfi1_devdata *); | 1827 | void hfi1_pcie_ddcleanup(struct hfi1_devdata *); |
1828 | void hfi1_pcie_flr(struct hfi1_devdata *); | ||
1829 | int pcie_speeds(struct hfi1_devdata *); | 1828 | int pcie_speeds(struct hfi1_devdata *); |
1830 | void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *); | 1829 | void request_msix(struct hfi1_devdata *, u32 *, struct hfi1_msix_entry *); |
1831 | void hfi1_enable_intx(struct pci_dev *); | 1830 | void hfi1_enable_intx(struct pci_dev *); |
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index e39e01b79382..93faf86d54b6 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c | |||
@@ -240,36 +240,6 @@ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd) | |||
240 | iounmap(dd->piobase); | 240 | iounmap(dd->piobase); |
241 | } | 241 | } |
242 | 242 | ||
243 | /* | ||
244 | * Do a Function Level Reset (FLR) on the device. | ||
245 | * Based on static function drivers/pci/pci.c:pcie_flr(). | ||
246 | */ | ||
247 | void hfi1_pcie_flr(struct hfi1_devdata *dd) | ||
248 | { | ||
249 | int i; | ||
250 | u16 status; | ||
251 | |||
252 | /* no need to check for the capability - we know the device has it */ | ||
253 | |||
254 | /* wait for Transaction Pending bit to clear, at most a few ms */ | ||
255 | for (i = 0; i < 4; i++) { | ||
256 | if (i) | ||
257 | msleep((1 << (i - 1)) * 100); | ||
258 | |||
259 | pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVSTA, &status); | ||
260 | if (!(status & PCI_EXP_DEVSTA_TRPND)) | ||
261 | goto clear; | ||
262 | } | ||
263 | |||
264 | dd_dev_err(dd, "Transaction Pending bit is not clearing, proceeding with reset anyway\n"); | ||
265 | |||
266 | clear: | ||
267 | pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL, | ||
268 | PCI_EXP_DEVCTL_BCR_FLR); | ||
269 | /* PCIe spec requires the function to be back within 100ms */ | ||
270 | msleep(100); | ||
271 | } | ||
272 | |||
273 | static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt, | 243 | static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt, |
274 | struct hfi1_msix_entry *hfi1_msix_entry) | 244 | struct hfi1_msix_entry *hfi1_msix_entry) |
275 | { | 245 | { |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 39d1acb27452..2cba76e6fa3c 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -490,6 +490,13 @@ config ASPEED_LPC_CTRL | |||
490 | ioctl()s, the driver also provides a read/write interface to a BMC ram | 490 | ioctl()s, the driver also provides a read/write interface to a BMC ram |
491 | region where the host LPC read/write region can be buffered. | 491 | region where the host LPC read/write region can be buffered. |
492 | 492 | ||
493 | config PCI_ENDPOINT_TEST | ||
494 | depends on PCI | ||
495 | tristate "PCI Endpoint Test driver" | ||
496 | ---help--- | ||
497 | Enable this configuration option to enable the host side test driver | ||
498 | for PCI Endpoint. | ||
499 | |||
493 | source "drivers/misc/c2port/Kconfig" | 500 | source "drivers/misc/c2port/Kconfig" |
494 | source "drivers/misc/eeprom/Kconfig" | 501 | source "drivers/misc/eeprom/Kconfig" |
495 | source "drivers/misc/cb710/Kconfig" | 502 | source "drivers/misc/cb710/Kconfig" |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 4fb10af2ea1c..81ef3e67acc9 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -53,6 +53,7 @@ obj-$(CONFIG_ECHO) += echo/ | |||
53 | obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o | 53 | obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o |
54 | obj-$(CONFIG_CXL_BASE) += cxl/ | 54 | obj-$(CONFIG_CXL_BASE) += cxl/ |
55 | obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o | 55 | obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o |
56 | obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o | ||
56 | 57 | ||
57 | lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o | 58 | lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o |
58 | lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o | 59 | lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o |
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c new file mode 100644 index 000000000000..09c10f426b64 --- /dev/null +++ b/drivers/misc/pci_endpoint_test.c | |||
@@ -0,0 +1,534 @@ | |||
1 | /** | ||
2 | * Host side test driver to test endpoint functionality | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/crc32.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/miscdevice.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/mutex.h> | ||
29 | #include <linux/random.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/pci_ids.h> | ||
33 | |||
34 | #include <linux/pci_regs.h> | ||
35 | |||
36 | #include <uapi/linux/pcitest.h> | ||
37 | |||
38 | #define DRV_MODULE_NAME "pci-endpoint-test" | ||
39 | |||
40 | #define PCI_ENDPOINT_TEST_MAGIC 0x0 | ||
41 | |||
42 | #define PCI_ENDPOINT_TEST_COMMAND 0x4 | ||
43 | #define COMMAND_RAISE_LEGACY_IRQ BIT(0) | ||
44 | #define COMMAND_RAISE_MSI_IRQ BIT(1) | ||
45 | #define MSI_NUMBER_SHIFT 2 | ||
46 | /* 6 bits for MSI number */ | ||
47 | #define COMMAND_READ BIT(8) | ||
48 | #define COMMAND_WRITE BIT(9) | ||
49 | #define COMMAND_COPY BIT(10) | ||
50 | |||
51 | #define PCI_ENDPOINT_TEST_STATUS 0x8 | ||
52 | #define STATUS_READ_SUCCESS BIT(0) | ||
53 | #define STATUS_READ_FAIL BIT(1) | ||
54 | #define STATUS_WRITE_SUCCESS BIT(2) | ||
55 | #define STATUS_WRITE_FAIL BIT(3) | ||
56 | #define STATUS_COPY_SUCCESS BIT(4) | ||
57 | #define STATUS_COPY_FAIL BIT(5) | ||
58 | #define STATUS_IRQ_RAISED BIT(6) | ||
59 | #define STATUS_SRC_ADDR_INVALID BIT(7) | ||
60 | #define STATUS_DST_ADDR_INVALID BIT(8) | ||
61 | |||
62 | #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc | ||
63 | #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10 | ||
64 | |||
65 | #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14 | ||
66 | #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18 | ||
67 | |||
68 | #define PCI_ENDPOINT_TEST_SIZE 0x1c | ||
69 | #define PCI_ENDPOINT_TEST_CHECKSUM 0x20 | ||
70 | |||
71 | static DEFINE_IDA(pci_endpoint_test_ida); | ||
72 | |||
73 | #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ | ||
74 | miscdev) | ||
75 | enum pci_barno { | ||
76 | BAR_0, | ||
77 | BAR_1, | ||
78 | BAR_2, | ||
79 | BAR_3, | ||
80 | BAR_4, | ||
81 | BAR_5, | ||
82 | }; | ||
83 | |||
84 | struct pci_endpoint_test { | ||
85 | struct pci_dev *pdev; | ||
86 | void __iomem *base; | ||
87 | void __iomem *bar[6]; | ||
88 | struct completion irq_raised; | ||
89 | int last_irq; | ||
90 | /* mutex to protect the ioctls */ | ||
91 | struct mutex mutex; | ||
92 | struct miscdevice miscdev; | ||
93 | }; | ||
94 | |||
95 | static int bar_size[] = { 4, 512, 1024, 16384, 131072, 1048576 }; | ||
96 | |||
97 | static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test, | ||
98 | u32 offset) | ||
99 | { | ||
100 | return readl(test->base + offset); | ||
101 | } | ||
102 | |||
103 | static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test, | ||
104 | u32 offset, u32 value) | ||
105 | { | ||
106 | writel(value, test->base + offset); | ||
107 | } | ||
108 | |||
109 | static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test, | ||
110 | int bar, int offset) | ||
111 | { | ||
112 | return readl(test->bar[bar] + offset); | ||
113 | } | ||
114 | |||
115 | static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test, | ||
116 | int bar, u32 offset, u32 value) | ||
117 | { | ||
118 | writel(value, test->bar[bar] + offset); | ||
119 | } | ||
120 | |||
121 | static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id) | ||
122 | { | ||
123 | struct pci_endpoint_test *test = dev_id; | ||
124 | u32 reg; | ||
125 | |||
126 | reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); | ||
127 | if (reg & STATUS_IRQ_RAISED) { | ||
128 | test->last_irq = irq; | ||
129 | complete(&test->irq_raised); | ||
130 | reg &= ~STATUS_IRQ_RAISED; | ||
131 | } | ||
132 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, | ||
133 | reg); | ||
134 | |||
135 | return IRQ_HANDLED; | ||
136 | } | ||
137 | |||
138 | static bool pci_endpoint_test_bar(struct pci_endpoint_test *test, | ||
139 | enum pci_barno barno) | ||
140 | { | ||
141 | int j; | ||
142 | u32 val; | ||
143 | int size; | ||
144 | |||
145 | if (!test->bar[barno]) | ||
146 | return false; | ||
147 | |||
148 | size = bar_size[barno]; | ||
149 | |||
150 | for (j = 0; j < size; j += 4) | ||
151 | pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0); | ||
152 | |||
153 | for (j = 0; j < size; j += 4) { | ||
154 | val = pci_endpoint_test_bar_readl(test, barno, j); | ||
155 | if (val != 0xA0A0A0A0) | ||
156 | return false; | ||
157 | } | ||
158 | |||
159 | return true; | ||
160 | } | ||
161 | |||
162 | static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test) | ||
163 | { | ||
164 | u32 val; | ||
165 | |||
166 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, | ||
167 | COMMAND_RAISE_LEGACY_IRQ); | ||
168 | val = wait_for_completion_timeout(&test->irq_raised, | ||
169 | msecs_to_jiffies(1000)); | ||
170 | if (!val) | ||
171 | return false; | ||
172 | |||
173 | return true; | ||
174 | } | ||
175 | |||
176 | static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test, | ||
177 | u8 msi_num) | ||
178 | { | ||
179 | u32 val; | ||
180 | struct pci_dev *pdev = test->pdev; | ||
181 | |||
182 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, | ||
183 | msi_num << MSI_NUMBER_SHIFT | | ||
184 | COMMAND_RAISE_MSI_IRQ); | ||
185 | val = wait_for_completion_timeout(&test->irq_raised, | ||
186 | msecs_to_jiffies(1000)); | ||
187 | if (!val) | ||
188 | return false; | ||
189 | |||
190 | if (test->last_irq - pdev->irq == msi_num - 1) | ||
191 | return true; | ||
192 | |||
193 | return false; | ||
194 | } | ||
195 | |||
196 | static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size) | ||
197 | { | ||
198 | bool ret = false; | ||
199 | void *src_addr; | ||
200 | void *dst_addr; | ||
201 | dma_addr_t src_phys_addr; | ||
202 | dma_addr_t dst_phys_addr; | ||
203 | struct pci_dev *pdev = test->pdev; | ||
204 | struct device *dev = &pdev->dev; | ||
205 | u32 src_crc32; | ||
206 | u32 dst_crc32; | ||
207 | |||
208 | src_addr = dma_alloc_coherent(dev, size, &src_phys_addr, GFP_KERNEL); | ||
209 | if (!src_addr) { | ||
210 | dev_err(dev, "failed to allocate source buffer\n"); | ||
211 | ret = false; | ||
212 | goto err; | ||
213 | } | ||
214 | |||
215 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR, | ||
216 | lower_32_bits(src_phys_addr)); | ||
217 | |||
218 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR, | ||
219 | upper_32_bits(src_phys_addr)); | ||
220 | |||
221 | get_random_bytes(src_addr, size); | ||
222 | src_crc32 = crc32_le(~0, src_addr, size); | ||
223 | |||
224 | dst_addr = dma_alloc_coherent(dev, size, &dst_phys_addr, GFP_KERNEL); | ||
225 | if (!dst_addr) { | ||
226 | dev_err(dev, "failed to allocate destination buffer\n"); | ||
227 | ret = false; | ||
228 | goto err_src_addr; | ||
229 | } | ||
230 | |||
231 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR, | ||
232 | lower_32_bits(dst_phys_addr)); | ||
233 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR, | ||
234 | upper_32_bits(dst_phys_addr)); | ||
235 | |||
236 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, | ||
237 | size); | ||
238 | |||
239 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, | ||
240 | 1 << MSI_NUMBER_SHIFT | COMMAND_COPY); | ||
241 | |||
242 | wait_for_completion(&test->irq_raised); | ||
243 | |||
244 | dst_crc32 = crc32_le(~0, dst_addr, size); | ||
245 | if (dst_crc32 == src_crc32) | ||
246 | ret = true; | ||
247 | |||
248 | dma_free_coherent(dev, size, dst_addr, dst_phys_addr); | ||
249 | |||
250 | err_src_addr: | ||
251 | dma_free_coherent(dev, size, src_addr, src_phys_addr); | ||
252 | |||
253 | err: | ||
254 | return ret; | ||
255 | } | ||
256 | |||
257 | static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size) | ||
258 | { | ||
259 | bool ret = false; | ||
260 | u32 reg; | ||
261 | void *addr; | ||
262 | dma_addr_t phys_addr; | ||
263 | struct pci_dev *pdev = test->pdev; | ||
264 | struct device *dev = &pdev->dev; | ||
265 | u32 crc32; | ||
266 | |||
267 | addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); | ||
268 | if (!addr) { | ||
269 | dev_err(dev, "failed to allocate buffer\n"); | ||
270 | ret = false; | ||
271 | goto err; | ||
272 | } | ||
273 | |||
274 | get_random_bytes(addr, size); | ||
275 | |||
276 | crc32 = crc32_le(~0, addr, size); | ||
277 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM, | ||
278 | crc32); | ||
279 | |||
280 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR, | ||
281 | lower_32_bits(phys_addr)); | ||
282 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR, | ||
283 | upper_32_bits(phys_addr)); | ||
284 | |||
285 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size); | ||
286 | |||
287 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, | ||
288 | 1 << MSI_NUMBER_SHIFT | COMMAND_READ); | ||
289 | |||
290 | wait_for_completion(&test->irq_raised); | ||
291 | |||
292 | reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); | ||
293 | if (reg & STATUS_READ_SUCCESS) | ||
294 | ret = true; | ||
295 | |||
296 | dma_free_coherent(dev, size, addr, phys_addr); | ||
297 | |||
298 | err: | ||
299 | return ret; | ||
300 | } | ||
301 | |||
302 | static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size) | ||
303 | { | ||
304 | bool ret = false; | ||
305 | void *addr; | ||
306 | dma_addr_t phys_addr; | ||
307 | struct pci_dev *pdev = test->pdev; | ||
308 | struct device *dev = &pdev->dev; | ||
309 | u32 crc32; | ||
310 | |||
311 | addr = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); | ||
312 | if (!addr) { | ||
313 | dev_err(dev, "failed to allocate destination buffer\n"); | ||
314 | ret = false; | ||
315 | goto err; | ||
316 | } | ||
317 | |||
318 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR, | ||
319 | lower_32_bits(phys_addr)); | ||
320 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR, | ||
321 | upper_32_bits(phys_addr)); | ||
322 | |||
323 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size); | ||
324 | |||
325 | pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, | ||
326 | 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE); | ||
327 | |||
328 | wait_for_completion(&test->irq_raised); | ||
329 | |||
330 | crc32 = crc32_le(~0, addr, size); | ||
331 | if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM)) | ||
332 | ret = true; | ||
333 | |||
334 | dma_free_coherent(dev, size, addr, phys_addr); | ||
335 | err: | ||
336 | return ret; | ||
337 | } | ||
338 | |||
339 | static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, | ||
340 | unsigned long arg) | ||
341 | { | ||
342 | int ret = -EINVAL; | ||
343 | enum pci_barno bar; | ||
344 | struct pci_endpoint_test *test = to_endpoint_test(file->private_data); | ||
345 | |||
346 | mutex_lock(&test->mutex); | ||
347 | switch (cmd) { | ||
348 | case PCITEST_BAR: | ||
349 | bar = arg; | ||
350 | if (bar < 0 || bar > 5) | ||
351 | goto ret; | ||
352 | ret = pci_endpoint_test_bar(test, bar); | ||
353 | break; | ||
354 | case PCITEST_LEGACY_IRQ: | ||
355 | ret = pci_endpoint_test_legacy_irq(test); | ||
356 | break; | ||
357 | case PCITEST_MSI: | ||
358 | ret = pci_endpoint_test_msi_irq(test, arg); | ||
359 | break; | ||
360 | case PCITEST_WRITE: | ||
361 | ret = pci_endpoint_test_write(test, arg); | ||
362 | break; | ||
363 | case PCITEST_READ: | ||
364 | ret = pci_endpoint_test_read(test, arg); | ||
365 | break; | ||
366 | case PCITEST_COPY: | ||
367 | ret = pci_endpoint_test_copy(test, arg); | ||
368 | break; | ||
369 | } | ||
370 | |||
371 | ret: | ||
372 | mutex_unlock(&test->mutex); | ||
373 | return ret; | ||
374 | } | ||
375 | |||
376 | static const struct file_operations pci_endpoint_test_fops = { | ||
377 | .owner = THIS_MODULE, | ||
378 | .unlocked_ioctl = pci_endpoint_test_ioctl, | ||
379 | }; | ||
380 | |||
381 | static int pci_endpoint_test_probe(struct pci_dev *pdev, | ||
382 | const struct pci_device_id *ent) | ||
383 | { | ||
384 | int i; | ||
385 | int err; | ||
386 | int irq; | ||
387 | int id; | ||
388 | char name[20]; | ||
389 | enum pci_barno bar; | ||
390 | void __iomem *base; | ||
391 | struct device *dev = &pdev->dev; | ||
392 | struct pci_endpoint_test *test; | ||
393 | struct miscdevice *misc_device; | ||
394 | |||
395 | if (pci_is_bridge(pdev)) | ||
396 | return -ENODEV; | ||
397 | |||
398 | test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL); | ||
399 | if (!test) | ||
400 | return -ENOMEM; | ||
401 | |||
402 | test->pdev = pdev; | ||
403 | init_completion(&test->irq_raised); | ||
404 | mutex_init(&test->mutex); | ||
405 | |||
406 | err = pci_enable_device(pdev); | ||
407 | if (err) { | ||
408 | dev_err(dev, "Cannot enable PCI device\n"); | ||
409 | return err; | ||
410 | } | ||
411 | |||
412 | err = pci_request_regions(pdev, DRV_MODULE_NAME); | ||
413 | if (err) { | ||
414 | dev_err(dev, "Cannot obtain PCI resources\n"); | ||
415 | goto err_disable_pdev; | ||
416 | } | ||
417 | |||
418 | pci_set_master(pdev); | ||
419 | |||
420 | irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI); | ||
421 | if (irq < 0) | ||
422 | dev_err(dev, "failed to get MSI interrupts\n"); | ||
423 | |||
424 | err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler, | ||
425 | IRQF_SHARED, DRV_MODULE_NAME, test); | ||
426 | if (err) { | ||
427 | dev_err(dev, "failed to request IRQ %d\n", pdev->irq); | ||
428 | goto err_disable_msi; | ||
429 | } | ||
430 | |||
431 | for (i = 1; i < irq; i++) { | ||
432 | err = devm_request_irq(dev, pdev->irq + i, | ||
433 | pci_endpoint_test_irqhandler, | ||
434 | IRQF_SHARED, DRV_MODULE_NAME, test); | ||
435 | if (err) | ||
436 | dev_err(dev, "failed to request IRQ %d for MSI %d\n", | ||
437 | pdev->irq + i, i + 1); | ||
438 | } | ||
439 | |||
440 | for (bar = BAR_0; bar <= BAR_5; bar++) { | ||
441 | base = pci_ioremap_bar(pdev, bar); | ||
442 | if (!base) { | ||
443 | dev_err(dev, "failed to map BAR%d\n", bar); | ||
444 | WARN_ON(bar == BAR_0); | ||
445 | } | ||
446 | test->bar[bar] = base; | ||
447 | } | ||
448 | |||
449 | test->base = test->bar[0]; | ||
450 | if (!test->base) { | ||
451 | dev_err(dev, "Cannot perform PCI test without BAR0\n"); | ||
452 | goto err_iounmap; | ||
453 | } | ||
454 | |||
455 | pci_set_drvdata(pdev, test); | ||
456 | |||
457 | id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL); | ||
458 | if (id < 0) { | ||
459 | dev_err(dev, "unable to get id\n"); | ||
460 | goto err_iounmap; | ||
461 | } | ||
462 | |||
463 | snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id); | ||
464 | misc_device = &test->miscdev; | ||
465 | misc_device->minor = MISC_DYNAMIC_MINOR; | ||
466 | misc_device->name = name; | ||
467 | misc_device->fops = &pci_endpoint_test_fops; | ||
468 | |||
469 | err = misc_register(misc_device); | ||
470 | if (err) { | ||
471 | dev_err(dev, "failed to register device\n"); | ||
472 | goto err_ida_remove; | ||
473 | } | ||
474 | |||
475 | return 0; | ||
476 | |||
477 | err_ida_remove: | ||
478 | ida_simple_remove(&pci_endpoint_test_ida, id); | ||
479 | |||
480 | err_iounmap: | ||
481 | for (bar = BAR_0; bar <= BAR_5; bar++) { | ||
482 | if (test->bar[bar]) | ||
483 | pci_iounmap(pdev, test->bar[bar]); | ||
484 | } | ||
485 | |||
486 | err_disable_msi: | ||
487 | pci_disable_msi(pdev); | ||
488 | pci_release_regions(pdev); | ||
489 | |||
490 | err_disable_pdev: | ||
491 | pci_disable_device(pdev); | ||
492 | |||
493 | return err; | ||
494 | } | ||
495 | |||
496 | static void pci_endpoint_test_remove(struct pci_dev *pdev) | ||
497 | { | ||
498 | int id; | ||
499 | enum pci_barno bar; | ||
500 | struct pci_endpoint_test *test = pci_get_drvdata(pdev); | ||
501 | struct miscdevice *misc_device = &test->miscdev; | ||
502 | |||
503 | if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1) | ||
504 | return; | ||
505 | |||
506 | misc_deregister(&test->miscdev); | ||
507 | ida_simple_remove(&pci_endpoint_test_ida, id); | ||
508 | for (bar = BAR_0; bar <= BAR_5; bar++) { | ||
509 | if (test->bar[bar]) | ||
510 | pci_iounmap(pdev, test->bar[bar]); | ||
511 | } | ||
512 | pci_disable_msi(pdev); | ||
513 | pci_release_regions(pdev); | ||
514 | pci_disable_device(pdev); | ||
515 | } | ||
516 | |||
517 | static const struct pci_device_id pci_endpoint_test_tbl[] = { | ||
518 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, | ||
519 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, | ||
520 | { } | ||
521 | }; | ||
522 | MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); | ||
523 | |||
524 | static struct pci_driver pci_endpoint_test_driver = { | ||
525 | .name = DRV_MODULE_NAME, | ||
526 | .id_table = pci_endpoint_test_tbl, | ||
527 | .probe = pci_endpoint_test_probe, | ||
528 | .remove = pci_endpoint_test_remove, | ||
529 | }; | ||
530 | module_pci_driver(pci_endpoint_test_driver); | ||
531 | |||
532 | MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER"); | ||
533 | MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); | ||
534 | MODULE_LICENSE("GPL v2"); | ||
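As a rough user-space illustration of the ioctl interface added above: a minimal sketch, assuming the first probed instance appears as /dev/pci-endpoint-test.0 (DRV_MODULE_NAME plus the IDA id) and that the PCITEST_* request macros from the uapi/linux/pcitest.h header introduced in this series are installed as <linux/pcitest.h>. The command names are from the host's point of view: PCITEST_WRITE fills a host buffer and asks the endpoint to read and checksum it (COMMAND_READ), while PCITEST_READ has the endpoint write into a host buffer (COMMAND_WRITE). Each test ioctl returns the bool result of the test, i.e. 1 on success and 0 on failure.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <linux/pcitest.h>	/* PCITEST_* requests handled by this driver */

int main(void)
{
	int fd, ret;

	/* Hypothetical node: first registered pci-endpoint-test miscdevice */
	fd = open("/dev/pci-endpoint-test.0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Write/read-back test of BAR0 (argument is the BAR number, 0..5) */
	ret = ioctl(fd, PCITEST_BAR, 0);
	printf("BAR0 test: %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	/* Host fills a 1024-byte buffer; the endpoint reads it back and verifies the CRC */
	ret = ioctl(fd, PCITEST_WRITE, 1024);
	printf("WRITE 1024 bytes: %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

	close(fd);
	return EXIT_SUCCESS;
}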
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 22a29df1d29e..d39cba214320 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -7332,18 +7332,6 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) | |||
7332 | } | 7332 | } |
7333 | 7333 | ||
7334 | #ifdef CONFIG_PCI_IOV | 7334 | #ifdef CONFIG_PCI_IOV |
7335 | static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, | ||
7336 | struct pci_dev *vfdev) | ||
7337 | { | ||
7338 | if (!pci_wait_for_pending_transaction(vfdev)) | ||
7339 | e_dev_warn("Issuing VFLR with pending transactions\n"); | ||
7340 | |||
7341 | e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); | ||
7342 | pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); | ||
7343 | |||
7344 | msleep(100); | ||
7345 | } | ||
7346 | |||
7347 | static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) | 7335 | static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) |
7348 | { | 7336 | { |
7349 | struct ixgbe_hw *hw = &adapter->hw; | 7337 | struct ixgbe_hw *hw = &adapter->hw; |
@@ -7376,7 +7364,7 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) | |||
7376 | pci_read_config_word(vfdev, PCI_STATUS, &status_reg); | 7364 | pci_read_config_word(vfdev, PCI_STATUS, &status_reg); |
7377 | if (status_reg != IXGBE_FAILED_READ_CFG_WORD && | 7365 | if (status_reg != IXGBE_FAILED_READ_CFG_WORD && |
7378 | status_reg & PCI_STATUS_REC_MASTER_ABORT) | 7366 | status_reg & PCI_STATUS_REC_MASTER_ABORT) |
7379 | ixgbe_issue_vf_flr(adapter, vfdev); | 7367 | pcie_flr(vfdev); |
7380 | } | 7368 | } |
7381 | } | 7369 | } |
7382 | 7370 | ||
@@ -10602,7 +10590,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |||
10602 | * VFLR. Just clean up the AER in that case. | 10590 | * VFLR. Just clean up the AER in that case. |
10603 | */ | 10591 | */ |
10604 | if (vfdev) { | 10592 | if (vfdev) { |
10605 | ixgbe_issue_vf_flr(adapter, vfdev); | 10593 | pcie_flr(vfdev); |
10606 | /* Free device reference count */ | 10594 | /* Free device reference count */ |
10607 | pci_dev_put(vfdev); | 10595 | pci_dev_put(vfdev); |
10608 | } | 10596 | } |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 56a315bd4d96..fed803232edc 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -132,7 +132,6 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) | |||
132 | struct nvme_queue { | 132 | struct nvme_queue { |
133 | struct device *q_dmadev; | 133 | struct device *q_dmadev; |
134 | struct nvme_dev *dev; | 134 | struct nvme_dev *dev; |
135 | char irqname[24]; /* nvme4294967295-65535\0 */ | ||
136 | spinlock_t q_lock; | 135 | spinlock_t q_lock; |
137 | struct nvme_command *sq_cmds; | 136 | struct nvme_command *sq_cmds; |
138 | struct nvme_command __iomem *sq_cmds_io; | 137 | struct nvme_command __iomem *sq_cmds_io; |
@@ -329,11 +328,6 @@ static unsigned int nvme_cmd_size(struct nvme_dev *dev) | |||
329 | nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES); | 328 | nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES); |
330 | } | 329 | } |
331 | 330 | ||
332 | static int nvmeq_irq(struct nvme_queue *nvmeq) | ||
333 | { | ||
334 | return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector); | ||
335 | } | ||
336 | |||
337 | static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, | 331 | static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, |
338 | unsigned int hctx_idx) | 332 | unsigned int hctx_idx) |
339 | { | 333 | { |
@@ -1078,7 +1072,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) | |||
1078 | spin_unlock_irq(&nvmeq->q_lock); | 1072 | spin_unlock_irq(&nvmeq->q_lock); |
1079 | return 1; | 1073 | return 1; |
1080 | } | 1074 | } |
1081 | vector = nvmeq_irq(nvmeq); | 1075 | vector = nvmeq->cq_vector; |
1082 | nvmeq->dev->online_queues--; | 1076 | nvmeq->dev->online_queues--; |
1083 | nvmeq->cq_vector = -1; | 1077 | nvmeq->cq_vector = -1; |
1084 | spin_unlock_irq(&nvmeq->q_lock); | 1078 | spin_unlock_irq(&nvmeq->q_lock); |
@@ -1086,7 +1080,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) | |||
1086 | if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) | 1080 | if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) |
1087 | blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q); | 1081 | blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q); |
1088 | 1082 | ||
1089 | free_irq(vector, nvmeq); | 1083 | pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq); |
1090 | 1084 | ||
1091 | return 0; | 1085 | return 0; |
1092 | } | 1086 | } |
@@ -1171,8 +1165,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, | |||
1171 | 1165 | ||
1172 | nvmeq->q_dmadev = dev->dev; | 1166 | nvmeq->q_dmadev = dev->dev; |
1173 | nvmeq->dev = dev; | 1167 | nvmeq->dev = dev; |
1174 | snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", | ||
1175 | dev->ctrl.instance, qid); | ||
1176 | spin_lock_init(&nvmeq->q_lock); | 1168 | spin_lock_init(&nvmeq->q_lock); |
1177 | nvmeq->cq_head = 0; | 1169 | nvmeq->cq_head = 0; |
1178 | nvmeq->cq_phase = 1; | 1170 | nvmeq->cq_phase = 1; |
@@ -1195,12 +1187,16 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, | |||
1195 | 1187 | ||
1196 | static int queue_request_irq(struct nvme_queue *nvmeq) | 1188 | static int queue_request_irq(struct nvme_queue *nvmeq) |
1197 | { | 1189 | { |
1198 | if (use_threaded_interrupts) | 1190 | struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); |
1199 | return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check, | 1191 | int nr = nvmeq->dev->ctrl.instance; |
1200 | nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq); | 1192 | |
1201 | else | 1193 | if (use_threaded_interrupts) { |
1202 | return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED, | 1194 | return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, |
1203 | nvmeq->irqname, nvmeq); | 1195 | nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); |
1196 | } else { | ||
1197 | return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, | ||
1198 | NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); | ||
1199 | } | ||
1204 | } | 1200 | } |
1205 | 1201 | ||
1206 | static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) | 1202 | static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) |
@@ -1557,7 +1553,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
1557 | } | 1553 | } |
1558 | 1554 | ||
1559 | /* Deregister the admin queue's interrupt */ | 1555 | /* Deregister the admin queue's interrupt */ |
1560 | free_irq(pci_irq_vector(pdev, 0), adminq); | 1556 | pci_free_irq(pdev, 0, adminq); |
1561 | 1557 | ||
1562 | /* | 1558 | /* |
1563 | * If we enable msix early due to not intx, disable it again before | 1559 | * If we enable msix early due to not intx, disable it again before |
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index 0ee42c3e66a1..c9d4d3a7b0fe 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c | |||
@@ -285,51 +285,6 @@ parse_failed: | |||
285 | EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); | 285 | EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); |
286 | #endif /* CONFIG_OF_ADDRESS */ | 286 | #endif /* CONFIG_OF_ADDRESS */ |
287 | 287 | ||
288 | #ifdef CONFIG_PCI_MSI | ||
289 | |||
290 | static LIST_HEAD(of_pci_msi_chip_list); | ||
291 | static DEFINE_MUTEX(of_pci_msi_chip_mutex); | ||
292 | |||
293 | int of_pci_msi_chip_add(struct msi_controller *chip) | ||
294 | { | ||
295 | if (!of_property_read_bool(chip->of_node, "msi-controller")) | ||
296 | return -EINVAL; | ||
297 | |||
298 | mutex_lock(&of_pci_msi_chip_mutex); | ||
299 | list_add(&chip->list, &of_pci_msi_chip_list); | ||
300 | mutex_unlock(&of_pci_msi_chip_mutex); | ||
301 | |||
302 | return 0; | ||
303 | } | ||
304 | EXPORT_SYMBOL_GPL(of_pci_msi_chip_add); | ||
305 | |||
306 | void of_pci_msi_chip_remove(struct msi_controller *chip) | ||
307 | { | ||
308 | mutex_lock(&of_pci_msi_chip_mutex); | ||
309 | list_del(&chip->list); | ||
310 | mutex_unlock(&of_pci_msi_chip_mutex); | ||
311 | } | ||
312 | EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove); | ||
313 | |||
314 | struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node) | ||
315 | { | ||
316 | struct msi_controller *c; | ||
317 | |||
318 | mutex_lock(&of_pci_msi_chip_mutex); | ||
319 | list_for_each_entry(c, &of_pci_msi_chip_list, list) { | ||
320 | if (c->of_node == of_node) { | ||
321 | mutex_unlock(&of_pci_msi_chip_mutex); | ||
322 | return c; | ||
323 | } | ||
324 | } | ||
325 | mutex_unlock(&of_pci_msi_chip_mutex); | ||
326 | |||
327 | return NULL; | ||
328 | } | ||
329 | EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node); | ||
330 | |||
331 | #endif /* CONFIG_PCI_MSI */ | ||
332 | |||
333 | /** | 288 | /** |
334 | * of_pci_map_rid - Translate a requester ID through a downstream mapping. | 289 | * of_pci_map_rid - Translate a requester ID through a downstream mapping. |
335 | * @np: root complex device node. | 290 | * @np: root complex device node. |
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index df141420c902..e0cacb7b8563 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -134,3 +134,5 @@ config PCI_HYPERV | |||
134 | source "drivers/pci/hotplug/Kconfig" | 134 | source "drivers/pci/hotplug/Kconfig" |
135 | source "drivers/pci/dwc/Kconfig" | 135 | source "drivers/pci/dwc/Kconfig" |
136 | source "drivers/pci/host/Kconfig" | 136 | source "drivers/pci/host/Kconfig" |
137 | source "drivers/pci/endpoint/Kconfig" | ||
138 | source "drivers/pci/switch/Kconfig" | ||
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 8db5079f09a7..462c1f5f5546 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ | 5 | obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ |
6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ | 6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ |
7 | irq.o vpd.o setup-bus.o vc.o | 7 | irq.o vpd.o setup-bus.o vc.o mmap.o |
8 | obj-$(CONFIG_PROC_FS) += proc.o | 8 | obj-$(CONFIG_PROC_FS) += proc.o |
9 | obj-$(CONFIG_SYSFS) += slot.o | 9 | obj-$(CONFIG_SYSFS) += slot.o |
10 | 10 | ||
@@ -68,3 +68,4 @@ ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG | |||
68 | 68 | ||
69 | # PCI host controller drivers | 69 | # PCI host controller drivers |
70 | obj-y += host/ | 70 | obj-y += host/ |
71 | obj-y += switch/ | ||
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 8b7382705bf2..74cf5fffb1e1 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c | |||
@@ -629,7 +629,7 @@ void pci_vpd_release(struct pci_dev *dev) | |||
629 | * | 629 | * |
630 | * When access is locked, any userspace reads or writes to config | 630 | * When access is locked, any userspace reads or writes to config |
631 | * space and concurrent lock requests will sleep until access is | 631 | * space and concurrent lock requests will sleep until access is |
632 | * allowed via pci_cfg_access_unlocked again. | 632 | * allowed via pci_cfg_access_unlock() again. |
633 | */ | 633 | */ |
634 | void pci_cfg_access_lock(struct pci_dev *dev) | 634 | void pci_cfg_access_lock(struct pci_dev *dev) |
635 | { | 635 | { |
@@ -700,7 +700,8 @@ static bool pcie_downstream_port(const struct pci_dev *dev) | |||
700 | int type = pci_pcie_type(dev); | 700 | int type = pci_pcie_type(dev); |
701 | 701 | ||
702 | return type == PCI_EXP_TYPE_ROOT_PORT || | 702 | return type == PCI_EXP_TYPE_ROOT_PORT || |
703 | type == PCI_EXP_TYPE_DOWNSTREAM; | 703 | type == PCI_EXP_TYPE_DOWNSTREAM || |
704 | type == PCI_EXP_TYPE_PCIE_BRIDGE; | ||
704 | } | 705 | } |
705 | 706 | ||
706 | bool pcie_cap_has_lnkctl(const struct pci_dev *dev) | 707 | bool pcie_cap_has_lnkctl(const struct pci_dev *dev) |
@@ -890,3 +891,59 @@ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, | |||
890 | return ret; | 891 | return ret; |
891 | } | 892 | } |
892 | EXPORT_SYMBOL(pcie_capability_clear_and_set_dword); | 893 | EXPORT_SYMBOL(pcie_capability_clear_and_set_dword); |
894 | |||
895 | int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) | ||
896 | { | ||
897 | if (pci_dev_is_disconnected(dev)) { | ||
898 | *val = ~0; | ||
899 | return -ENODEV; | ||
900 | } | ||
901 | return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); | ||
902 | } | ||
903 | EXPORT_SYMBOL(pci_read_config_byte); | ||
904 | |||
905 | int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) | ||
906 | { | ||
907 | if (pci_dev_is_disconnected(dev)) { | ||
908 | *val = ~0; | ||
909 | return -ENODEV; | ||
910 | } | ||
911 | return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); | ||
912 | } | ||
913 | EXPORT_SYMBOL(pci_read_config_word); | ||
914 | |||
915 | int pci_read_config_dword(const struct pci_dev *dev, int where, | ||
916 | u32 *val) | ||
917 | { | ||
918 | if (pci_dev_is_disconnected(dev)) { | ||
919 | *val = ~0; | ||
920 | return -ENODEV; | ||
921 | } | ||
922 | return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); | ||
923 | } | ||
924 | EXPORT_SYMBOL(pci_read_config_dword); | ||
925 | |||
926 | int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) | ||
927 | { | ||
928 | if (pci_dev_is_disconnected(dev)) | ||
929 | return -ENODEV; | ||
930 | return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); | ||
931 | } | ||
932 | EXPORT_SYMBOL(pci_write_config_byte); | ||
933 | |||
934 | int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) | ||
935 | { | ||
936 | if (pci_dev_is_disconnected(dev)) | ||
937 | return -ENODEV; | ||
938 | return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); | ||
939 | } | ||
940 | EXPORT_SYMBOL(pci_write_config_word); | ||
941 | |||
942 | int pci_write_config_dword(const struct pci_dev *dev, int where, | ||
943 | u32 val) | ||
944 | { | ||
945 | if (pci_dev_is_disconnected(dev)) | ||
946 | return -ENODEV; | ||
947 | return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); | ||
948 | } | ||
949 | EXPORT_SYMBOL(pci_write_config_dword); | ||
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig index d2d2ba5b8a68..b7e15526d676 100644 --- a/drivers/pci/dwc/Kconfig +++ b/drivers/pci/dwc/Kconfig | |||
@@ -9,16 +9,44 @@ config PCIE_DW_HOST | |||
9 | depends on PCI_MSI_IRQ_DOMAIN | 9 | depends on PCI_MSI_IRQ_DOMAIN |
10 | select PCIE_DW | 10 | select PCIE_DW |
11 | 11 | ||
12 | config PCIE_DW_EP | ||
13 | bool | ||
14 | depends on PCI_ENDPOINT | ||
15 | select PCIE_DW | ||
16 | |||
12 | config PCI_DRA7XX | 17 | config PCI_DRA7XX |
13 | bool "TI DRA7xx PCIe controller" | 18 | bool "TI DRA7xx PCIe controller" |
14 | depends on PCI | 19 | depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT |
15 | depends on OF && HAS_IOMEM && TI_PIPE3 | 20 | depends on OF && HAS_IOMEM && TI_PIPE3 |
21 | help | ||
22 | Enables support for the PCIe controller in the DRA7xx SoC. There | ||
23 | are two instances of the PCIe controller in DRA7xx, and each can | ||
24 | work either as EP or RC. Select PCI_DRA7XX_HOST to enable the | ||
25 | host-specific features and PCI_DRA7XX_EP to enable the | ||
26 | device-specific features. The driver is based on the | ||
27 | DesignWare core. | ||
28 | |||
29 | if PCI_DRA7XX | ||
30 | |||
31 | config PCI_DRA7XX_HOST | ||
32 | bool "PCI DRA7xx Host Mode" | ||
33 | depends on PCI | ||
16 | depends on PCI_MSI_IRQ_DOMAIN | 34 | depends on PCI_MSI_IRQ_DOMAIN |
17 | select PCIE_DW_HOST | 35 | select PCIE_DW_HOST |
36 | default y | ||
18 | help | 37 | help |
19 | Enables support for the PCIe controller in the DRA7xx SoC. There | 38 | Enables support for the PCIe controller in the DRA7xx SoC to work in |
20 | are two instances of PCIe controller in DRA7xx. This controller can | 39 | host mode. |
21 | act both as EP and RC. This reuses the Designware core. | 40 | |
41 | config PCI_DRA7XX_EP | ||
42 | bool "PCI DRA7xx Endpoint Mode" | ||
43 | depends on PCI_ENDPOINT | ||
44 | select PCIE_DW_EP | ||
45 | help | ||
46 | Enables support for the PCIe controller in the DRA7xx SoC to work in | ||
47 | endpoint mode. | ||
48 | |||
49 | endif | ||
22 | 50 | ||
23 | config PCIE_DW_PLAT | 51 | config PCIE_DW_PLAT |
24 | bool "Platform bus based DesignWare PCIe Controller" | 52 | bool "Platform bus based DesignWare PCIe Controller" |
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile index a2df13c28798..f31a8596442a 100644 --- a/drivers/pci/dwc/Makefile +++ b/drivers/pci/dwc/Makefile | |||
@@ -1,7 +1,10 @@ | |||
1 | obj-$(CONFIG_PCIE_DW) += pcie-designware.o | 1 | obj-$(CONFIG_PCIE_DW) += pcie-designware.o |
2 | obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o | 2 | obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o |
3 | obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o | ||
3 | obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o | 4 | obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o |
4 | obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o | 5 | ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),) |
6 | obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o | ||
7 | endif | ||
5 | obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o | 8 | obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o |
6 | obj-$(CONFIG_PCI_IMX6) += pci-imx6.o | 9 | obj-$(CONFIG_PCI_IMX6) += pci-imx6.o |
7 | obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o | 10 | obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o |
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c index 0984baff07e3..8decf46cf525 100644 --- a/drivers/pci/dwc/pci-dra7xx.c +++ b/drivers/pci/dwc/pci-dra7xx.c | |||
@@ -10,12 +10,14 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/delay.h> | ||
13 | #include <linux/err.h> | 14 | #include <linux/err.h> |
14 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
15 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
16 | #include <linux/irqdomain.h> | 17 | #include <linux/irqdomain.h> |
17 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/of_device.h> | ||
19 | #include <linux/of_gpio.h> | 21 | #include <linux/of_gpio.h> |
20 | #include <linux/of_pci.h> | 22 | #include <linux/of_pci.h> |
21 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
@@ -24,6 +26,8 @@ | |||
24 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
25 | #include <linux/resource.h> | 27 | #include <linux/resource.h> |
26 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/mfd/syscon.h> | ||
30 | #include <linux/regmap.h> | ||
27 | 31 | ||
28 | #include "pcie-designware.h" | 32 | #include "pcie-designware.h" |
29 | 33 | ||
@@ -57,6 +61,11 @@ | |||
57 | #define MSI BIT(4) | 61 | #define MSI BIT(4) |
58 | #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) | 62 | #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) |
59 | 63 | ||
64 | #define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 | ||
65 | #define DEVICE_TYPE_EP 0x0 | ||
66 | #define DEVICE_TYPE_LEG_EP 0x1 | ||
67 | #define DEVICE_TYPE_RC 0x4 | ||
68 | |||
60 | #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 | 69 | #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 |
61 | #define LTSSM_EN 0x1 | 70 | #define LTSSM_EN 0x1 |
62 | 71 | ||
@@ -66,6 +75,13 @@ | |||
66 | 75 | ||
67 | #define EXP_CAP_ID_OFFSET 0x70 | 76 | #define EXP_CAP_ID_OFFSET 0x70 |
68 | 77 | ||
78 | #define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 | ||
79 | #define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 | ||
80 | |||
81 | #define PCIECTRL_TI_CONF_MSI_XMT 0x012c | ||
82 | #define MSI_REQ_GRANT BIT(0) | ||
83 | #define MSI_VECTOR_SHIFT 7 | ||
84 | |||
69 | struct dra7xx_pcie { | 85 | struct dra7xx_pcie { |
70 | struct dw_pcie *pci; | 86 | struct dw_pcie *pci; |
71 | void __iomem *base; /* DT ti_conf */ | 87 | void __iomem *base; /* DT ti_conf */ |
@@ -73,6 +89,11 @@ struct dra7xx_pcie { | |||
73 | struct phy **phy; | 89 | struct phy **phy; |
74 | int link_gen; | 90 | int link_gen; |
75 | struct irq_domain *irq_domain; | 91 | struct irq_domain *irq_domain; |
92 | enum dw_pcie_device_mode mode; | ||
93 | }; | ||
94 | |||
95 | struct dra7xx_pcie_of_data { | ||
96 | enum dw_pcie_device_mode mode; | ||
76 | }; | 97 | }; |
77 | 98 | ||
78 | #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) | 99 | #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) |
@@ -88,6 +109,11 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, | |||
88 | writel(value, pcie->base + offset); | 109 | writel(value, pcie->base + offset); |
89 | } | 110 | } |
90 | 111 | ||
112 | static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr) | ||
113 | { | ||
114 | return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; | ||
115 | } | ||
116 | |||
91 | static int dra7xx_pcie_link_up(struct dw_pcie *pci) | 117 | static int dra7xx_pcie_link_up(struct dw_pcie *pci) |
92 | { | 118 | { |
93 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | 119 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); |
@@ -96,9 +122,19 @@ static int dra7xx_pcie_link_up(struct dw_pcie *pci) | |||
96 | return !!(reg & LINK_UP); | 122 | return !!(reg & LINK_UP); |
97 | } | 123 | } |
98 | 124 | ||
99 | static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx) | 125 | static void dra7xx_pcie_stop_link(struct dw_pcie *pci) |
100 | { | 126 | { |
101 | struct dw_pcie *pci = dra7xx->pci; | 127 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); |
128 | u32 reg; | ||
129 | |||
130 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); | ||
131 | reg &= ~LTSSM_EN; | ||
132 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | ||
133 | } | ||
134 | |||
135 | static int dra7xx_pcie_establish_link(struct dw_pcie *pci) | ||
136 | { | ||
137 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
102 | struct device *dev = pci->dev; | 138 | struct device *dev = pci->dev; |
103 | u32 reg; | 139 | u32 reg; |
104 | u32 exp_cap_off = EXP_CAP_ID_OFFSET; | 140 | u32 exp_cap_off = EXP_CAP_ID_OFFSET; |
@@ -132,34 +168,42 @@ static int dra7xx_pcie_establish_link(struct dra7xx_pcie *dra7xx) | |||
132 | reg |= LTSSM_EN; | 168 | reg |= LTSSM_EN; |
133 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | 169 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); |
134 | 170 | ||
135 | return dw_pcie_wait_for_link(pci); | 171 | return 0; |
136 | } | 172 | } |
137 | 173 | ||
138 | static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) | 174 | static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) |
139 | { | 175 | { |
140 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, | ||
141 | ~INTERRUPTS); | ||
142 | dra7xx_pcie_writel(dra7xx, | ||
143 | PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS); | ||
144 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, | 176 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, |
145 | ~LEG_EP_INTERRUPTS & ~MSI); | 177 | ~LEG_EP_INTERRUPTS & ~MSI); |
146 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, | 178 | |
179 | dra7xx_pcie_writel(dra7xx, | ||
180 | PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, | ||
147 | MSI | LEG_EP_INTERRUPTS); | 181 | MSI | LEG_EP_INTERRUPTS); |
148 | } | 182 | } |
149 | 183 | ||
184 | static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) | ||
185 | { | ||
186 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, | ||
187 | ~INTERRUPTS); | ||
188 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, | ||
189 | INTERRUPTS); | ||
190 | } | ||
191 | |||
192 | static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) | ||
193 | { | ||
194 | dra7xx_pcie_enable_wrapper_interrupts(dra7xx); | ||
195 | dra7xx_pcie_enable_msi_interrupts(dra7xx); | ||
196 | } | ||
197 | |||
150 | static void dra7xx_pcie_host_init(struct pcie_port *pp) | 198 | static void dra7xx_pcie_host_init(struct pcie_port *pp) |
151 | { | 199 | { |
152 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 200 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
153 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | 201 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); |
154 | 202 | ||
155 | pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; | ||
156 | pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; | ||
157 | pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR; | ||
158 | pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR; | ||
159 | |||
160 | dw_pcie_setup_rc(pp); | 203 | dw_pcie_setup_rc(pp); |
161 | 204 | ||
162 | dra7xx_pcie_establish_link(dra7xx); | 205 | dra7xx_pcie_establish_link(pci); |
206 | dw_pcie_wait_for_link(pci); | ||
163 | dw_pcie_msi_init(pp); | 207 | dw_pcie_msi_init(pp); |
164 | dra7xx_pcie_enable_interrupts(dra7xx); | 208 | dra7xx_pcie_enable_interrupts(dra7xx); |
165 | } | 209 | } |
@@ -237,6 +281,7 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) | |||
237 | struct dra7xx_pcie *dra7xx = arg; | 281 | struct dra7xx_pcie *dra7xx = arg; |
238 | struct dw_pcie *pci = dra7xx->pci; | 282 | struct dw_pcie *pci = dra7xx->pci; |
239 | struct device *dev = pci->dev; | 283 | struct device *dev = pci->dev; |
284 | struct dw_pcie_ep *ep = &pci->ep; | ||
240 | u32 reg; | 285 | u32 reg; |
241 | 286 | ||
242 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); | 287 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); |
@@ -273,8 +318,11 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) | |||
273 | if (reg & LINK_REQ_RST) | 318 | if (reg & LINK_REQ_RST) |
274 | dev_dbg(dev, "Link Request Reset\n"); | 319 | dev_dbg(dev, "Link Request Reset\n"); |
275 | 320 | ||
276 | if (reg & LINK_UP_EVT) | 321 | if (reg & LINK_UP_EVT) { |
322 | if (dra7xx->mode == DW_PCIE_EP_TYPE) | ||
323 | dw_pcie_ep_linkup(ep); | ||
277 | dev_dbg(dev, "Link-up state change\n"); | 324 | dev_dbg(dev, "Link-up state change\n"); |
325 | } | ||
278 | 326 | ||
279 | if (reg & CFG_BME_EVT) | 327 | if (reg & CFG_BME_EVT) |
280 | dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); | 328 | dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); |
@@ -287,6 +335,94 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) | |||
287 | return IRQ_HANDLED; | 335 | return IRQ_HANDLED; |
288 | } | 336 | } |
289 | 337 | ||
338 | static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) | ||
339 | { | ||
340 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
341 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
342 | |||
343 | dra7xx_pcie_enable_wrapper_interrupts(dra7xx); | ||
344 | } | ||
345 | |||
346 | static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) | ||
347 | { | ||
348 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); | ||
349 | mdelay(1); | ||
350 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); | ||
351 | } | ||
352 | |||
353 | static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, | ||
354 | u8 interrupt_num) | ||
355 | { | ||
356 | u32 reg; | ||
357 | |||
358 | reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; | ||
359 | reg |= MSI_REQ_GRANT; | ||
360 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); | ||
361 | } | ||
362 | |||
363 | static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, | ||
364 | enum pci_epc_irq_type type, u8 interrupt_num) | ||
365 | { | ||
366 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
367 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
368 | |||
369 | switch (type) { | ||
370 | case PCI_EPC_IRQ_LEGACY: | ||
371 | dra7xx_pcie_raise_legacy_irq(dra7xx); | ||
372 | break; | ||
373 | case PCI_EPC_IRQ_MSI: | ||
374 | dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); | ||
375 | break; | ||
376 | default: | ||
377 | dev_err(pci->dev, "UNKNOWN IRQ type\n"); | ||
378 | } | ||
379 | |||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static struct dw_pcie_ep_ops pcie_ep_ops = { | ||
384 | .ep_init = dra7xx_pcie_ep_init, | ||
385 | .raise_irq = dra7xx_pcie_raise_irq, | ||
386 | }; | ||
387 | |||
388 | static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, | ||
389 | struct platform_device *pdev) | ||
390 | { | ||
391 | int ret; | ||
392 | struct dw_pcie_ep *ep; | ||
393 | struct resource *res; | ||
394 | struct device *dev = &pdev->dev; | ||
395 | struct dw_pcie *pci = dra7xx->pci; | ||
396 | |||
397 | ep = &pci->ep; | ||
398 | ep->ops = &pcie_ep_ops; | ||
399 | |||
400 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); | ||
401 | pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res)); | ||
402 | if (!pci->dbi_base) | ||
403 | return -ENOMEM; | ||
404 | |||
405 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); | ||
406 | pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res)); | ||
407 | if (!pci->dbi_base2) | ||
408 | return -ENOMEM; | ||
409 | |||
410 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); | ||
411 | if (!res) | ||
412 | return -EINVAL; | ||
413 | |||
414 | ep->phys_base = res->start; | ||
415 | ep->addr_size = resource_size(res); | ||
416 | |||
417 | ret = dw_pcie_ep_init(ep); | ||
418 | if (ret) { | ||
419 | dev_err(dev, "failed to initialize endpoint\n"); | ||
420 | return ret; | ||
421 | } | ||
422 | |||
423 | return 0; | ||
424 | } | ||
425 | |||
290 | static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, | 426 | static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, |
291 | struct platform_device *pdev) | 427 | struct platform_device *pdev) |
292 | { | 428 | { |
@@ -329,6 +465,9 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, | |||
329 | } | 465 | } |
330 | 466 | ||
331 | static const struct dw_pcie_ops dw_pcie_ops = { | 467 | static const struct dw_pcie_ops dw_pcie_ops = { |
468 | .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, | ||
469 | .start_link = dra7xx_pcie_establish_link, | ||
470 | .stop_link = dra7xx_pcie_stop_link, | ||
332 | .link_up = dra7xx_pcie_link_up, | 471 | .link_up = dra7xx_pcie_link_up, |
333 | }; | 472 | }; |
334 | 473 | ||
@@ -371,6 +510,68 @@ err_phy: | |||
371 | return ret; | 510 | return ret; |
372 | } | 511 | } |
373 | 512 | ||
513 | static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { | ||
514 | .mode = DW_PCIE_RC_TYPE, | ||
515 | }; | ||
516 | |||
517 | static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { | ||
518 | .mode = DW_PCIE_EP_TYPE, | ||
519 | }; | ||
520 | |||
521 | static const struct of_device_id of_dra7xx_pcie_match[] = { | ||
522 | { | ||
523 | .compatible = "ti,dra7-pcie", | ||
524 | .data = &dra7xx_pcie_rc_of_data, | ||
525 | }, | ||
526 | { | ||
527 | .compatible = "ti,dra7-pcie-ep", | ||
528 | .data = &dra7xx_pcie_ep_of_data, | ||
529 | }, | ||
530 | {}, | ||
531 | }; | ||
532 | |||
533 | /* | ||
534 | * dra7xx_pcie_ep_unaligned_memaccess() - workaround for AM572x/AM571x Errata i870 | ||
535 | * @dev: device of the dra7xx PCIe instance to which the workaround is applied | ||
536 | * | ||
537 | * Accesses to the PCIe slave port that are not 32-bit aligned result in an | ||
538 | * incorrect mapping to the TLP Address and Byte Enable fields. Therefore, | ||
539 | * byte and half-word accesses are not possible at byte offsets 0x1, 0x2 and | ||
540 | * 0x3. | ||
541 | * | ||
542 | * To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. | ||
543 | */ | ||
544 | static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev) | ||
545 | { | ||
546 | int ret; | ||
547 | struct device_node *np = dev->of_node; | ||
548 | struct of_phandle_args args; | ||
549 | struct regmap *regmap; | ||
550 | |||
551 | regmap = syscon_regmap_lookup_by_phandle(np, | ||
552 | "ti,syscon-unaligned-access"); | ||
553 | if (IS_ERR(regmap)) { | ||
554 | dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | |||
558 | ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", | ||
559 | 2, 0, &args); | ||
560 | if (ret) { | ||
561 | dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); | ||
562 | return ret; | ||
563 | } | ||
564 | |||
565 | ret = regmap_update_bits(regmap, args.args[0], args.args[1], | ||
566 | args.args[1]); | ||
567 | if (ret) | ||
568 | dev_err(dev, "failed to enable unaligned access\n"); | ||
569 | |||
570 | of_node_put(args.np); | ||
571 | |||
572 | return ret; | ||
573 | } | ||
574 | |||
374 | static int __init dra7xx_pcie_probe(struct platform_device *pdev) | 575 | static int __init dra7xx_pcie_probe(struct platform_device *pdev) |
375 | { | 576 | { |
376 | u32 reg; | 577 | u32 reg; |
@@ -388,6 +589,16 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) | |||
388 | struct device_node *np = dev->of_node; | 589 | struct device_node *np = dev->of_node; |
389 | char name[10]; | 590 | char name[10]; |
390 | struct gpio_desc *reset; | 591 | struct gpio_desc *reset; |
592 | const struct of_device_id *match; | ||
593 | const struct dra7xx_pcie_of_data *data; | ||
594 | enum dw_pcie_device_mode mode; | ||
595 | |||
596 | match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); | ||
597 | if (!match) | ||
598 | return -EINVAL; | ||
599 | |||
600 | data = (struct dra7xx_pcie_of_data *)match->data; | ||
601 | mode = (enum dw_pcie_device_mode)data->mode; | ||
391 | 602 | ||
392 | dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); | 603 | dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); |
393 | if (!dra7xx) | 604 | if (!dra7xx) |
@@ -409,13 +620,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) | |||
409 | return -EINVAL; | 620 | return -EINVAL; |
410 | } | 621 | } |
411 | 622 | ||
412 | ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, | ||
413 | IRQF_SHARED, "dra7xx-pcie-main", dra7xx); | ||
414 | if (ret) { | ||
415 | dev_err(dev, "failed to request irq\n"); | ||
416 | return ret; | ||
417 | } | ||
418 | |||
419 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); | 623 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); |
420 | base = devm_ioremap_nocache(dev, res->start, resource_size(res)); | 624 | base = devm_ioremap_nocache(dev, res->start, resource_size(res)); |
421 | if (!base) | 625 | if (!base) |
@@ -473,9 +677,37 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) | |||
473 | if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) | 677 | if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) |
474 | dra7xx->link_gen = 2; | 678 | dra7xx->link_gen = 2; |
475 | 679 | ||
476 | ret = dra7xx_add_pcie_port(dra7xx, pdev); | 680 | switch (mode) { |
477 | if (ret < 0) | 681 | case DW_PCIE_RC_TYPE: |
682 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, | ||
683 | DEVICE_TYPE_RC); | ||
684 | ret = dra7xx_add_pcie_port(dra7xx, pdev); | ||
685 | if (ret < 0) | ||
686 | goto err_gpio; | ||
687 | break; | ||
688 | case DW_PCIE_EP_TYPE: | ||
689 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, | ||
690 | DEVICE_TYPE_EP); | ||
691 | |||
692 | ret = dra7xx_pcie_ep_unaligned_memaccess(dev); | ||
693 | if (ret) | ||
694 | goto err_gpio; | ||
695 | |||
696 | ret = dra7xx_add_pcie_ep(dra7xx, pdev); | ||
697 | if (ret < 0) | ||
698 | goto err_gpio; | ||
699 | break; | ||
700 | default: | ||
701 | dev_err(dev, "INVALID device type %d\n", mode); | ||
702 | } | ||
703 | dra7xx->mode = mode; | ||
704 | |||
705 | ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, | ||
706 | IRQF_SHARED, "dra7xx-pcie-main", dra7xx); | ||
707 | if (ret) { | ||
708 | dev_err(dev, "failed to request irq\n"); | ||
478 | goto err_gpio; | 709 | goto err_gpio; |
710 | } | ||
479 | 711 | ||
480 | return 0; | 712 | return 0; |
481 | 713 | ||
@@ -496,6 +728,9 @@ static int dra7xx_pcie_suspend(struct device *dev) | |||
496 | struct dw_pcie *pci = dra7xx->pci; | 728 | struct dw_pcie *pci = dra7xx->pci; |
497 | u32 val; | 729 | u32 val; |
498 | 730 | ||
731 | if (dra7xx->mode != DW_PCIE_RC_TYPE) | ||
732 | return 0; | ||
733 | |||
499 | /* clear MSE */ | 734 | /* clear MSE */ |
500 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); | 735 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); |
501 | val &= ~PCI_COMMAND_MEMORY; | 736 | val &= ~PCI_COMMAND_MEMORY; |
@@ -510,6 +745,9 @@ static int dra7xx_pcie_resume(struct device *dev) | |||
510 | struct dw_pcie *pci = dra7xx->pci; | 745 | struct dw_pcie *pci = dra7xx->pci; |
511 | u32 val; | 746 | u32 val; |
512 | 747 | ||
748 | if (dra7xx->mode != DW_PCIE_RC_TYPE) | ||
749 | return 0; | ||
750 | |||
513 | /* set MSE */ | 751 | /* set MSE */ |
514 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); | 752 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); |
515 | val |= PCI_COMMAND_MEMORY; | 753 | val |= PCI_COMMAND_MEMORY; |
@@ -548,11 +786,6 @@ static const struct dev_pm_ops dra7xx_pcie_pm_ops = { | |||
548 | dra7xx_pcie_resume_noirq) | 786 | dra7xx_pcie_resume_noirq) |
549 | }; | 787 | }; |
550 | 788 | ||
551 | static const struct of_device_id of_dra7xx_pcie_match[] = { | ||
552 | { .compatible = "ti,dra7-pcie", }, | ||
553 | {}, | ||
554 | }; | ||
555 | |||
556 | static struct platform_driver dra7xx_pcie_driver = { | 789 | static struct platform_driver dra7xx_pcie_driver = { |
557 | .driver = { | 790 | .driver = { |
558 | .name = "dra7-pcie", | 791 | .name = "dra7-pcie", |
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 44f774c12fb2..546082ad5a3f 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c | |||
@@ -521,23 +521,25 @@ static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep) | |||
521 | exynos_pcie_msi_init(ep); | 521 | exynos_pcie_msi_init(ep); |
522 | } | 522 | } |
523 | 523 | ||
524 | static u32 exynos_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) | 524 | static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, |
525 | u32 reg, size_t size) | ||
525 | { | 526 | { |
526 | struct exynos_pcie *ep = to_exynos_pcie(pci); | 527 | struct exynos_pcie *ep = to_exynos_pcie(pci); |
527 | u32 val; | 528 | u32 val; |
528 | 529 | ||
529 | exynos_pcie_sideband_dbi_r_mode(ep, true); | 530 | exynos_pcie_sideband_dbi_r_mode(ep, true); |
530 | val = readl(pci->dbi_base + reg); | 531 | dw_pcie_read(base + reg, size, &val); |
531 | exynos_pcie_sideband_dbi_r_mode(ep, false); | 532 | exynos_pcie_sideband_dbi_r_mode(ep, false); |
532 | return val; | 533 | return val; |
533 | } | 534 | } |
534 | 535 | ||
535 | static void exynos_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) | 536 | static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, |
537 | u32 reg, size_t size, u32 val) | ||
536 | { | 538 | { |
537 | struct exynos_pcie *ep = to_exynos_pcie(pci); | 539 | struct exynos_pcie *ep = to_exynos_pcie(pci); |
538 | 540 | ||
539 | exynos_pcie_sideband_dbi_w_mode(ep, true); | 541 | exynos_pcie_sideband_dbi_w_mode(ep, true); |
540 | writel(val, pci->dbi_base + reg); | 542 | dw_pcie_write(base + reg, size, val); |
541 | exynos_pcie_sideband_dbi_w_mode(ep, false); | 543 | exynos_pcie_sideband_dbi_w_mode(ep, false); |
542 | } | 544 | } |
543 | 545 | ||
@@ -644,8 +646,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, | |||
644 | } | 646 | } |
645 | 647 | ||
646 | static const struct dw_pcie_ops dw_pcie_ops = { | 648 | static const struct dw_pcie_ops dw_pcie_ops = { |
647 | .readl_dbi = exynos_pcie_readl_dbi, | 649 | .read_dbi = exynos_pcie_read_dbi, |
648 | .writel_dbi = exynos_pcie_writel_dbi, | 650 | .write_dbi = exynos_pcie_write_dbi, |
649 | .link_up = exynos_pcie_link_up, | 651 | .link_up = exynos_pcie_link_up, |
650 | }; | 652 | }; |
651 | 653 | ||
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index 801e46cd266d..a98cba55c7f0 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
20 | #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> | ||
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | #include <linux/of_gpio.h> | 22 | #include <linux/of_gpio.h> |
22 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
@@ -27,6 +28,7 @@ | |||
27 | #include <linux/signal.h> | 28 | #include <linux/signal.h> |
28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
29 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
31 | #include <linux/reset.h> | ||
30 | 32 | ||
31 | #include "pcie-designware.h" | 33 | #include "pcie-designware.h" |
32 | 34 | ||
@@ -36,6 +38,7 @@ enum imx6_pcie_variants { | |||
36 | IMX6Q, | 38 | IMX6Q, |
37 | IMX6SX, | 39 | IMX6SX, |
38 | IMX6QP, | 40 | IMX6QP, |
41 | IMX7D, | ||
39 | }; | 42 | }; |
40 | 43 | ||
41 | struct imx6_pcie { | 44 | struct imx6_pcie { |
@@ -47,6 +50,8 @@ struct imx6_pcie { | |||
47 | struct clk *pcie_inbound_axi; | 50 | struct clk *pcie_inbound_axi; |
48 | struct clk *pcie; | 51 | struct clk *pcie; |
49 | struct regmap *iomuxc_gpr; | 52 | struct regmap *iomuxc_gpr; |
53 | struct reset_control *pciephy_reset; | ||
54 | struct reset_control *apps_reset; | ||
50 | enum imx6_pcie_variants variant; | 55 | enum imx6_pcie_variants variant; |
51 | u32 tx_deemph_gen1; | 56 | u32 tx_deemph_gen1; |
52 | u32 tx_deemph_gen2_3p5db; | 57 | u32 tx_deemph_gen2_3p5db; |
@@ -56,6 +61,11 @@ struct imx6_pcie { | |||
56 | int link_gen; | 61 | int link_gen; |
57 | }; | 62 | }; |
58 | 63 | ||
64 | /* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */ | ||
65 | #define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 | ||
66 | #define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50 | ||
67 | #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 | ||
68 | |||
59 | /* PCIe Root Complex registers (memory-mapped) */ | 69 | /* PCIe Root Complex registers (memory-mapped) */ |
60 | #define PCIE_RC_LCR 0x7c | 70 | #define PCIE_RC_LCR 0x7c |
61 | #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 | 71 | #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 |
@@ -248,6 +258,10 @@ static int imx6q_pcie_abort_handler(unsigned long addr, | |||
248 | static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) | 258 | static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) |
249 | { | 259 | { |
250 | switch (imx6_pcie->variant) { | 260 | switch (imx6_pcie->variant) { |
261 | case IMX7D: | ||
262 | reset_control_assert(imx6_pcie->pciephy_reset); | ||
263 | reset_control_assert(imx6_pcie->apps_reset); | ||
264 | break; | ||
251 | case IMX6SX: | 265 | case IMX6SX: |
252 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 266 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
253 | IMX6SX_GPR12_PCIE_TEST_POWERDOWN, | 267 | IMX6SX_GPR12_PCIE_TEST_POWERDOWN, |
@@ -303,11 +317,32 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) | |||
303 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | 317 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, |
304 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); | 318 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); |
305 | break; | 319 | break; |
320 | case IMX7D: | ||
321 | break; | ||
306 | } | 322 | } |
307 | 323 | ||
308 | return ret; | 324 | return ret; |
309 | } | 325 | } |
310 | 326 | ||
327 | static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) | ||
328 | { | ||
329 | u32 val; | ||
330 | unsigned int retries; | ||
331 | struct device *dev = imx6_pcie->pci->dev; | ||
332 | |||
333 | for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) { | ||
334 | regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val); | ||
335 | |||
336 | if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED) | ||
337 | return; | ||
338 | |||
339 | usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN, | ||
340 | PHY_PLL_LOCK_WAIT_USLEEP_MAX); | ||
341 | } | ||
342 | |||
343 | dev_err(dev, "PCIe PLL lock timeout\n"); | ||
344 | } | ||
345 | |||
311 | static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) | 346 | static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) |
312 | { | 347 | { |
313 | struct dw_pcie *pci = imx6_pcie->pci; | 348 | struct dw_pcie *pci = imx6_pcie->pci; |
@@ -351,6 +386,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) | |||
351 | } | 386 | } |
352 | 387 | ||
353 | switch (imx6_pcie->variant) { | 388 | switch (imx6_pcie->variant) { |
389 | case IMX7D: | ||
390 | reset_control_deassert(imx6_pcie->pciephy_reset); | ||
391 | imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); | ||
392 | break; | ||
354 | case IMX6SX: | 393 | case IMX6SX: |
355 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, | 394 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, |
356 | IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); | 395 | IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); |
@@ -377,35 +416,44 @@ err_pcie_bus: | |||
377 | 416 | ||
378 | static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) | 417 | static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) |
379 | { | 418 | { |
380 | if (imx6_pcie->variant == IMX6SX) | 419 | switch (imx6_pcie->variant) { |
420 | case IMX7D: | ||
421 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
422 | IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); | ||
423 | break; | ||
424 | case IMX6SX: | ||
381 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 425 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
382 | IMX6SX_GPR12_PCIE_RX_EQ_MASK, | 426 | IMX6SX_GPR12_PCIE_RX_EQ_MASK, |
383 | IMX6SX_GPR12_PCIE_RX_EQ_2); | 427 | IMX6SX_GPR12_PCIE_RX_EQ_2); |
428 | /* FALLTHROUGH */ | ||
429 | default: | ||
430 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
431 | IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); | ||
384 | 432 | ||
385 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 433 | /* configure constant input signal to the pcie ctrl and phy */ |
386 | IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); | 434 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
435 | IMX6Q_GPR12_LOS_LEVEL, 9 << 4); | ||
436 | |||
437 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
438 | IMX6Q_GPR8_TX_DEEMPH_GEN1, | ||
439 | imx6_pcie->tx_deemph_gen1 << 0); | ||
440 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
441 | IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, | ||
442 | imx6_pcie->tx_deemph_gen2_3p5db << 6); | ||
443 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
444 | IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, | ||
445 | imx6_pcie->tx_deemph_gen2_6db << 12); | ||
446 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
447 | IMX6Q_GPR8_TX_SWING_FULL, | ||
448 | imx6_pcie->tx_swing_full << 18); | ||
449 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
450 | IMX6Q_GPR8_TX_SWING_LOW, | ||
451 | imx6_pcie->tx_swing_low << 25); | ||
452 | break; | ||
453 | } | ||
387 | 454 | ||
388 | /* configure constant input signal to the pcie ctrl and phy */ | ||
389 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 455 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
390 | IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); | 456 | IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); |
391 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
392 | IMX6Q_GPR12_LOS_LEVEL, 9 << 4); | ||
393 | |||
394 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
395 | IMX6Q_GPR8_TX_DEEMPH_GEN1, | ||
396 | imx6_pcie->tx_deemph_gen1 << 0); | ||
397 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
398 | IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, | ||
399 | imx6_pcie->tx_deemph_gen2_3p5db << 6); | ||
400 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
401 | IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, | ||
402 | imx6_pcie->tx_deemph_gen2_6db << 12); | ||
403 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
404 | IMX6Q_GPR8_TX_SWING_FULL, | ||
405 | imx6_pcie->tx_swing_full << 18); | ||
406 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
407 | IMX6Q_GPR8_TX_SWING_LOW, | ||
408 | imx6_pcie->tx_swing_low << 25); | ||
409 | } | 457 | } |
410 | 458 | ||
411 | static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) | 459 | static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) |
@@ -469,8 +517,11 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) | |||
469 | dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); | 517 | dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); |
470 | 518 | ||
471 | /* Start LTSSM. */ | 519 | /* Start LTSSM. */ |
472 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 520 | if (imx6_pcie->variant == IMX7D) |
473 | IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); | 521 | reset_control_deassert(imx6_pcie->apps_reset); |
522 | else | ||
523 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
524 | IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); | ||
474 | 525 | ||
475 | ret = imx6_pcie_wait_for_link(imx6_pcie); | 526 | ret = imx6_pcie_wait_for_link(imx6_pcie); |
476 | if (ret) | 527 | if (ret) |
@@ -482,29 +533,40 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) | |||
482 | tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; | 533 | tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; |
483 | tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; | 534 | tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; |
484 | dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); | 535 | dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); |
485 | } else { | ||
486 | dev_info(dev, "Link: Gen2 disabled\n"); | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * Start Directed Speed Change so the best possible speed both link | ||
491 | * partners support can be negotiated. | ||
492 | */ | ||
493 | tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
494 | tmp |= PORT_LOGIC_SPEED_CHANGE; | ||
495 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); | ||
496 | 536 | ||
497 | ret = imx6_pcie_wait_for_speed_change(imx6_pcie); | 537 | /* |
498 | if (ret) { | 538 | * Start Directed Speed Change so the best possible |
499 | dev_err(dev, "Failed to bring link up!\n"); | 539 | * speed both link partners support can be negotiated. |
500 | goto err_reset_phy; | 540 | */ |
501 | } | 541 | tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); |
542 | tmp |= PORT_LOGIC_SPEED_CHANGE; | ||
543 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); | ||
544 | |||
545 | if (imx6_pcie->variant != IMX7D) { | ||
546 | /* | ||
547 | * On i.MX7, DIRECT_SPEED_CHANGE behaves differently | ||
548 | * from the i.MX6 family when no link speed transition | ||
549 | * occurs and the link simply stays at Gen1: in that | ||
550 | * case the bit is not cleared by the hardware, which | ||
551 | * would make the speed-change wait below report a | ||
552 | * false failure. | ||
553 | */ | ||
554 | |||
555 | ret = imx6_pcie_wait_for_speed_change(imx6_pcie); | ||
556 | if (ret) { | ||
557 | dev_err(dev, "Failed to bring link up!\n"); | ||
558 | goto err_reset_phy; | ||
559 | } | ||
560 | } | ||
502 | 561 | ||
503 | /* Make sure link training is finished as well! */ | 562 | /* Make sure link training is finished as well! */ |
504 | ret = imx6_pcie_wait_for_link(imx6_pcie); | 563 | ret = imx6_pcie_wait_for_link(imx6_pcie); |
505 | if (ret) { | 564 | if (ret) { |
506 | dev_err(dev, "Failed to bring link up!\n"); | 565 | dev_err(dev, "Failed to bring link up!\n"); |
507 | goto err_reset_phy; | 566 | goto err_reset_phy; |
567 | } | ||
568 | } else { | ||
569 | dev_info(dev, "Link: Gen2 disabled\n"); | ||
508 | } | 570 | } |
509 | 571 | ||
510 | tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR); | 572 | tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR); |
@@ -544,8 +606,8 @@ static struct dw_pcie_host_ops imx6_pcie_host_ops = { | |||
544 | .host_init = imx6_pcie_host_init, | 606 | .host_init = imx6_pcie_host_init, |
545 | }; | 607 | }; |
546 | 608 | ||
547 | static int __init imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, | 609 | static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, |
548 | struct platform_device *pdev) | 610 | struct platform_device *pdev) |
549 | { | 611 | { |
550 | struct dw_pcie *pci = imx6_pcie->pci; | 612 | struct dw_pcie *pci = imx6_pcie->pci; |
551 | struct pcie_port *pp = &pci->pp; | 613 | struct pcie_port *pp = &pci->pp; |
@@ -585,7 +647,7 @@ static const struct dw_pcie_ops dw_pcie_ops = { | |||
585 | .link_up = imx6_pcie_link_up, | 647 | .link_up = imx6_pcie_link_up, |
586 | }; | 648 | }; |
587 | 649 | ||
588 | static int __init imx6_pcie_probe(struct platform_device *pdev) | 650 | static int imx6_pcie_probe(struct platform_device *pdev) |
589 | { | 651 | { |
590 | struct device *dev = &pdev->dev; | 652 | struct device *dev = &pdev->dev; |
591 | struct dw_pcie *pci; | 653 | struct dw_pcie *pci; |
@@ -609,10 +671,6 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
609 | imx6_pcie->variant = | 671 | imx6_pcie->variant = |
610 | (enum imx6_pcie_variants)of_device_get_match_data(dev); | 672 | (enum imx6_pcie_variants)of_device_get_match_data(dev); |
611 | 673 | ||
612 | /* Added for PCI abort handling */ | ||
613 | hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, | ||
614 | "imprecise external abort"); | ||
615 | |||
616 | dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 674 | dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
617 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); | 675 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); |
618 | if (IS_ERR(pci->dbi_base)) | 676 | if (IS_ERR(pci->dbi_base)) |
@@ -632,6 +690,8 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
632 | dev_err(dev, "unable to get reset gpio\n"); | 690 | dev_err(dev, "unable to get reset gpio\n"); |
633 | return ret; | 691 | return ret; |
634 | } | 692 | } |
693 | } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) { | ||
694 | return imx6_pcie->reset_gpio; | ||
635 | } | 695 | } |
636 | 696 | ||
637 | /* Fetch clocks */ | 697 | /* Fetch clocks */ |
@@ -653,13 +713,31 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
653 | return PTR_ERR(imx6_pcie->pcie); | 713 | return PTR_ERR(imx6_pcie->pcie); |
654 | } | 714 | } |
655 | 715 | ||
656 | if (imx6_pcie->variant == IMX6SX) { | 716 | switch (imx6_pcie->variant) { |
717 | case IMX6SX: | ||
657 | imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, | 718 | imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, |
658 | "pcie_inbound_axi"); | 719 | "pcie_inbound_axi"); |
659 | if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { | 720 | if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { |
660 | dev_err(dev, "pcie_inbound_axi clock missing or invalid\n"); | 721 | dev_err(dev, "pcie_inbound_axi clock missing or invalid\n"); |
661 | return PTR_ERR(imx6_pcie->pcie_inbound_axi); | 722 | return PTR_ERR(imx6_pcie->pcie_inbound_axi); |
662 | } | 723 | } |
724 | break; | ||
725 | case IMX7D: | ||
726 | imx6_pcie->pciephy_reset = devm_reset_control_get(dev, | ||
727 | "pciephy"); | ||
728 | if (IS_ERR(imx6_pcie->pciephy_reset)) { | ||
729 | dev_err(dev, "Failed to get PCIEPHY reset control\n"); | ||
730 | return PTR_ERR(imx6_pcie->pciephy_reset); | ||
731 | } | ||
732 | |||
733 | imx6_pcie->apps_reset = devm_reset_control_get(dev, "apps"); | ||
734 | if (IS_ERR(imx6_pcie->apps_reset)) { | ||
735 | dev_err(dev, "Failed to get PCIE APPS reset control\n"); | ||
736 | return PTR_ERR(imx6_pcie->apps_reset); | ||
737 | } | ||
738 | break; | ||
739 | default: | ||
740 | break; | ||
663 | } | 741 | } |
664 | 742 | ||
665 | /* Grab GPR config register range */ | 743 | /* Grab GPR config register range */ |
@@ -718,6 +796,7 @@ static const struct of_device_id imx6_pcie_of_match[] = { | |||
718 | { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, | 796 | { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, |
719 | { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, | 797 | { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, |
720 | { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, | 798 | { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, |
799 | { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, }, | ||
721 | {}, | 800 | {}, |
722 | }; | 801 | }; |
723 | 802 | ||
@@ -725,12 +804,24 @@ static struct platform_driver imx6_pcie_driver = { | |||
725 | .driver = { | 804 | .driver = { |
726 | .name = "imx6q-pcie", | 805 | .name = "imx6q-pcie", |
727 | .of_match_table = imx6_pcie_of_match, | 806 | .of_match_table = imx6_pcie_of_match, |
807 | .suppress_bind_attrs = true, | ||
728 | }, | 808 | }, |
809 | .probe = imx6_pcie_probe, | ||
729 | .shutdown = imx6_pcie_shutdown, | 810 | .shutdown = imx6_pcie_shutdown, |
730 | }; | 811 | }; |
731 | 812 | ||
732 | static int __init imx6_pcie_init(void) | 813 | static int __init imx6_pcie_init(void) |
733 | { | 814 | { |
734 | return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe); | 815 | /* |
816 | * Since probe() can be deferred we need to make sure that | ||
817 | * hook_fault_code is not called after __init memory is freed | ||
818 | * by kernel and since imx6q_pcie_abort_handler() is a no-op, | ||
819 | * we can install the handler here without risking it | ||
820 | * accessing some uninitialized driver state. | ||
821 | */ | ||
822 | hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, | ||
823 | "imprecise external abort"); | ||
824 | |||
825 | return platform_driver_register(&imx6_pcie_driver); | ||
735 | } | 826 | } |
736 | device_initcall(imx6_pcie_init); | 827 | device_initcall(imx6_pcie_init); |
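
The new imx7d_pcie_wait_for_phy_pll_lock() open-codes a bounded poll of IOMUXC_GPR22. Purely as a sketch of an alternative (not what the patch does, and assuming the regmap_read_poll_timeout() helper is available in this tree), the same wait could be condensed to:

static int imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	int ret;

	/* poll roughly every 50us, give up after about 400ms */
	ret = regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22,
				       val,
				       val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
				       PHY_PLL_LOCK_WAIT_USLEEP_MIN,
				       PHY_PLL_LOCK_WAIT_MAX_RETRIES *
				       PHY_PLL_LOCK_WAIT_USLEEP_MAX);
	if (ret)
		dev_err(imx6_pcie->pci->dev, "PCIe PLL lock timeout\n");

	return ret;
}

The patch keeps the open-coded loop with a void return and only logs the timeout.
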
diff --git a/drivers/pci/dwc/pci-keystone-dw.c b/drivers/pci/dwc/pci-keystone-dw.c index 6b396f6b4615..8bc626e640c8 100644 --- a/drivers/pci/dwc/pci-keystone-dw.c +++ b/drivers/pci/dwc/pci-keystone-dw.c | |||
@@ -543,7 +543,7 @@ int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, | |||
543 | 543 | ||
544 | /* Index 0 is the config reg. space address */ | 544 | /* Index 0 is the config reg. space address */ |
545 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 545 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
546 | pci->dbi_base = devm_ioremap_resource(dev, res); | 546 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); |
547 | if (IS_ERR(pci->dbi_base)) | 547 | if (IS_ERR(pci->dbi_base)) |
548 | return PTR_ERR(pci->dbi_base); | 548 | return PTR_ERR(pci->dbi_base); |
549 | 549 | ||
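
This hunk, like several below, moves DBI and config-space mappings from devm_ioremap_resource() to the new devm_pci_remap_cfg_resource()/devm_pci_remap_cfgspace() helpers, which are meant to map PCI configuration space with the stricter mapping attributes (non-posted writes) that some architectures require. A hedged sketch of the two call forms, using illustrative names rather than anything from the patch:

static int my_pcie_map_regs(struct platform_device *pdev, struct dw_pcie *pci,
			    struct pcie_port *pp)
{
	struct device *dev = &pdev->dev;
	struct resource *res;

	/* resource-based form: returns an ERR_PTR() on failure */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
	pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* address/size form: returns NULL on failure */
	pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, pp->cfg0_base,
						   pp->cfg0_size);
	if (!pp->va_cfg0_base)
		return -ENOMEM;

	return 0;
}

Note the two different failure conventions, which the converted hunks in pcie-designware-host.c below also reflect: the resource-based variant returns an ERR_PTR(), while the raw address/size variant returns NULL.
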
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c index c32e392a0ae6..27d638c4e134 100644 --- a/drivers/pci/dwc/pci-layerscape.c +++ b/drivers/pci/dwc/pci-layerscape.c | |||
@@ -283,7 +283,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev) | |||
283 | pcie->pci = pci; | 283 | pcie->pci = pci; |
284 | 284 | ||
285 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | 285 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); |
286 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); | 286 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); |
287 | if (IS_ERR(pci->dbi_base)) | 287 | if (IS_ERR(pci->dbi_base)) |
288 | return PTR_ERR(pci->dbi_base); | 288 | return PTR_ERR(pci->dbi_base); |
289 | 289 | ||
@@ -305,6 +305,7 @@ static struct platform_driver ls_pcie_driver = { | |||
305 | .driver = { | 305 | .driver = { |
306 | .name = "layerscape-pcie", | 306 | .name = "layerscape-pcie", |
307 | .of_match_table = ls_pcie_of_match, | 307 | .of_match_table = ls_pcie_of_match, |
308 | .suppress_bind_attrs = true, | ||
308 | }, | 309 | }, |
309 | }; | 310 | }; |
310 | builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); | 311 | builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); |
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c index f110e3b24a26..495b023042b3 100644 --- a/drivers/pci/dwc/pcie-armada8k.c +++ b/drivers/pci/dwc/pcie-armada8k.c | |||
@@ -230,7 +230,7 @@ static int armada8k_pcie_probe(struct platform_device *pdev) | |||
230 | 230 | ||
231 | /* Get the dw-pcie unit configuration/control registers base. */ | 231 | /* Get the dw-pcie unit configuration/control registers base. */ |
232 | base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); | 232 | base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); |
233 | pci->dbi_base = devm_ioremap_resource(dev, base); | 233 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); |
234 | if (IS_ERR(pci->dbi_base)) { | 234 | if (IS_ERR(pci->dbi_base)) { |
235 | dev_err(dev, "couldn't remap regs base %p\n", base); | 235 | dev_err(dev, "couldn't remap regs base %p\n", base); |
236 | ret = PTR_ERR(pci->dbi_base); | 236 | ret = PTR_ERR(pci->dbi_base); |
@@ -262,6 +262,7 @@ static struct platform_driver armada8k_pcie_driver = { | |||
262 | .driver = { | 262 | .driver = { |
263 | .name = "armada8k-pcie", | 263 | .name = "armada8k-pcie", |
264 | .of_match_table = of_match_ptr(armada8k_pcie_of_match), | 264 | .of_match_table = of_match_ptr(armada8k_pcie_of_match), |
265 | .suppress_bind_attrs = true, | ||
265 | }, | 266 | }, |
266 | }; | 267 | }; |
267 | builtin_platform_driver(armada8k_pcie_driver); | 268 | builtin_platform_driver(armada8k_pcie_driver); |
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index 6d23683c0892..82a04acc42fd 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c | |||
@@ -78,6 +78,11 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u | |||
78 | regmap_write(artpec6_pcie->regmap, offset, val); | 78 | regmap_write(artpec6_pcie->regmap, offset, val); |
79 | } | 79 | } |
80 | 80 | ||
81 | static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr) | ||
82 | { | ||
83 | return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR; | ||
84 | } | ||
85 | |||
81 | static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie) | 86 | static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie) |
82 | { | 87 | { |
83 | struct dw_pcie *pci = artpec6_pcie->pci; | 88 | struct dw_pcie *pci = artpec6_pcie->pci; |
@@ -142,11 +147,6 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie) | |||
142 | */ | 147 | */ |
143 | dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN); | 148 | dw_pcie_writel_dbi(pci, MISC_CONTROL_1_OFF, DBI_RO_WR_EN); |
144 | 149 | ||
145 | pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR; | ||
146 | pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR; | ||
147 | pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR; | ||
148 | pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR; | ||
149 | |||
150 | /* setup root complex */ | 150 | /* setup root complex */ |
151 | dw_pcie_setup_rc(pp); | 151 | dw_pcie_setup_rc(pp); |
152 | 152 | ||
@@ -235,6 +235,7 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, | |||
235 | } | 235 | } |
236 | 236 | ||
237 | static const struct dw_pcie_ops dw_pcie_ops = { | 237 | static const struct dw_pcie_ops dw_pcie_ops = { |
238 | .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup, | ||
238 | }; | 239 | }; |
239 | 240 | ||
240 | static int artpec6_pcie_probe(struct platform_device *pdev) | 241 | static int artpec6_pcie_probe(struct platform_device *pdev) |
@@ -294,6 +295,7 @@ static struct platform_driver artpec6_pcie_driver = { | |||
294 | .driver = { | 295 | .driver = { |
295 | .name = "artpec6-pcie", | 296 | .name = "artpec6-pcie", |
296 | .of_match_table = artpec6_pcie_of_match, | 297 | .of_match_table = artpec6_pcie_of_match, |
298 | .suppress_bind_attrs = true, | ||
297 | }, | 299 | }, |
298 | }; | 300 | }; |
299 | builtin_platform_driver(artpec6_pcie_driver); | 301 | builtin_platform_driver(artpec6_pcie_driver); |
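
With the new cpu_addr_fixup op, the ARTPEC-6 address masking moves out of the host bring-up path and into the core, which now applies it in dw_pcie_prog_outbound_atu() (see the pcie-designware.c hunk further down). For a hypothetical controller whose CPU view of the PCI window is offset rather than masked, an equivalent hook could look like this sketch (the offset constant and names are invented for illustration):

/* hypothetical: CPU addresses are the bus addresses plus a fixed offset */
#define MY_PCIE_CPU_TO_BUS_OFFSET	0x40000000ULL

static u64 my_pcie_cpu_addr_fixup(u64 cpu_addr)
{
	return cpu_addr - MY_PCIE_CPU_TO_BUS_OFFSET;
}

static const struct dw_pcie_ops my_offset_pcie_ops = {
	.cpu_addr_fixup	= my_pcie_cpu_addr_fixup,
};
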
diff --git a/drivers/pci/dwc/pcie-designware-ep.c b/drivers/pci/dwc/pcie-designware-ep.c new file mode 100644 index 000000000000..398406393f37 --- /dev/null +++ b/drivers/pci/dwc/pcie-designware-ep.c | |||
@@ -0,0 +1,342 @@ | |||
1 | /** | ||
2 | * Synopsys Designware PCIe Endpoint controller driver | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/of.h> | ||
21 | |||
22 | #include "pcie-designware.h" | ||
23 | #include <linux/pci-epc.h> | ||
24 | #include <linux/pci-epf.h> | ||
25 | |||
26 | void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) | ||
27 | { | ||
28 | struct pci_epc *epc = ep->epc; | ||
29 | |||
30 | pci_epc_linkup(epc); | ||
31 | } | ||
32 | |||
33 | static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) | ||
34 | { | ||
35 | u32 reg; | ||
36 | |||
37 | reg = PCI_BASE_ADDRESS_0 + (4 * bar); | ||
38 | dw_pcie_writel_dbi2(pci, reg, 0x0); | ||
39 | dw_pcie_writel_dbi(pci, reg, 0x0); | ||
40 | } | ||
41 | |||
42 | static int dw_pcie_ep_write_header(struct pci_epc *epc, | ||
43 | struct pci_epf_header *hdr) | ||
44 | { | ||
45 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
46 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
47 | |||
48 | dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid); | ||
49 | dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid); | ||
50 | dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid); | ||
51 | dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code); | ||
52 | dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, | ||
53 | hdr->subclass_code | hdr->baseclass_code << 8); | ||
54 | dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE, | ||
55 | hdr->cache_line_size); | ||
56 | dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID, | ||
57 | hdr->subsys_vendor_id); | ||
58 | dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id); | ||
59 | dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN, | ||
60 | hdr->interrupt_pin); | ||
61 | |||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar, | ||
66 | dma_addr_t cpu_addr, | ||
67 | enum dw_pcie_as_type as_type) | ||
68 | { | ||
69 | int ret; | ||
70 | u32 free_win; | ||
71 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
72 | |||
73 | free_win = find_first_zero_bit(&ep->ib_window_map, | ||
74 | sizeof(ep->ib_window_map)); | ||
75 | if (free_win >= ep->num_ib_windows) { | ||
76 | dev_err(pci->dev, "no free inbound window\n"); | ||
77 | return -EINVAL; | ||
78 | } | ||
79 | |||
80 | ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr, | ||
81 | as_type); | ||
82 | if (ret < 0) { | ||
83 | dev_err(pci->dev, "Failed to program IB window\n"); | ||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | ep->bar_to_atu[bar] = free_win; | ||
88 | set_bit(free_win, &ep->ib_window_map); | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr, | ||
94 | u64 pci_addr, size_t size) | ||
95 | { | ||
96 | u32 free_win; | ||
97 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
98 | |||
99 | free_win = find_first_zero_bit(&ep->ob_window_map, | ||
100 | sizeof(ep->ob_window_map)); | ||
101 | if (free_win >= ep->num_ob_windows) { | ||
102 | dev_err(pci->dev, "no free outbound window\n"); | ||
103 | return -EINVAL; | ||
104 | } | ||
105 | |||
106 | dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM, | ||
107 | phys_addr, pci_addr, size); | ||
108 | |||
109 | set_bit(free_win, &ep->ob_window_map); | ||
110 | ep->outbound_addr[free_win] = phys_addr; | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static void dw_pcie_ep_clear_bar(struct pci_epc *epc, enum pci_barno bar) | ||
116 | { | ||
117 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
118 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
119 | u32 atu_index = ep->bar_to_atu[bar]; | ||
120 | |||
121 | dw_pcie_ep_reset_bar(pci, bar); | ||
122 | |||
123 | dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); | ||
124 | clear_bit(atu_index, &ep->ib_window_map); | ||
125 | } | ||
126 | |||
127 | static int dw_pcie_ep_set_bar(struct pci_epc *epc, enum pci_barno bar, | ||
128 | dma_addr_t bar_phys, size_t size, int flags) | ||
129 | { | ||
130 | int ret; | ||
131 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
132 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
133 | enum dw_pcie_as_type as_type; | ||
134 | u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); | ||
135 | |||
136 | if (!(flags & PCI_BASE_ADDRESS_SPACE)) | ||
137 | as_type = DW_PCIE_AS_MEM; | ||
138 | else | ||
139 | as_type = DW_PCIE_AS_IO; | ||
140 | |||
141 | ret = dw_pcie_ep_inbound_atu(ep, bar, bar_phys, as_type); | ||
142 | if (ret) | ||
143 | return ret; | ||
144 | |||
145 | dw_pcie_writel_dbi2(pci, reg, size - 1); | ||
146 | dw_pcie_writel_dbi(pci, reg, flags); | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, | ||
152 | u32 *atu_index) | ||
153 | { | ||
154 | u32 index; | ||
155 | |||
156 | for (index = 0; index < ep->num_ob_windows; index++) { | ||
157 | if (ep->outbound_addr[index] != addr) | ||
158 | continue; | ||
159 | *atu_index = index; | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | return -EINVAL; | ||
164 | } | ||
165 | |||
166 | static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, phys_addr_t addr) | ||
167 | { | ||
168 | int ret; | ||
169 | u32 atu_index; | ||
170 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
171 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
172 | |||
173 | ret = dw_pcie_find_index(ep, addr, &atu_index); | ||
174 | if (ret < 0) | ||
175 | return; | ||
176 | |||
177 | dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); | ||
178 | clear_bit(atu_index, &ep->ob_window_map); | ||
179 | } | ||
180 | |||
181 | static int dw_pcie_ep_map_addr(struct pci_epc *epc, phys_addr_t addr, | ||
182 | u64 pci_addr, size_t size) | ||
183 | { | ||
184 | int ret; | ||
185 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
186 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
187 | |||
188 | ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size); | ||
189 | if (ret) { | ||
190 | dev_err(pci->dev, "failed to map address\n"); | ||
191 | return ret; | ||
192 | } | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int dw_pcie_ep_get_msi(struct pci_epc *epc) | ||
198 | { | ||
199 | int val; | ||
200 | u32 lower_addr; | ||
201 | u32 upper_addr; | ||
202 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
203 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
204 | |||
205 | val = dw_pcie_readb_dbi(pci, MSI_MESSAGE_CONTROL); | ||
206 | val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; | ||
207 | |||
208 | lower_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32); | ||
209 | upper_addr = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32); | ||
210 | |||
211 | if (!(lower_addr || upper_addr)) | ||
212 | return -EINVAL; | ||
213 | |||
214 | return val; | ||
215 | } | ||
216 | |||
217 | static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 encode_int) | ||
218 | { | ||
219 | int val; | ||
220 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
221 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
222 | |||
223 | val = (encode_int << MSI_CAP_MMC_SHIFT); | ||
224 | dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val); | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static int dw_pcie_ep_raise_irq(struct pci_epc *epc, | ||
230 | enum pci_epc_irq_type type, u8 interrupt_num) | ||
231 | { | ||
232 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
233 | |||
234 | if (!ep->ops->raise_irq) | ||
235 | return -EINVAL; | ||
236 | |||
237 | return ep->ops->raise_irq(ep, type, interrupt_num); | ||
238 | } | ||
239 | |||
240 | static void dw_pcie_ep_stop(struct pci_epc *epc) | ||
241 | { | ||
242 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
243 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
244 | |||
245 | if (!pci->ops->stop_link) | ||
246 | return; | ||
247 | |||
248 | pci->ops->stop_link(pci); | ||
249 | } | ||
250 | |||
251 | static int dw_pcie_ep_start(struct pci_epc *epc) | ||
252 | { | ||
253 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
254 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
255 | |||
256 | if (!pci->ops->start_link) | ||
257 | return -EINVAL; | ||
258 | |||
259 | return pci->ops->start_link(pci); | ||
260 | } | ||
261 | |||
262 | static const struct pci_epc_ops epc_ops = { | ||
263 | .write_header = dw_pcie_ep_write_header, | ||
264 | .set_bar = dw_pcie_ep_set_bar, | ||
265 | .clear_bar = dw_pcie_ep_clear_bar, | ||
266 | .map_addr = dw_pcie_ep_map_addr, | ||
267 | .unmap_addr = dw_pcie_ep_unmap_addr, | ||
268 | .set_msi = dw_pcie_ep_set_msi, | ||
269 | .get_msi = dw_pcie_ep_get_msi, | ||
270 | .raise_irq = dw_pcie_ep_raise_irq, | ||
271 | .start = dw_pcie_ep_start, | ||
272 | .stop = dw_pcie_ep_stop, | ||
273 | }; | ||
274 | |||
275 | void dw_pcie_ep_exit(struct dw_pcie_ep *ep) | ||
276 | { | ||
277 | struct pci_epc *epc = ep->epc; | ||
278 | |||
279 | pci_epc_mem_exit(epc); | ||
280 | } | ||
281 | |||
282 | int dw_pcie_ep_init(struct dw_pcie_ep *ep) | ||
283 | { | ||
284 | int ret; | ||
285 | void *addr; | ||
286 | enum pci_barno bar; | ||
287 | struct pci_epc *epc; | ||
288 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
289 | struct device *dev = pci->dev; | ||
290 | struct device_node *np = dev->of_node; | ||
291 | |||
292 | if (!pci->dbi_base || !pci->dbi_base2) { | ||
293 | dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); | ||
294 | return -EINVAL; | ||
295 | } | ||
296 | |||
297 | ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); | ||
298 | if (ret < 0) { | ||
299 | dev_err(dev, "unable to read *num-ib-windows* property\n"); | ||
300 | return ret; | ||
301 | } | ||
302 | |||
303 | ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); | ||
304 | if (ret < 0) { | ||
305 | dev_err(dev, "unable to read *num-ob-windows* property\n"); | ||
306 | return ret; | ||
307 | } | ||
308 | |||
309 | addr = devm_kzalloc(dev, sizeof(phys_addr_t) * ep->num_ob_windows, | ||
310 | GFP_KERNEL); | ||
311 | if (!addr) | ||
312 | return -ENOMEM; | ||
313 | ep->outbound_addr = addr; | ||
314 | |||
315 | for (bar = BAR_0; bar <= BAR_5; bar++) | ||
316 | dw_pcie_ep_reset_bar(pci, bar); | ||
317 | |||
318 | if (ep->ops->ep_init) | ||
319 | ep->ops->ep_init(ep); | ||
320 | |||
321 | epc = devm_pci_epc_create(dev, &epc_ops); | ||
322 | if (IS_ERR(epc)) { | ||
323 | dev_err(dev, "failed to create epc device\n"); | ||
324 | return PTR_ERR(epc); | ||
325 | } | ||
326 | |||
327 | ret = of_property_read_u8(np, "max-functions", &epc->max_functions); | ||
328 | if (ret < 0) | ||
329 | epc->max_functions = 1; | ||
330 | |||
331 | ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size); | ||
332 | if (ret < 0) { | ||
333 | dev_err(dev, "Failed to initialize address space\n"); | ||
334 | return ret; | ||
335 | } | ||
336 | |||
337 | ep->epc = epc; | ||
338 | epc_set_drvdata(epc, ep); | ||
339 | dw_pcie_setup(pci); | ||
340 | |||
341 | return 0; | ||
342 | } | ||
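
The endpoint core added above has no user in this section; dw_pcie_ep_init() expects the caller to have mapped dbi_base and dbi_base2 and to describe the outbound address space. A minimal sketch of platform glue (all my_* names are illustrative, not part of the patch):

static void my_pcie_ep_init(struct dw_pcie_ep *ep)
{
	/* platform-specific endpoint setup, e.g. unlocking DBI writes */
}

static int my_pcie_ep_raise_irq(struct dw_pcie_ep *ep,
				enum pci_epc_irq_type type, u8 interrupt_num)
{
	/* trigger a legacy or MSI interrupt towards the host */
	return -EINVAL;
}

static struct dw_pcie_ep_ops my_pcie_ep_ops = {
	.ep_init	= my_pcie_ep_init,
	.raise_irq	= my_pcie_ep_raise_irq,
};

static int my_pcie_add_ep(struct dw_pcie *pci, struct resource *addr_space)
{
	struct dw_pcie_ep *ep = &pci->ep;

	/* dbi_base and dbi_base2 must already be mapped by the caller */
	ep->ops = &my_pcie_ep_ops;
	ep->phys_base = addr_space->start;
	ep->addr_size = resource_size(addr_space);

	return dw_pcie_ep_init(ep);
}

dw_pcie_ep_init() then reads the num-ib-windows and num-ob-windows properties, resets the BARs, creates the pci_epc device and initializes its address space from phys_base/addr_size.
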
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 5ba334938b52..28ed32ba4f1b 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c | |||
@@ -56,24 +56,25 @@ static struct irq_chip dw_msi_irq_chip = { | |||
56 | /* MSI int handler */ | 56 | /* MSI int handler */ |
57 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) | 57 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) |
58 | { | 58 | { |
59 | unsigned long val; | 59 | u32 val; |
60 | int i, pos, irq; | 60 | int i, pos, irq; |
61 | irqreturn_t ret = IRQ_NONE; | 61 | irqreturn_t ret = IRQ_NONE; |
62 | 62 | ||
63 | for (i = 0; i < MAX_MSI_CTRLS; i++) { | 63 | for (i = 0; i < MAX_MSI_CTRLS; i++) { |
64 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, | 64 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, |
65 | (u32 *)&val); | 65 | &val); |
66 | if (val) { | 66 | if (!val) |
67 | ret = IRQ_HANDLED; | 67 | continue; |
68 | pos = 0; | 68 | |
69 | while ((pos = find_next_bit(&val, 32, pos)) != 32) { | 69 | ret = IRQ_HANDLED; |
70 | irq = irq_find_mapping(pp->irq_domain, | 70 | pos = 0; |
71 | i * 32 + pos); | 71 | while ((pos = find_next_bit((unsigned long *) &val, 32, |
72 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + | 72 | pos)) != 32) { |
73 | i * 12, 4, 1 << pos); | 73 | irq = irq_find_mapping(pp->irq_domain, i * 32 + pos); |
74 | generic_handle_irq(irq); | 74 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, |
75 | pos++; | 75 | 4, 1 << pos); |
76 | } | 76 | generic_handle_irq(irq); |
77 | pos++; | ||
77 | } | 78 | } |
78 | } | 79 | } |
79 | 80 | ||
@@ -338,8 +339,9 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
338 | } | 339 | } |
339 | 340 | ||
340 | if (!pci->dbi_base) { | 341 | if (!pci->dbi_base) { |
341 | pci->dbi_base = devm_ioremap(dev, pp->cfg->start, | 342 | pci->dbi_base = devm_pci_remap_cfgspace(dev, |
342 | resource_size(pp->cfg)); | 343 | pp->cfg->start, |
344 | resource_size(pp->cfg)); | ||
343 | if (!pci->dbi_base) { | 345 | if (!pci->dbi_base) { |
344 | dev_err(dev, "error with ioremap\n"); | 346 | dev_err(dev, "error with ioremap\n"); |
345 | ret = -ENOMEM; | 347 | ret = -ENOMEM; |
@@ -350,8 +352,8 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
350 | pp->mem_base = pp->mem->start; | 352 | pp->mem_base = pp->mem->start; |
351 | 353 | ||
352 | if (!pp->va_cfg0_base) { | 354 | if (!pp->va_cfg0_base) { |
353 | pp->va_cfg0_base = devm_ioremap(dev, pp->cfg0_base, | 355 | pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, |
354 | pp->cfg0_size); | 356 | pp->cfg0_base, pp->cfg0_size); |
355 | if (!pp->va_cfg0_base) { | 357 | if (!pp->va_cfg0_base) { |
356 | dev_err(dev, "error with ioremap in function\n"); | 358 | dev_err(dev, "error with ioremap in function\n"); |
357 | ret = -ENOMEM; | 359 | ret = -ENOMEM; |
@@ -360,7 +362,8 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
360 | } | 362 | } |
361 | 363 | ||
362 | if (!pp->va_cfg1_base) { | 364 | if (!pp->va_cfg1_base) { |
363 | pp->va_cfg1_base = devm_ioremap(dev, pp->cfg1_base, | 365 | pp->va_cfg1_base = devm_pci_remap_cfgspace(dev, |
366 | pp->cfg1_base, | ||
364 | pp->cfg1_size); | 367 | pp->cfg1_size); |
365 | if (!pp->va_cfg1_base) { | 368 | if (!pp->va_cfg1_base) { |
366 | dev_err(dev, "error with ioremap\n"); | 369 | dev_err(dev, "error with ioremap\n"); |
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index f20d494922ab..32091b32f6e1 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c | |||
@@ -133,6 +133,7 @@ static struct platform_driver dw_plat_pcie_driver = { | |||
133 | .driver = { | 133 | .driver = { |
134 | .name = "dw-pcie", | 134 | .name = "dw-pcie", |
135 | .of_match_table = dw_plat_pcie_of_match, | 135 | .of_match_table = dw_plat_pcie_of_match, |
136 | .suppress_bind_attrs = true, | ||
136 | }, | 137 | }, |
137 | .probe = dw_plat_pcie_probe, | 138 | .probe = dw_plat_pcie_probe, |
138 | }; | 139 | }; |
diff --git a/drivers/pci/dwc/pcie-designware.c b/drivers/pci/dwc/pcie-designware.c index 7e1fb7d6643c..0e03af279259 100644 --- a/drivers/pci/dwc/pcie-designware.c +++ b/drivers/pci/dwc/pcie-designware.c | |||
@@ -61,91 +61,253 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val) | |||
61 | return PCIBIOS_SUCCESSFUL; | 61 | return PCIBIOS_SUCCESSFUL; |
62 | } | 62 | } |
63 | 63 | ||
64 | u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) | 64 | u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, |
65 | size_t size) | ||
65 | { | 66 | { |
66 | if (pci->ops->readl_dbi) | 67 | int ret; |
67 | return pci->ops->readl_dbi(pci, reg); | 68 | u32 val; |
68 | 69 | ||
69 | return readl(pci->dbi_base + reg); | 70 | if (pci->ops->read_dbi) |
71 | return pci->ops->read_dbi(pci, base, reg, size); | ||
72 | |||
73 | ret = dw_pcie_read(base + reg, size, &val); | ||
74 | if (ret) | ||
75 | dev_err(pci->dev, "read DBI address failed\n"); | ||
76 | |||
77 | return val; | ||
70 | } | 78 | } |
71 | 79 | ||
72 | void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) | 80 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, |
81 | size_t size, u32 val) | ||
73 | { | 82 | { |
74 | if (pci->ops->writel_dbi) | 83 | int ret; |
75 | pci->ops->writel_dbi(pci, reg, val); | 84 | |
76 | else | 85 | if (pci->ops->write_dbi) { |
77 | writel(val, pci->dbi_base + reg); | 86 | pci->ops->write_dbi(pci, base, reg, size, val); |
87 | return; | ||
88 | } | ||
89 | |||
90 | ret = dw_pcie_write(base + reg, size, val); | ||
91 | if (ret) | ||
92 | dev_err(pci->dev, "write DBI address failed\n"); | ||
78 | } | 93 | } |
79 | 94 | ||
80 | static u32 dw_pcie_readl_unroll(struct dw_pcie *pci, u32 index, u32 reg) | 95 | static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) |
81 | { | 96 | { |
82 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); | 97 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); |
83 | 98 | ||
84 | return dw_pcie_readl_dbi(pci, offset + reg); | 99 | return dw_pcie_readl_dbi(pci, offset + reg); |
85 | } | 100 | } |
86 | 101 | ||
87 | static void dw_pcie_writel_unroll(struct dw_pcie *pci, u32 index, u32 reg, | 102 | static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, |
88 | u32 val) | 103 | u32 val) |
89 | { | 104 | { |
90 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); | 105 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); |
91 | 106 | ||
92 | dw_pcie_writel_dbi(pci, offset + reg, val); | 107 | dw_pcie_writel_dbi(pci, offset + reg, val); |
93 | } | 108 | } |
94 | 109 | ||
110 | void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, int type, | ||
111 | u64 cpu_addr, u64 pci_addr, u32 size) | ||
112 | { | ||
113 | u32 retries, val; | ||
114 | |||
115 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, | ||
116 | lower_32_bits(cpu_addr)); | ||
117 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, | ||
118 | upper_32_bits(cpu_addr)); | ||
119 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT, | ||
120 | lower_32_bits(cpu_addr + size - 1)); | ||
121 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, | ||
122 | lower_32_bits(pci_addr)); | ||
123 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, | ||
124 | upper_32_bits(pci_addr)); | ||
125 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, | ||
126 | type); | ||
127 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, | ||
128 | PCIE_ATU_ENABLE); | ||
129 | |||
130 | /* | ||
131 | * Make sure ATU enable takes effect before any subsequent config | ||
132 | * and I/O accesses. | ||
133 | */ | ||
134 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { | ||
135 | val = dw_pcie_readl_ob_unroll(pci, index, | ||
136 | PCIE_ATU_UNR_REGION_CTRL2); | ||
137 | if (val & PCIE_ATU_ENABLE) | ||
138 | return; | ||
139 | |||
140 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | ||
141 | } | ||
142 | dev_err(pci->dev, "outbound iATU is not being enabled\n"); | ||
143 | } | ||
144 | |||
95 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, | 145 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, |
96 | u64 cpu_addr, u64 pci_addr, u32 size) | 146 | u64 cpu_addr, u64 pci_addr, u32 size) |
97 | { | 147 | { |
98 | u32 retries, val; | 148 | u32 retries, val; |
99 | 149 | ||
150 | if (pci->ops->cpu_addr_fixup) | ||
151 | cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr); | ||
152 | |||
100 | if (pci->iatu_unroll_enabled) { | 153 | if (pci->iatu_unroll_enabled) { |
101 | dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, | 154 | dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr, |
102 | lower_32_bits(cpu_addr)); | 155 | pci_addr, size); |
103 | dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, | 156 | return; |
104 | upper_32_bits(cpu_addr)); | ||
105 | dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LIMIT, | ||
106 | lower_32_bits(cpu_addr + size - 1)); | ||
107 | dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, | ||
108 | lower_32_bits(pci_addr)); | ||
109 | dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, | ||
110 | upper_32_bits(pci_addr)); | ||
111 | dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, | ||
112 | type); | ||
113 | dw_pcie_writel_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, | ||
114 | PCIE_ATU_ENABLE); | ||
115 | } else { | ||
116 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, | ||
117 | PCIE_ATU_REGION_OUTBOUND | index); | ||
118 | dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, | ||
119 | lower_32_bits(cpu_addr)); | ||
120 | dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, | ||
121 | upper_32_bits(cpu_addr)); | ||
122 | dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, | ||
123 | lower_32_bits(cpu_addr + size - 1)); | ||
124 | dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, | ||
125 | lower_32_bits(pci_addr)); | ||
126 | dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, | ||
127 | upper_32_bits(pci_addr)); | ||
128 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); | ||
129 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); | ||
130 | } | 157 | } |
131 | 158 | ||
159 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, | ||
160 | PCIE_ATU_REGION_OUTBOUND | index); | ||
161 | dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, | ||
162 | lower_32_bits(cpu_addr)); | ||
163 | dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, | ||
164 | upper_32_bits(cpu_addr)); | ||
165 | dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, | ||
166 | lower_32_bits(cpu_addr + size - 1)); | ||
167 | dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, | ||
168 | lower_32_bits(pci_addr)); | ||
169 | dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, | ||
170 | upper_32_bits(pci_addr)); | ||
171 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); | ||
172 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); | ||
173 | |||
132 | /* | 174 | /* |
133 | * Make sure ATU enable takes effect before any subsequent config | 175 | * Make sure ATU enable takes effect before any subsequent config |
134 | * and I/O accesses. | 176 | * and I/O accesses. |
135 | */ | 177 | */ |
136 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { | 178 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { |
137 | if (pci->iatu_unroll_enabled) | 179 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); |
138 | val = dw_pcie_readl_unroll(pci, index, | ||
139 | PCIE_ATU_UNR_REGION_CTRL2); | ||
140 | else | ||
141 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); | ||
142 | |||
143 | if (val == PCIE_ATU_ENABLE) | 180 | if (val == PCIE_ATU_ENABLE) |
144 | return; | 181 | return; |
145 | 182 | ||
146 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 183 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); |
147 | } | 184 | } |
148 | dev_err(pci->dev, "iATU is not being enabled\n"); | 185 | dev_err(pci->dev, "outbound iATU is not being enabled\n"); |
186 | } | ||
187 | |||
188 | static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) | ||
189 | { | ||
190 | u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); | ||
191 | |||
192 | return dw_pcie_readl_dbi(pci, offset + reg); | ||
193 | } | ||
194 | |||
195 | static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, | ||
196 | u32 val) | ||
197 | { | ||
198 | u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); | ||
199 | |||
200 | dw_pcie_writel_dbi(pci, offset + reg, val); | ||
201 | } | ||
202 | |||
203 | int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, int bar, | ||
204 | u64 cpu_addr, enum dw_pcie_as_type as_type) | ||
205 | { | ||
206 | int type; | ||
207 | u32 retries, val; | ||
208 | |||
209 | dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, | ||
210 | lower_32_bits(cpu_addr)); | ||
211 | dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, | ||
212 | upper_32_bits(cpu_addr)); | ||
213 | |||
214 | switch (as_type) { | ||
215 | case DW_PCIE_AS_MEM: | ||
216 | type = PCIE_ATU_TYPE_MEM; | ||
217 | break; | ||
218 | case DW_PCIE_AS_IO: | ||
219 | type = PCIE_ATU_TYPE_IO; | ||
220 | break; | ||
221 | default: | ||
222 | return -EINVAL; | ||
223 | } | ||
224 | |||
225 | dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type); | ||
226 | dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, | ||
227 | PCIE_ATU_ENABLE | | ||
228 | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); | ||
229 | |||
230 | /* | ||
231 | * Make sure ATU enable takes effect before any subsequent config | ||
232 | * and I/O accesses. | ||
233 | */ | ||
234 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { | ||
235 | val = dw_pcie_readl_ib_unroll(pci, index, | ||
236 | PCIE_ATU_UNR_REGION_CTRL2); | ||
237 | if (val & PCIE_ATU_ENABLE) | ||
238 | return 0; | ||
239 | |||
240 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | ||
241 | } | ||
242 | dev_err(pci->dev, "inbound iATU is not being enabled\n"); | ||
243 | |||
244 | return -EBUSY; | ||
245 | } | ||
246 | |||
247 | int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, | ||
248 | u64 cpu_addr, enum dw_pcie_as_type as_type) | ||
249 | { | ||
250 | int type; | ||
251 | u32 retries, val; | ||
252 | |||
253 | if (pci->iatu_unroll_enabled) | ||
254 | return dw_pcie_prog_inbound_atu_unroll(pci, index, bar, | ||
255 | cpu_addr, as_type); | ||
256 | |||
257 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | | ||
258 | index); | ||
259 | dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); | ||
260 | dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); | ||
261 | |||
262 | switch (as_type) { | ||
263 | case DW_PCIE_AS_MEM: | ||
264 | type = PCIE_ATU_TYPE_MEM; | ||
265 | break; | ||
266 | case DW_PCIE_AS_IO: | ||
267 | type = PCIE_ATU_TYPE_IO; | ||
268 | break; | ||
269 | default: | ||
270 | return -EINVAL; | ||
271 | } | ||
272 | |||
273 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); | ||
274 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE | ||
275 | | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); | ||
276 | |||
277 | /* | ||
278 | * Make sure ATU enable takes effect before any subsequent config | ||
279 | * and I/O accesses. | ||
280 | */ | ||
281 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { | ||
282 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); | ||
283 | if (val & PCIE_ATU_ENABLE) | ||
284 | return 0; | ||
285 | |||
286 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | ||
287 | } | ||
288 | dev_err(pci->dev, "inbound iATU is not being enabled\n"); | ||
289 | |||
290 | return -EBUSY; | ||
291 | } | ||
292 | |||
293 | void dw_pcie_disable_atu(struct dw_pcie *pci, int index, | ||
294 | enum dw_pcie_region_type type) | ||
295 | { | ||
296 | int region; | ||
297 | |||
298 | switch (type) { | ||
299 | case DW_PCIE_REGION_INBOUND: | ||
300 | region = PCIE_ATU_REGION_INBOUND; | ||
301 | break; | ||
302 | case DW_PCIE_REGION_OUTBOUND: | ||
303 | region = PCIE_ATU_REGION_OUTBOUND; | ||
304 | break; | ||
305 | default: | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); | ||
310 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); | ||
149 | } | 311 | } |
150 | 312 | ||
151 | int dw_pcie_wait_for_link(struct dw_pcie *pci) | 313 | int dw_pcie_wait_for_link(struct dw_pcie *pci) |
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h index cd3b8713fe50..c6a840575796 100644 --- a/drivers/pci/dwc/pcie-designware.h +++ b/drivers/pci/dwc/pcie-designware.h | |||
@@ -18,6 +18,9 @@ | |||
18 | #include <linux/msi.h> | 18 | #include <linux/msi.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | 20 | ||
21 | #include <linux/pci-epc.h> | ||
22 | #include <linux/pci-epf.h> | ||
23 | |||
21 | /* Parameters for the waiting for link up routine */ | 24 | /* Parameters for the waiting for link up routine */ |
22 | #define LINK_WAIT_MAX_RETRIES 10 | 25 | #define LINK_WAIT_MAX_RETRIES 10 |
23 | #define LINK_WAIT_USLEEP_MIN 90000 | 26 | #define LINK_WAIT_USLEEP_MIN 90000 |
@@ -89,6 +92,16 @@ | |||
89 | #define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ | 92 | #define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ |
90 | ((0x3 << 20) | ((region) << 9)) | 93 | ((0x3 << 20) | ((region) << 9)) |
91 | 94 | ||
95 | #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ | ||
96 | ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) | ||
97 | |||
98 | #define MSI_MESSAGE_CONTROL 0x52 | ||
99 | #define MSI_CAP_MMC_SHIFT 1 | ||
100 | #define MSI_CAP_MME_SHIFT 4 | ||
101 | #define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) | ||
102 | #define MSI_MESSAGE_ADDR_L32 0x54 | ||
103 | #define MSI_MESSAGE_ADDR_U32 0x58 | ||
104 | |||
92 | /* | 105 | /* |
93 | * Maximum number of MSI IRQs can be 256 per controller. But keep | 106 | * Maximum number of MSI IRQs can be 256 per controller. But keep |
94 | * it 32 as of now. Probably we will never need more than 32. If needed, | 107 | * it 32 as of now. Probably we will never need more than 32. If needed, |
@@ -99,6 +112,20 @@ | |||
99 | 112 | ||
100 | struct pcie_port; | 113 | struct pcie_port; |
101 | struct dw_pcie; | 114 | struct dw_pcie; |
115 | struct dw_pcie_ep; | ||
116 | |||
117 | enum dw_pcie_region_type { | ||
118 | DW_PCIE_REGION_UNKNOWN, | ||
119 | DW_PCIE_REGION_INBOUND, | ||
120 | DW_PCIE_REGION_OUTBOUND, | ||
121 | }; | ||
122 | |||
123 | enum dw_pcie_device_mode { | ||
124 | DW_PCIE_UNKNOWN_TYPE, | ||
125 | DW_PCIE_EP_TYPE, | ||
126 | DW_PCIE_LEG_EP_TYPE, | ||
127 | DW_PCIE_RC_TYPE, | ||
128 | }; | ||
102 | 129 | ||
103 | struct dw_pcie_host_ops { | 130 | struct dw_pcie_host_ops { |
104 | int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); | 131 | int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); |
@@ -142,35 +169,116 @@ struct pcie_port { | |||
142 | DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); | 169 | DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); |
143 | }; | 170 | }; |
144 | 171 | ||
172 | enum dw_pcie_as_type { | ||
173 | DW_PCIE_AS_UNKNOWN, | ||
174 | DW_PCIE_AS_MEM, | ||
175 | DW_PCIE_AS_IO, | ||
176 | }; | ||
177 | |||
178 | struct dw_pcie_ep_ops { | ||
179 | void (*ep_init)(struct dw_pcie_ep *ep); | ||
180 | int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type, | ||
181 | u8 interrupt_num); | ||
182 | }; | ||
183 | |||
184 | struct dw_pcie_ep { | ||
185 | struct pci_epc *epc; | ||
186 | struct dw_pcie_ep_ops *ops; | ||
187 | phys_addr_t phys_base; | ||
188 | size_t addr_size; | ||
189 | u8 bar_to_atu[6]; | ||
190 | phys_addr_t *outbound_addr; | ||
191 | unsigned long ib_window_map; | ||
192 | unsigned long ob_window_map; | ||
193 | u32 num_ib_windows; | ||
194 | u32 num_ob_windows; | ||
195 | }; | ||
196 | |||
145 | struct dw_pcie_ops { | 197 | struct dw_pcie_ops { |
146 | u32 (*readl_dbi)(struct dw_pcie *pcie, u32 reg); | 198 | u64 (*cpu_addr_fixup)(u64 cpu_addr); |
147 | void (*writel_dbi)(struct dw_pcie *pcie, u32 reg, u32 val); | 199 | u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, |
200 | size_t size); | ||
201 | void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | ||
202 | size_t size, u32 val); | ||
148 | int (*link_up)(struct dw_pcie *pcie); | 203 | int (*link_up)(struct dw_pcie *pcie); |
204 | int (*start_link)(struct dw_pcie *pcie); | ||
205 | void (*stop_link)(struct dw_pcie *pcie); | ||
149 | }; | 206 | }; |
150 | 207 | ||
151 | struct dw_pcie { | 208 | struct dw_pcie { |
152 | struct device *dev; | 209 | struct device *dev; |
153 | void __iomem *dbi_base; | 210 | void __iomem *dbi_base; |
211 | void __iomem *dbi_base2; | ||
154 | u32 num_viewport; | 212 | u32 num_viewport; |
155 | u8 iatu_unroll_enabled; | 213 | u8 iatu_unroll_enabled; |
156 | struct pcie_port pp; | 214 | struct pcie_port pp; |
215 | struct dw_pcie_ep ep; | ||
157 | const struct dw_pcie_ops *ops; | 216 | const struct dw_pcie_ops *ops; |
158 | }; | 217 | }; |
159 | 218 | ||
160 | #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) | 219 | #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) |
161 | 220 | ||
221 | #define to_dw_pcie_from_ep(endpoint) \ | ||
222 | container_of((endpoint), struct dw_pcie, ep) | ||
223 | |||
162 | int dw_pcie_read(void __iomem *addr, int size, u32 *val); | 224 | int dw_pcie_read(void __iomem *addr, int size, u32 *val); |
163 | int dw_pcie_write(void __iomem *addr, int size, u32 val); | 225 | int dw_pcie_write(void __iomem *addr, int size, u32 val); |
164 | 226 | ||
165 | u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg); | 227 | u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, |
166 | void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val); | 228 | size_t size); |
229 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
230 | size_t size, u32 val); | ||
167 | int dw_pcie_link_up(struct dw_pcie *pci); | 231 | int dw_pcie_link_up(struct dw_pcie *pci); |
168 | int dw_pcie_wait_for_link(struct dw_pcie *pci); | 232 | int dw_pcie_wait_for_link(struct dw_pcie *pci); |
169 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, | 233 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, |
170 | int type, u64 cpu_addr, u64 pci_addr, | 234 | int type, u64 cpu_addr, u64 pci_addr, |
171 | u32 size); | 235 | u32 size); |
236 | int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, | ||
237 | u64 cpu_addr, enum dw_pcie_as_type as_type); | ||
238 | void dw_pcie_disable_atu(struct dw_pcie *pci, int index, | ||
239 | enum dw_pcie_region_type type); | ||
172 | void dw_pcie_setup(struct dw_pcie *pci); | 240 | void dw_pcie_setup(struct dw_pcie *pci); |
173 | 241 | ||
242 | static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) | ||
243 | { | ||
244 | __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val); | ||
245 | } | ||
246 | |||
247 | static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) | ||
248 | { | ||
249 | return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4); | ||
250 | } | ||
251 | |||
252 | static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val) | ||
253 | { | ||
254 | __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val); | ||
255 | } | ||
256 | |||
257 | static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg) | ||
258 | { | ||
259 | return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2); | ||
260 | } | ||
261 | |||
262 | static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val) | ||
263 | { | ||
264 | __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val); | ||
265 | } | ||
266 | |||
267 | static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) | ||
268 | { | ||
269 | return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1); | ||
270 | } | ||
271 | |||
272 | static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) | ||
273 | { | ||
274 | __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); | ||
275 | } | ||
276 | |||
277 | static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) | ||
278 | { | ||
279 | return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); | ||
280 | } | ||
281 | |||
174 | #ifdef CONFIG_PCIE_DW_HOST | 282 | #ifdef CONFIG_PCIE_DW_HOST |
175 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); | 283 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); |
176 | void dw_pcie_msi_init(struct pcie_port *pp); | 284 | void dw_pcie_msi_init(struct pcie_port *pp); |
@@ -195,4 +303,23 @@ static inline int dw_pcie_host_init(struct pcie_port *pp) | |||
195 | return 0; | 303 | return 0; |
196 | } | 304 | } |
197 | #endif | 305 | #endif |
306 | |||
307 | #ifdef CONFIG_PCIE_DW_EP | ||
308 | void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); | ||
309 | int dw_pcie_ep_init(struct dw_pcie_ep *ep); | ||
310 | void dw_pcie_ep_exit(struct dw_pcie_ep *ep); | ||
311 | #else | ||
312 | static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) | ||
313 | { | ||
314 | } | ||
315 | |||
316 | static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) | ||
317 | { | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) | ||
322 | { | ||
323 | } | ||
324 | #endif | ||
198 | #endif /* _PCIE_DESIGNWARE_H */ | 325 | #endif /* _PCIE_DESIGNWARE_H */ |
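The sized DBI helpers added above (dw_pcie_readl/readw/readb_dbi(), their write counterparts, and the dbi_base2 variants) all funnel through __dw_pcie_read_dbi()/__dw_pcie_write_dbi() with an explicit access width. A minimal sketch of how a caller might use the 16-bit accessors follows; the function name is hypothetical, and only the helpers declared above plus the PCI_COMMAND* constants from <linux/pci_regs.h> are assumed.

    /* Illustrative only: 16-bit read-modify-write of a config header
     * register in the local DBI space, using the helpers added above. */
    #include <linux/pci_regs.h>
    #include "pcie-designware.h"

    static void example_enable_mem_space(struct dw_pcie *pci)
    {
            u16 cmd;

            cmd = dw_pcie_readw_dbi(pci, PCI_COMMAND);
            cmd |= PCI_COMMAND_MEMORY;
            dw_pcie_writew_dbi(pci, PCI_COMMAND, cmd);
    }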
diff --git a/drivers/pci/dwc/pcie-hisi.c b/drivers/pci/dwc/pcie-hisi.c index cf9d6a9d9fd4..e51acee0ddf3 100644 --- a/drivers/pci/dwc/pcie-hisi.c +++ b/drivers/pci/dwc/pcie-hisi.c | |||
@@ -99,7 +99,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg) | |||
99 | return -ENOMEM; | 99 | return -ENOMEM; |
100 | } | 100 | } |
101 | 101 | ||
102 | reg_base = devm_ioremap(dev, res->start, resource_size(res)); | 102 | reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); |
103 | if (!reg_base) | 103 | if (!reg_base) |
104 | return -ENOMEM; | 104 | return -ENOMEM; |
105 | 105 | ||
@@ -296,10 +296,9 @@ static int hisi_pcie_probe(struct platform_device *pdev) | |||
296 | } | 296 | } |
297 | 297 | ||
298 | reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); | 298 | reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); |
299 | pci->dbi_base = devm_ioremap_resource(dev, reg); | 299 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); |
300 | if (IS_ERR(pci->dbi_base)) | 300 | if (IS_ERR(pci->dbi_base)) |
301 | return PTR_ERR(pci->dbi_base); | 301 | return PTR_ERR(pci->dbi_base); |
302 | |||
303 | platform_set_drvdata(pdev, hisi_pcie); | 302 | platform_set_drvdata(pdev, hisi_pcie); |
304 | 303 | ||
305 | ret = hisi_add_pcie_port(hisi_pcie, pdev); | 304 | ret = hisi_add_pcie_port(hisi_pcie, pdev); |
@@ -334,6 +333,7 @@ static struct platform_driver hisi_pcie_driver = { | |||
334 | .driver = { | 333 | .driver = { |
335 | .name = "hisi-pcie", | 334 | .name = "hisi-pcie", |
336 | .of_match_table = hisi_pcie_of_match, | 335 | .of_match_table = hisi_pcie_of_match, |
336 | .suppress_bind_attrs = true, | ||
337 | }, | 337 | }, |
338 | }; | 338 | }; |
339 | builtin_platform_driver(hisi_pcie_driver); | 339 | builtin_platform_driver(hisi_pcie_driver); |
@@ -360,7 +360,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg) | |||
360 | return -EINVAL; | 360 | return -EINVAL; |
361 | } | 361 | } |
362 | 362 | ||
363 | reg_base = devm_ioremap(dev, res->start, resource_size(res)); | 363 | reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); |
364 | if (!reg_base) | 364 | if (!reg_base) |
365 | return -ENOMEM; | 365 | return -ENOMEM; |
366 | 366 | ||
@@ -395,6 +395,7 @@ static struct platform_driver hisi_pcie_almost_ecam_driver = { | |||
395 | .driver = { | 395 | .driver = { |
396 | .name = "hisi-pcie-almost-ecam", | 396 | .name = "hisi-pcie-almost-ecam", |
397 | .of_match_table = hisi_pcie_almost_ecam_of_match, | 397 | .of_match_table = hisi_pcie_almost_ecam_of_match, |
398 | .suppress_bind_attrs = true, | ||
398 | }, | 399 | }, |
399 | }; | 400 | }; |
400 | builtin_platform_driver(hisi_pcie_almost_ecam_driver); | 401 | builtin_platform_driver(hisi_pcie_almost_ecam_driver); |
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index 67eb7f5926dd..5bf23d432fdb 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c | |||
@@ -700,7 +700,7 @@ static int qcom_pcie_probe(struct platform_device *pdev) | |||
700 | return PTR_ERR(pcie->parf); | 700 | return PTR_ERR(pcie->parf); |
701 | 701 | ||
702 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | 702 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); |
703 | pci->dbi_base = devm_ioremap_resource(dev, res); | 703 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); |
704 | if (IS_ERR(pci->dbi_base)) | 704 | if (IS_ERR(pci->dbi_base)) |
705 | return PTR_ERR(pci->dbi_base); | 705 | return PTR_ERR(pci->dbi_base); |
706 | 706 | ||
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index eaa4ea8e2ea4..8ff36b3dbbdf 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c | |||
@@ -273,7 +273,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev) | |||
273 | } | 273 | } |
274 | 274 | ||
275 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | 275 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); |
276 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); | 276 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); |
277 | if (IS_ERR(pci->dbi_base)) { | 277 | if (IS_ERR(pci->dbi_base)) { |
278 | dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); | 278 | dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); |
279 | ret = PTR_ERR(pci->dbi_base); | 279 | ret = PTR_ERR(pci->dbi_base); |
@@ -308,6 +308,7 @@ static struct platform_driver spear13xx_pcie_driver = { | |||
308 | .driver = { | 308 | .driver = { |
309 | .name = "spear-pcie", | 309 | .name = "spear-pcie", |
310 | .of_match_table = of_match_ptr(spear13xx_pcie_of_match), | 310 | .of_match_table = of_match_ptr(spear13xx_pcie_of_match), |
311 | .suppress_bind_attrs = true, | ||
311 | }, | 312 | }, |
312 | }; | 313 | }; |
313 | 314 | ||
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c index 2fee61bb6559..c228a2eb7faa 100644 --- a/drivers/pci/ecam.c +++ b/drivers/pci/ecam.c | |||
@@ -84,12 +84,14 @@ struct pci_config_window *pci_ecam_create(struct device *dev, | |||
84 | if (!cfg->winp) | 84 | if (!cfg->winp) |
85 | goto err_exit_malloc; | 85 | goto err_exit_malloc; |
86 | for (i = 0; i < bus_range; i++) { | 86 | for (i = 0; i < bus_range; i++) { |
87 | cfg->winp[i] = ioremap(cfgres->start + i * bsz, bsz); | 87 | cfg->winp[i] = |
88 | pci_remap_cfgspace(cfgres->start + i * bsz, | ||
89 | bsz); | ||
88 | if (!cfg->winp[i]) | 90 | if (!cfg->winp[i]) |
89 | goto err_exit_iomap; | 91 | goto err_exit_iomap; |
90 | } | 92 | } |
91 | } else { | 93 | } else { |
92 | cfg->win = ioremap(cfgres->start, bus_range * bsz); | 94 | cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz); |
93 | if (!cfg->win) | 95 | if (!cfg->win) |
94 | goto err_exit_iomap; | 96 | goto err_exit_iomap; |
95 | } | 97 | } |
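The conversions above replace ioremap()/devm_ioremap_resource() with pci_remap_cfgspace(), devm_pci_remap_cfgspace() and devm_pci_remap_cfg_resource(), so that configuration space is mapped with memory attributes suitable for non-posted accesses. A sketch of the resulting probe-time pattern follows; the driver and the "dbi" resource name are purely illustrative, mirroring the host-driver hunks above.

    #include <linux/pci.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct resource *res;
            void __iomem *dbi;

            res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
            /* Config-space mapping helper instead of devm_ioremap_resource(). */
            dbi = devm_pci_remap_cfg_resource(dev, res);
            if (IS_ERR(dbi))
                    return PTR_ERR(dbi);

            return 0;
    }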
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig new file mode 100644 index 000000000000..c23f146fb5a6 --- /dev/null +++ b/drivers/pci/endpoint/Kconfig | |||
@@ -0,0 +1,31 @@ | |||
1 | # | ||
2 | # PCI Endpoint Support | ||
3 | # | ||
4 | |||
5 | menu "PCI Endpoint" | ||
6 | |||
7 | config PCI_ENDPOINT | ||
8 | bool "PCI Endpoint Support" | ||
9 | help | ||
10 | Enable this configuration option to support configurable PCI | ||
11 | endpoints. This should be enabled if the platform has a PCI | ||
12 | controller that can operate in endpoint mode. | ||
13 | |||
14 | Enabling this option will build the endpoint library, which | ||
15 | includes endpoint controller library and endpoint function | ||
16 | library. | ||
17 | |||
18 | If in doubt, say "N" to disable Endpoint support. | ||
19 | |||
20 | config PCI_ENDPOINT_CONFIGFS | ||
21 | bool "PCI Endpoint Configfs Support" | ||
22 | depends on PCI_ENDPOINT | ||
23 | select CONFIGFS_FS | ||
24 | help | ||
25 | This will enable the configfs entry that can be used to | ||
26 | configure the endpoint function and to bind the | ||
27 | function to an endpoint controller. | ||
28 | |||
29 | source "drivers/pci/endpoint/functions/Kconfig" | ||
30 | |||
31 | endmenu | ||
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile new file mode 100644 index 000000000000..1041f80a4645 --- /dev/null +++ b/drivers/pci/endpoint/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | # | ||
2 | # Makefile for PCI Endpoint Support | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o | ||
6 | obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\ | ||
7 | pci-epc-mem.o functions/ | ||
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig new file mode 100644 index 000000000000..175edad42d2f --- /dev/null +++ b/drivers/pci/endpoint/functions/Kconfig | |||
@@ -0,0 +1,12 @@ | |||
1 | # | ||
2 | # PCI Endpoint Functions | ||
3 | # | ||
4 | |||
5 | config PCI_EPF_TEST | ||
6 | tristate "PCI Endpoint Test driver" | ||
7 | depends on PCI_ENDPOINT | ||
8 | help | ||
9 | Enable this configuration option to build the test driver | ||
10 | for the PCI endpoint framework. | ||
11 | |||
12 | If in doubt, say "N" to disable Endpoint test driver. | ||
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile new file mode 100644 index 000000000000..6d94a4801838 --- /dev/null +++ b/drivers/pci/endpoint/functions/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | # | ||
2 | # Makefile for PCI Endpoint Functions | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o | ||
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c new file mode 100644 index 000000000000..53fff8030337 --- /dev/null +++ b/drivers/pci/endpoint/functions/pci-epf-test.c | |||
@@ -0,0 +1,510 @@ | |||
1 | /** | ||
2 | * Test driver to test endpoint functionality | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/crc32.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/pci_ids.h> | ||
26 | #include <linux/random.h> | ||
27 | |||
28 | #include <linux/pci-epc.h> | ||
29 | #include <linux/pci-epf.h> | ||
30 | #include <linux/pci_regs.h> | ||
31 | |||
32 | #define COMMAND_RAISE_LEGACY_IRQ BIT(0) | ||
33 | #define COMMAND_RAISE_MSI_IRQ BIT(1) | ||
34 | #define MSI_NUMBER_SHIFT 2 | ||
35 | #define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT) | ||
36 | #define COMMAND_READ BIT(8) | ||
37 | #define COMMAND_WRITE BIT(9) | ||
38 | #define COMMAND_COPY BIT(10) | ||
39 | |||
40 | #define STATUS_READ_SUCCESS BIT(0) | ||
41 | #define STATUS_READ_FAIL BIT(1) | ||
42 | #define STATUS_WRITE_SUCCESS BIT(2) | ||
43 | #define STATUS_WRITE_FAIL BIT(3) | ||
44 | #define STATUS_COPY_SUCCESS BIT(4) | ||
45 | #define STATUS_COPY_FAIL BIT(5) | ||
46 | #define STATUS_IRQ_RAISED BIT(6) | ||
47 | #define STATUS_SRC_ADDR_INVALID BIT(7) | ||
48 | #define STATUS_DST_ADDR_INVALID BIT(8) | ||
49 | |||
50 | #define TIMER_RESOLUTION 1 | ||
51 | |||
52 | static struct workqueue_struct *kpcitest_workqueue; | ||
53 | |||
54 | struct pci_epf_test { | ||
55 | void *reg[6]; | ||
56 | struct pci_epf *epf; | ||
57 | struct delayed_work cmd_handler; | ||
58 | }; | ||
59 | |||
60 | struct pci_epf_test_reg { | ||
61 | u32 magic; | ||
62 | u32 command; | ||
63 | u32 status; | ||
64 | u64 src_addr; | ||
65 | u64 dst_addr; | ||
66 | u32 size; | ||
67 | u32 checksum; | ||
68 | } __packed; | ||
69 | |||
70 | static struct pci_epf_header test_header = { | ||
71 | .vendorid = PCI_ANY_ID, | ||
72 | .deviceid = PCI_ANY_ID, | ||
73 | .baseclass_code = PCI_CLASS_OTHERS, | ||
74 | .interrupt_pin = PCI_INTERRUPT_INTA, | ||
75 | }; | ||
76 | |||
77 | static int bar_size[] = { 512, 1024, 16384, 131072, 1048576 }; | ||
78 | |||
79 | static int pci_epf_test_copy(struct pci_epf_test *epf_test) | ||
80 | { | ||
81 | int ret; | ||
82 | void __iomem *src_addr; | ||
83 | void __iomem *dst_addr; | ||
84 | phys_addr_t src_phys_addr; | ||
85 | phys_addr_t dst_phys_addr; | ||
86 | struct pci_epf *epf = epf_test->epf; | ||
87 | struct device *dev = &epf->dev; | ||
88 | struct pci_epc *epc = epf->epc; | ||
89 | struct pci_epf_test_reg *reg = epf_test->reg[0]; | ||
90 | |||
91 | src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); | ||
92 | if (!src_addr) { | ||
93 | dev_err(dev, "failed to allocate source address\n"); | ||
94 | reg->status = STATUS_SRC_ADDR_INVALID; | ||
95 | ret = -ENOMEM; | ||
96 | goto err; | ||
97 | } | ||
98 | |||
99 | ret = pci_epc_map_addr(epc, src_phys_addr, reg->src_addr, reg->size); | ||
100 | if (ret) { | ||
101 | dev_err(dev, "failed to map source address\n"); | ||
102 | reg->status = STATUS_SRC_ADDR_INVALID; | ||
103 | goto err_src_addr; | ||
104 | } | ||
105 | |||
106 | dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); | ||
107 | if (!dst_addr) { | ||
108 | dev_err(dev, "failed to allocate destination address\n"); | ||
109 | reg->status = STATUS_DST_ADDR_INVALID; | ||
110 | ret = -ENOMEM; | ||
111 | goto err_src_map_addr; | ||
112 | } | ||
113 | |||
114 | ret = pci_epc_map_addr(epc, dst_phys_addr, reg->dst_addr, reg->size); | ||
115 | if (ret) { | ||
116 | dev_err(dev, "failed to map destination address\n"); | ||
117 | reg->status = STATUS_DST_ADDR_INVALID; | ||
118 | goto err_dst_addr; | ||
119 | } | ||
120 | |||
121 | memcpy(dst_addr, src_addr, reg->size); | ||
122 | |||
123 | pci_epc_unmap_addr(epc, dst_phys_addr); | ||
124 | |||
125 | err_dst_addr: | ||
126 | pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); | ||
127 | |||
128 | err_src_map_addr: | ||
129 | pci_epc_unmap_addr(epc, src_phys_addr); | ||
130 | |||
131 | err_src_addr: | ||
132 | pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); | ||
133 | |||
134 | err: | ||
135 | return ret; | ||
136 | } | ||
137 | |||
138 | static int pci_epf_test_read(struct pci_epf_test *epf_test) | ||
139 | { | ||
140 | int ret; | ||
141 | void __iomem *src_addr; | ||
142 | void *buf; | ||
143 | u32 crc32; | ||
144 | phys_addr_t phys_addr; | ||
145 | struct pci_epf *epf = epf_test->epf; | ||
146 | struct device *dev = &epf->dev; | ||
147 | struct pci_epc *epc = epf->epc; | ||
148 | struct pci_epf_test_reg *reg = epf_test->reg[0]; | ||
149 | |||
150 | src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); | ||
151 | if (!src_addr) { | ||
152 | dev_err(dev, "failed to allocate address\n"); | ||
153 | reg->status = STATUS_SRC_ADDR_INVALID; | ||
154 | ret = -ENOMEM; | ||
155 | goto err; | ||
156 | } | ||
157 | |||
158 | ret = pci_epc_map_addr(epc, phys_addr, reg->src_addr, reg->size); | ||
159 | if (ret) { | ||
160 | dev_err(dev, "failed to map address\n"); | ||
161 | reg->status = STATUS_SRC_ADDR_INVALID; | ||
162 | goto err_addr; | ||
163 | } | ||
164 | |||
165 | buf = kzalloc(reg->size, GFP_KERNEL); | ||
166 | if (!buf) { | ||
167 | ret = -ENOMEM; | ||
168 | goto err_map_addr; | ||
169 | } | ||
170 | |||
171 | memcpy(buf, src_addr, reg->size); | ||
172 | |||
173 | crc32 = crc32_le(~0, buf, reg->size); | ||
174 | if (crc32 != reg->checksum) | ||
175 | ret = -EIO; | ||
176 | |||
177 | kfree(buf); | ||
178 | |||
179 | err_map_addr: | ||
180 | pci_epc_unmap_addr(epc, phys_addr); | ||
181 | |||
182 | err_addr: | ||
183 | pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); | ||
184 | |||
185 | err: | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | static int pci_epf_test_write(struct pci_epf_test *epf_test) | ||
190 | { | ||
191 | int ret; | ||
192 | void __iomem *dst_addr; | ||
193 | void *buf; | ||
194 | phys_addr_t phys_addr; | ||
195 | struct pci_epf *epf = epf_test->epf; | ||
196 | struct device *dev = &epf->dev; | ||
197 | struct pci_epc *epc = epf->epc; | ||
198 | struct pci_epf_test_reg *reg = epf_test->reg[0]; | ||
199 | |||
200 | dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); | ||
201 | if (!dst_addr) { | ||
202 | dev_err(dev, "failed to allocate address\n"); | ||
203 | reg->status = STATUS_DST_ADDR_INVALID; | ||
204 | ret = -ENOMEM; | ||
205 | goto err; | ||
206 | } | ||
207 | |||
208 | ret = pci_epc_map_addr(epc, phys_addr, reg->dst_addr, reg->size); | ||
209 | if (ret) { | ||
210 | dev_err(dev, "failed to map address\n"); | ||
211 | reg->status = STATUS_DST_ADDR_INVALID; | ||
212 | goto err_addr; | ||
213 | } | ||
214 | |||
215 | buf = kzalloc(reg->size, GFP_KERNEL); | ||
216 | if (!buf) { | ||
217 | ret = -ENOMEM; | ||
218 | goto err_map_addr; | ||
219 | } | ||
220 | |||
221 | get_random_bytes(buf, reg->size); | ||
222 | reg->checksum = crc32_le(~0, buf, reg->size); | ||
223 | |||
224 | memcpy(dst_addr, buf, reg->size); | ||
225 | |||
226 | /* | ||
227 | * Wait 1ms for the write to complete. Without this delay, an L3 | ||
228 | * error is observed in the host system. | ||
229 | */ | ||
230 | mdelay(1); | ||
231 | |||
232 | kfree(buf); | ||
233 | |||
234 | err_map_addr: | ||
235 | pci_epc_unmap_addr(epc, phys_addr); | ||
236 | |||
237 | err_addr: | ||
238 | pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); | ||
239 | |||
240 | err: | ||
241 | return ret; | ||
242 | } | ||
243 | |||
244 | static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) | ||
245 | { | ||
246 | u8 irq; | ||
247 | u8 msi_count; | ||
248 | struct pci_epf *epf = epf_test->epf; | ||
249 | struct pci_epc *epc = epf->epc; | ||
250 | struct pci_epf_test_reg *reg = epf_test->reg[0]; | ||
251 | |||
252 | reg->status |= STATUS_IRQ_RAISED; | ||
253 | msi_count = pci_epc_get_msi(epc); | ||
254 | irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; | ||
255 | if (irq > msi_count || msi_count <= 0) | ||
256 | pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); | ||
257 | else | ||
258 | pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq); | ||
259 | } | ||
260 | |||
261 | static void pci_epf_test_cmd_handler(struct work_struct *work) | ||
262 | { | ||
263 | int ret; | ||
264 | u8 irq; | ||
265 | u8 msi_count; | ||
266 | struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, | ||
267 | cmd_handler.work); | ||
268 | struct pci_epf *epf = epf_test->epf; | ||
269 | struct pci_epc *epc = epf->epc; | ||
270 | struct pci_epf_test_reg *reg = epf_test->reg[0]; | ||
271 | |||
272 | if (!reg->command) | ||
273 | goto reset_handler; | ||
274 | |||
275 | if (reg->command & COMMAND_RAISE_LEGACY_IRQ) { | ||
276 | reg->status = STATUS_IRQ_RAISED; | ||
277 | pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); | ||
278 | goto reset_handler; | ||
279 | } | ||
280 | |||
281 | if (reg->command & COMMAND_WRITE) { | ||
282 | ret = pci_epf_test_write(epf_test); | ||
283 | if (ret) | ||
284 | reg->status |= STATUS_WRITE_FAIL; | ||
285 | else | ||
286 | reg->status |= STATUS_WRITE_SUCCESS; | ||
287 | pci_epf_test_raise_irq(epf_test); | ||
288 | goto reset_handler; | ||
289 | } | ||
290 | |||
291 | if (reg->command & COMMAND_READ) { | ||
292 | ret = pci_epf_test_read(epf_test); | ||
293 | if (!ret) | ||
294 | reg->status |= STATUS_READ_SUCCESS; | ||
295 | else | ||
296 | reg->status |= STATUS_READ_FAIL; | ||
297 | pci_epf_test_raise_irq(epf_test); | ||
298 | goto reset_handler; | ||
299 | } | ||
300 | |||
301 | if (reg->command & COMMAND_COPY) { | ||
302 | ret = pci_epf_test_copy(epf_test); | ||
303 | if (!ret) | ||
304 | reg->status |= STATUS_COPY_SUCCESS; | ||
305 | else | ||
306 | reg->status |= STATUS_COPY_FAIL; | ||
307 | pci_epf_test_raise_irq(epf_test); | ||
308 | goto reset_handler; | ||
309 | } | ||
310 | |||
311 | if (reg->command & COMMAND_RAISE_MSI_IRQ) { | ||
312 | msi_count = pci_epc_get_msi(epc); | ||
313 | irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT; | ||
314 | if (irq > msi_count || msi_count <= 0) | ||
315 | goto reset_handler; | ||
316 | reg->status = STATUS_IRQ_RAISED; | ||
317 | pci_epc_raise_irq(epc, PCI_EPC_IRQ_MSI, irq); | ||
318 | goto reset_handler; | ||
319 | } | ||
320 | |||
321 | reset_handler: | ||
322 | reg->command = 0; | ||
323 | |||
324 | queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, | ||
325 | msecs_to_jiffies(1)); | ||
326 | } | ||
327 | |||
328 | static void pci_epf_test_linkup(struct pci_epf *epf) | ||
329 | { | ||
330 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); | ||
331 | |||
332 | queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, | ||
333 | msecs_to_jiffies(1)); | ||
334 | } | ||
335 | |||
336 | static void pci_epf_test_unbind(struct pci_epf *epf) | ||
337 | { | ||
338 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); | ||
339 | struct pci_epc *epc = epf->epc; | ||
340 | int bar; | ||
341 | |||
342 | cancel_delayed_work(&epf_test->cmd_handler); | ||
343 | pci_epc_stop(epc); | ||
344 | for (bar = BAR_0; bar <= BAR_5; bar++) { | ||
345 | if (epf_test->reg[bar]) { | ||
346 | pci_epf_free_space(epf, epf_test->reg[bar], bar); | ||
347 | pci_epc_clear_bar(epc, bar); | ||
348 | } | ||
349 | } | ||
350 | } | ||
351 | |||
352 | static int pci_epf_test_set_bar(struct pci_epf *epf) | ||
353 | { | ||
354 | int flags; | ||
355 | int bar; | ||
356 | int ret; | ||
357 | struct pci_epf_bar *epf_bar; | ||
358 | struct pci_epc *epc = epf->epc; | ||
359 | struct device *dev = &epf->dev; | ||
360 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); | ||
361 | |||
362 | flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32; | ||
363 | if (sizeof(dma_addr_t) == 0x8) | ||
364 | flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; | ||
365 | |||
366 | for (bar = BAR_0; bar <= BAR_5; bar++) { | ||
367 | epf_bar = &epf->bar[bar]; | ||
368 | ret = pci_epc_set_bar(epc, bar, epf_bar->phys_addr, | ||
369 | epf_bar->size, flags); | ||
370 | if (ret) { | ||
371 | pci_epf_free_space(epf, epf_test->reg[bar], bar); | ||
372 | dev_err(dev, "failed to set BAR%d\n", bar); | ||
373 | if (bar == BAR_0) | ||
374 | return ret; | ||
375 | } | ||
376 | } | ||
377 | |||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | static int pci_epf_test_alloc_space(struct pci_epf *epf) | ||
382 | { | ||
383 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); | ||
384 | struct device *dev = &epf->dev; | ||
385 | void *base; | ||
386 | int bar; | ||
387 | |||
388 | base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), | ||
389 | BAR_0); | ||
390 | if (!base) { | ||
391 | dev_err(dev, "failed to allocate register space\n"); | ||
392 | return -ENOMEM; | ||
393 | } | ||
394 | epf_test->reg[0] = base; | ||
395 | |||
396 | for (bar = BAR_1; bar <= BAR_5; bar++) { | ||
397 | base = pci_epf_alloc_space(epf, bar_size[bar - 1], bar); | ||
398 | if (!base) | ||
399 | dev_err(dev, "failed to allocate space for BAR%d\n", | ||
400 | bar); | ||
401 | epf_test->reg[bar] = base; | ||
402 | } | ||
403 | |||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | static int pci_epf_test_bind(struct pci_epf *epf) | ||
408 | { | ||
409 | int ret; | ||
410 | struct pci_epf_header *header = epf->header; | ||
411 | struct pci_epc *epc = epf->epc; | ||
412 | struct device *dev = &epf->dev; | ||
413 | |||
414 | if (WARN_ON_ONCE(!epc)) | ||
415 | return -EINVAL; | ||
416 | |||
417 | ret = pci_epc_write_header(epc, header); | ||
418 | if (ret) { | ||
419 | dev_err(dev, "configuration header write failed\n"); | ||
420 | return ret; | ||
421 | } | ||
422 | |||
423 | ret = pci_epf_test_alloc_space(epf); | ||
424 | if (ret) | ||
425 | return ret; | ||
426 | |||
427 | ret = pci_epf_test_set_bar(epf); | ||
428 | if (ret) | ||
429 | return ret; | ||
430 | |||
431 | ret = pci_epc_set_msi(epc, epf->msi_interrupts); | ||
432 | if (ret) | ||
433 | return ret; | ||
434 | |||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static int pci_epf_test_probe(struct pci_epf *epf) | ||
439 | { | ||
440 | struct pci_epf_test *epf_test; | ||
441 | struct device *dev = &epf->dev; | ||
442 | |||
443 | epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); | ||
444 | if (!epf_test) | ||
445 | return -ENOMEM; | ||
446 | |||
447 | epf->header = &test_header; | ||
448 | epf_test->epf = epf; | ||
449 | |||
450 | INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); | ||
451 | |||
452 | epf_set_drvdata(epf, epf_test); | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static int pci_epf_test_remove(struct pci_epf *epf) | ||
457 | { | ||
458 | struct pci_epf_test *epf_test = epf_get_drvdata(epf); | ||
459 | |||
460 | kfree(epf_test); | ||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | static struct pci_epf_ops ops = { | ||
465 | .unbind = pci_epf_test_unbind, | ||
466 | .bind = pci_epf_test_bind, | ||
467 | .linkup = pci_epf_test_linkup, | ||
468 | }; | ||
469 | |||
470 | static const struct pci_epf_device_id pci_epf_test_ids[] = { | ||
471 | { | ||
472 | .name = "pci_epf_test", | ||
473 | }, | ||
474 | {}, | ||
475 | }; | ||
476 | |||
477 | static struct pci_epf_driver test_driver = { | ||
478 | .driver.name = "pci_epf_test", | ||
479 | .probe = pci_epf_test_probe, | ||
480 | .remove = pci_epf_test_remove, | ||
481 | .id_table = pci_epf_test_ids, | ||
482 | .ops = &ops, | ||
483 | .owner = THIS_MODULE, | ||
484 | }; | ||
485 | |||
486 | static int __init pci_epf_test_init(void) | ||
487 | { | ||
488 | int ret; | ||
489 | |||
490 | kpcitest_workqueue = alloc_workqueue("kpcitest", | ||
491 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | ||
492 | ret = pci_epf_register_driver(&test_driver); | ||
493 | if (ret) { | ||
494 | pr_err("failed to register pci epf test driver --> %d\n", ret); | ||
495 | return ret; | ||
496 | } | ||
497 | |||
498 | return 0; | ||
499 | } | ||
500 | module_init(pci_epf_test_init); | ||
501 | |||
502 | static void __exit pci_epf_test_exit(void) | ||
503 | { | ||
504 | pci_epf_unregister_driver(&test_driver); | ||
505 | } | ||
506 | module_exit(pci_epf_test_exit); | ||
507 | |||
508 | MODULE_DESCRIPTION("PCI EPF TEST DRIVER"); | ||
509 | MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); | ||
510 | MODULE_LICENSE("GPL v2"); | ||
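The test function exposes its control interface through BAR0, laid out as struct pci_epf_test_reg above: the host writes source/destination bus addresses, a size and a command, and the function reports the outcome in the status register and raises an interrupt. The fragment below is a hypothetical, simplified host-side view of a COPY request (the real host driver is a separate patch): register offsets are derived from the __packed struct, polling stands in for the interrupt, and all names are illustrative.

    #include <linux/bitops.h>
    #include <linux/io.h>
    #include <linux/kernel.h>

    /* Offsets implied by struct pci_epf_test_reg (__packed). */
    #define TEST_REG_COMMAND        0x04
    #define TEST_REG_STATUS         0x08
    #define TEST_REG_SRC_ADDR       0x0c
    #define TEST_REG_DST_ADDR       0x14
    #define TEST_REG_SIZE           0x1c

    static bool example_copy(void __iomem *bar0, u64 src, u64 dst, u32 size)
    {
            u32 status;

            writel(lower_32_bits(src), bar0 + TEST_REG_SRC_ADDR);
            writel(upper_32_bits(src), bar0 + TEST_REG_SRC_ADDR + 4);
            writel(lower_32_bits(dst), bar0 + TEST_REG_DST_ADDR);
            writel(upper_32_bits(dst), bar0 + TEST_REG_DST_ADDR + 4);
            writel(size, bar0 + TEST_REG_SIZE);
            writel(BIT(10), bar0 + TEST_REG_COMMAND);       /* COMMAND_COPY */

            do {
                    status = readl(bar0 + TEST_REG_STATUS);
            } while (!(status & BIT(6)));                   /* STATUS_IRQ_RAISED */

            return status & BIT(4);                         /* STATUS_COPY_SUCCESS */
    }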
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c new file mode 100644 index 000000000000..424fdd6ed1ca --- /dev/null +++ b/drivers/pci/endpoint/pci-ep-cfs.c | |||
@@ -0,0 +1,509 @@ | |||
1 | /** | ||
2 | * configfs to configure the PCI endpoint | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #include <linux/pci-epc.h> | ||
24 | #include <linux/pci-epf.h> | ||
25 | #include <linux/pci-ep-cfs.h> | ||
26 | |||
27 | static struct config_group *functions_group; | ||
28 | static struct config_group *controllers_group; | ||
29 | |||
30 | struct pci_epf_group { | ||
31 | struct config_group group; | ||
32 | struct pci_epf *epf; | ||
33 | }; | ||
34 | |||
35 | struct pci_epc_group { | ||
36 | struct config_group group; | ||
37 | struct pci_epc *epc; | ||
38 | bool start; | ||
39 | unsigned long function_num_map; | ||
40 | }; | ||
41 | |||
42 | static inline struct pci_epf_group *to_pci_epf_group(struct config_item *item) | ||
43 | { | ||
44 | return container_of(to_config_group(item), struct pci_epf_group, group); | ||
45 | } | ||
46 | |||
47 | static inline struct pci_epc_group *to_pci_epc_group(struct config_item *item) | ||
48 | { | ||
49 | return container_of(to_config_group(item), struct pci_epc_group, group); | ||
50 | } | ||
51 | |||
52 | static ssize_t pci_epc_start_store(struct config_item *item, const char *page, | ||
53 | size_t len) | ||
54 | { | ||
55 | int ret; | ||
56 | bool start; | ||
57 | struct pci_epc *epc; | ||
58 | struct pci_epc_group *epc_group = to_pci_epc_group(item); | ||
59 | |||
60 | epc = epc_group->epc; | ||
61 | |||
62 | ret = kstrtobool(page, &start); | ||
63 | if (ret) | ||
64 | return ret; | ||
65 | |||
66 | if (!start) { | ||
67 | pci_epc_stop(epc); | ||
68 | return len; | ||
69 | } | ||
70 | |||
71 | ret = pci_epc_start(epc); | ||
72 | if (ret) { | ||
73 | dev_err(&epc->dev, "failed to start endpoint controller\n"); | ||
74 | return -EINVAL; | ||
75 | } | ||
76 | |||
77 | epc_group->start = start; | ||
78 | |||
79 | return len; | ||
80 | } | ||
81 | |||
82 | static ssize_t pci_epc_start_show(struct config_item *item, char *page) | ||
83 | { | ||
84 | return sprintf(page, "%d\n", | ||
85 | to_pci_epc_group(item)->start); | ||
86 | } | ||
87 | |||
88 | CONFIGFS_ATTR(pci_epc_, start); | ||
89 | |||
90 | static struct configfs_attribute *pci_epc_attrs[] = { | ||
91 | &pci_epc_attr_start, | ||
92 | NULL, | ||
93 | }; | ||
94 | |||
95 | static int pci_epc_epf_link(struct config_item *epc_item, | ||
96 | struct config_item *epf_item) | ||
97 | { | ||
98 | int ret; | ||
99 | u32 func_no = 0; | ||
100 | struct pci_epc *epc; | ||
101 | struct pci_epf *epf; | ||
102 | struct pci_epf_group *epf_group = to_pci_epf_group(epf_item); | ||
103 | struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); | ||
104 | |||
105 | epc = epc_group->epc; | ||
106 | epf = epf_group->epf; | ||
107 | ret = pci_epc_add_epf(epc, epf); | ||
108 | if (ret) | ||
109 | goto err_add_epf; | ||
110 | |||
111 | func_no = find_first_zero_bit(&epc_group->function_num_map, | ||
112 | sizeof(epc_group->function_num_map)); | ||
113 | set_bit(func_no, &epc_group->function_num_map); | ||
114 | epf->func_no = func_no; | ||
115 | |||
116 | ret = pci_epf_bind(epf); | ||
117 | if (ret) | ||
118 | goto err_epf_bind; | ||
119 | |||
120 | return 0; | ||
121 | |||
122 | err_epf_bind: | ||
123 | pci_epc_remove_epf(epc, epf); | ||
124 | |||
125 | err_add_epf: | ||
126 | clear_bit(func_no, &epc_group->function_num_map); | ||
127 | |||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | static void pci_epc_epf_unlink(struct config_item *epc_item, | ||
132 | struct config_item *epf_item) | ||
133 | { | ||
134 | struct pci_epc *epc; | ||
135 | struct pci_epf *epf; | ||
136 | struct pci_epf_group *epf_group = to_pci_epf_group(epf_item); | ||
137 | struct pci_epc_group *epc_group = to_pci_epc_group(epc_item); | ||
138 | |||
139 | WARN_ON_ONCE(epc_group->start); | ||
140 | |||
141 | epc = epc_group->epc; | ||
142 | epf = epf_group->epf; | ||
143 | clear_bit(epf->func_no, &epc_group->function_num_map); | ||
144 | pci_epf_unbind(epf); | ||
145 | pci_epc_remove_epf(epc, epf); | ||
146 | } | ||
147 | |||
148 | static struct configfs_item_operations pci_epc_item_ops = { | ||
149 | .allow_link = pci_epc_epf_link, | ||
150 | .drop_link = pci_epc_epf_unlink, | ||
151 | }; | ||
152 | |||
153 | static struct config_item_type pci_epc_type = { | ||
154 | .ct_item_ops = &pci_epc_item_ops, | ||
155 | .ct_attrs = pci_epc_attrs, | ||
156 | .ct_owner = THIS_MODULE, | ||
157 | }; | ||
158 | |||
159 | struct config_group *pci_ep_cfs_add_epc_group(const char *name) | ||
160 | { | ||
161 | int ret; | ||
162 | struct pci_epc *epc; | ||
163 | struct config_group *group; | ||
164 | struct pci_epc_group *epc_group; | ||
165 | |||
166 | epc_group = kzalloc(sizeof(*epc_group), GFP_KERNEL); | ||
167 | if (!epc_group) { | ||
168 | ret = -ENOMEM; | ||
169 | goto err; | ||
170 | } | ||
171 | |||
172 | group = &epc_group->group; | ||
173 | |||
174 | config_group_init_type_name(group, name, &pci_epc_type); | ||
175 | ret = configfs_register_group(controllers_group, group); | ||
176 | if (ret) { | ||
177 | pr_err("failed to register configfs group for %s\n", name); | ||
178 | goto err_register_group; | ||
179 | } | ||
180 | |||
181 | epc = pci_epc_get(name); | ||
182 | if (IS_ERR(epc)) { | ||
183 | ret = PTR_ERR(epc); | ||
184 | goto err_epc_get; | ||
185 | } | ||
186 | |||
187 | epc_group->epc = epc; | ||
188 | |||
189 | return group; | ||
190 | |||
191 | err_epc_get: | ||
192 | configfs_unregister_group(group); | ||
193 | |||
194 | err_register_group: | ||
195 | kfree(epc_group); | ||
196 | |||
197 | err: | ||
198 | return ERR_PTR(ret); | ||
199 | } | ||
200 | EXPORT_SYMBOL(pci_ep_cfs_add_epc_group); | ||
201 | |||
202 | void pci_ep_cfs_remove_epc_group(struct config_group *group) | ||
203 | { | ||
204 | struct pci_epc_group *epc_group; | ||
205 | |||
206 | if (!group) | ||
207 | return; | ||
208 | |||
209 | epc_group = container_of(group, struct pci_epc_group, group); | ||
210 | pci_epc_put(epc_group->epc); | ||
211 | configfs_unregister_group(&epc_group->group); | ||
212 | kfree(epc_group); | ||
213 | } | ||
214 | EXPORT_SYMBOL(pci_ep_cfs_remove_epc_group); | ||
215 | |||
216 | #define PCI_EPF_HEADER_R(_name) \ | ||
217 | static ssize_t pci_epf_##_name##_show(struct config_item *item, char *page) \ | ||
218 | { \ | ||
219 | struct pci_epf *epf = to_pci_epf_group(item)->epf; \ | ||
220 | if (WARN_ON_ONCE(!epf->header)) \ | ||
221 | return -EINVAL; \ | ||
222 | return sprintf(page, "0x%04x\n", epf->header->_name); \ | ||
223 | } | ||
224 | |||
225 | #define PCI_EPF_HEADER_W_u32(_name) \ | ||
226 | static ssize_t pci_epf_##_name##_store(struct config_item *item, \ | ||
227 | const char *page, size_t len) \ | ||
228 | { \ | ||
229 | u32 val; \ | ||
230 | int ret; \ | ||
231 | struct pci_epf *epf = to_pci_epf_group(item)->epf; \ | ||
232 | if (WARN_ON_ONCE(!epf->header)) \ | ||
233 | return -EINVAL; \ | ||
234 | ret = kstrtou32(page, 0, &val); \ | ||
235 | if (ret) \ | ||
236 | return ret; \ | ||
237 | epf->header->_name = val; \ | ||
238 | return len; \ | ||
239 | } | ||
240 | |||
241 | #define PCI_EPF_HEADER_W_u16(_name) \ | ||
242 | static ssize_t pci_epf_##_name##_store(struct config_item *item, \ | ||
243 | const char *page, size_t len) \ | ||
244 | { \ | ||
245 | u16 val; \ | ||
246 | int ret; \ | ||
247 | struct pci_epf *epf = to_pci_epf_group(item)->epf; \ | ||
248 | if (WARN_ON_ONCE(!epf->header)) \ | ||
249 | return -EINVAL; \ | ||
250 | ret = kstrtou16(page, 0, &val); \ | ||
251 | if (ret) \ | ||
252 | return ret; \ | ||
253 | epf->header->_name = val; \ | ||
254 | return len; \ | ||
255 | } | ||
256 | |||
257 | #define PCI_EPF_HEADER_W_u8(_name) \ | ||
258 | static ssize_t pci_epf_##_name##_store(struct config_item *item, \ | ||
259 | const char *page, size_t len) \ | ||
260 | { \ | ||
261 | u8 val; \ | ||
262 | int ret; \ | ||
263 | struct pci_epf *epf = to_pci_epf_group(item)->epf; \ | ||
264 | if (WARN_ON_ONCE(!epf->header)) \ | ||
265 | return -EINVAL; \ | ||
266 | ret = kstrtou8(page, 0, &val); \ | ||
267 | if (ret) \ | ||
268 | return ret; \ | ||
269 | epf->header->_name = val; \ | ||
270 | return len; \ | ||
271 | } | ||
272 | |||
273 | static ssize_t pci_epf_msi_interrupts_store(struct config_item *item, | ||
274 | const char *page, size_t len) | ||
275 | { | ||
276 | u8 val; | ||
277 | int ret; | ||
278 | |||
279 | ret = kstrtou8(page, 0, &val); | ||
280 | if (ret) | ||
281 | return ret; | ||
282 | |||
283 | to_pci_epf_group(item)->epf->msi_interrupts = val; | ||
284 | |||
285 | return len; | ||
286 | } | ||
287 | |||
288 | static ssize_t pci_epf_msi_interrupts_show(struct config_item *item, | ||
289 | char *page) | ||
290 | { | ||
291 | return sprintf(page, "%d\n", | ||
292 | to_pci_epf_group(item)->epf->msi_interrupts); | ||
293 | } | ||
294 | |||
295 | PCI_EPF_HEADER_R(vendorid) | ||
296 | PCI_EPF_HEADER_W_u16(vendorid) | ||
297 | |||
298 | PCI_EPF_HEADER_R(deviceid) | ||
299 | PCI_EPF_HEADER_W_u16(deviceid) | ||
300 | |||
301 | PCI_EPF_HEADER_R(revid) | ||
302 | PCI_EPF_HEADER_W_u8(revid) | ||
303 | |||
304 | PCI_EPF_HEADER_R(progif_code) | ||
305 | PCI_EPF_HEADER_W_u8(progif_code) | ||
306 | |||
307 | PCI_EPF_HEADER_R(subclass_code) | ||
308 | PCI_EPF_HEADER_W_u8(subclass_code) | ||
309 | |||
310 | PCI_EPF_HEADER_R(baseclass_code) | ||
311 | PCI_EPF_HEADER_W_u8(baseclass_code) | ||
312 | |||
313 | PCI_EPF_HEADER_R(cache_line_size) | ||
314 | PCI_EPF_HEADER_W_u8(cache_line_size) | ||
315 | |||
316 | PCI_EPF_HEADER_R(subsys_vendor_id) | ||
317 | PCI_EPF_HEADER_W_u16(subsys_vendor_id) | ||
318 | |||
319 | PCI_EPF_HEADER_R(subsys_id) | ||
320 | PCI_EPF_HEADER_W_u16(subsys_id) | ||
321 | |||
322 | PCI_EPF_HEADER_R(interrupt_pin) | ||
323 | PCI_EPF_HEADER_W_u8(interrupt_pin) | ||
324 | |||
325 | CONFIGFS_ATTR(pci_epf_, vendorid); | ||
326 | CONFIGFS_ATTR(pci_epf_, deviceid); | ||
327 | CONFIGFS_ATTR(pci_epf_, revid); | ||
328 | CONFIGFS_ATTR(pci_epf_, progif_code); | ||
329 | CONFIGFS_ATTR(pci_epf_, subclass_code); | ||
330 | CONFIGFS_ATTR(pci_epf_, baseclass_code); | ||
331 | CONFIGFS_ATTR(pci_epf_, cache_line_size); | ||
332 | CONFIGFS_ATTR(pci_epf_, subsys_vendor_id); | ||
333 | CONFIGFS_ATTR(pci_epf_, subsys_id); | ||
334 | CONFIGFS_ATTR(pci_epf_, interrupt_pin); | ||
335 | CONFIGFS_ATTR(pci_epf_, msi_interrupts); | ||
336 | |||
337 | static struct configfs_attribute *pci_epf_attrs[] = { | ||
338 | &pci_epf_attr_vendorid, | ||
339 | &pci_epf_attr_deviceid, | ||
340 | &pci_epf_attr_revid, | ||
341 | &pci_epf_attr_progif_code, | ||
342 | &pci_epf_attr_subclass_code, | ||
343 | &pci_epf_attr_baseclass_code, | ||
344 | &pci_epf_attr_cache_line_size, | ||
345 | &pci_epf_attr_subsys_vendor_id, | ||
346 | &pci_epf_attr_subsys_id, | ||
347 | &pci_epf_attr_interrupt_pin, | ||
348 | &pci_epf_attr_msi_interrupts, | ||
349 | NULL, | ||
350 | }; | ||
351 | |||
352 | static void pci_epf_release(struct config_item *item) | ||
353 | { | ||
354 | struct pci_epf_group *epf_group = to_pci_epf_group(item); | ||
355 | |||
356 | pci_epf_destroy(epf_group->epf); | ||
357 | kfree(epf_group); | ||
358 | } | ||
359 | |||
360 | static struct configfs_item_operations pci_epf_ops = { | ||
361 | .release = pci_epf_release, | ||
362 | }; | ||
363 | |||
364 | static struct config_item_type pci_epf_type = { | ||
365 | .ct_item_ops = &pci_epf_ops, | ||
366 | .ct_attrs = pci_epf_attrs, | ||
367 | .ct_owner = THIS_MODULE, | ||
368 | }; | ||
369 | |||
370 | static struct config_group *pci_epf_make(struct config_group *group, | ||
371 | const char *name) | ||
372 | { | ||
373 | struct pci_epf_group *epf_group; | ||
374 | struct pci_epf *epf; | ||
375 | |||
376 | epf_group = kzalloc(sizeof(*epf_group), GFP_KERNEL); | ||
377 | if (!epf_group) | ||
378 | return ERR_PTR(-ENOMEM); | ||
379 | |||
380 | config_group_init_type_name(&epf_group->group, name, &pci_epf_type); | ||
381 | |||
382 | epf = pci_epf_create(group->cg_item.ci_name); | ||
383 | if (IS_ERR(epf)) { | ||
384 | pr_err("failed to create endpoint function device\n"); | ||
385 | return ERR_PTR(-EINVAL); | ||
386 | } | ||
387 | |||
388 | epf_group->epf = epf; | ||
389 | |||
390 | return &epf_group->group; | ||
391 | } | ||
392 | |||
393 | static void pci_epf_drop(struct config_group *group, struct config_item *item) | ||
394 | { | ||
395 | config_item_put(item); | ||
396 | } | ||
397 | |||
398 | static struct configfs_group_operations pci_epf_group_ops = { | ||
399 | .make_group = &pci_epf_make, | ||
400 | .drop_item = &pci_epf_drop, | ||
401 | }; | ||
402 | |||
403 | static struct config_item_type pci_epf_group_type = { | ||
404 | .ct_group_ops = &pci_epf_group_ops, | ||
405 | .ct_owner = THIS_MODULE, | ||
406 | }; | ||
407 | |||
408 | struct config_group *pci_ep_cfs_add_epf_group(const char *name) | ||
409 | { | ||
410 | struct config_group *group; | ||
411 | |||
412 | group = configfs_register_default_group(functions_group, name, | ||
413 | &pci_epf_group_type); | ||
414 | if (IS_ERR(group)) | ||
415 | pr_err("failed to register configfs group for %s function\n", | ||
416 | name); | ||
417 | |||
418 | return group; | ||
419 | } | ||
420 | EXPORT_SYMBOL(pci_ep_cfs_add_epf_group); | ||
421 | |||
422 | void pci_ep_cfs_remove_epf_group(struct config_group *group) | ||
423 | { | ||
424 | if (IS_ERR_OR_NULL(group)) | ||
425 | return; | ||
426 | |||
427 | configfs_unregister_default_group(group); | ||
428 | } | ||
429 | EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group); | ||
430 | |||
431 | static struct config_item_type pci_functions_type = { | ||
432 | .ct_owner = THIS_MODULE, | ||
433 | }; | ||
434 | |||
435 | static struct config_item_type pci_controllers_type = { | ||
436 | .ct_owner = THIS_MODULE, | ||
437 | }; | ||
438 | |||
439 | static struct config_item_type pci_ep_type = { | ||
440 | .ct_owner = THIS_MODULE, | ||
441 | }; | ||
442 | |||
443 | static struct configfs_subsystem pci_ep_cfs_subsys = { | ||
444 | .su_group = { | ||
445 | .cg_item = { | ||
446 | .ci_namebuf = "pci_ep", | ||
447 | .ci_type = &pci_ep_type, | ||
448 | }, | ||
449 | }, | ||
450 | .su_mutex = __MUTEX_INITIALIZER(pci_ep_cfs_subsys.su_mutex), | ||
451 | }; | ||
452 | |||
453 | static int __init pci_ep_cfs_init(void) | ||
454 | { | ||
455 | int ret; | ||
456 | struct config_group *root = &pci_ep_cfs_subsys.su_group; | ||
457 | |||
458 | config_group_init(root); | ||
459 | |||
460 | ret = configfs_register_subsystem(&pci_ep_cfs_subsys); | ||
461 | if (ret) { | ||
462 | pr_err("Error %d while registering subsystem %s\n", | ||
463 | ret, root->cg_item.ci_namebuf); | ||
464 | goto err; | ||
465 | } | ||
466 | |||
467 | functions_group = configfs_register_default_group(root, "functions", | ||
468 | &pci_functions_type); | ||
469 | if (IS_ERR(functions_group)) { | ||
470 | ret = PTR_ERR(functions_group); | ||
471 | pr_err("Error %d while registering functions group\n", | ||
472 | ret); | ||
473 | goto err_functions_group; | ||
474 | } | ||
475 | |||
476 | controllers_group = | ||
477 | configfs_register_default_group(root, "controllers", | ||
478 | &pci_controllers_type); | ||
479 | if (IS_ERR(controllers_group)) { | ||
480 | ret = PTR_ERR(controllers_group); | ||
481 | pr_err("Error %d while registering controllers group\n", | ||
482 | ret); | ||
483 | goto err_controllers_group; | ||
484 | } | ||
485 | |||
486 | return 0; | ||
487 | |||
488 | err_controllers_group: | ||
489 | configfs_unregister_default_group(functions_group); | ||
490 | |||
491 | err_functions_group: | ||
492 | configfs_unregister_subsystem(&pci_ep_cfs_subsys); | ||
493 | |||
494 | err: | ||
495 | return ret; | ||
496 | } | ||
497 | module_init(pci_ep_cfs_init); | ||
498 | |||
499 | static void __exit pci_ep_cfs_exit(void) | ||
500 | { | ||
501 | configfs_unregister_default_group(controllers_group); | ||
502 | configfs_unregister_default_group(functions_group); | ||
503 | configfs_unregister_subsystem(&pci_ep_cfs_subsys); | ||
504 | } | ||
505 | module_exit(pci_ep_cfs_exit); | ||
506 | |||
507 | MODULE_DESCRIPTION("PCI EP CONFIGFS"); | ||
508 | MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); | ||
509 | MODULE_LICENSE("GPL v2"); | ||
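pci_ep_cfs_add_epc_group() and pci_ep_cfs_remove_epc_group() are the exported hooks that make a registered endpoint controller visible under /sys/kernel/config/pci_ep/controllers/<name>; pci_epc_destroy() in the core (next file) pairs the remove call with the group kept in epc->group. From user space, a function instance is then created with mkdir under functions/<driver>/, bound by symlinking it into a controller directory (pci_epc_epf_link() above), and started by writing 1 to the start attribute. A minimal sketch of pairing the two kernel-side calls, with hypothetical names:

    #include <linux/configfs.h>
    #include <linux/err.h>
    #include <linux/pci-ep-cfs.h>

    static struct config_group *example_group;      /* kept for the remove call */

    static int example_register_epc_cfs(const char *epc_name)
    {
            example_group = pci_ep_cfs_add_epc_group(epc_name);
            if (IS_ERR(example_group))
                    return PTR_ERR(example_group);

            return 0;
    }

    static void example_unregister_epc_cfs(void)
    {
            /* Only called after a successful register in this sketch. */
            pci_ep_cfs_remove_epc_group(example_group);
    }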
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c new file mode 100644 index 000000000000..caa7be10e473 --- /dev/null +++ b/drivers/pci/endpoint/pci-epc-core.c | |||
@@ -0,0 +1,580 @@ | |||
1 | /** | ||
2 | * PCI Endpoint *Controller* (EPC) library | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/device.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/module.h> | ||
24 | |||
25 | #include <linux/pci-epc.h> | ||
26 | #include <linux/pci-epf.h> | ||
27 | #include <linux/pci-ep-cfs.h> | ||
28 | |||
29 | static struct class *pci_epc_class; | ||
30 | |||
31 | static void devm_pci_epc_release(struct device *dev, void *res) | ||
32 | { | ||
33 | struct pci_epc *epc = *(struct pci_epc **)res; | ||
34 | |||
35 | pci_epc_destroy(epc); | ||
36 | } | ||
37 | |||
38 | static int devm_pci_epc_match(struct device *dev, void *res, void *match_data) | ||
39 | { | ||
40 | struct pci_epc **epc = res; | ||
41 | |||
42 | return *epc == match_data; | ||
43 | } | ||
44 | |||
45 | /** | ||
46 | * pci_epc_put() - release the PCI endpoint controller | ||
47 | * @epc: epc returned by pci_epc_get() | ||
48 | * | ||
49 | * release the refcount the caller obtained by invoking pci_epc_get() | ||
50 | */ | ||
51 | void pci_epc_put(struct pci_epc *epc) | ||
52 | { | ||
53 | if (!epc || IS_ERR(epc)) | ||
54 | return; | ||
55 | |||
56 | module_put(epc->ops->owner); | ||
57 | put_device(&epc->dev); | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(pci_epc_put); | ||
60 | |||
61 | /** | ||
62 | * pci_epc_get() - get the PCI endpoint controller | ||
63 | * @epc_name: device name of the endpoint controller | ||
64 | * | ||
65 | * Invoke to get struct pci_epc * corresponding to the device name of the | ||
66 | * endpoint controller | ||
67 | */ | ||
68 | struct pci_epc *pci_epc_get(const char *epc_name) | ||
69 | { | ||
70 | int ret = -EINVAL; | ||
71 | struct pci_epc *epc; | ||
72 | struct device *dev; | ||
73 | struct class_dev_iter iter; | ||
74 | |||
75 | class_dev_iter_init(&iter, pci_epc_class, NULL, NULL); | ||
76 | while ((dev = class_dev_iter_next(&iter))) { | ||
77 | if (strcmp(epc_name, dev_name(dev))) | ||
78 | continue; | ||
79 | |||
80 | epc = to_pci_epc(dev); | ||
81 | if (!try_module_get(epc->ops->owner)) { | ||
82 | ret = -EINVAL; | ||
83 | goto err; | ||
84 | } | ||
85 | |||
86 | class_dev_iter_exit(&iter); | ||
87 | get_device(&epc->dev); | ||
88 | return epc; | ||
89 | } | ||
90 | |||
91 | err: | ||
92 | class_dev_iter_exit(&iter); | ||
93 | return ERR_PTR(ret); | ||
94 | } | ||
95 | EXPORT_SYMBOL_GPL(pci_epc_get); | ||
96 | |||
97 | /** | ||
98 | * pci_epc_stop() - stop the PCI link | ||
99 | * @epc: the EPC device whose link has to be stopped | ||
100 | * | ||
101 | * Invoke to stop the PCI link | ||
102 | */ | ||
103 | void pci_epc_stop(struct pci_epc *epc) | ||
104 | { | ||
105 | unsigned long flags; | ||
106 | |||
107 | if (IS_ERR(epc) || !epc->ops->stop) | ||
108 | return; | ||
109 | |||
110 | spin_lock_irqsave(&epc->lock, flags); | ||
111 | epc->ops->stop(epc); | ||
112 | spin_unlock_irqrestore(&epc->lock, flags); | ||
113 | } | ||
114 | EXPORT_SYMBOL_GPL(pci_epc_stop); | ||
115 | |||
116 | /** | ||
117 | * pci_epc_start() - start the PCI link | ||
118 | * @epc: the EPC device whose link has to be started | ||
119 | * | ||
120 | * Invoke to start the PCI link | ||
121 | */ | ||
122 | int pci_epc_start(struct pci_epc *epc) | ||
123 | { | ||
124 | int ret; | ||
125 | unsigned long flags; | ||
126 | |||
127 | if (IS_ERR(epc)) | ||
128 | return -EINVAL; | ||
129 | |||
130 | if (!epc->ops->start) | ||
131 | return 0; | ||
132 | |||
133 | spin_lock_irqsave(&epc->lock, flags); | ||
134 | ret = epc->ops->start(epc); | ||
135 | spin_unlock_irqrestore(&epc->lock, flags); | ||
136 | |||
137 | return ret; | ||
138 | } | ||
139 | EXPORT_SYMBOL_GPL(pci_epc_start); | ||
140 | |||
141 | /** | ||
142 | * pci_epc_raise_irq() - interrupt the host system | ||
143 | * @epc: the EPC device which has to interrupt the host | ||
144 | * @type: specify the type of interrupt; legacy or MSI | ||
145 | * @interrupt_num: the MSI interrupt number | ||
146 | * | ||
147 | * Invoke to raise an MSI or legacy interrupt | ||
148 | */ | ||
149 | int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type, | ||
150 | u8 interrupt_num) | ||
151 | { | ||
152 | int ret; | ||
153 | unsigned long flags; | ||
154 | |||
155 | if (IS_ERR(epc)) | ||
156 | return -EINVAL; | ||
157 | |||
158 | if (!epc->ops->raise_irq) | ||
159 | return 0; | ||
160 | |||
161 | spin_lock_irqsave(&epc->lock, flags); | ||
162 | ret = epc->ops->raise_irq(epc, type, interrupt_num); | ||
163 | spin_unlock_irqrestore(&epc->lock, flags); | ||
164 | |||
165 | return ret; | ||
166 | } | ||
167 | EXPORT_SYMBOL_GPL(pci_epc_raise_irq); | ||
168 | |||
169 | /** | ||
170 | * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated | ||
171 | * @epc: the EPC device to which MSI interrupts were requested | ||
172 | * | ||
173 | * Invoke to get the number of MSI interrupts allocated by the RC | ||
174 | */ | ||
175 | int pci_epc_get_msi(struct pci_epc *epc) | ||
176 | { | ||
177 | int interrupt; | ||
178 | unsigned long flags; | ||
179 | |||
180 | if (IS_ERR(epc)) | ||
181 | return 0; | ||
182 | |||
183 | if (!epc->ops->get_msi) | ||
184 | return 0; | ||
185 | |||
186 | spin_lock_irqsave(&epc->lock, flags); | ||
187 | interrupt = epc->ops->get_msi(epc); | ||
188 | spin_unlock_irqrestore(&epc->lock, flags); | ||
189 | |||
190 | if (interrupt < 0) | ||
191 | return 0; | ||
192 | |||
193 | interrupt = 1 << interrupt; | ||
194 | |||
195 | return interrupt; | ||
196 | } | ||
197 | EXPORT_SYMBOL_GPL(pci_epc_get_msi); | ||
198 | |||
199 | /** | ||
200 | * pci_epc_set_msi() - set the number of MSI interrupt numbers required | ||
201 | * @epc: the EPC device on which MSI has to be configured | ||
202 | * @interrupts: number of MSI interrupts required by the EPF | ||
203 | * | ||
204 | * Invoke to set the required number of MSI interrupts. | ||
205 | */ | ||
206 | int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts) | ||
207 | { | ||
208 | int ret; | ||
209 | u8 encode_int; | ||
210 | unsigned long flags; | ||
211 | |||
212 | if (IS_ERR(epc)) | ||
213 | return -EINVAL; | ||
214 | |||
215 | if (!epc->ops->set_msi) | ||
216 | return 0; | ||
217 | |||
218 | encode_int = order_base_2(interrupts); | ||
219 | |||
220 | spin_lock_irqsave(&epc->lock, flags); | ||
221 | ret = epc->ops->set_msi(epc, encode_int); | ||
222 | spin_unlock_irqrestore(&epc->lock, flags); | ||
223 | |||
224 | return ret; | ||
225 | } | ||
226 | EXPORT_SYMBOL_GPL(pci_epc_set_msi); | ||
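pci_epc_set_msi() stores the requested vector count in the Multiple Message Capable encoding via order_base_2(), and pci_epc_get_msi() decodes the controller's value back as 1 << interrupt, so the count reported to callers is always a power of two. A small worked example (names hypothetical):

    #include <linux/log2.h>
    #include <linux/types.h>

    static inline int example_msi_round_trip(void)
    {
            u8 requested = 6;                       /* EPF asks for 6 vectors     */
            u8 encoded = order_base_2(requested);   /* 3, as written by set_msi() */

            return 1 << encoded;                    /* 8, as reported by get_msi() */
    }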
227 | |||
228 | /** | ||
229 | * pci_epc_unmap_addr() - unmap CPU address from PCI address | ||
230 | * @epc: the EPC device on which address is allocated | ||
231 | * @phys_addr: physical address of the local system | ||
232 | * | ||
233 | * Invoke to unmap the CPU address from PCI address. | ||
234 | */ | ||
235 | void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr) | ||
236 | { | ||
237 | unsigned long flags; | ||
238 | |||
239 | if (IS_ERR(epc)) | ||
240 | return; | ||
241 | |||
242 | if (!epc->ops->unmap_addr) | ||
243 | return; | ||
244 | |||
245 | spin_lock_irqsave(&epc->lock, flags); | ||
246 | epc->ops->unmap_addr(epc, phys_addr); | ||
247 | spin_unlock_irqrestore(&epc->lock, flags); | ||
248 | } | ||
249 | EXPORT_SYMBOL_GPL(pci_epc_unmap_addr); | ||
250 | |||
251 | /** | ||
252 | * pci_epc_map_addr() - map CPU address to PCI address | ||
253 | * @epc: the EPC device on which address is allocated | ||
254 | * @phys_addr: physical address of the local system | ||
255 | * @pci_addr: PCI address to which the physical address should be mapped | ||
256 | * @size: the size of the allocation | ||
257 | * | ||
258 | * Invoke to map CPU address with PCI address. | ||
259 | */ | ||
260 | int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr, | ||
261 | u64 pci_addr, size_t size) | ||
262 | { | ||
263 | int ret; | ||
264 | unsigned long flags; | ||
265 | |||
266 | if (IS_ERR(epc)) | ||
267 | return -EINVAL; | ||
268 | |||
269 | if (!epc->ops->map_addr) | ||
270 | return 0; | ||
271 | |||
272 | spin_lock_irqsave(&epc->lock, flags); | ||
273 | ret = epc->ops->map_addr(epc, phys_addr, pci_addr, size); | ||
274 | spin_unlock_irqrestore(&epc->lock, flags); | ||
275 | |||
276 | return ret; | ||
277 | } | ||
278 | EXPORT_SYMBOL_GPL(pci_epc_map_addr); | ||
279 | |||
280 | /** | ||
281 | * pci_epc_clear_bar() - reset the BAR | ||
282 | * @epc: the EPC device for which the BAR has to be cleared | ||
283 | * @bar: the BAR number that has to be reset | ||
284 | * | ||
285 | * Invoke to reset the BAR of the endpoint device. | ||
286 | */ | ||
287 | void pci_epc_clear_bar(struct pci_epc *epc, int bar) | ||
288 | { | ||
289 | unsigned long flags; | ||
290 | |||
291 | if (IS_ERR(epc)) | ||
292 | return; | ||
293 | |||
294 | if (!epc->ops->clear_bar) | ||
295 | return; | ||
296 | |||
297 | spin_lock_irqsave(&epc->lock, flags); | ||
298 | epc->ops->clear_bar(epc, bar); | ||
299 | spin_unlock_irqrestore(&epc->lock, flags); | ||
300 | } | ||
301 | EXPORT_SYMBOL_GPL(pci_epc_clear_bar); | ||
302 | |||
303 | /** | ||
304 | * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space | ||
305 | * @epc: the EPC device on which BAR has to be configured | ||
306 | * @bar: the BAR number that has to be configured | ||
307 | * @size: the size of the addr space | ||
308 | * @flags: specify memory allocation/io allocation/32bit address/64 bit address | ||
309 | * | ||
310 | * Invoke to configure the BAR of the endpoint device. | ||
311 | */ | ||
312 | int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar, | ||
313 | dma_addr_t bar_phys, size_t size, int flags) | ||
314 | { | ||
315 | int ret; | ||
316 | unsigned long irq_flags; | ||
317 | |||
318 | if (IS_ERR(epc)) | ||
319 | return -EINVAL; | ||
320 | |||
321 | if (!epc->ops->set_bar) | ||
322 | return 0; | ||
323 | |||
324 | spin_lock_irqsave(&epc->lock, irq_flags); | ||
325 | ret = epc->ops->set_bar(epc, bar, bar_phys, size, flags); | ||
326 | spin_unlock_irqrestore(&epc->lock, irq_flags); | ||
327 | |||
328 | return ret; | ||
329 | } | ||
330 | EXPORT_SYMBOL_GPL(pci_epc_set_bar); | ||
331 | |||
332 | /** | ||
333 | * pci_epc_write_header() - write standard configuration header | ||
334 | * @epc: the EPC device to which the configuration header should be written | ||
335 | * @header: standard configuration header fields | ||
336 | * | ||
337 | * Invoke to write the configuration header to the endpoint controller. Every | ||
338 | * endpoint controller will have a dedicated location to which the standard | ||
339 | * configuration header would be written. The callback function should write | ||
340 | * the header fields to this dedicated location. | ||
341 | */ | ||
342 | int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *header) | ||
343 | { | ||
344 | int ret; | ||
345 | unsigned long flags; | ||
346 | |||
347 | if (IS_ERR(epc)) | ||
348 | return -EINVAL; | ||
349 | |||
350 | if (!epc->ops->write_header) | ||
351 | return 0; | ||
352 | |||
353 | spin_lock_irqsave(&epc->lock, flags); | ||
354 | ret = epc->ops->write_header(epc, header); | ||
355 | spin_unlock_irqrestore(&epc->lock, flags); | ||
356 | |||
357 | return ret; | ||
358 | } | ||
359 | EXPORT_SYMBOL_GPL(pci_epc_write_header); | ||
360 | |||
361 | /** | ||
362 | * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller | ||
363 | * @epc: the EPC device to which the endpoint function should be added | ||
364 | * @epf: the endpoint function to be added | ||
365 | * | ||
366 | * A PCI endpoint device can have one or more functions. In the case of PCIe, | ||
367 | * the specification allows up to 8 PCIe endpoint functions. Invoke | ||
368 | * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller. | ||
369 | */ | ||
370 | int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf) | ||
371 | { | ||
372 | unsigned long flags; | ||
373 | |||
374 | if (epf->epc) | ||
375 | return -EBUSY; | ||
376 | |||
377 | if (IS_ERR(epc)) | ||
378 | return -EINVAL; | ||
379 | |||
380 | if (epf->func_no > epc->max_functions - 1) | ||
381 | return -EINVAL; | ||
382 | |||
383 | epf->epc = epc; | ||
384 | dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask); | ||
385 | epf->dev.dma_mask = epc->dev.dma_mask; | ||
386 | |||
387 | spin_lock_irqsave(&epc->lock, flags); | ||
388 | list_add_tail(&epf->list, &epc->pci_epf); | ||
389 | spin_unlock_irqrestore(&epc->lock, flags); | ||
390 | |||
391 | return 0; | ||
392 | } | ||
393 | EXPORT_SYMBOL_GPL(pci_epc_add_epf); | ||
394 | |||
395 | /** | ||
396 | * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller | ||
397 | * @epc: the EPC device from which the endpoint function should be removed | ||
398 | * @epf: the endpoint function to be removed | ||
399 | * | ||
400 | * Invoke to remove PCI endpoint function from the endpoint controller. | ||
401 | */ | ||
402 | void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf) | ||
403 | { | ||
404 | unsigned long flags; | ||
405 | |||
406 | if (!epc || IS_ERR(epc)) | ||
407 | return; | ||
408 | |||
409 | spin_lock_irqsave(&epc->lock, flags); | ||
410 | list_del(&epf->list); | ||
411 | spin_unlock_irqrestore(&epc->lock, flags); | ||
412 | } | ||
413 | EXPORT_SYMBOL_GPL(pci_epc_remove_epf); | ||
414 | |||
415 | /** | ||
416 | * pci_epc_linkup() - Notify the EPF device that the EPC device has established | ||
417 | * a connection with the Root Complex. | ||
418 | * @epc: the EPC device which has established a link with the host | ||
419 | * | ||
420 | * Invoke to notify the EPF device that the EPC device has established a | ||
421 | * connection with the Root Complex. | ||
422 | */ | ||
423 | void pci_epc_linkup(struct pci_epc *epc) | ||
424 | { | ||
425 | unsigned long flags; | ||
426 | struct pci_epf *epf; | ||
427 | |||
428 | if (!epc || IS_ERR(epc)) | ||
429 | return; | ||
430 | |||
431 | spin_lock_irqsave(&epc->lock, flags); | ||
432 | list_for_each_entry(epf, &epc->pci_epf, list) | ||
433 | pci_epf_linkup(epf); | ||
434 | spin_unlock_irqrestore(&epc->lock, flags); | ||
435 | } | ||
436 | EXPORT_SYMBOL_GPL(pci_epc_linkup); | ||
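
As an illustrative sketch (assumed driver glue, not part of this patch), a controller driver would typically call pci_epc_linkup() from its link-up interrupt handler so that every bound function driver receives its linkup callback:

#include <linux/interrupt.h>
#include <linux/pci-epc.h>

static irqreturn_t my_epc_irq_handler(int irq, void *data)
{
	struct pci_epc *epc = data;

	/* ... acknowledge the controller's link-state interrupt here ... */

	pci_epc_linkup(epc);

	return IRQ_HANDLED;
}
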
437 | |||
438 | /** | ||
439 | * pci_epc_destroy() - destroy the EPC device | ||
440 | * @epc: the EPC device that has to be destroyed | ||
441 | * | ||
442 | * Invoke to destroy the PCI EPC device | ||
443 | */ | ||
444 | void pci_epc_destroy(struct pci_epc *epc) | ||
445 | { | ||
446 | pci_ep_cfs_remove_epc_group(epc->group); | ||
447 | device_unregister(&epc->dev); | ||
448 | kfree(epc); | ||
449 | } | ||
450 | EXPORT_SYMBOL_GPL(pci_epc_destroy); | ||
451 | |||
452 | /** | ||
453 | * devm_pci_epc_destroy() - destroy the EPC device | ||
454 | * @dev: device that wants to destroy the EPC | ||
455 | * @epc: the EPC device that has to be destroyed | ||
456 | * | ||
457 | * Invoke to destroy the devres associated with this | ||
458 | * pci_epc and destroy the EPC device. | ||
459 | */ | ||
460 | void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc) | ||
461 | { | ||
462 | int r; | ||
463 | |||
464 | r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match, | ||
465 | epc); | ||
466 | dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n"); | ||
467 | } | ||
468 | EXPORT_SYMBOL_GPL(devm_pci_epc_destroy); | ||
469 | |||
470 | /** | ||
471 | * __pci_epc_create() - create a new endpoint controller (EPC) device | ||
472 | * @dev: device that is creating the new EPC | ||
473 | * @ops: function pointers for performing EPC operations | ||
474 | * @owner: the owner of the module that creates the EPC device | ||
475 | * | ||
476 | * Invoke to create a new EPC device and add it to pci_epc class. | ||
477 | */ | ||
478 | struct pci_epc * | ||
479 | __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, | ||
480 | struct module *owner) | ||
481 | { | ||
482 | int ret; | ||
483 | struct pci_epc *epc; | ||
484 | |||
485 | if (WARN_ON(!dev)) { | ||
486 | ret = -EINVAL; | ||
487 | goto err_ret; | ||
488 | } | ||
489 | |||
490 | epc = kzalloc(sizeof(*epc), GFP_KERNEL); | ||
491 | if (!epc) { | ||
492 | ret = -ENOMEM; | ||
493 | goto err_ret; | ||
494 | } | ||
495 | |||
496 | spin_lock_init(&epc->lock); | ||
497 | INIT_LIST_HEAD(&epc->pci_epf); | ||
498 | |||
499 | device_initialize(&epc->dev); | ||
500 | dma_set_coherent_mask(&epc->dev, dev->coherent_dma_mask); | ||
501 | epc->dev.class = pci_epc_class; | ||
502 | epc->dev.dma_mask = dev->dma_mask; | ||
503 | epc->ops = ops; | ||
504 | |||
505 | ret = dev_set_name(&epc->dev, "%s", dev_name(dev)); | ||
506 | if (ret) | ||
507 | goto put_dev; | ||
508 | |||
509 | ret = device_add(&epc->dev); | ||
510 | if (ret) | ||
511 | goto put_dev; | ||
512 | |||
513 | epc->group = pci_ep_cfs_add_epc_group(dev_name(dev)); | ||
514 | |||
515 | return epc; | ||
516 | |||
517 | put_dev: | ||
518 | put_device(&epc->dev); | ||
519 | kfree(epc); | ||
520 | |||
521 | err_ret: | ||
522 | return ERR_PTR(ret); | ||
523 | } | ||
524 | EXPORT_SYMBOL_GPL(__pci_epc_create); | ||
525 | |||
526 | /** | ||
527 | * __devm_pci_epc_create() - create a new endpoint controller (EPC) device | ||
528 | * @dev: device that is creating the new EPC | ||
529 | * @ops: function pointers for performing EPC operations | ||
530 | * @owner: the owner of the module that creates the EPC device | ||
531 | * | ||
532 | * Invoke to create a new EPC device and add it to pci_epc class. | ||
533 | * It also associates the device with the pci_epc using devres. | ||
534 | * On driver detach, the release function is invoked on the devres data and | ||
535 | * the devres data is then freed. | ||
536 | */ | ||
537 | struct pci_epc * | ||
538 | __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, | ||
539 | struct module *owner) | ||
540 | { | ||
541 | struct pci_epc **ptr, *epc; | ||
542 | |||
543 | ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL); | ||
544 | if (!ptr) | ||
545 | return ERR_PTR(-ENOMEM); | ||
546 | |||
547 | epc = __pci_epc_create(dev, ops, owner); | ||
548 | if (!IS_ERR(epc)) { | ||
549 | *ptr = epc; | ||
550 | devres_add(dev, ptr); | ||
551 | } else { | ||
552 | devres_free(ptr); | ||
553 | } | ||
554 | |||
555 | return epc; | ||
556 | } | ||
557 | EXPORT_SYMBOL_GPL(__devm_pci_epc_create); | ||
558 | |||
559 | static int __init pci_epc_init(void) | ||
560 | { | ||
561 | pci_epc_class = class_create(THIS_MODULE, "pci_epc"); | ||
562 | if (IS_ERR(pci_epc_class)) { | ||
563 | pr_err("failed to create pci epc class --> %ld\n", | ||
564 | PTR_ERR(pci_epc_class)); | ||
565 | return PTR_ERR(pci_epc_class); | ||
566 | } | ||
567 | |||
568 | return 0; | ||
569 | } | ||
570 | module_init(pci_epc_init); | ||
571 | |||
572 | static void __exit pci_epc_exit(void) | ||
573 | { | ||
574 | class_destroy(pci_epc_class); | ||
575 | } | ||
576 | module_exit(pci_epc_exit); | ||
577 | |||
578 | MODULE_DESCRIPTION("PCI EPC Library"); | ||
579 | MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); | ||
580 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c new file mode 100644 index 000000000000..3a94cc1caf22 --- /dev/null +++ b/drivers/pci/endpoint/pci-epc-mem.c | |||
@@ -0,0 +1,143 @@ | |||
1 | /** | ||
2 | * PCI Endpoint *Controller* Address Space Management | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/io.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/slab.h> | ||
23 | |||
24 | #include <linux/pci-epc.h> | ||
25 | |||
26 | /** | ||
27 | * pci_epc_mem_init() - initialize the pci_epc_mem structure | ||
28 | * @epc: the EPC device that invoked pci_epc_mem_init | ||
29 | * @phys_base: the physical address of the base | ||
30 | * @size: the size of the address space | ||
31 | * | ||
32 | * Invoke to initialize the pci_epc_mem structure used by the | ||
33 | * endpoint functions to allocate mapped PCI addresses. | ||
34 | */ | ||
35 | int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size) | ||
36 | { | ||
37 | int ret; | ||
38 | struct pci_epc_mem *mem; | ||
39 | unsigned long *bitmap; | ||
40 | int pages = size >> PAGE_SHIFT; | ||
41 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | ||
42 | |||
43 | mem = kzalloc(sizeof(*mem), GFP_KERNEL); | ||
44 | if (!mem) { | ||
45 | ret = -ENOMEM; | ||
46 | goto err; | ||
47 | } | ||
48 | |||
49 | bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
50 | if (!bitmap) { | ||
51 | ret = -ENOMEM; | ||
52 | goto err_mem; | ||
53 | } | ||
54 | |||
55 | mem->bitmap = bitmap; | ||
56 | mem->phys_base = phys_base; | ||
57 | mem->pages = pages; | ||
58 | mem->size = size; | ||
59 | |||
60 | epc->mem = mem; | ||
61 | |||
62 | return 0; | ||
63 | |||
64 | err_mem: | ||
65 | kfree(mem); | ||
66 | |||
67 | err: | ||
68 | return ret; | ||
69 | } | ||
70 | EXPORT_SYMBOL_GPL(pci_epc_mem_init); | ||
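
A hedged sketch of the controller-side setup (assumptions labeled, not part of this patch): devm_pci_epc_create() is assumed to be a convenience wrapper around __devm_pci_epc_create() that passes THIS_MODULE, and the ops table, window base and window size below are placeholders.

#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static const struct pci_epc_ops my_epc_ops = {
	/* .write_header, .set_bar, .map_addr, ... supplied by the controller */
};

static int my_epc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_epc *epc;
	int ret;

	epc = devm_pci_epc_create(dev, &my_epc_ops);	/* assumed wrapper */
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	/* Hand the controller's outbound window to the EPC address allocator */
	ret = pci_epc_mem_init(epc, 0x20000000 /* placeholder base */, SZ_16M);
	if (ret)
		return ret;

	return 0;
}
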
71 | |||
72 | /** | ||
73 | * pci_epc_mem_exit() - clean up the pci_epc_mem structure | ||
74 | * @epc: the EPC device that invoked pci_epc_mem_exit | ||
75 | * | ||
76 | * Invoke to clean up the pci_epc_mem structure allocated in | ||
77 | * pci_epc_mem_init(). | ||
78 | */ | ||
79 | void pci_epc_mem_exit(struct pci_epc *epc) | ||
80 | { | ||
81 | struct pci_epc_mem *mem = epc->mem; | ||
82 | |||
83 | epc->mem = NULL; | ||
84 | kfree(mem->bitmap); | ||
85 | kfree(mem); | ||
86 | } | ||
87 | EXPORT_SYMBOL_GPL(pci_epc_mem_exit); | ||
88 | |||
89 | /** | ||
90 | * pci_epc_mem_alloc_addr() - allocate memory address from EPC addr space | ||
91 | * @epc: the EPC device on which memory has to be allocated | ||
92 | * @phys_addr: populate the allocated physical address here | ||
93 | * @size: the size of the address space that has to be allocated | ||
94 | * | ||
95 | * Invoke to allocate a memory address from the EPC address space. This | ||
96 | * is usually done to map the remote Root Complex address into the local system. | ||
97 | */ | ||
98 | void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, | ||
99 | phys_addr_t *phys_addr, size_t size) | ||
100 | { | ||
101 | int pageno; | ||
102 | void __iomem *virt_addr; | ||
103 | struct pci_epc_mem *mem = epc->mem; | ||
104 | int order = get_order(size); | ||
105 | |||
106 | pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order); | ||
107 | if (pageno < 0) | ||
108 | return NULL; | ||
109 | |||
110 | *phys_addr = mem->phys_base + (pageno << PAGE_SHIFT); | ||
111 | virt_addr = ioremap(*phys_addr, size); | ||
112 | if (!virt_addr) | ||
113 | bitmap_release_region(mem->bitmap, pageno, order); | ||
114 | |||
115 | return virt_addr; | ||
116 | } | ||
117 | EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr); | ||
118 | |||
119 | /** | ||
120 | * pci_epc_mem_free_addr() - free the allocated memory address | ||
121 | * @epc: the EPC device on which memory was allocated | ||
122 | * @phys_addr: the allocated physical address | ||
123 | * @virt_addr: virtual address of the allocated mem space | ||
124 | * @size: the size of the allocated address space | ||
125 | * | ||
126 | * Invoke to free the memory allocated using pci_epc_mem_alloc_addr. | ||
127 | */ | ||
128 | void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, | ||
129 | void __iomem *virt_addr, size_t size) | ||
130 | { | ||
131 | int pageno; | ||
132 | int order = get_order(size); | ||
133 | struct pci_epc_mem *mem = epc->mem; | ||
134 | |||
135 | iounmap(virt_addr); | ||
136 | pageno = (phys_addr - mem->phys_base) >> PAGE_SHIFT; | ||
137 | bitmap_release_region(mem->bitmap, pageno, order); | ||
138 | } | ||
139 | EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr); | ||
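
For illustration, a sketch of the typical allocate/map/copy/free cycle from an endpoint function driver. pci_epc_unmap_addr() is assumed to exist as the counterpart of pci_epc_map_addr(); the helper name and the idea of copying from a host (RC) address are placeholders, not part of this patch.

#include <linux/io.h>
#include <linux/pci-epc.h>

static int my_epf_copy_from_host(struct pci_epc *epc, u64 host_addr,
				 size_t size, void *dst)
{
	phys_addr_t phys;
	void __iomem *src;
	int ret;

	/* Carve a window out of the EPC's outbound address space */
	src = pci_epc_mem_alloc_addr(epc, &phys, size);
	if (!src)
		return -ENOMEM;

	/* Point the window at the host-side (RC) address */
	ret = pci_epc_map_addr(epc, phys, host_addr, size);
	if (ret)
		goto free;

	memcpy_fromio(dst, src, size);

	pci_epc_unmap_addr(epc, phys);	/* assumed unmap counterpart */
free:
	pci_epc_mem_free_addr(epc, phys, src, size);
	return ret;
}
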
140 | |||
141 | MODULE_DESCRIPTION("PCI EPC Address Space Management"); | ||
142 | MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); | ||
143 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c new file mode 100644 index 000000000000..6877d6a5bcc9 --- /dev/null +++ b/drivers/pci/endpoint/pci-epf-core.c | |||
@@ -0,0 +1,359 @@ | |||
1 | /** | ||
2 | * PCI Endpoint *Function* (EPF) library | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/device.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/module.h> | ||
24 | |||
25 | #include <linux/pci-epc.h> | ||
26 | #include <linux/pci-epf.h> | ||
27 | #include <linux/pci-ep-cfs.h> | ||
28 | |||
29 | static struct bus_type pci_epf_bus_type; | ||
30 | static struct device_type pci_epf_type; | ||
31 | |||
32 | /** | ||
33 | * pci_epf_linkup() - Notify the function driver that the EPC device has | ||
34 | * established a connection with the Root Complex. | ||
35 | * @epf: the EPF device bound to the EPC device which has established | ||
36 | * the connection with the host | ||
37 | * | ||
38 | * Invoke to notify the function driver that the EPC device has established | ||
39 | * a connection with the Root Complex. | ||
40 | */ | ||
41 | void pci_epf_linkup(struct pci_epf *epf) | ||
42 | { | ||
43 | if (!epf->driver) { | ||
44 | dev_WARN(&epf->dev, "epf device not bound to driver\n"); | ||
45 | return; | ||
46 | } | ||
47 | |||
48 | epf->driver->ops->linkup(epf); | ||
49 | } | ||
50 | EXPORT_SYMBOL_GPL(pci_epf_linkup); | ||
51 | |||
52 | /** | ||
53 | * pci_epf_unbind() - Notify the function driver that the binding between the | ||
54 | * EPF device and EPC device has been lost | ||
55 | * @epf: the EPF device which has lost the binding with the EPC device | ||
56 | * | ||
57 | * Invoke to notify the function driver that the binding between the EPF device | ||
58 | * and EPC device has been lost. | ||
59 | */ | ||
60 | void pci_epf_unbind(struct pci_epf *epf) | ||
61 | { | ||
62 | if (!epf->driver) { | ||
63 | dev_WARN(&epf->dev, "epf device not bound to driver\n"); | ||
64 | return; | ||
65 | } | ||
66 | |||
67 | epf->driver->ops->unbind(epf); | ||
68 | module_put(epf->driver->owner); | ||
69 | } | ||
70 | EXPORT_SYMBOL_GPL(pci_epf_unbind); | ||
71 | |||
72 | /** | ||
73 | * pci_epf_bind() - Notify the function driver that the EPF device has been | ||
74 | * bound to an EPC device | ||
75 | * @epf: the EPF device which has been bound to the EPC device | ||
76 | * | ||
77 | * Invoke to notify the function driver that it has been bound to an EPC device. | ||
78 | */ | ||
79 | int pci_epf_bind(struct pci_epf *epf) | ||
80 | { | ||
81 | if (!epf->driver) { | ||
82 | dev_WARN(&epf->dev, "epf device not bound to driver\n"); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
86 | if (!try_module_get(epf->driver->owner)) | ||
87 | return -EAGAIN; | ||
88 | |||
89 | return epf->driver->ops->bind(epf); | ||
90 | } | ||
91 | EXPORT_SYMBOL_GPL(pci_epf_bind); | ||
92 | |||
93 | /** | ||
94 | * pci_epf_free_space() - free the allocated PCI EPF register space | ||
95 | * @addr: the virtual address of the PCI EPF register space | ||
96 | * @bar: the BAR number corresponding to the register space | ||
97 | * | ||
98 | * Invoke to free the allocated PCI EPF register space. | ||
99 | */ | ||
100 | void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar) | ||
101 | { | ||
102 | struct device *dev = &epf->dev; | ||
103 | |||
104 | if (!addr) | ||
105 | return; | ||
106 | |||
107 | dma_free_coherent(dev, epf->bar[bar].size, addr, | ||
108 | epf->bar[bar].phys_addr); | ||
109 | |||
110 | epf->bar[bar].phys_addr = 0; | ||
111 | epf->bar[bar].size = 0; | ||
112 | } | ||
113 | EXPORT_SYMBOL_GPL(pci_epf_free_space); | ||
114 | |||
115 | /** | ||
116 | * pci_epf_alloc_space() - allocate memory for the PCI EPF register space | ||
117 | * @size: the size of the memory that has to be allocated | ||
118 | * @bar: the BAR number corresponding to the allocated register space | ||
119 | * | ||
120 | * Invoke to allocate memory for the PCI EPF register space. | ||
121 | */ | ||
122 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) | ||
123 | { | ||
124 | void *space; | ||
125 | struct device *dev = &epf->dev; | ||
126 | dma_addr_t phys_addr; | ||
127 | |||
128 | if (size < 128) | ||
129 | size = 128; | ||
130 | size = roundup_pow_of_two(size); | ||
131 | |||
132 | space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); | ||
133 | if (!space) { | ||
134 | dev_err(dev, "failed to allocate mem space\n"); | ||
135 | return NULL; | ||
136 | } | ||
137 | |||
138 | epf->bar[bar].phys_addr = phys_addr; | ||
139 | epf->bar[bar].size = size; | ||
140 | |||
141 | return space; | ||
142 | } | ||
143 | EXPORT_SYMBOL_GPL(pci_epf_alloc_space); | ||
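
A hedged sketch of how a function driver's bind step might back BAR0 with local memory and then program it into the controller. BAR_0 is assumed to be a member of enum pci_barno, PCI_BASE_ADDRESS_MEM_TYPE_32 comes from the standard PCI register definitions, and my_epf_setup_bar0() is a hypothetical helper, not part of this patch.

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

static int my_epf_setup_bar0(struct pci_epf *epf, size_t size)
{
	void *base;

	/* Allocates coherent memory and records it in epf->bar[BAR_0] */
	base = pci_epf_alloc_space(epf, size, BAR_0);
	if (!base)
		return -ENOMEM;

	/* Ask the controller to expose that memory through BAR0 */
	return pci_epc_set_bar(epf->epc, BAR_0, epf->bar[BAR_0].phys_addr,
			       epf->bar[BAR_0].size,
			       PCI_BASE_ADDRESS_MEM_TYPE_32);
}
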
144 | |||
145 | /** | ||
146 | * pci_epf_unregister_driver() - unregister the PCI EPF driver | ||
147 | * @driver: the PCI EPF driver that has to be unregistered | ||
148 | * | ||
149 | * Invoke to unregister the PCI EPF driver. | ||
150 | */ | ||
151 | void pci_epf_unregister_driver(struct pci_epf_driver *driver) | ||
152 | { | ||
153 | pci_ep_cfs_remove_epf_group(driver->group); | ||
154 | driver_unregister(&driver->driver); | ||
155 | } | ||
156 | EXPORT_SYMBOL_GPL(pci_epf_unregister_driver); | ||
157 | |||
158 | /** | ||
159 | * __pci_epf_register_driver() - register a new PCI EPF driver | ||
160 | * @driver: structure representing PCI EPF driver | ||
161 | * @owner: the owner of the module that registers the PCI EPF driver | ||
162 | * | ||
163 | * Invoke to register a new PCI EPF driver. | ||
164 | */ | ||
165 | int __pci_epf_register_driver(struct pci_epf_driver *driver, | ||
166 | struct module *owner) | ||
167 | { | ||
168 | int ret; | ||
169 | |||
170 | if (!driver->ops) | ||
171 | return -EINVAL; | ||
172 | |||
173 | if (!driver->ops->bind || !driver->ops->unbind || !driver->ops->linkup) | ||
174 | return -EINVAL; | ||
175 | |||
176 | driver->driver.bus = &pci_epf_bus_type; | ||
177 | driver->driver.owner = owner; | ||
178 | |||
179 | ret = driver_register(&driver->driver); | ||
180 | if (ret) | ||
181 | return ret; | ||
182 | |||
183 | driver->group = pci_ep_cfs_add_epf_group(driver->driver.name); | ||
184 | |||
185 | return 0; | ||
186 | } | ||
187 | EXPORT_SYMBOL_GPL(__pci_epf_register_driver); | ||
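
Skeleton of an endpoint function driver, for illustration only: pci_epf_register_driver() is assumed to be a wrapper macro around __pci_epf_register_driver() that supplies THIS_MODULE, and the ops structure type name (pci_epf_ops) is assumed from the header; all callbacks are empty placeholders.

#include <linux/module.h>
#include <linux/pci-epf.h>

static int my_epf_probe(struct pci_epf *epf)
{
	return 0;	/* nothing to set up until bind time */
}

static int my_epf_bind(struct pci_epf *epf)
{
	/* allocate BAR space, write the header, set up interrupts, ... */
	return 0;
}

static void my_epf_unbind(struct pci_epf *epf)
{
	/* undo whatever bind did */
}

static void my_epf_linkup(struct pci_epf *epf)
{
	/* the link to the host is up; start servicing the function */
}

static struct pci_epf_ops my_epf_ops = {
	.bind	= my_epf_bind,
	.unbind	= my_epf_unbind,
	.linkup	= my_epf_linkup,
};

static const struct pci_epf_device_id my_epf_ids[] = {
	{ .name = "my_epf" },
	{ },
};

static struct pci_epf_driver my_epf_driver = {
	.driver.name	= "my_epf",
	.probe		= my_epf_probe,
	.ops		= &my_epf_ops,
	.id_table	= my_epf_ids,
};

static int __init my_epf_init(void)
{
	return pci_epf_register_driver(&my_epf_driver);	/* assumed macro */
}
module_init(my_epf_init);
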
188 | |||
189 | /** | ||
190 | * pci_epf_destroy() - destroy the created PCI EPF device | ||
191 | * @epf: the PCI EPF device that has to be destroyed. | ||
192 | * | ||
193 | * Invoke to destroy the PCI EPF device created by invoking pci_epf_create(). | ||
194 | */ | ||
195 | void pci_epf_destroy(struct pci_epf *epf) | ||
196 | { | ||
197 | device_unregister(&epf->dev); | ||
198 | } | ||
199 | EXPORT_SYMBOL_GPL(pci_epf_destroy); | ||
200 | |||
201 | /** | ||
202 | * pci_epf_create() - create a new PCI EPF device | ||
203 | * @name: the name of the PCI EPF device. This name will be used to bind | ||
204 | * the EPF device to an EPF driver | ||
205 | * | ||
206 | * Invoke to create a new PCI EPF device by providing the name of the function | ||
207 | * device. | ||
208 | */ | ||
209 | struct pci_epf *pci_epf_create(const char *name) | ||
210 | { | ||
211 | int ret; | ||
212 | struct pci_epf *epf; | ||
213 | struct device *dev; | ||
214 | char *func_name; | ||
215 | char *buf; | ||
216 | |||
217 | epf = kzalloc(sizeof(*epf), GFP_KERNEL); | ||
218 | if (!epf) { | ||
219 | ret = -ENOMEM; | ||
220 | goto err_ret; | ||
221 | } | ||
222 | |||
223 | buf = kstrdup(name, GFP_KERNEL); | ||
224 | if (!buf) { | ||
225 | ret = -ENOMEM; | ||
226 | goto free_epf; | ||
227 | } | ||
228 | |||
229 | func_name = buf; | ||
230 | buf = strchrnul(buf, '.'); | ||
231 | *buf = '\0'; | ||
232 | |||
233 | epf->name = kstrdup(func_name, GFP_KERNEL); | ||
234 | if (!epf->name) { | ||
235 | ret = -ENOMEM; | ||
236 | goto free_func_name; | ||
237 | } | ||
238 | |||
239 | dev = &epf->dev; | ||
240 | device_initialize(dev); | ||
241 | dev->bus = &pci_epf_bus_type; | ||
242 | dev->type = &pci_epf_type; | ||
243 | |||
244 | ret = dev_set_name(dev, "%s", name); | ||
245 | if (ret) | ||
246 | goto put_dev; | ||
247 | |||
248 | ret = device_add(dev); | ||
249 | if (ret) | ||
250 | goto put_dev; | ||
251 | |||
252 | kfree(func_name); | ||
253 | return epf; | ||
254 | |||
255 | put_dev: | ||
256 | put_device(dev); | ||
257 | kfree(epf->name); | ||
258 | |||
259 | free_func_name: | ||
260 | kfree(func_name); | ||
261 | |||
262 | free_epf: | ||
263 | kfree(epf); | ||
264 | |||
265 | err_ret: | ||
266 | return ERR_PTR(ret); | ||
267 | } | ||
268 | EXPORT_SYMBOL_GPL(pci_epf_create); | ||
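
For illustration (assumptions labeled, not part of this patch): a sketch of how glue code such as the configfs layer is expected to stitch a function to a controller, creating the EPF by name, attaching it to an EPC and letting the function driver bind. The function name "pci_epf_test.0" is only an example, and the epc pointer is assumed to have been looked up elsewhere.

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int my_attach_function(struct pci_epc *epc)
{
	struct pci_epf *epf;
	int ret;

	/* Everything after the '.' is treated as an instance suffix */
	epf = pci_epf_create("pci_epf_test.0");
	if (IS_ERR(epf))
		return PTR_ERR(epf);

	ret = pci_epc_add_epf(epc, epf);
	if (ret)
		goto destroy;

	ret = pci_epf_bind(epf);
	if (ret)
		goto destroy;

	return 0;

destroy:
	pci_epf_destroy(epf);
	return ret;
}
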
269 | |||
270 | static void pci_epf_dev_release(struct device *dev) | ||
271 | { | ||
272 | struct pci_epf *epf = to_pci_epf(dev); | ||
273 | |||
274 | kfree(epf->name); | ||
275 | kfree(epf); | ||
276 | } | ||
277 | |||
278 | static struct device_type pci_epf_type = { | ||
279 | .release = pci_epf_dev_release, | ||
280 | }; | ||
281 | |||
282 | static int | ||
283 | pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf) | ||
284 | { | ||
285 | while (id->name[0]) { | ||
286 | if (strcmp(epf->name, id->name) == 0) | ||
287 | return true; | ||
288 | id++; | ||
289 | } | ||
290 | |||
291 | return false; | ||
292 | } | ||
293 | |||
294 | static int pci_epf_device_match(struct device *dev, struct device_driver *drv) | ||
295 | { | ||
296 | struct pci_epf *epf = to_pci_epf(dev); | ||
297 | struct pci_epf_driver *driver = to_pci_epf_driver(drv); | ||
298 | |||
299 | if (driver->id_table) | ||
300 | return pci_epf_match_id(driver->id_table, epf); | ||
301 | |||
302 | return !strcmp(epf->name, drv->name); | ||
303 | } | ||
304 | |||
305 | static int pci_epf_device_probe(struct device *dev) | ||
306 | { | ||
307 | struct pci_epf *epf = to_pci_epf(dev); | ||
308 | struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver); | ||
309 | |||
310 | if (!driver->probe) | ||
311 | return -ENODEV; | ||
312 | |||
313 | epf->driver = driver; | ||
314 | |||
315 | return driver->probe(epf); | ||
316 | } | ||
317 | |||
318 | static int pci_epf_device_remove(struct device *dev) | ||
319 | { | ||
320 | int ret; | ||
321 | struct pci_epf *epf = to_pci_epf(dev); | ||
322 | struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver); | ||
323 | |||
324 | ret = driver->remove(epf); | ||
325 | epf->driver = NULL; | ||
326 | |||
327 | return ret; | ||
328 | } | ||
329 | |||
330 | static struct bus_type pci_epf_bus_type = { | ||
331 | .name = "pci-epf", | ||
332 | .match = pci_epf_device_match, | ||
333 | .probe = pci_epf_device_probe, | ||
334 | .remove = pci_epf_device_remove, | ||
335 | }; | ||
336 | |||
337 | static int __init pci_epf_init(void) | ||
338 | { | ||
339 | int ret; | ||
340 | |||
341 | ret = bus_register(&pci_epf_bus_type); | ||
342 | if (ret) { | ||
343 | pr_err("failed to register pci epf bus --> %d\n", ret); | ||
344 | return ret; | ||
345 | } | ||
346 | |||
347 | return 0; | ||
348 | } | ||
349 | module_init(pci_epf_init); | ||
350 | |||
351 | static void __exit pci_epf_exit(void) | ||
352 | { | ||
353 | bus_unregister(&pci_epf_bus_type); | ||
354 | } | ||
355 | module_exit(pci_epf_exit); | ||
356 | |||
357 | MODULE_DESCRIPTION("PCI EPF Library"); | ||
358 | MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); | ||
359 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index f7c1d4d5c665..7f47cd5e10a5 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig | |||
@@ -27,6 +27,12 @@ config PCIE_XILINX_NWL | |||
27 | or End Point. The current option selection will only | 27 | or End Point. The current option selection will only |
28 | support root port enabling. | 28 | support root port enabling. |
29 | 29 | ||
30 | config PCI_FTPCI100 | ||
31 | bool "Faraday Technology FTPCI100 PCI controller" | ||
32 | depends on OF | ||
33 | depends on ARM | ||
34 | default ARCH_GEMINI | ||
35 | |||
30 | config PCI_TEGRA | 36 | config PCI_TEGRA |
31 | bool "NVIDIA Tegra PCIe controller" | 37 | bool "NVIDIA Tegra PCIe controller" |
32 | depends on ARCH_TEGRA | 38 | depends on ARCH_TEGRA |
@@ -95,6 +101,7 @@ config PCI_VERSATILE | |||
95 | 101 | ||
96 | config PCIE_IPROC | 102 | config PCIE_IPROC |
97 | tristate | 103 | tristate |
104 | select PCI_DOMAINS | ||
98 | help | 105 | help |
99 | This enables the iProc PCIe core controller support for Broadcom's | 106 | This enables the iProc PCIe core controller support for Broadcom's |
100 | iProc family of SoCs. An appropriate bus interface driver needs | 107 | iProc family of SoCs. An appropriate bus interface driver needs |
@@ -115,7 +122,6 @@ config PCIE_IPROC_BCMA | |||
115 | depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST) | 122 | depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST) |
116 | select PCIE_IPROC | 123 | select PCIE_IPROC |
117 | select BCMA | 124 | select BCMA |
118 | select PCI_DOMAINS | ||
119 | default ARCH_BCM_5301X | 125 | default ARCH_BCM_5301X |
120 | help | 126 | help |
121 | Say Y here if you want to use the Broadcom iProc PCIe controller | 127 | Say Y here if you want to use the Broadcom iProc PCIe controller |
@@ -164,7 +170,7 @@ config PCI_HOST_THUNDER_ECAM | |||
164 | Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. | 170 | Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. |
165 | 171 | ||
166 | config PCIE_ROCKCHIP | 172 | config PCIE_ROCKCHIP |
167 | bool "Rockchip PCIe controller" | 173 | tristate "Rockchip PCIe controller" |
168 | depends on ARCH_ROCKCHIP || COMPILE_TEST | 174 | depends on ARCH_ROCKCHIP || COMPILE_TEST |
169 | depends on OF | 175 | depends on OF |
170 | depends on PCI_MSI_IRQ_DOMAIN | 176 | depends on PCI_MSI_IRQ_DOMAIN |
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 4d3686676cc3..cab879578003 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o | ||
1 | obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o | 2 | obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o |
2 | obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o | 3 | obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o |
3 | obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o | 4 | obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o |
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 4fce494271cc..37d0bcd31f8a 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c | |||
@@ -200,10 +200,12 @@ struct advk_pcie { | |||
200 | struct list_head resources; | 200 | struct list_head resources; |
201 | struct irq_domain *irq_domain; | 201 | struct irq_domain *irq_domain; |
202 | struct irq_chip irq_chip; | 202 | struct irq_chip irq_chip; |
203 | struct msi_controller msi; | ||
204 | struct irq_domain *msi_domain; | 203 | struct irq_domain *msi_domain; |
204 | struct irq_domain *msi_inner_domain; | ||
205 | struct irq_chip msi_bottom_irq_chip; | ||
205 | struct irq_chip msi_irq_chip; | 206 | struct irq_chip msi_irq_chip; |
206 | DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM); | 207 | struct msi_domain_info msi_domain_info; |
208 | DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); | ||
207 | struct mutex msi_used_lock; | 209 | struct mutex msi_used_lock; |
208 | u16 msi_msg; | 210 | u16 msi_msg; |
209 | int root_bus_nr; | 211 | int root_bus_nr; |
@@ -545,94 +547,64 @@ static struct pci_ops advk_pcie_ops = { | |||
545 | .write = advk_pcie_wr_conf, | 547 | .write = advk_pcie_wr_conf, |
546 | }; | 548 | }; |
547 | 549 | ||
548 | static int advk_pcie_alloc_msi(struct advk_pcie *pcie) | 550 | static void advk_msi_irq_compose_msi_msg(struct irq_data *data, |
551 | struct msi_msg *msg) | ||
549 | { | 552 | { |
550 | int hwirq; | 553 | struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); |
554 | phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg); | ||
551 | 555 | ||
552 | mutex_lock(&pcie->msi_used_lock); | 556 | msg->address_lo = lower_32_bits(msi_msg); |
553 | hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM); | 557 | msg->address_hi = upper_32_bits(msi_msg); |
554 | if (hwirq >= MSI_IRQ_NUM) | 558 | msg->data = data->irq; |
555 | hwirq = -ENOSPC; | ||
556 | else | ||
557 | set_bit(hwirq, pcie->msi_irq_in_use); | ||
558 | mutex_unlock(&pcie->msi_used_lock); | ||
559 | |||
560 | return hwirq; | ||
561 | } | 559 | } |
562 | 560 | ||
563 | static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq) | 561 | static int advk_msi_set_affinity(struct irq_data *irq_data, |
562 | const struct cpumask *mask, bool force) | ||
564 | { | 563 | { |
565 | struct device *dev = &pcie->pdev->dev; | 564 | return -EINVAL; |
566 | |||
567 | mutex_lock(&pcie->msi_used_lock); | ||
568 | if (!test_bit(hwirq, pcie->msi_irq_in_use)) | ||
569 | dev_err(dev, "trying to free unused MSI#%d\n", hwirq); | ||
570 | else | ||
571 | clear_bit(hwirq, pcie->msi_irq_in_use); | ||
572 | mutex_unlock(&pcie->msi_used_lock); | ||
573 | } | 565 | } |
574 | 566 | ||
575 | static int advk_pcie_setup_msi_irq(struct msi_controller *chip, | 567 | static int advk_msi_irq_domain_alloc(struct irq_domain *domain, |
576 | struct pci_dev *pdev, | 568 | unsigned int virq, |
577 | struct msi_desc *desc) | 569 | unsigned int nr_irqs, void *args) |
578 | { | 570 | { |
579 | struct advk_pcie *pcie = pdev->bus->sysdata; | 571 | struct advk_pcie *pcie = domain->host_data; |
580 | struct msi_msg msg; | 572 | int hwirq, i; |
581 | int virq, hwirq; | ||
582 | phys_addr_t msi_msg_phys; | ||
583 | |||
584 | /* We support MSI, but not MSI-X */ | ||
585 | if (desc->msi_attrib.is_msix) | ||
586 | return -EINVAL; | ||
587 | |||
588 | hwirq = advk_pcie_alloc_msi(pcie); | ||
589 | if (hwirq < 0) | ||
590 | return hwirq; | ||
591 | 573 | ||
592 | virq = irq_create_mapping(pcie->msi_domain, hwirq); | 574 | mutex_lock(&pcie->msi_used_lock); |
593 | if (!virq) { | 575 | hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, |
594 | advk_pcie_free_msi(pcie, hwirq); | 576 | 0, nr_irqs, 0); |
595 | return -EINVAL; | 577 | if (hwirq >= MSI_IRQ_NUM) { |
578 | mutex_unlock(&pcie->msi_used_lock); | ||
579 | return -ENOSPC; | ||
596 | } | 580 | } |
597 | 581 | ||
598 | irq_set_msi_desc(virq, desc); | 582 | bitmap_set(pcie->msi_used, hwirq, nr_irqs); |
599 | 583 | mutex_unlock(&pcie->msi_used_lock); | |
600 | msi_msg_phys = virt_to_phys(&pcie->msi_msg); | ||
601 | |||
602 | msg.address_lo = lower_32_bits(msi_msg_phys); | ||
603 | msg.address_hi = upper_32_bits(msi_msg_phys); | ||
604 | msg.data = virq; | ||
605 | |||
606 | pci_write_msi_msg(virq, &msg); | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | 584 | ||
611 | static void advk_pcie_teardown_msi_irq(struct msi_controller *chip, | 585 | for (i = 0; i < nr_irqs; i++) |
612 | unsigned int irq) | 586 | irq_domain_set_info(domain, virq + i, hwirq + i, |
613 | { | 587 | &pcie->msi_bottom_irq_chip, |
614 | struct irq_data *d = irq_get_irq_data(irq); | 588 | domain->host_data, handle_simple_irq, |
615 | struct msi_desc *msi = irq_data_get_msi_desc(d); | 589 | NULL, NULL); |
616 | struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi); | ||
617 | unsigned long hwirq = d->hwirq; | ||
618 | 590 | ||
619 | irq_dispose_mapping(irq); | 591 | return hwirq; |
620 | advk_pcie_free_msi(pcie, hwirq); | ||
621 | } | 592 | } |
622 | 593 | ||
623 | static int advk_pcie_msi_map(struct irq_domain *domain, | 594 | static void advk_msi_irq_domain_free(struct irq_domain *domain, |
624 | unsigned int virq, irq_hw_number_t hw) | 595 | unsigned int virq, unsigned int nr_irqs) |
625 | { | 596 | { |
597 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | ||
626 | struct advk_pcie *pcie = domain->host_data; | 598 | struct advk_pcie *pcie = domain->host_data; |
627 | 599 | ||
628 | irq_set_chip_and_handler(virq, &pcie->msi_irq_chip, | 600 | mutex_lock(&pcie->msi_used_lock); |
629 | handle_simple_irq); | 601 | bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); |
630 | 602 | mutex_unlock(&pcie->msi_used_lock); | |
631 | return 0; | ||
632 | } | 603 | } |
633 | 604 | ||
634 | static const struct irq_domain_ops advk_pcie_msi_irq_ops = { | 605 | static const struct irq_domain_ops advk_msi_domain_ops = { |
635 | .map = advk_pcie_msi_map, | 606 | .alloc = advk_msi_irq_domain_alloc, |
607 | .free = advk_msi_irq_domain_free, | ||
636 | }; | 608 | }; |
637 | 609 | ||
638 | static void advk_pcie_irq_mask(struct irq_data *d) | 610 | static void advk_pcie_irq_mask(struct irq_data *d) |
@@ -680,30 +652,25 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) | |||
680 | { | 652 | { |
681 | struct device *dev = &pcie->pdev->dev; | 653 | struct device *dev = &pcie->pdev->dev; |
682 | struct device_node *node = dev->of_node; | 654 | struct device_node *node = dev->of_node; |
683 | struct irq_chip *msi_irq_chip; | 655 | struct irq_chip *bottom_ic, *msi_ic; |
684 | struct msi_controller *msi; | 656 | struct msi_domain_info *msi_di; |
685 | phys_addr_t msi_msg_phys; | 657 | phys_addr_t msi_msg_phys; |
686 | int ret; | ||
687 | 658 | ||
688 | msi_irq_chip = &pcie->msi_irq_chip; | 659 | mutex_init(&pcie->msi_used_lock); |
689 | 660 | ||
690 | msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi", | 661 | bottom_ic = &pcie->msi_bottom_irq_chip; |
691 | dev_name(dev)); | ||
692 | if (!msi_irq_chip->name) | ||
693 | return -ENOMEM; | ||
694 | 662 | ||
695 | msi_irq_chip->irq_enable = pci_msi_unmask_irq; | 663 | bottom_ic->name = "MSI"; |
696 | msi_irq_chip->irq_disable = pci_msi_mask_irq; | 664 | bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg; |
697 | msi_irq_chip->irq_mask = pci_msi_mask_irq; | 665 | bottom_ic->irq_set_affinity = advk_msi_set_affinity; |
698 | msi_irq_chip->irq_unmask = pci_msi_unmask_irq; | ||
699 | 666 | ||
700 | msi = &pcie->msi; | 667 | msi_ic = &pcie->msi_irq_chip; |
668 | msi_ic->name = "advk-MSI"; | ||
701 | 669 | ||
702 | msi->setup_irq = advk_pcie_setup_msi_irq; | 670 | msi_di = &pcie->msi_domain_info; |
703 | msi->teardown_irq = advk_pcie_teardown_msi_irq; | 671 | msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | |
704 | msi->of_node = node; | 672 | MSI_FLAG_MULTI_PCI_MSI; |
705 | 673 | msi_di->chip = msi_ic; | |
706 | mutex_init(&pcie->msi_used_lock); | ||
707 | 674 | ||
708 | msi_msg_phys = virt_to_phys(&pcie->msi_msg); | 675 | msi_msg_phys = virt_to_phys(&pcie->msi_msg); |
709 | 676 | ||
@@ -712,16 +679,18 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) | |||
712 | advk_writel(pcie, upper_32_bits(msi_msg_phys), | 679 | advk_writel(pcie, upper_32_bits(msi_msg_phys), |
713 | PCIE_MSI_ADDR_HIGH_REG); | 680 | PCIE_MSI_ADDR_HIGH_REG); |
714 | 681 | ||
715 | pcie->msi_domain = | 682 | pcie->msi_inner_domain = |
716 | irq_domain_add_linear(NULL, MSI_IRQ_NUM, | 683 | irq_domain_add_linear(NULL, MSI_IRQ_NUM, |
717 | &advk_pcie_msi_irq_ops, pcie); | 684 | &advk_msi_domain_ops, pcie); |
718 | if (!pcie->msi_domain) | 685 | if (!pcie->msi_inner_domain) |
719 | return -ENOMEM; | 686 | return -ENOMEM; |
720 | 687 | ||
721 | ret = of_pci_msi_chip_add(msi); | 688 | pcie->msi_domain = |
722 | if (ret < 0) { | 689 | pci_msi_create_irq_domain(of_node_to_fwnode(node), |
723 | irq_domain_remove(pcie->msi_domain); | 690 | msi_di, pcie->msi_inner_domain); |
724 | return ret; | 691 | if (!pcie->msi_domain) { |
692 | irq_domain_remove(pcie->msi_inner_domain); | ||
693 | return -ENOMEM; | ||
725 | } | 694 | } |
726 | 695 | ||
727 | return 0; | 696 | return 0; |
@@ -729,8 +698,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) | |||
729 | 698 | ||
730 | static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) | 699 | static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) |
731 | { | 700 | { |
732 | of_pci_msi_chip_remove(&pcie->msi); | ||
733 | irq_domain_remove(pcie->msi_domain); | 701 | irq_domain_remove(pcie->msi_domain); |
702 | irq_domain_remove(pcie->msi_inner_domain); | ||
734 | } | 703 | } |
735 | 704 | ||
736 | static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) | 705 | static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) |
@@ -917,8 +886,6 @@ static int advk_pcie_probe(struct platform_device *pdev) | |||
917 | struct advk_pcie *pcie; | 886 | struct advk_pcie *pcie; |
918 | struct resource *res; | 887 | struct resource *res; |
919 | struct pci_bus *bus, *child; | 888 | struct pci_bus *bus, *child; |
920 | struct msi_controller *msi; | ||
921 | struct device_node *msi_node; | ||
922 | int ret, irq; | 889 | int ret, irq; |
923 | 890 | ||
924 | pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL); | 891 | pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL); |
@@ -962,14 +929,8 @@ static int advk_pcie_probe(struct platform_device *pdev) | |||
962 | return ret; | 929 | return ret; |
963 | } | 930 | } |
964 | 931 | ||
965 | msi_node = of_parse_phandle(dev->of_node, "msi-parent", 0); | 932 | bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops, |
966 | if (msi_node) | 933 | pcie, &pcie->resources); |
967 | msi = of_pci_find_msi_chip_by_node(msi_node); | ||
968 | else | ||
969 | msi = NULL; | ||
970 | |||
971 | bus = pci_scan_root_bus_msi(dev, 0, &advk_pcie_ops, | ||
972 | pcie, &pcie->resources, &pcie->msi); | ||
973 | if (!bus) { | 934 | if (!bus) { |
974 | advk_pcie_remove_msi_irq_domain(pcie); | 935 | advk_pcie_remove_msi_irq_domain(pcie); |
975 | advk_pcie_remove_irq_domain(pcie); | 936 | advk_pcie_remove_irq_domain(pcie); |
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c new file mode 100644 index 000000000000..d26501c4145a --- /dev/null +++ b/drivers/pci/host/pci-ftpci100.c | |||
@@ -0,0 +1,563 @@ | |||
1 | /* | ||
2 | * Support for Faraday Technology FTPCI100 PCI Controller | ||
3 | * | ||
4 | * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> | ||
5 | * | ||
6 | * Based on the out-of-tree OpenWRT patch for Cortina Gemini: | ||
7 | * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com> | ||
8 | * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> | ||
9 | * Based on SL2312 PCI controller code | ||
10 | * Storlink (C) 2003 | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/io.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/of_address.h> | ||
18 | #include <linux/of_device.h> | ||
19 | #include <linux/of_irq.h> | ||
20 | #include <linux/of_pci.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/irqdomain.h> | ||
25 | #include <linux/irqchip/chained_irq.h> | ||
26 | #include <linux/bitops.h> | ||
27 | #include <linux/irq.h> | ||
28 | |||
29 | /* | ||
30 | * Special configuration registers directly in the first few words | ||
31 | * in I/O space. | ||
32 | */ | ||
33 | #define PCI_IOSIZE 0x00 | ||
34 | #define PCI_PROT 0x04 /* AHB protection */ | ||
35 | #define PCI_CTRL 0x08 /* PCI control signal */ | ||
36 | #define PCI_SOFTRST 0x10 /* Soft reset counter and response error enable */ | ||
37 | #define PCI_CONFIG 0x28 /* PCI configuration command register */ | ||
38 | #define PCI_DATA 0x2C | ||
39 | |||
40 | #define FARADAY_PCI_PMC 0x40 /* Power management control */ | ||
41 | #define FARADAY_PCI_PMCSR 0x44 /* Power management status */ | ||
42 | #define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ | ||
43 | #define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */ | ||
44 | #define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */ | ||
45 | #define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ | ||
46 | #define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ | ||
47 | |||
48 | /* Bits 31..28 gives INTD..INTA status */ | ||
49 | #define PCI_CTRL2_INTSTS_SHIFT 28 | ||
50 | #define PCI_CTRL2_INTMASK_CMDERR BIT(27) | ||
51 | #define PCI_CTRL2_INTMASK_PARERR BIT(26) | ||
52 | /* Bits 25..22 masks INTD..INTA */ | ||
53 | #define PCI_CTRL2_INTMASK_SHIFT 22 | ||
54 | #define PCI_CTRL2_INTMASK_MABRT_RX BIT(21) | ||
55 | #define PCI_CTRL2_INTMASK_TABRT_RX BIT(20) | ||
56 | #define PCI_CTRL2_INTMASK_TABRT_TX BIT(19) | ||
57 | #define PCI_CTRL2_INTMASK_RETRY4 BIT(18) | ||
58 | #define PCI_CTRL2_INTMASK_SERR_RX BIT(17) | ||
59 | #define PCI_CTRL2_INTMASK_PERR_RX BIT(16) | ||
60 | /* Bit 15 reserved */ | ||
61 | #define PCI_CTRL2_MSTPRI_REQ6 BIT(14) | ||
62 | #define PCI_CTRL2_MSTPRI_REQ5 BIT(13) | ||
63 | #define PCI_CTRL2_MSTPRI_REQ4 BIT(12) | ||
64 | #define PCI_CTRL2_MSTPRI_REQ3 BIT(11) | ||
65 | #define PCI_CTRL2_MSTPRI_REQ2 BIT(10) | ||
66 | #define PCI_CTRL2_MSTPRI_REQ1 BIT(9) | ||
67 | #define PCI_CTRL2_MSTPRI_REQ0 BIT(8) | ||
68 | /* Bits 7..4 reserved */ | ||
69 | /* Bits 3..0 TRDYW */ | ||
70 | |||
71 | /* | ||
72 | * Memory configs: | ||
73 | * Bit 31..20 defines the PCI side memory base | ||
74 | * Bit 19..16 (4 bits) defines the size per below | ||
75 | */ | ||
76 | #define FARADAY_PCI_MEMBASE_MASK 0xfff00000 | ||
77 | #define FARADAY_PCI_MEMSIZE_1MB 0x0 | ||
78 | #define FARADAY_PCI_MEMSIZE_2MB 0x1 | ||
79 | #define FARADAY_PCI_MEMSIZE_4MB 0x2 | ||
80 | #define FARADAY_PCI_MEMSIZE_8MB 0x3 | ||
81 | #define FARADAY_PCI_MEMSIZE_16MB 0x4 | ||
82 | #define FARADAY_PCI_MEMSIZE_32MB 0x5 | ||
83 | #define FARADAY_PCI_MEMSIZE_64MB 0x6 | ||
84 | #define FARADAY_PCI_MEMSIZE_128MB 0x7 | ||
85 | #define FARADAY_PCI_MEMSIZE_256MB 0x8 | ||
86 | #define FARADAY_PCI_MEMSIZE_512MB 0x9 | ||
87 | #define FARADAY_PCI_MEMSIZE_1GB 0xa | ||
88 | #define FARADAY_PCI_MEMSIZE_2GB 0xb | ||
89 | #define FARADAY_PCI_MEMSIZE_SHIFT 16 | ||
90 | |||
91 | /* | ||
92 | * The DMA base is set to 0x0 for all memory segments, it reflects the | ||
93 | * fact that the memory of the host system starts at 0x0. | ||
94 | */ | ||
95 | #define FARADAY_PCI_DMA_MEM1_BASE 0x00000000 | ||
96 | #define FARADAY_PCI_DMA_MEM2_BASE 0x00000000 | ||
97 | #define FARADAY_PCI_DMA_MEM3_BASE 0x00000000 | ||
98 | |||
99 | /* Defines for PCI configuration command register */ | ||
100 | #define PCI_CONF_ENABLE BIT(31) | ||
101 | #define PCI_CONF_WHERE(r) ((r) & 0xFC) | ||
102 | #define PCI_CONF_BUS(b) (((b) & 0xFF) << 16) | ||
103 | #define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11) | ||
104 | #define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8) | ||
105 | |||
106 | /** | ||
107 | * struct faraday_pci_variant - encodes IP block differences | ||
108 | * @cascaded_irq: this host has cascaded IRQs from an interrupt controller | ||
109 | * embedded in the host bridge. | ||
110 | */ | ||
111 | struct faraday_pci_variant { | ||
112 | bool cascaded_irq; | ||
113 | }; | ||
114 | |||
115 | struct faraday_pci { | ||
116 | struct device *dev; | ||
117 | void __iomem *base; | ||
118 | struct irq_domain *irqdomain; | ||
119 | struct pci_bus *bus; | ||
120 | }; | ||
121 | |||
122 | static int faraday_res_to_memcfg(resource_size_t mem_base, | ||
123 | resource_size_t mem_size, u32 *val) | ||
124 | { | ||
125 | u32 outval; | ||
126 | |||
127 | switch (mem_size) { | ||
128 | case SZ_1M: | ||
129 | outval = FARADAY_PCI_MEMSIZE_1MB; | ||
130 | break; | ||
131 | case SZ_2M: | ||
132 | outval = FARADAY_PCI_MEMSIZE_2MB; | ||
133 | break; | ||
134 | case SZ_4M: | ||
135 | outval = FARADAY_PCI_MEMSIZE_4MB; | ||
136 | break; | ||
137 | case SZ_8M: | ||
138 | outval = FARADAY_PCI_MEMSIZE_8MB; | ||
139 | break; | ||
140 | case SZ_16M: | ||
141 | outval = FARADAY_PCI_MEMSIZE_16MB; | ||
142 | break; | ||
143 | case SZ_32M: | ||
144 | outval = FARADAY_PCI_MEMSIZE_32MB; | ||
145 | break; | ||
146 | case SZ_64M: | ||
147 | outval = FARADAY_PCI_MEMSIZE_64MB; | ||
148 | break; | ||
149 | case SZ_128M: | ||
150 | outval = FARADAY_PCI_MEMSIZE_128MB; | ||
151 | break; | ||
152 | case SZ_256M: | ||
153 | outval = FARADAY_PCI_MEMSIZE_256MB; | ||
154 | break; | ||
155 | case SZ_512M: | ||
156 | outval = FARADAY_PCI_MEMSIZE_512MB; | ||
157 | break; | ||
158 | case SZ_1G: | ||
159 | outval = FARADAY_PCI_MEMSIZE_1GB; | ||
160 | break; | ||
161 | case SZ_2G: | ||
162 | outval = FARADAY_PCI_MEMSIZE_2GB; | ||
163 | break; | ||
164 | default: | ||
165 | return -EINVAL; | ||
166 | } | ||
167 | outval <<= FARADAY_PCI_MEMSIZE_SHIFT; | ||
168 | |||
169 | /* The bridge only honours a 1 MiB-aligned base; low bits are dropped */ | ||
170 | if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK)) | ||
171 | pr_warn("truncated PCI memory base\n"); | ||
172 | /* Translate to bridge side address space */ | ||
173 | outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK); | ||
174 | pr_debug("Translated pci base @%pap, size %pap to config %08x\n", | ||
175 | &mem_base, &mem_size, outval); | ||
176 | |||
177 | *val = outval; | ||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, | ||
182 | int config, int size, u32 *value) | ||
183 | { | ||
184 | struct faraday_pci *p = bus->sysdata; | ||
185 | |||
186 | writel(PCI_CONF_BUS(bus->number) | | ||
187 | PCI_CONF_DEVICE(PCI_SLOT(fn)) | | ||
188 | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | | ||
189 | PCI_CONF_WHERE(config) | | ||
190 | PCI_CONF_ENABLE, | ||
191 | p->base + PCI_CONFIG); | ||
192 | |||
193 | *value = readl(p->base + PCI_DATA); | ||
194 | |||
195 | if (size == 1) | ||
196 | *value = (*value >> (8 * (config & 3))) & 0xFF; | ||
197 | else if (size == 2) | ||
198 | *value = (*value >> (8 * (config & 3))) & 0xFFFF; | ||
199 | |||
200 | dev_dbg(&bus->dev, | ||
201 | "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | ||
202 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); | ||
203 | |||
204 | return PCIBIOS_SUCCESSFUL; | ||
205 | } | ||
206 | |||
207 | static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, | ||
208 | int config, int size, u32 value) | ||
209 | { | ||
210 | struct faraday_pci *p = bus->sysdata; | ||
211 | int ret = PCIBIOS_SUCCESSFUL; | ||
212 | |||
213 | dev_dbg(&bus->dev, | ||
214 | "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | ||
215 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); | ||
216 | |||
217 | writel(PCI_CONF_BUS(bus->number) | | ||
218 | PCI_CONF_DEVICE(PCI_SLOT(fn)) | | ||
219 | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | | ||
220 | PCI_CONF_WHERE(config) | | ||
221 | PCI_CONF_ENABLE, | ||
222 | p->base + PCI_CONFIG); | ||
223 | |||
224 | switch (size) { | ||
225 | case 4: | ||
226 | writel(value, p->base + PCI_DATA); | ||
227 | break; | ||
228 | case 2: | ||
229 | writew(value, p->base + PCI_DATA + (config & 3)); | ||
230 | break; | ||
231 | case 1: | ||
232 | writeb(value, p->base + PCI_DATA + (config & 3)); | ||
233 | break; | ||
234 | default: | ||
235 | ret = PCIBIOS_BAD_REGISTER_NUMBER; | ||
236 | } | ||
237 | |||
238 | return ret; | ||
239 | } | ||
240 | |||
241 | static struct pci_ops faraday_pci_ops = { | ||
242 | .read = faraday_pci_read_config, | ||
243 | .write = faraday_pci_write_config, | ||
244 | }; | ||
245 | |||
246 | static void faraday_pci_ack_irq(struct irq_data *d) | ||
247 | { | ||
248 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | ||
249 | unsigned int reg; | ||
250 | |||
251 | faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); | ||
252 | reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); | ||
253 | reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); | ||
254 | faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); | ||
255 | } | ||
256 | |||
257 | static void faraday_pci_mask_irq(struct irq_data *d) | ||
258 | { | ||
259 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | ||
260 | unsigned int reg; | ||
261 | |||
262 | faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); | ||
263 | reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) | ||
264 | | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); | ||
265 | faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); | ||
266 | } | ||
267 | |||
268 | static void faraday_pci_unmask_irq(struct irq_data *d) | ||
269 | { | ||
270 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | ||
271 | unsigned int reg; | ||
272 | |||
273 | faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); | ||
274 | reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); | ||
275 | reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT); | ||
276 | faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); | ||
277 | } | ||
278 | |||
279 | static void faraday_pci_irq_handler(struct irq_desc *desc) | ||
280 | { | ||
281 | struct faraday_pci *p = irq_desc_get_handler_data(desc); | ||
282 | struct irq_chip *irqchip = irq_desc_get_chip(desc); | ||
283 | unsigned int irq_stat, reg, i; | ||
284 | |||
285 | faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); | ||
286 | irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; | ||
287 | |||
288 | chained_irq_enter(irqchip, desc); | ||
289 | |||
290 | for (i = 0; i < 4; i++) { | ||
291 | if ((irq_stat & BIT(i)) == 0) | ||
292 | continue; | ||
293 | generic_handle_irq(irq_find_mapping(p->irqdomain, i)); | ||
294 | } | ||
295 | |||
296 | chained_irq_exit(irqchip, desc); | ||
297 | } | ||
298 | |||
299 | static struct irq_chip faraday_pci_irq_chip = { | ||
300 | .name = "PCI", | ||
301 | .irq_ack = faraday_pci_ack_irq, | ||
302 | .irq_mask = faraday_pci_mask_irq, | ||
303 | .irq_unmask = faraday_pci_unmask_irq, | ||
304 | }; | ||
305 | |||
306 | static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq, | ||
307 | irq_hw_number_t hwirq) | ||
308 | { | ||
309 | irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq); | ||
310 | irq_set_chip_data(irq, domain->host_data); | ||
311 | |||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static const struct irq_domain_ops faraday_pci_irqdomain_ops = { | ||
316 | .map = faraday_pci_irq_map, | ||
317 | }; | ||
318 | |||
319 | static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) | ||
320 | { | ||
321 | struct device_node *intc = of_get_next_child(p->dev->of_node, NULL); | ||
322 | int irq; | ||
323 | int i; | ||
324 | |||
325 | if (!intc) { | ||
326 | dev_err(p->dev, "missing child interrupt-controller node\n"); | ||
327 | return -EINVAL; | ||
328 | } | ||
329 | |||
330 | /* All PCI IRQs cascade off this one */ | ||
331 | irq = of_irq_get(intc, 0); | ||
332 | if (!irq) { | ||
333 | dev_err(p->dev, "failed to get parent IRQ\n"); | ||
334 | return -EINVAL; | ||
335 | } | ||
336 | |||
337 | p->irqdomain = irq_domain_add_linear(intc, 4, | ||
338 | &faraday_pci_irqdomain_ops, p); | ||
339 | if (!p->irqdomain) { | ||
340 | dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); | ||
341 | return -EINVAL; | ||
342 | } | ||
343 | |||
344 | irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p); | ||
345 | |||
346 | for (i = 0; i < 4; i++) | ||
347 | irq_create_mapping(p->irqdomain, i); | ||
348 | |||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, | ||
353 | struct device_node *node) | ||
354 | { | ||
355 | const int na = 3, ns = 2; | ||
356 | int rlen; | ||
357 | |||
358 | parser->node = node; | ||
359 | parser->pna = of_n_addr_cells(node); | ||
360 | parser->np = parser->pna + na + ns; | ||
361 | |||
362 | parser->range = of_get_property(node, "dma-ranges", &rlen); | ||
363 | if (!parser->range) | ||
364 | return -ENOENT; | ||
365 | parser->end = parser->range + rlen / sizeof(__be32); | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p, | ||
371 | struct device_node *np) | ||
372 | { | ||
373 | struct of_pci_range range; | ||
374 | struct of_pci_range_parser parser; | ||
375 | struct device *dev = p->dev; | ||
376 | u32 confreg[3] = { | ||
377 | FARADAY_PCI_MEM1_BASE_SIZE, | ||
378 | FARADAY_PCI_MEM2_BASE_SIZE, | ||
379 | FARADAY_PCI_MEM3_BASE_SIZE, | ||
380 | }; | ||
381 | int i = 0; | ||
382 | u32 val; | ||
383 | |||
384 | if (pci_dma_range_parser_init(&parser, np)) { | ||
385 | dev_err(dev, "missing dma-ranges property\n"); | ||
386 | return -EINVAL; | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Get the dma-ranges from the device tree | ||
391 | */ | ||
392 | for_each_of_pci_range(&parser, &range) { | ||
393 | u64 end = range.pci_addr + range.size - 1; | ||
394 | int ret; | ||
395 | |||
396 | ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val); | ||
397 | if (ret) { | ||
398 | dev_err(dev, | ||
399 | "DMA range %d: illegal MEM resource size\n", i); | ||
400 | return -EINVAL; | ||
401 | } | ||
402 | |||
403 | dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", | ||
404 | i + 1, range.pci_addr, end, val); | ||
405 | if (i <= 2) { | ||
406 | faraday_pci_write_config(p->bus, 0, confreg[i], | ||
407 | 4, val); | ||
408 | } else { | ||
409 | dev_err(dev, "ignore extraneous dma-range %d\n", i); | ||
410 | break; | ||
411 | } | ||
412 | |||
413 | i++; | ||
414 | } | ||
415 | |||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | static int faraday_pci_probe(struct platform_device *pdev) | ||
420 | { | ||
421 | struct device *dev = &pdev->dev; | ||
422 | const struct faraday_pci_variant *variant = | ||
423 | of_device_get_match_data(dev); | ||
424 | struct resource *regs; | ||
425 | resource_size_t io_base; | ||
426 | struct resource_entry *win; | ||
427 | struct faraday_pci *p; | ||
428 | struct resource *mem; | ||
429 | struct resource *io; | ||
430 | struct pci_host_bridge *host; | ||
431 | int ret; | ||
432 | u32 val; | ||
433 | LIST_HEAD(res); | ||
434 | |||
435 | host = pci_alloc_host_bridge(sizeof(*p)); | ||
436 | if (!host) | ||
437 | return -ENOMEM; | ||
438 | |||
439 | host->dev.parent = dev; | ||
440 | host->ops = &faraday_pci_ops; | ||
441 | host->busnr = 0; | ||
442 | host->msi = NULL; | ||
443 | p = pci_host_bridge_priv(host); | ||
444 | host->sysdata = p; | ||
445 | p->dev = dev; | ||
446 | |||
447 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
448 | p->base = devm_ioremap_resource(dev, regs); | ||
449 | if (IS_ERR(p->base)) | ||
450 | return PTR_ERR(p->base); | ||
451 | |||
452 | ret = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, | ||
453 | &res, &io_base); | ||
454 | if (ret) | ||
455 | return ret; | ||
456 | |||
457 | ret = devm_request_pci_bus_resources(dev, &res); | ||
458 | if (ret) | ||
459 | return ret; | ||
460 | |||
461 | /* Get the I/O and memory ranges from DT */ | ||
462 | resource_list_for_each_entry(win, &res) { | ||
463 | switch (resource_type(win->res)) { | ||
464 | case IORESOURCE_IO: | ||
465 | io = win->res; | ||
466 | io->name = "Gemini PCI I/O"; | ||
467 | if (!faraday_res_to_memcfg(io->start - win->offset, | ||
468 | resource_size(io), &val)) { | ||
469 | /* setup I/O space size */ | ||
470 | writel(val, p->base + PCI_IOSIZE); | ||
471 | } else { | ||
472 | dev_err(dev, "illegal IO mem size\n"); | ||
473 | return -EINVAL; | ||
474 | } | ||
475 | ret = pci_remap_iospace(io, io_base); | ||
476 | if (ret) { | ||
477 | dev_warn(dev, "error %d: failed to map resource %pR\n", | ||
478 | ret, io); | ||
479 | continue; | ||
480 | } | ||
481 | break; | ||
482 | case IORESOURCE_MEM: | ||
483 | mem = win->res; | ||
484 | mem->name = "Gemini PCI MEM"; | ||
485 | break; | ||
486 | case IORESOURCE_BUS: | ||
487 | break; | ||
488 | default: | ||
489 | break; | ||
490 | } | ||
491 | } | ||
492 | |||
493 | /* Setup hostbridge */ | ||
494 | val = readl(p->base + PCI_CTRL); | ||
495 | val |= PCI_COMMAND_IO; | ||
496 | val |= PCI_COMMAND_MEMORY; | ||
497 | val |= PCI_COMMAND_MASTER; | ||
498 | writel(val, p->base + PCI_CTRL); | ||
499 | |||
500 | list_splice_init(&res, &host->windows); | ||
501 | ret = pci_register_host_bridge(host); | ||
502 | if (ret) { | ||
503 | dev_err(dev, "failed to register host: %d\n", ret); | ||
504 | return ret; | ||
505 | } | ||
506 | p->bus = host->bus; | ||
507 | |||
508 | /* Mask and clear all interrupts */ | ||
509 | faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); | ||
510 | if (variant->cascaded_irq) { | ||
511 | ret = faraday_pci_setup_cascaded_irq(p); | ||
512 | if (ret) { | ||
513 | dev_err(dev, "failed to setup cascaded IRQ\n"); | ||
514 | return ret; | ||
515 | } | ||
516 | } | ||
517 | |||
518 | ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); | ||
519 | if (ret) | ||
520 | return ret; | ||
521 | |||
522 | pci_scan_child_bus(p->bus); | ||
523 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | ||
524 | pci_bus_assign_resources(p->bus); | ||
525 | pci_bus_add_devices(p->bus); | ||
526 | pci_free_resource_list(&res); | ||
527 | |||
528 | return 0; | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * We encode bridge variants here, we have at least two so it doesn't | ||
533 | * hurt to have infrastructure to encompass future variants as well. | ||
534 | */ | ||
535 | const struct faraday_pci_variant faraday_regular = { | ||
536 | .cascaded_irq = true, | ||
537 | }; | ||
538 | |||
539 | const struct faraday_pci_variant faraday_dual = { | ||
540 | .cascaded_irq = false, | ||
541 | }; | ||
542 | |||
543 | static const struct of_device_id faraday_pci_of_match[] = { | ||
544 | { | ||
545 | .compatible = "faraday,ftpci100", | ||
546 | .data = &faraday_regular, | ||
547 | }, | ||
548 | { | ||
549 | .compatible = "faraday,ftpci100-dual", | ||
550 | .data = &faraday_dual, | ||
551 | }, | ||
552 | {}, | ||
553 | }; | ||
554 | |||
555 | static struct platform_driver faraday_pci_driver = { | ||
556 | .driver = { | ||
557 | .name = "ftpci100", | ||
558 | .of_match_table = of_match_ptr(faraday_pci_of_match), | ||
559 | .suppress_bind_attrs = true, | ||
560 | }, | ||
561 | .probe = faraday_pci_probe, | ||
562 | }; | ||
563 | builtin_platform_driver(faraday_pci_driver); | ||
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c index c05ea9d72f69..7d709a7e0aa8 100644 --- a/drivers/pci/host/pci-host-generic.c +++ b/drivers/pci/host/pci-host-generic.c | |||
@@ -60,6 +60,7 @@ static struct platform_driver gen_pci_driver = { | |||
60 | .driver = { | 60 | .driver = { |
61 | .name = "pci-host-generic", | 61 | .name = "pci-host-generic", |
62 | .of_match_table = gen_pci_of_match, | 62 | .of_match_table = gen_pci_of_match, |
63 | .suppress_bind_attrs = true, | ||
63 | }, | 64 | }, |
64 | .probe = gen_pci_probe, | 65 | .probe = gen_pci_probe, |
65 | }; | 66 | }; |
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index ada98569b78e..84936383e269 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <asm/apic.h> | 56 | #include <asm/apic.h> |
57 | #include <linux/msi.h> | 57 | #include <linux/msi.h> |
58 | #include <linux/hyperv.h> | 58 | #include <linux/hyperv.h> |
59 | #include <linux/refcount.h> | ||
59 | #include <asm/mshyperv.h> | 60 | #include <asm/mshyperv.h> |
60 | 61 | ||
61 | /* | 62 | /* |
@@ -72,6 +73,7 @@ enum { | |||
72 | PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 | 73 | PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 |
73 | }; | 74 | }; |
74 | 75 | ||
76 | #define CPU_AFFINITY_ALL -1ULL | ||
75 | #define PCI_CONFIG_MMIO_LENGTH 0x2000 | 77 | #define PCI_CONFIG_MMIO_LENGTH 0x2000 |
76 | #define CFG_PAGE_OFFSET 0x1000 | 78 | #define CFG_PAGE_OFFSET 0x1000 |
77 | #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) | 79 | #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) |
@@ -350,6 +352,7 @@ enum hv_pcibus_state { | |||
350 | hv_pcibus_init = 0, | 352 | hv_pcibus_init = 0, |
351 | hv_pcibus_probed, | 353 | hv_pcibus_probed, |
352 | hv_pcibus_installed, | 354 | hv_pcibus_installed, |
355 | hv_pcibus_removed, | ||
353 | hv_pcibus_maximum | 356 | hv_pcibus_maximum |
354 | }; | 357 | }; |
355 | 358 | ||
@@ -421,7 +424,7 @@ enum hv_pcidev_ref_reason { | |||
421 | struct hv_pci_dev { | 424 | struct hv_pci_dev { |
422 | /* List protected by pci_rescan_remove_lock */ | 425 | /* List protected by pci_rescan_remove_lock */ |
423 | struct list_head list_entry; | 426 | struct list_head list_entry; |
424 | atomic_t refs; | 427 | refcount_t refs; |
425 | enum hv_pcichild_state state; | 428 | enum hv_pcichild_state state; |
426 | struct pci_function_description desc; | 429 | struct pci_function_description desc; |
427 | bool reported_missing; | 430 | bool reported_missing; |
@@ -876,7 +879,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
876 | hv_int_desc_free(hpdev, int_desc); | 879 | hv_int_desc_free(hpdev, int_desc); |
877 | } | 880 | } |
878 | 881 | ||
879 | int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL); | 882 | int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); |
880 | if (!int_desc) | 883 | if (!int_desc) |
881 | goto drop_reference; | 884 | goto drop_reference; |
882 | 885 | ||
@@ -897,9 +900,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
897 | * processors because Hyper-V only supports 64 in a guest. | 900 | * processors because Hyper-V only supports 64 in a guest. |
898 | */ | 901 | */ |
899 | affinity = irq_data_get_affinity_mask(data); | 902 | affinity = irq_data_get_affinity_mask(data); |
900 | for_each_cpu_and(cpu, affinity, cpu_online_mask) { | 903 | if (cpumask_weight(affinity) >= 32) { |
901 | int_pkt->int_desc.cpu_mask |= | 904 | int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; |
902 | (1ULL << vmbus_cpu_number_to_vp_number(cpu)); | 905 | } else { |
906 | for_each_cpu_and(cpu, affinity, cpu_online_mask) { | ||
907 | int_pkt->int_desc.cpu_mask |= | ||
908 | (1ULL << vmbus_cpu_number_to_vp_number(cpu)); | ||
909 | } | ||
903 | } | 910 | } |
904 | 911 | ||
905 | ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, | 912 | ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, |
@@ -1208,9 +1215,11 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) | |||
1208 | hbus->pci_bus->msi = &hbus->msi_chip; | 1215 | hbus->pci_bus->msi = &hbus->msi_chip; |
1209 | hbus->pci_bus->msi->dev = &hbus->hdev->device; | 1216 | hbus->pci_bus->msi->dev = &hbus->hdev->device; |
1210 | 1217 | ||
1218 | pci_lock_rescan_remove(); | ||
1211 | pci_scan_child_bus(hbus->pci_bus); | 1219 | pci_scan_child_bus(hbus->pci_bus); |
1212 | pci_bus_assign_resources(hbus->pci_bus); | 1220 | pci_bus_assign_resources(hbus->pci_bus); |
1213 | pci_bus_add_devices(hbus->pci_bus); | 1221 | pci_bus_add_devices(hbus->pci_bus); |
1222 | pci_unlock_rescan_remove(); | ||
1214 | hbus->state = hv_pcibus_installed; | 1223 | hbus->state = hv_pcibus_installed; |
1215 | return 0; | 1224 | return 0; |
1216 | } | 1225 | } |
@@ -1254,13 +1263,13 @@ static void q_resource_requirements(void *context, struct pci_response *resp, | |||
1254 | static void get_pcichild(struct hv_pci_dev *hpdev, | 1263 | static void get_pcichild(struct hv_pci_dev *hpdev, |
1255 | enum hv_pcidev_ref_reason reason) | 1264 | enum hv_pcidev_ref_reason reason) |
1256 | { | 1265 | { |
1257 | atomic_inc(&hpdev->refs); | 1266 | refcount_inc(&hpdev->refs); |
1258 | } | 1267 | } |
1259 | 1268 | ||
1260 | static void put_pcichild(struct hv_pci_dev *hpdev, | 1269 | static void put_pcichild(struct hv_pci_dev *hpdev, |
1261 | enum hv_pcidev_ref_reason reason) | 1270 | enum hv_pcidev_ref_reason reason) |
1262 | { | 1271 | { |
1263 | if (atomic_dec_and_test(&hpdev->refs)) | 1272 | if (refcount_dec_and_test(&hpdev->refs)) |
1264 | kfree(hpdev); | 1273 | kfree(hpdev); |
1265 | } | 1274 | } |
1266 | 1275 | ||
@@ -1314,7 +1323,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, | |||
1314 | wait_for_completion(&comp_pkt.host_event); | 1323 | wait_for_completion(&comp_pkt.host_event); |
1315 | 1324 | ||
1316 | hpdev->desc = *desc; | 1325 | hpdev->desc = *desc; |
1317 | get_pcichild(hpdev, hv_pcidev_ref_initial); | 1326 | refcount_set(&hpdev->refs, 1); |
1318 | get_pcichild(hpdev, hv_pcidev_ref_childlist); | 1327 | get_pcichild(hpdev, hv_pcidev_ref_childlist); |
1319 | spin_lock_irqsave(&hbus->device_list_lock, flags); | 1328 | spin_lock_irqsave(&hbus->device_list_lock, flags); |
1320 | 1329 | ||
@@ -1504,13 +1513,24 @@ static void pci_devices_present_work(struct work_struct *work) | |||
1504 | put_pcichild(hpdev, hv_pcidev_ref_initial); | 1513 | put_pcichild(hpdev, hv_pcidev_ref_initial); |
1505 | } | 1514 | } |
1506 | 1515 | ||
1507 | /* Tell the core to rescan bus because there may have been changes. */ | 1516 | switch(hbus->state) { |
1508 | if (hbus->state == hv_pcibus_installed) { | 1517 | case hv_pcibus_installed: |
1518 | /* | ||
1519 | * Tell the core to rescan bus | ||
1520 | * because there may have been changes. | ||
1521 | */ | ||
1509 | pci_lock_rescan_remove(); | 1522 | pci_lock_rescan_remove(); |
1510 | pci_scan_child_bus(hbus->pci_bus); | 1523 | pci_scan_child_bus(hbus->pci_bus); |
1511 | pci_unlock_rescan_remove(); | 1524 | pci_unlock_rescan_remove(); |
1512 | } else { | 1525 | break; |
1526 | |||
1527 | case hv_pcibus_init: | ||
1528 | case hv_pcibus_probed: | ||
1513 | survey_child_resources(hbus); | 1529 | survey_child_resources(hbus); |
1530 | break; | ||
1531 | |||
1532 | default: | ||
1533 | break; | ||
1514 | } | 1534 | } |
1515 | 1535 | ||
1516 | up(&hbus->enum_sem); | 1536 | up(&hbus->enum_sem); |
@@ -1600,8 +1620,10 @@ static void hv_eject_device_work(struct work_struct *work) | |||
1600 | pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, | 1620 | pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, |
1601 | wslot); | 1621 | wslot); |
1602 | if (pdev) { | 1622 | if (pdev) { |
1623 | pci_lock_rescan_remove(); | ||
1603 | pci_stop_and_remove_bus_device(pdev); | 1624 | pci_stop_and_remove_bus_device(pdev); |
1604 | pci_dev_put(pdev); | 1625 | pci_dev_put(pdev); |
1626 | pci_unlock_rescan_remove(); | ||
1605 | } | 1627 | } |
1606 | 1628 | ||
1607 | spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); | 1629 | spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); |
@@ -2185,6 +2207,7 @@ static int hv_pci_probe(struct hv_device *hdev, | |||
2185 | hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); | 2207 | hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); |
2186 | if (!hbus) | 2208 | if (!hbus) |
2187 | return -ENOMEM; | 2209 | return -ENOMEM; |
2210 | hbus->state = hv_pcibus_init; | ||
2188 | 2211 | ||
2189 | /* | 2212 | /* |
2190 | * The PCI bus "domain" is what is called "segment" in ACPI and | 2213 | * The PCI bus "domain" is what is called "segment" in ACPI and |
@@ -2348,6 +2371,7 @@ static int hv_pci_remove(struct hv_device *hdev) | |||
2348 | pci_stop_root_bus(hbus->pci_bus); | 2371 | pci_stop_root_bus(hbus->pci_bus); |
2349 | pci_remove_root_bus(hbus->pci_bus); | 2372 | pci_remove_root_bus(hbus->pci_bus); |
2350 | pci_unlock_rescan_remove(); | 2373 | pci_unlock_rescan_remove(); |
2374 | hbus->state = hv_pcibus_removed; | ||
2351 | } | 2375 | } |
2352 | 2376 | ||
2353 | hv_pci_bus_exit(hdev); | 2377 | hv_pci_bus_exit(hdev); |
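The atomic_t to refcount_t conversion in the hunks above follows the usual kernel pattern: refcount_t saturates and warns on overflow or underflow instead of silently wrapping, turning a potential use-after-free into a diagnosable warning. A minimal sketch of the same pattern follows; struct foo_dev and its helpers are hypothetical and not part of this patch.

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object, used only to illustrate the refcount_t pattern. */
struct foo_dev {
	refcount_t refs;
	/* ... payload ... */
};

static struct foo_dev *foo_dev_alloc(void)
{
	struct foo_dev *fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);

	if (!fdev)
		return NULL;
	/* Owner reference; a refcount_t must start at a non-zero value. */
	refcount_set(&fdev->refs, 1);
	return fdev;
}

static void foo_dev_get(struct foo_dev *fdev)
{
	/* Saturates and warns on overflow instead of silently wrapping. */
	refcount_inc(&fdev->refs);
}

static void foo_dev_put(struct foo_dev *fdev)
{
	if (refcount_dec_and_test(&fdev->refs))
		kfree(fdev);
}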
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index cd7d51988738..f353a6eb2f01 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
@@ -752,10 +752,11 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, | |||
752 | * If the mask is 0xffff0000, then we only want to write | 752 | * If the mask is 0xffff0000, then we only want to write |
753 | * the link control register, rather than clearing the | 753 | * the link control register, rather than clearing the |
754 | * RW1C bits in the link status register. Mask out the | 754 | * RW1C bits in the link status register. Mask out the |
755 | * status register bits. | 755 | * RW1C status register bits. |
756 | */ | 756 | */ |
757 | if (mask == 0xffff0000) | 757 | if (mask == 0xffff0000) |
758 | value &= 0xffff; | 758 | value &= ~((PCI_EXP_LNKSTA_LABS | |
759 | PCI_EXP_LNKSTA_LBMS) << 16); | ||
759 | 760 | ||
760 | mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); | 761 | mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); |
761 | break; | 762 | break; |
@@ -1005,22 +1006,6 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn, | |||
1005 | return -ENOENT; | 1006 | return -ENOENT; |
1006 | } | 1007 | } |
1007 | 1008 | ||
1008 | static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie) | ||
1009 | { | ||
1010 | struct device_node *msi_node; | ||
1011 | |||
1012 | msi_node = of_parse_phandle(pcie->pdev->dev.of_node, | ||
1013 | "msi-parent", 0); | ||
1014 | if (!msi_node) | ||
1015 | return; | ||
1016 | |||
1017 | pcie->msi = of_pci_find_msi_chip_by_node(msi_node); | ||
1018 | of_node_put(msi_node); | ||
1019 | |||
1020 | if (pcie->msi) | ||
1021 | pcie->msi->dev = &pcie->pdev->dev; | ||
1022 | } | ||
1023 | |||
1024 | #ifdef CONFIG_PM_SLEEP | 1009 | #ifdef CONFIG_PM_SLEEP |
1025 | static int mvebu_pcie_suspend(struct device *dev) | 1010 | static int mvebu_pcie_suspend(struct device *dev) |
1026 | { | 1011 | { |
@@ -1298,7 +1283,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev) | |||
1298 | for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K) | 1283 | for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K) |
1299 | pci_ioremap_io(i, pcie->io.start + i); | 1284 | pci_ioremap_io(i, pcie->io.start + i); |
1300 | 1285 | ||
1301 | mvebu_pcie_msi_enable(pcie); | ||
1302 | mvebu_pcie_enable(pcie); | 1286 | mvebu_pcie_enable(pcie); |
1303 | 1287 | ||
1304 | platform_set_drvdata(pdev, pcie); | 1288 | platform_set_drvdata(pdev, pcie); |
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index ed8a93f2bfb5..2618f875a600 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
@@ -380,7 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie, | |||
380 | unsigned int busnr) | 380 | unsigned int busnr) |
381 | { | 381 | { |
382 | struct device *dev = pcie->dev; | 382 | struct device *dev = pcie->dev; |
383 | pgprot_t prot = pgprot_device(PAGE_KERNEL); | 383 | pgprot_t prot = pgprot_noncached(PAGE_KERNEL); |
384 | phys_addr_t cs = pcie->cs->start; | 384 | phys_addr_t cs = pcie->cs->start; |
385 | struct tegra_pcie_bus *bus; | 385 | struct tegra_pcie_bus *bus; |
386 | unsigned int i; | 386 | unsigned int i; |
@@ -1962,7 +1962,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) | |||
1962 | rp->pcie = pcie; | 1962 | rp->pcie = pcie; |
1963 | rp->np = port; | 1963 | rp->np = port; |
1964 | 1964 | ||
1965 | rp->base = devm_ioremap_resource(dev, &rp->regs); | 1965 | rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); |
1966 | if (IS_ERR(rp->base)) | 1966 | if (IS_ERR(rp->base)) |
1967 | return PTR_ERR(rp->base); | 1967 | return PTR_ERR(rp->base); |
1968 | 1968 | ||
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c index 3f54a43bbbea..fc0ca03f280e 100644 --- a/drivers/pci/host/pci-thunder-ecam.c +++ b/drivers/pci/host/pci-thunder-ecam.c | |||
@@ -373,6 +373,7 @@ static struct platform_driver thunder_ecam_driver = { | |||
373 | .driver = { | 373 | .driver = { |
374 | .name = KBUILD_MODNAME, | 374 | .name = KBUILD_MODNAME, |
375 | .of_match_table = thunder_ecam_of_match, | 375 | .of_match_table = thunder_ecam_of_match, |
376 | .suppress_bind_attrs = true, | ||
376 | }, | 377 | }, |
377 | .probe = thunder_ecam_probe, | 378 | .probe = thunder_ecam_probe, |
378 | }; | 379 | }; |
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index 6e031b522529..6e066f8b74df 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c | |||
@@ -474,6 +474,7 @@ static struct platform_driver thunder_pem_driver = { | |||
474 | .driver = { | 474 | .driver = { |
475 | .name = KBUILD_MODNAME, | 475 | .name = KBUILD_MODNAME, |
476 | .of_match_table = thunder_pem_of_match, | 476 | .of_match_table = thunder_pem_of_match, |
477 | .suppress_bind_attrs = true, | ||
477 | }, | 478 | }, |
478 | .probe = thunder_pem_probe, | 479 | .probe = thunder_pem_probe, |
479 | }; | 480 | }; |
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index 5ebee7d37ff5..9281eee2d000 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c | |||
@@ -138,7 +138,8 @@ static int versatile_pci_probe(struct platform_device *pdev) | |||
138 | return PTR_ERR(versatile_cfg_base[0]); | 138 | return PTR_ERR(versatile_cfg_base[0]); |
139 | 139 | ||
140 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | 140 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); |
141 | versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res); | 141 | versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev, |
142 | res); | ||
142 | if (IS_ERR(versatile_cfg_base[1])) | 143 | if (IS_ERR(versatile_cfg_base[1])) |
143 | return PTR_ERR(versatile_cfg_base[1]); | 144 | return PTR_ERR(versatile_cfg_base[1]); |
144 | 145 | ||
@@ -221,6 +222,7 @@ static struct platform_driver versatile_pci_driver = { | |||
221 | .driver = { | 222 | .driver = { |
222 | .name = "versatile-pci", | 223 | .name = "versatile-pci", |
223 | .of_match_table = versatile_pci_of_match, | 224 | .of_match_table = versatile_pci_of_match, |
225 | .suppress_bind_attrs = true, | ||
224 | }, | 226 | }, |
225 | .probe = versatile_pci_probe, | 227 | .probe = versatile_pci_probe, |
226 | }; | 228 | }; |
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 1a6108788f6f..8cae013e7188 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c | |||
@@ -248,7 +248,7 @@ static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion) | |||
248 | dev_err(dev, "can't get CSR resource\n"); | 248 | dev_err(dev, "can't get CSR resource\n"); |
249 | return ret; | 249 | return ret; |
250 | } | 250 | } |
251 | port->csr_base = devm_ioremap_resource(dev, &csr); | 251 | port->csr_base = devm_pci_remap_cfg_resource(dev, &csr); |
252 | if (IS_ERR(port->csr_base)) | 252 | if (IS_ERR(port->csr_base)) |
253 | return PTR_ERR(port->csr_base); | 253 | return PTR_ERR(port->csr_base); |
254 | 254 | ||
@@ -359,7 +359,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port, | |||
359 | struct resource *res; | 359 | struct resource *res; |
360 | 360 | ||
361 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); | 361 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); |
362 | port->csr_base = devm_ioremap_resource(dev, res); | 362 | port->csr_base = devm_pci_remap_cfg_resource(dev, res); |
363 | if (IS_ERR(port->csr_base)) | 363 | if (IS_ERR(port->csr_base)) |
364 | return PTR_ERR(port->csr_base); | 364 | return PTR_ERR(port->csr_base); |
365 | 365 | ||
@@ -697,6 +697,7 @@ static struct platform_driver xgene_pcie_driver = { | |||
697 | .driver = { | 697 | .driver = { |
698 | .name = "xgene-pcie", | 698 | .name = "xgene-pcie", |
699 | .of_match_table = of_match_ptr(xgene_pcie_match_table), | 699 | .of_match_table = of_match_ptr(xgene_pcie_match_table), |
700 | .suppress_bind_attrs = true, | ||
700 | }, | 701 | }, |
701 | .probe = xgene_pcie_probe_bridge, | 702 | .probe = xgene_pcie_probe_bridge, |
702 | }; | 703 | }; |
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index 8c6a327ca6cd..90d2bdd94e41 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c | |||
@@ -67,7 +67,8 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) | |||
67 | return ret; | 67 | return ret; |
68 | } | 68 | } |
69 | 69 | ||
70 | pcie->base = devm_ioremap(dev, reg.start, resource_size(®)); | 70 | pcie->base = devm_pci_remap_cfgspace(dev, reg.start, |
71 | resource_size(®)); | ||
71 | if (!pcie->base) { | 72 | if (!pcie->base) { |
72 | dev_err(dev, "unable to map controller registers\n"); | 73 | dev_err(dev, "unable to map controller registers\n"); |
73 | return -ENOMEM; | 74 | return -ENOMEM; |
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index 26ddd3535272..0e020b6e0943 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/irqdomain.h> | 26 | #include <linux/irqdomain.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/mfd/syscon.h> | 28 | #include <linux/mfd/syscon.h> |
29 | #include <linux/module.h> | ||
29 | #include <linux/of_address.h> | 30 | #include <linux/of_address.h> |
30 | #include <linux/of_device.h> | 31 | #include <linux/of_device.h> |
31 | #include <linux/of_pci.h> | 32 | #include <linux/of_pci.h> |
@@ -223,9 +224,11 @@ struct rockchip_pcie { | |||
223 | int link_gen; | 224 | int link_gen; |
224 | struct device *dev; | 225 | struct device *dev; |
225 | struct irq_domain *irq_domain; | 226 | struct irq_domain *irq_domain; |
226 | u32 io_size; | ||
227 | int offset; | 227 | int offset; |
228 | struct pci_bus *root_bus; | ||
229 | struct resource *io; | ||
228 | phys_addr_t io_bus_addr; | 230 | phys_addr_t io_bus_addr; |
231 | u32 io_size; | ||
229 | void __iomem *msg_region; | 232 | void __iomem *msg_region; |
230 | u32 mem_size; | 233 | u32 mem_size; |
231 | phys_addr_t msg_bus_addr; | 234 | phys_addr_t msg_bus_addr; |
@@ -425,7 +428,8 @@ static struct pci_ops rockchip_pcie_ops = { | |||
425 | 428 | ||
426 | static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) | 429 | static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) |
427 | { | 430 | { |
428 | u32 status, curr, scale, power; | 431 | int curr; |
432 | u32 status, scale, power; | ||
429 | 433 | ||
430 | if (IS_ERR(rockchip->vpcie3v3)) | 434 | if (IS_ERR(rockchip->vpcie3v3)) |
431 | return; | 435 | return; |
@@ -437,24 +441,25 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) | |||
437 | * to the actual power supply. | 441 | * to the actual power supply. |
438 | */ | 442 | */ |
439 | curr = regulator_get_current_limit(rockchip->vpcie3v3); | 443 | curr = regulator_get_current_limit(rockchip->vpcie3v3); |
440 | if (curr > 0) { | 444 | if (curr <= 0) |
441 | scale = 3; /* 0.001x */ | 445 | return; |
442 | curr = curr / 1000; /* convert to mA */ | ||
443 | power = (curr * 3300) / 1000; /* milliwatt */ | ||
444 | while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { | ||
445 | if (!scale) { | ||
446 | dev_warn(rockchip->dev, "invalid power supply\n"); | ||
447 | return; | ||
448 | } | ||
449 | scale--; | ||
450 | power = power / 10; | ||
451 | } | ||
452 | 446 | ||
453 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); | 447 | scale = 3; /* 0.001x */ |
454 | status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | | 448 | curr = curr / 1000; /* convert to mA */ |
455 | (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); | 449 | power = (curr * 3300) / 1000; /* milliwatt */ |
456 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); | 450 | while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { |
451 | if (!scale) { | ||
452 | dev_warn(rockchip->dev, "invalid power supply\n"); | ||
453 | return; | ||
454 | } | ||
455 | scale--; | ||
456 | power = power / 10; | ||
457 | } | 457 | } |
458 | |||
459 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); | ||
460 | status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | | ||
461 | (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); | ||
462 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); | ||
458 | } | 463 | } |
459 | 464 | ||
460 | /** | 465 | /** |
@@ -596,7 +601,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) | |||
596 | 601 | ||
597 | /* Set RC's clock architecture as common clock */ | 602 | /* Set RC's clock architecture as common clock */ |
598 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); | 603 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); |
599 | status |= PCI_EXP_LNKCTL_CCC; | 604 | status |= PCI_EXP_LNKSTA_SLC << 16; |
605 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); | ||
606 | |||
607 | /* Set RC's RCB to 128 */ | ||
608 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); | ||
609 | status |= PCI_EXP_LNKCTL_RCB; | ||
600 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); | 610 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); |
601 | 611 | ||
602 | /* Enable Gen1 training */ | 612 | /* Enable Gen1 training */ |
@@ -822,7 +832,7 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) | |||
822 | regs = platform_get_resource_byname(pdev, | 832 | regs = platform_get_resource_byname(pdev, |
823 | IORESOURCE_MEM, | 833 | IORESOURCE_MEM, |
824 | "axi-base"); | 834 | "axi-base"); |
825 | rockchip->reg_base = devm_ioremap_resource(dev, regs); | 835 | rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs); |
826 | if (IS_ERR(rockchip->reg_base)) | 836 | if (IS_ERR(rockchip->reg_base)) |
827 | return PTR_ERR(rockchip->reg_base); | 837 | return PTR_ERR(rockchip->reg_base); |
828 | 838 | ||
@@ -1359,6 +1369,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) | |||
1359 | err, io); | 1369 | err, io); |
1360 | continue; | 1370 | continue; |
1361 | } | 1371 | } |
1372 | rockchip->io = io; | ||
1362 | break; | 1373 | break; |
1363 | case IORESOURCE_MEM: | 1374 | case IORESOURCE_MEM: |
1364 | mem = win->res; | 1375 | mem = win->res; |
@@ -1390,6 +1401,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) | |||
1390 | err = -ENOMEM; | 1401 | err = -ENOMEM; |
1391 | goto err_free_res; | 1402 | goto err_free_res; |
1392 | } | 1403 | } |
1404 | rockchip->root_bus = bus; | ||
1393 | 1405 | ||
1394 | pci_bus_size_bridges(bus); | 1406 | pci_bus_size_bridges(bus); |
1395 | pci_bus_assign_resources(bus); | 1407 | pci_bus_assign_resources(bus); |
@@ -1397,7 +1409,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) | |||
1397 | pcie_bus_configure_settings(child); | 1409 | pcie_bus_configure_settings(child); |
1398 | 1410 | ||
1399 | pci_bus_add_devices(bus); | 1411 | pci_bus_add_devices(bus); |
1400 | return err; | 1412 | return 0; |
1401 | 1413 | ||
1402 | err_free_res: | 1414 | err_free_res: |
1403 | pci_free_resource_list(&res); | 1415 | pci_free_resource_list(&res); |
@@ -1420,6 +1432,34 @@ err_aclk_pcie: | |||
1420 | return err; | 1432 | return err; |
1421 | } | 1433 | } |
1422 | 1434 | ||
1435 | static int rockchip_pcie_remove(struct platform_device *pdev) | ||
1436 | { | ||
1437 | struct device *dev = &pdev->dev; | ||
1438 | struct rockchip_pcie *rockchip = dev_get_drvdata(dev); | ||
1439 | |||
1440 | pci_stop_root_bus(rockchip->root_bus); | ||
1441 | pci_remove_root_bus(rockchip->root_bus); | ||
1442 | pci_unmap_iospace(rockchip->io); | ||
1443 | irq_domain_remove(rockchip->irq_domain); | ||
1444 | |||
1445 | phy_power_off(rockchip->phy); | ||
1446 | phy_exit(rockchip->phy); | ||
1447 | |||
1448 | clk_disable_unprepare(rockchip->clk_pcie_pm); | ||
1449 | clk_disable_unprepare(rockchip->hclk_pcie); | ||
1450 | clk_disable_unprepare(rockchip->aclk_perf_pcie); | ||
1451 | clk_disable_unprepare(rockchip->aclk_pcie); | ||
1452 | |||
1453 | if (!IS_ERR(rockchip->vpcie3v3)) | ||
1454 | regulator_disable(rockchip->vpcie3v3); | ||
1455 | if (!IS_ERR(rockchip->vpcie1v8)) | ||
1456 | regulator_disable(rockchip->vpcie1v8); | ||
1457 | if (!IS_ERR(rockchip->vpcie0v9)) | ||
1458 | regulator_disable(rockchip->vpcie0v9); | ||
1459 | |||
1460 | return 0; | ||
1461 | } | ||
1462 | |||
1423 | static const struct dev_pm_ops rockchip_pcie_pm_ops = { | 1463 | static const struct dev_pm_ops rockchip_pcie_pm_ops = { |
1424 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, | 1464 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, |
1425 | rockchip_pcie_resume_noirq) | 1465 | rockchip_pcie_resume_noirq) |
@@ -1429,6 +1469,7 @@ static const struct of_device_id rockchip_pcie_of_match[] = { | |||
1429 | { .compatible = "rockchip,rk3399-pcie", }, | 1469 | { .compatible = "rockchip,rk3399-pcie", }, |
1430 | {} | 1470 | {} |
1431 | }; | 1471 | }; |
1472 | MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match); | ||
1432 | 1473 | ||
1433 | static struct platform_driver rockchip_pcie_driver = { | 1474 | static struct platform_driver rockchip_pcie_driver = { |
1434 | .driver = { | 1475 | .driver = { |
@@ -1437,6 +1478,10 @@ static struct platform_driver rockchip_pcie_driver = { | |||
1437 | .pm = &rockchip_pcie_pm_ops, | 1478 | .pm = &rockchip_pcie_pm_ops, |
1438 | }, | 1479 | }, |
1439 | .probe = rockchip_pcie_probe, | 1480 | .probe = rockchip_pcie_probe, |
1440 | 1481 | .remove = rockchip_pcie_remove, | |
1441 | }; | 1482 | }; |
1442 | builtin_platform_driver(rockchip_pcie_driver); | 1483 | module_platform_driver(rockchip_pcie_driver); |
1484 | |||
1485 | MODULE_AUTHOR("Rockchip Inc"); | ||
1486 | MODULE_DESCRIPTION("Rockchip AXI PCIe driver"); | ||
1487 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 4c3e0ab35496..4b16b26ae909 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c | |||
@@ -761,7 +761,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, | |||
761 | pcie->phys_pcie_reg_base = res->start; | 761 | pcie->phys_pcie_reg_base = res->start; |
762 | 762 | ||
763 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); | 763 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); |
764 | pcie->ecam_base = devm_ioremap_resource(dev, res); | 764 | pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res); |
765 | if (IS_ERR(pcie->ecam_base)) | 765 | if (IS_ERR(pcie->ecam_base)) |
766 | return PTR_ERR(pcie->ecam_base); | 766 | return PTR_ERR(pcie->ecam_base); |
767 | pcie->phys_ecam_base = res->start; | 767 | pcie->phys_ecam_base = res->start; |
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index 7f030f5d750b..2fe2df51f9f8 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c | |||
@@ -606,7 +606,7 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) | |||
606 | return err; | 606 | return err; |
607 | } | 607 | } |
608 | 608 | ||
609 | port->reg_base = devm_ioremap_resource(dev, ®s); | 609 | port->reg_base = devm_pci_remap_cfg_resource(dev, ®s); |
610 | if (IS_ERR(port->reg_base)) | 610 | if (IS_ERR(port->reg_base)) |
611 | return PTR_ERR(port->reg_base); | 611 | return PTR_ERR(port->reg_base); |
612 | 612 | ||
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 9e69403be632..19f30a9f461d 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -109,6 +109,12 @@ int pciehp_unconfigure_device(struct slot *p_slot) | |||
109 | break; | 109 | break; |
110 | } | 110 | } |
111 | } | 111 | } |
112 | if (!presence) { | ||
113 | pci_dev_set_disconnected(dev, NULL); | ||
114 | if (pci_has_subordinate(dev)) | ||
115 | pci_walk_bus(dev->subordinate, | ||
116 | pci_dev_set_disconnected, NULL); | ||
117 | } | ||
112 | pci_stop_and_remove_bus_device(dev); | 118 | pci_stop_and_remove_bus_device(dev); |
113 | /* | 119 | /* |
114 | * Ensure that no new Requests will be generated from | 120 | * Ensure that no new Requests will be generated from |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 2479ae876482..d9dc7363ac77 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -450,6 +450,7 @@ found: | |||
450 | iov->total_VFs = total; | 450 | iov->total_VFs = total; |
451 | iov->pgsz = pgsz; | 451 | iov->pgsz = pgsz; |
452 | iov->self = dev; | 452 | iov->self = dev; |
453 | iov->drivers_autoprobe = true; | ||
453 | pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); | 454 | pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); |
454 | pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); | 455 | pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); |
455 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) | 456 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) |
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index f9f2a0324ecc..83d30953ce19 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * PCI IRQ failure handing code | 2 | * PCI IRQ handling code |
3 | * | 3 | * |
4 | * Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> | 4 | * Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> |
5 | * Copyright (C) 2017 Christoph Hellwig. | ||
5 | */ | 6 | */ |
6 | 7 | ||
7 | #include <linux/acpi.h> | 8 | #include <linux/acpi.h> |
@@ -59,3 +60,61 @@ enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev) | |||
59 | return PCI_LOST_IRQ_NO_INFORMATION; | 60 | return PCI_LOST_IRQ_NO_INFORMATION; |
60 | } | 61 | } |
61 | EXPORT_SYMBOL(pci_lost_interrupt); | 62 | EXPORT_SYMBOL(pci_lost_interrupt); |
63 | |||
64 | /** | ||
65 | * pci_request_irq - allocate an interrupt line for a PCI device | ||
66 | * @dev: PCI device to operate on | ||
67 | * @nr: device-relative interrupt vector index (0-based). | ||
68 | * @handler: Function to be called when the IRQ occurs. | ||
69 | * Primary handler for threaded interrupts. | ||
70 | * If NULL and thread_fn != NULL the default primary handler is | ||
71 | * installed. | ||
72 | * @thread_fn: Function called from the IRQ handler thread | ||
73 | * If NULL, no IRQ thread is created | ||
74 | * @dev_id: Cookie passed back to the handler function | ||
75 | * @fmt: Printf-like format string naming the handler | ||
76 | * | ||
77 | * This call allocates interrupt resources and enables the interrupt line and | ||
78 | * IRQ handling. From the point this call is made @handler and @thread_fn may | ||
79 | * be invoked. All interrupts requested using this function might be shared. | ||
80 | * | ||
81 | * @dev_id must not be NULL and must be globally unique. | ||
82 | */ | ||
83 | int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler, | ||
84 | irq_handler_t thread_fn, void *dev_id, const char *fmt, ...) | ||
85 | { | ||
86 | va_list ap; | ||
87 | int ret; | ||
88 | char *devname; | ||
89 | |||
90 | va_start(ap, fmt); | ||
91 | devname = kvasprintf(GFP_KERNEL, fmt, ap); | ||
92 | va_end(ap); | ||
93 | |||
94 | ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn, | ||
95 | IRQF_SHARED, devname, dev_id); | ||
96 | if (ret) | ||
97 | kfree(devname); | ||
98 | return ret; | ||
99 | } | ||
100 | EXPORT_SYMBOL(pci_request_irq); | ||
101 | |||
102 | /** | ||
103 | * pci_free_irq - free an interrupt allocated with pci_request_irq | ||
104 | * @dev: PCI device to operate on | ||
105 | * @nr: device-relative interrupt vector index (0-based). | ||
106 | * @dev_id: Device identity to free | ||
107 | * | ||
108 | * Remove an interrupt handler. The handler is removed and if the interrupt | ||
109 | * line is no longer in use by any driver it is disabled. The caller must | ||
110 | * ensure the interrupt is disabled on the device before calling this function. | ||
111 | * The function does not return until any executing interrupts for this IRQ | ||
112 | * have completed. | ||
113 | * | ||
114 | * This function must not be called from interrupt context. | ||
115 | */ | ||
116 | void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id) | ||
117 | { | ||
118 | kfree(free_irq(pci_irq_vector(dev, nr), dev_id)); | ||
119 | } | ||
120 | EXPORT_SYMBOL(pci_free_irq); | ||
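The kernel-doc above describes pci_request_irq() and pci_free_irq(), which pair naturally with pci_alloc_irq_vectors(). A rough usage sketch follows; the foodev structure, handler, and vector counts are invented for illustration and are not part of this patch.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical per-device state, invented for this sketch. */
struct foodev {
	void __iomem *regs;
};

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foodev *fd = data;

	/* Acknowledge the interrupt in device registers here. */
	(void)fd;
	return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev, struct foodev *fd)
{
	int nvec, i, ret;

	/* MSI-X, MSI or legacy INTx, whichever the device supports. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		ret = pci_request_irq(pdev, i, foo_irq_handler, NULL, fd,
				      "foodev[%s]-%d", pci_name(pdev), i);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		pci_free_irq(pdev, i, fd);
	pci_free_irq_vectors(pdev);
	return ret;
}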
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c new file mode 100644 index 000000000000..9a5e5a9055eb --- /dev/null +++ b/drivers/pci/mmap.c | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * mmap.c — generic PCI resource mmap helper | ||
3 | * | ||
4 | * Copyright © 2017 Amazon.com, Inc. or its affiliates. | ||
5 | * | ||
6 | * Author: David Woodhouse <dwmw2@infradead.org> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/pci.h> | ||
16 | |||
17 | #ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE | ||
18 | |||
19 | /* | ||
20 | * Modern setup: generic pci_mmap_resource_range(), and implement the legacy | ||
21 | * pci_mmap_page_range() (if needed) as a wrapper round it. | ||
22 | */ | ||
23 | |||
24 | #ifdef HAVE_PCI_MMAP | ||
25 | int pci_mmap_page_range(struct pci_dev *pdev, int bar, | ||
26 | struct vm_area_struct *vma, | ||
27 | enum pci_mmap_state mmap_state, int write_combine) | ||
28 | { | ||
29 | resource_size_t start, end; | ||
30 | |||
31 | pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end); | ||
32 | |||
33 | /* Adjust vm_pgoff to be the offset within the resource */ | ||
34 | vma->vm_pgoff -= start >> PAGE_SHIFT; | ||
35 | return pci_mmap_resource_range(pdev, bar, vma, mmap_state, | ||
36 | write_combine); | ||
37 | } | ||
38 | #endif | ||
39 | |||
40 | static const struct vm_operations_struct pci_phys_vm_ops = { | ||
41 | #ifdef CONFIG_HAVE_IOREMAP_PROT | ||
42 | .access = generic_access_phys, | ||
43 | #endif | ||
44 | }; | ||
45 | |||
46 | int pci_mmap_resource_range(struct pci_dev *pdev, int bar, | ||
47 | struct vm_area_struct *vma, | ||
48 | enum pci_mmap_state mmap_state, int write_combine) | ||
49 | { | ||
50 | unsigned long size; | ||
51 | int ret; | ||
52 | |||
53 | size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1; | ||
54 | if (vma->vm_pgoff + vma_pages(vma) > size) | ||
55 | return -EINVAL; | ||
56 | |||
57 | if (write_combine) | ||
58 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
59 | else | ||
60 | vma->vm_page_prot = pgprot_device(vma->vm_page_prot); | ||
61 | |||
62 | if (mmap_state == pci_mmap_io) { | ||
63 | ret = pci_iobar_pfn(pdev, bar, vma); | ||
64 | if (ret) | ||
65 | return ret; | ||
66 | } else | ||
67 | vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT); | ||
68 | |||
69 | vma->vm_ops = &pci_phys_vm_ops; | ||
70 | |||
71 | return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
72 | vma->vm_end - vma->vm_start, | ||
73 | vma->vm_page_prot); | ||
74 | } | ||
75 | |||
76 | #elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */ | ||
77 | |||
78 | /* | ||
79 | * Legacy setup: Implement pci_mmap_resource_range() as a wrapper around | ||
80 | * the architecture's pci_mmap_page_range(), converting to "user visible" | ||
81 | * addresses as necessary. | ||
82 | */ | ||
83 | |||
84 | int pci_mmap_resource_range(struct pci_dev *pdev, int bar, | ||
85 | struct vm_area_struct *vma, | ||
86 | enum pci_mmap_state mmap_state, int write_combine) | ||
87 | { | ||
88 | resource_size_t start, end; | ||
89 | |||
90 | /* | ||
91 | * pci_mmap_page_range() expects the same kind of entry as coming | ||
92 | * from /proc/bus/pci/ which is a "user visible" value. If this is | ||
93 | * different from the resource itself, arch will do necessary fixup. | ||
94 | */ | ||
95 | pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end); | ||
96 | vma->vm_pgoff += start >> PAGE_SHIFT; | ||
97 | return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine); | ||
98 | } | ||
99 | #endif | ||
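pci_mmap_resource_range() is what ultimately backs mmap() on the sysfs resourceN files. For context, a user-space program maps a BAR through that path roughly as shown below; the device path, BAR index, and mapping length are illustrative only.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device and BAR; substitute a real sysfs path. */
	const char *path = "/sys/bus/pci/devices/0000:01:00.0/resource0";
	size_t len = 4096;		/* must not exceed the BAR size */
	int fd = open(path, O_RDWR | O_SYNC);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The offset is relative to the start of the BAR. */
	volatile uint32_t *regs = mmap(NULL, len, PROT_READ | PROT_WRITE,
				       MAP_SHARED, fd, 0);
	if (regs == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("first register: 0x%08x\n", (unsigned)regs[0]);
	munmap((void *)regs, len);
	close(fd);
	return 0;
}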
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 0042c365b29b..ba44fdfda66b 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
@@ -298,7 +298,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | |||
298 | { | 298 | { |
299 | struct pci_dev *dev = msi_desc_to_pci_dev(entry); | 299 | struct pci_dev *dev = msi_desc_to_pci_dev(entry); |
300 | 300 | ||
301 | if (dev->current_state != PCI_D0) { | 301 | if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) { |
302 | /* Don't touch the hardware now */ | 302 | /* Don't touch the hardware now */ |
303 | } else if (entry->msi_attrib.is_msix) { | 303 | } else if (entry->msi_attrib.is_msix) { |
304 | void __iomem *base = pci_msix_desc_addr(entry); | 304 | void __iomem *base = pci_msix_desc_addr(entry); |
@@ -541,7 +541,8 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) | |||
541 | if (affd) { | 541 | if (affd) { |
542 | masks = irq_create_affinity_masks(nvec, affd); | 542 | masks = irq_create_affinity_masks(nvec, affd); |
543 | if (!masks) | 543 | if (!masks) |
544 | pr_err("Unable to allocate affinity masks, ignoring\n"); | 544 | dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n", |
545 | nvec); | ||
545 | } | 546 | } |
546 | 547 | ||
547 | /* MSI Entry Initialization */ | 548 | /* MSI Entry Initialization */ |
@@ -681,7 +682,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, | |||
681 | if (affd) { | 682 | if (affd) { |
682 | masks = irq_create_affinity_masks(nvec, affd); | 683 | masks = irq_create_affinity_masks(nvec, affd); |
683 | if (!masks) | 684 | if (!masks) |
684 | pr_err("Unable to allocate affinity masks, ignoring\n"); | 685 | dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n", |
686 | nvec); | ||
685 | } | 687 | } |
686 | 688 | ||
687 | for (i = 0, curmsk = masks; i < nvec; i++) { | 689 | for (i = 0, curmsk = masks; i < nvec; i++) { |
@@ -882,7 +884,7 @@ int pci_msi_vec_count(struct pci_dev *dev) | |||
882 | } | 884 | } |
883 | EXPORT_SYMBOL(pci_msi_vec_count); | 885 | EXPORT_SYMBOL(pci_msi_vec_count); |
884 | 886 | ||
885 | void pci_msi_shutdown(struct pci_dev *dev) | 887 | static void pci_msi_shutdown(struct pci_dev *dev) |
886 | { | 888 | { |
887 | struct msi_desc *desc; | 889 | struct msi_desc *desc; |
888 | u32 mask; | 890 | u32 mask; |
@@ -973,13 +975,18 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, | |||
973 | return msix_capability_init(dev, entries, nvec, affd); | 975 | return msix_capability_init(dev, entries, nvec, affd); |
974 | } | 976 | } |
975 | 977 | ||
976 | void pci_msix_shutdown(struct pci_dev *dev) | 978 | static void pci_msix_shutdown(struct pci_dev *dev) |
977 | { | 979 | { |
978 | struct msi_desc *entry; | 980 | struct msi_desc *entry; |
979 | 981 | ||
980 | if (!pci_msi_enable || !dev || !dev->msix_enabled) | 982 | if (!pci_msi_enable || !dev || !dev->msix_enabled) |
981 | return; | 983 | return; |
982 | 984 | ||
985 | if (pci_dev_is_disconnected(dev)) { | ||
986 | dev->msix_enabled = 0; | ||
987 | return; | ||
988 | } | ||
989 | |||
983 | /* Return the device with MSI-X masked as initial states */ | 990 | /* Return the device with MSI-X masked as initial states */ |
984 | for_each_pci_msi_entry(entry, dev) { | 991 | for_each_pci_msi_entry(entry, dev) { |
985 | /* Keep cached states to be restored */ | 992 | /* Keep cached states to be restored */ |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index afa72717a979..192e7b681b96 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -394,6 +394,18 @@ void __weak pcibios_free_irq(struct pci_dev *dev) | |||
394 | { | 394 | { |
395 | } | 395 | } |
396 | 396 | ||
397 | #ifdef CONFIG_PCI_IOV | ||
398 | static inline bool pci_device_can_probe(struct pci_dev *pdev) | ||
399 | { | ||
400 | return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe); | ||
401 | } | ||
402 | #else | ||
403 | static inline bool pci_device_can_probe(struct pci_dev *pdev) | ||
404 | { | ||
405 | return true; | ||
406 | } | ||
407 | #endif | ||
408 | |||
397 | static int pci_device_probe(struct device *dev) | 409 | static int pci_device_probe(struct device *dev) |
398 | { | 410 | { |
399 | int error; | 411 | int error; |
@@ -405,10 +417,12 @@ static int pci_device_probe(struct device *dev) | |||
405 | return error; | 417 | return error; |
406 | 418 | ||
407 | pci_dev_get(pci_dev); | 419 | pci_dev_get(pci_dev); |
408 | error = __pci_device_probe(drv, pci_dev); | 420 | if (pci_device_can_probe(pci_dev)) { |
409 | if (error) { | 421 | error = __pci_device_probe(drv, pci_dev); |
410 | pcibios_free_irq(pci_dev); | 422 | if (error) { |
411 | pci_dev_put(pci_dev); | 423 | pcibios_free_irq(pci_dev); |
424 | pci_dev_put(pci_dev); | ||
425 | } | ||
412 | } | 426 | } |
413 | 427 | ||
414 | return error; | 428 | return error; |
@@ -461,8 +475,6 @@ static void pci_device_shutdown(struct device *dev) | |||
461 | 475 | ||
462 | if (drv && drv->shutdown) | 476 | if (drv && drv->shutdown) |
463 | drv->shutdown(pci_dev); | 477 | drv->shutdown(pci_dev); |
464 | pci_msi_shutdown(pci_dev); | ||
465 | pci_msix_shutdown(pci_dev); | ||
466 | 478 | ||
467 | /* | 479 | /* |
468 | * If this is a kexec reboot, turn off Bus Master bit on the | 480 | * If this is a kexec reboot, turn off Bus Master bit on the |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 25d010d449a3..31e99613a12e 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -526,10 +526,37 @@ exit: | |||
526 | return count; | 526 | return count; |
527 | } | 527 | } |
528 | 528 | ||
529 | static ssize_t sriov_drivers_autoprobe_show(struct device *dev, | ||
530 | struct device_attribute *attr, | ||
531 | char *buf) | ||
532 | { | ||
533 | struct pci_dev *pdev = to_pci_dev(dev); | ||
534 | |||
535 | return sprintf(buf, "%u\n", pdev->sriov->drivers_autoprobe); | ||
536 | } | ||
537 | |||
538 | static ssize_t sriov_drivers_autoprobe_store(struct device *dev, | ||
539 | struct device_attribute *attr, | ||
540 | const char *buf, size_t count) | ||
541 | { | ||
542 | struct pci_dev *pdev = to_pci_dev(dev); | ||
543 | bool drivers_autoprobe; | ||
544 | |||
545 | if (kstrtobool(buf, &drivers_autoprobe) < 0) | ||
546 | return -EINVAL; | ||
547 | |||
548 | pdev->sriov->drivers_autoprobe = drivers_autoprobe; | ||
549 | |||
550 | return count; | ||
551 | } | ||
552 | |||
529 | static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs); | 553 | static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs); |
530 | static struct device_attribute sriov_numvfs_attr = | 554 | static struct device_attribute sriov_numvfs_attr = |
531 | __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP), | 555 | __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP), |
532 | sriov_numvfs_show, sriov_numvfs_store); | 556 | sriov_numvfs_show, sriov_numvfs_store); |
557 | static struct device_attribute sriov_drivers_autoprobe_attr = | ||
558 | __ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP), | ||
559 | sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store); | ||
533 | #endif /* CONFIG_PCI_IOV */ | 560 | #endif /* CONFIG_PCI_IOV */ |
534 | 561 | ||
535 | static ssize_t driver_override_store(struct device *dev, | 562 | static ssize_t driver_override_store(struct device *dev, |
@@ -980,20 +1007,24 @@ void pci_remove_legacy_files(struct pci_bus *b) | |||
980 | } | 1007 | } |
981 | #endif /* HAVE_PCI_LEGACY */ | 1008 | #endif /* HAVE_PCI_LEGACY */ |
982 | 1009 | ||
983 | #ifdef HAVE_PCI_MMAP | 1010 | #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE) |
984 | 1011 | ||
985 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, | 1012 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, |
986 | enum pci_mmap_api mmap_api) | 1013 | enum pci_mmap_api mmap_api) |
987 | { | 1014 | { |
988 | unsigned long nr, start, size, pci_start; | 1015 | unsigned long nr, start, size; |
1016 | resource_size_t pci_start = 0, pci_end; | ||
989 | 1017 | ||
990 | if (pci_resource_len(pdev, resno) == 0) | 1018 | if (pci_resource_len(pdev, resno) == 0) |
991 | return 0; | 1019 | return 0; |
992 | nr = vma_pages(vma); | 1020 | nr = vma_pages(vma); |
993 | start = vma->vm_pgoff; | 1021 | start = vma->vm_pgoff; |
994 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; | 1022 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; |
995 | pci_start = (mmap_api == PCI_MMAP_PROCFS) ? | 1023 | if (mmap_api == PCI_MMAP_PROCFS) { |
996 | pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; | 1024 | pci_resource_to_user(pdev, resno, &pdev->resource[resno], |
1025 | &pci_start, &pci_end); | ||
1026 | pci_start >>= PAGE_SHIFT; | ||
1027 | } | ||
997 | if (start >= pci_start && start < pci_start + size && | 1028 | if (start >= pci_start && start < pci_start + size && |
998 | start + nr <= pci_start + size) | 1029 | start + nr <= pci_start + size) |
999 | return 1; | 1030 | return 1; |
@@ -1013,37 +1044,24 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
1013 | struct vm_area_struct *vma, int write_combine) | 1044 | struct vm_area_struct *vma, int write_combine) |
1014 | { | 1045 | { |
1015 | struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); | 1046 | struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); |
1016 | struct resource *res = attr->private; | 1047 | int bar = (unsigned long)attr->private; |
1017 | enum pci_mmap_state mmap_type; | 1048 | enum pci_mmap_state mmap_type; |
1018 | resource_size_t start, end; | 1049 | struct resource *res = &pdev->resource[bar]; |
1019 | int i; | ||
1020 | |||
1021 | for (i = 0; i < PCI_ROM_RESOURCE; i++) | ||
1022 | if (res == &pdev->resource[i]) | ||
1023 | break; | ||
1024 | if (i >= PCI_ROM_RESOURCE) | ||
1025 | return -ENODEV; | ||
1026 | 1050 | ||
1027 | if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) | 1051 | if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) |
1028 | return -EINVAL; | 1052 | return -EINVAL; |
1029 | 1053 | ||
1030 | if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { | 1054 | if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS)) { |
1031 | WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", | 1055 | WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", |
1032 | current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, | 1056 | current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, |
1033 | pci_name(pdev), i, | 1057 | pci_name(pdev), bar, |
1034 | (u64)pci_resource_start(pdev, i), | 1058 | (u64)pci_resource_start(pdev, bar), |
1035 | (u64)pci_resource_len(pdev, i)); | 1059 | (u64)pci_resource_len(pdev, bar)); |
1036 | return -EINVAL; | 1060 | return -EINVAL; |
1037 | } | 1061 | } |
1038 | |||
1039 | /* pci_mmap_page_range() expects the same kind of entry as coming | ||
1040 | * from /proc/bus/pci/ which is a "user visible" value. If this is | ||
1041 | * different from the resource itself, arch will do necessary fixup. | ||
1042 | */ | ||
1043 | pci_resource_to_user(pdev, i, res, &start, &end); | ||
1044 | vma->vm_pgoff += start >> PAGE_SHIFT; | ||
1045 | mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; | 1062 | mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; |
1046 | return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); | 1063 | |
1064 | return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine); | ||
1047 | } | 1065 | } |
1048 | 1066 | ||
1049 | static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj, | 1067 | static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj, |
@@ -1065,22 +1083,18 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj, | |||
1065 | loff_t off, size_t count, bool write) | 1083 | loff_t off, size_t count, bool write) |
1066 | { | 1084 | { |
1067 | struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); | 1085 | struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); |
1068 | struct resource *res = attr->private; | 1086 | int bar = (unsigned long)attr->private; |
1087 | struct resource *res; | ||
1069 | unsigned long port = off; | 1088 | unsigned long port = off; |
1070 | int i; | ||
1071 | 1089 | ||
1072 | for (i = 0; i < PCI_ROM_RESOURCE; i++) | 1090 | res = &pdev->resource[bar]; |
1073 | if (res == &pdev->resource[i]) | ||
1074 | break; | ||
1075 | if (i >= PCI_ROM_RESOURCE) | ||
1076 | return -ENODEV; | ||
1077 | 1091 | ||
1078 | port += pci_resource_start(pdev, i); | 1092 | port += pci_resource_start(pdev, bar); |
1079 | 1093 | ||
1080 | if (port > pci_resource_end(pdev, i)) | 1094 | if (port > pci_resource_end(pdev, bar)) |
1081 | return 0; | 1095 | return 0; |
1082 | 1096 | ||
1083 | if (port + count - 1 > pci_resource_end(pdev, i)) | 1097 | if (port + count - 1 > pci_resource_end(pdev, bar)) |
1084 | return -EINVAL; | 1098 | return -EINVAL; |
1085 | 1099 | ||
1086 | switch (count) { | 1100 | switch (count) { |
@@ -1170,16 +1184,19 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) | |||
1170 | } else { | 1184 | } else { |
1171 | pdev->res_attr[num] = res_attr; | 1185 | pdev->res_attr[num] = res_attr; |
1172 | sprintf(res_attr_name, "resource%d", num); | 1186 | sprintf(res_attr_name, "resource%d", num); |
1173 | res_attr->mmap = pci_mmap_resource_uc; | 1187 | if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { |
1174 | } | 1188 | res_attr->read = pci_read_resource_io; |
1175 | if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { | 1189 | res_attr->write = pci_write_resource_io; |
1176 | res_attr->read = pci_read_resource_io; | 1190 | if (arch_can_pci_mmap_io()) |
1177 | res_attr->write = pci_write_resource_io; | 1191 | res_attr->mmap = pci_mmap_resource_uc; |
1192 | } else { | ||
1193 | res_attr->mmap = pci_mmap_resource_uc; | ||
1194 | } | ||
1178 | } | 1195 | } |
1179 | res_attr->attr.name = res_attr_name; | 1196 | res_attr->attr.name = res_attr_name; |
1180 | res_attr->attr.mode = S_IRUSR | S_IWUSR; | 1197 | res_attr->attr.mode = S_IRUSR | S_IWUSR; |
1181 | res_attr->size = pci_resource_len(pdev, num); | 1198 | res_attr->size = pci_resource_len(pdev, num); |
1182 | res_attr->private = &pdev->resource[num]; | 1199 | res_attr->private = (void *)(unsigned long)num; |
1183 | retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); | 1200 | retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); |
1184 | if (retval) | 1201 | if (retval) |
1185 | kfree(res_attr); | 1202 | kfree(res_attr); |
@@ -1207,9 +1224,9 @@ static int pci_create_resource_files(struct pci_dev *pdev) | |||
1207 | 1224 | ||
1208 | retval = pci_create_attr(pdev, i, 0); | 1225 | retval = pci_create_attr(pdev, i, 0); |
1209 | /* for prefetchable resources, create a WC mappable file */ | 1226 | /* for prefetchable resources, create a WC mappable file */ |
1210 | if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH) | 1227 | if (!retval && arch_can_pci_mmap_wc() && |
1228 | pdev->resource[i].flags & IORESOURCE_PREFETCH) | ||
1211 | retval = pci_create_attr(pdev, i, 1); | 1229 | retval = pci_create_attr(pdev, i, 1); |
1212 | |||
1213 | if (retval) { | 1230 | if (retval) { |
1214 | pci_remove_resource_files(pdev); | 1231 | pci_remove_resource_files(pdev); |
1215 | return retval; | 1232 | return retval; |
@@ -1549,6 +1566,7 @@ static struct attribute_group pci_dev_hp_attr_group = { | |||
1549 | static struct attribute *sriov_dev_attrs[] = { | 1566 | static struct attribute *sriov_dev_attrs[] = { |
1550 | &sriov_totalvfs_attr.attr, | 1567 | &sriov_totalvfs_attr.attr, |
1551 | &sriov_numvfs_attr.attr, | 1568 | &sriov_numvfs_attr.attr, |
1569 | &sriov_drivers_autoprobe_attr.attr, | ||
1552 | NULL, | 1570 | NULL, |
1553 | }; | 1571 | }; |
1554 | 1572 | ||
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 7904d02ffdb9..b01bd5bba8e6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -66,7 +66,8 @@ static void pci_dev_d3_sleep(struct pci_dev *dev) | |||
66 | if (delay < pci_pm_d3_delay) | 66 | if (delay < pci_pm_d3_delay) |
67 | delay = pci_pm_d3_delay; | 67 | delay = pci_pm_d3_delay; |
68 | 68 | ||
69 | msleep(delay); | 69 | if (delay) |
70 | msleep(delay); | ||
70 | } | 71 | } |
71 | 72 | ||
72 | #ifdef CONFIG_PCI_DOMAINS | 73 | #ifdef CONFIG_PCI_DOMAINS |
@@ -827,7 +828,8 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) | |||
827 | * because have already delayed for the bridge. | 828 | * because have already delayed for the bridge. |
828 | */ | 829 | */ |
829 | if (dev->runtime_d3cold) { | 830 | if (dev->runtime_d3cold) { |
830 | msleep(dev->d3cold_delay); | 831 | if (dev->d3cold_delay) |
832 | msleep(dev->d3cold_delay); | ||
831 | /* | 833 | /* |
832 | * When powering on a bridge from D3cold, the | 834 | * When powering on a bridge from D3cold, the |
833 | * whole hierarchy may be powered on into | 835 | * whole hierarchy may be powered on into |
@@ -1782,8 +1784,8 @@ static void pci_pme_list_scan(struct work_struct *work) | |||
1782 | } | 1784 | } |
1783 | } | 1785 | } |
1784 | if (!list_empty(&pci_pme_list)) | 1786 | if (!list_empty(&pci_pme_list)) |
1785 | schedule_delayed_work(&pci_pme_work, | 1787 | queue_delayed_work(system_freezable_wq, &pci_pme_work, |
1786 | msecs_to_jiffies(PME_TIMEOUT)); | 1788 | msecs_to_jiffies(PME_TIMEOUT)); |
1787 | mutex_unlock(&pci_pme_list_mutex); | 1789 | mutex_unlock(&pci_pme_list_mutex); |
1788 | } | 1790 | } |
1789 | 1791 | ||
@@ -1848,8 +1850,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
1848 | mutex_lock(&pci_pme_list_mutex); | 1850 | mutex_lock(&pci_pme_list_mutex); |
1849 | list_add(&pme_dev->list, &pci_pme_list); | 1851 | list_add(&pme_dev->list, &pci_pme_list); |
1850 | if (list_is_singular(&pci_pme_list)) | 1852 | if (list_is_singular(&pci_pme_list)) |
1851 | schedule_delayed_work(&pci_pme_work, | 1853 | queue_delayed_work(system_freezable_wq, |
1852 | msecs_to_jiffies(PME_TIMEOUT)); | 1854 | &pci_pme_work, |
1855 | msecs_to_jiffies(PME_TIMEOUT)); | ||
1853 | mutex_unlock(&pci_pme_list_mutex); | 1856 | mutex_unlock(&pci_pme_list_mutex); |
1854 | } else { | 1857 | } else { |
1855 | mutex_lock(&pci_pme_list_mutex); | 1858 | mutex_lock(&pci_pme_list_mutex); |
@@ -3363,7 +3366,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address) | |||
3363 | * Only architectures that have memory mapped IO functions defined | 3366 | * Only architectures that have memory mapped IO functions defined |
3364 | * (and the PCI_IOBASE value defined) should call this function. | 3367 | * (and the PCI_IOBASE value defined) should call this function. |
3365 | */ | 3368 | */ |
3366 | int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) | 3369 | int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) |
3367 | { | 3370 | { |
3368 | #if defined(PCI_IOBASE) && defined(CONFIG_MMU) | 3371 | #if defined(PCI_IOBASE) && defined(CONFIG_MMU) |
3369 | unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; | 3372 | unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; |
@@ -3383,6 +3386,7 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) | |||
3383 | return -ENODEV; | 3386 | return -ENODEV; |
3384 | #endif | 3387 | #endif |
3385 | } | 3388 | } |
3389 | EXPORT_SYMBOL(pci_remap_iospace); | ||
3386 | 3390 | ||
3387 | /** | 3391 | /** |
3388 | * pci_unmap_iospace - Unmap the memory mapped I/O space | 3392 | * pci_unmap_iospace - Unmap the memory mapped I/O space |
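With pci_remap_iospace() no longer __weak and now exported, a modular host-bridge driver can map its legacy I/O window itself. A minimal sketch, assuming a hypothetical driver that has already obtained an I/O resource (io_res) and the CPU physical address of the window (pci_io_phys) from its firmware description:

	#include <linux/device.h>
	#include <linux/pci.h>

	static int mybridge_map_io(struct device *dev, struct resource *io_res,
				   phys_addr_t pci_io_phys)
	{
		int err;

		/* Map the physical I/O window into the kernel's PCI_IOBASE area */
		err = pci_remap_iospace(io_res, pci_io_phys);
		if (err)
			dev_warn(dev, "error %d mapping I/O %pR\n", err, io_res);

		return err;
	}
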
@@ -3400,6 +3404,89 @@ void pci_unmap_iospace(struct resource *res) | |||
3400 | unmap_kernel_range(vaddr, resource_size(res)); | 3404 | unmap_kernel_range(vaddr, resource_size(res)); |
3401 | #endif | 3405 | #endif |
3402 | } | 3406 | } |
3407 | EXPORT_SYMBOL(pci_unmap_iospace); | ||
3408 | |||
3409 | /** | ||
3410 | * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() | ||
3411 | * @dev: Generic device to remap IO address for | ||
3412 | * @offset: Resource address to map | ||
3413 | * @size: Size of map | ||
3414 | * | ||
3415 | * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver | ||
3416 | * detach. | ||
3417 | */ | ||
3418 | void __iomem *devm_pci_remap_cfgspace(struct device *dev, | ||
3419 | resource_size_t offset, | ||
3420 | resource_size_t size) | ||
3421 | { | ||
3422 | void __iomem **ptr, *addr; | ||
3423 | |||
3424 | ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); | ||
3425 | if (!ptr) | ||
3426 | return NULL; | ||
3427 | |||
3428 | addr = pci_remap_cfgspace(offset, size); | ||
3429 | if (addr) { | ||
3430 | *ptr = addr; | ||
3431 | devres_add(dev, ptr); | ||
3432 | } else | ||
3433 | devres_free(ptr); | ||
3434 | |||
3435 | return addr; | ||
3436 | } | ||
3437 | EXPORT_SYMBOL(devm_pci_remap_cfgspace); | ||
3438 | |||
3439 | /** | ||
3440 | * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource | ||
3441 | * @dev: generic device to handle the resource for | ||
3442 | * @res: configuration space resource to be handled | ||
3443 | * | ||
3444 | * Checks that a resource is a valid memory region, requests the memory | ||
3445 | * region and ioremaps with pci_remap_cfgspace() API that ensures the | ||
3446 | * proper PCI configuration space memory attributes are guaranteed. | ||
3447 | * | ||
3448 | * All operations are managed and will be undone on driver detach. | ||
3449 | * | ||
3450 | * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code | ||
3451 | * on failure. Usage example: | ||
3452 | * | ||
3453 | * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
3454 | * base = devm_pci_remap_cfg_resource(&pdev->dev, res); | ||
3455 | * if (IS_ERR(base)) | ||
3456 | * return PTR_ERR(base); | ||
3457 | */ | ||
3458 | void __iomem *devm_pci_remap_cfg_resource(struct device *dev, | ||
3459 | struct resource *res) | ||
3460 | { | ||
3461 | resource_size_t size; | ||
3462 | const char *name; | ||
3463 | void __iomem *dest_ptr; | ||
3464 | |||
3465 | BUG_ON(!dev); | ||
3466 | |||
3467 | if (!res || resource_type(res) != IORESOURCE_MEM) { | ||
3468 | dev_err(dev, "invalid resource\n"); | ||
3469 | return IOMEM_ERR_PTR(-EINVAL); | ||
3470 | } | ||
3471 | |||
3472 | size = resource_size(res); | ||
3473 | name = res->name ?: dev_name(dev); | ||
3474 | |||
3475 | if (!devm_request_mem_region(dev, res->start, size, name)) { | ||
3476 | dev_err(dev, "can't request region for resource %pR\n", res); | ||
3477 | return IOMEM_ERR_PTR(-EBUSY); | ||
3478 | } | ||
3479 | |||
3480 | dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size); | ||
3481 | if (!dest_ptr) { | ||
3482 | dev_err(dev, "ioremap failed for resource %pR\n", res); | ||
3483 | devm_release_mem_region(dev, res->start, size); | ||
3484 | dest_ptr = IOMEM_ERR_PTR(-ENOMEM); | ||
3485 | } | ||
3486 | |||
3487 | return dest_ptr; | ||
3488 | } | ||
3489 | EXPORT_SYMBOL(devm_pci_remap_cfg_resource); | ||
3403 | 3490 | ||
3404 | static void __pci_set_master(struct pci_dev *dev, bool enable) | 3491 | static void __pci_set_master(struct pci_dev *dev, bool enable) |
3405 | { | 3492 | { |
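A minimal sketch of how a platform-based host controller driver might use the new managed helper in its probe path; the driver and resource layout are hypothetical, and both the region request and the mapping are undone automatically on driver detach:

	#include <linux/err.h>
	#include <linux/pci.h>
	#include <linux/platform_device.h>

	static int myhost_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *cfg_base;

		/* First MEM resource is assumed to be the config space window */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		cfg_base = devm_pci_remap_cfg_resource(&pdev->dev, res);
		if (IS_ERR(cfg_base))
			return PTR_ERR(cfg_base);

		/* cfg_base is mapped with attributes suitable for config space
		 * accesses; host bridge setup would continue from here. */
		return 0;
	}
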
@@ -3773,24 +3860,41 @@ static void pci_flr_wait(struct pci_dev *dev) | |||
3773 | (i - 1) * 100); | 3860 | (i - 1) * 100); |
3774 | } | 3861 | } |
3775 | 3862 | ||
3776 | static int pcie_flr(struct pci_dev *dev, int probe) | 3863 | /** |
3864 | * pcie_has_flr - check if a device supports function level resets | ||
3865 | * @dev: device to check | ||
3866 | * | ||
3867 | * Returns true if the device advertises support for PCIe function level | ||
3868 | * resets. | ||
3869 | */ | ||
3870 | static bool pcie_has_flr(struct pci_dev *dev) | ||
3777 | { | 3871 | { |
3778 | u32 cap; | 3872 | u32 cap; |
3779 | 3873 | ||
3780 | pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); | 3874 | if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) |
3781 | if (!(cap & PCI_EXP_DEVCAP_FLR)) | 3875 | return false; |
3782 | return -ENOTTY; | ||
3783 | 3876 | ||
3784 | if (probe) | 3877 | pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); |
3785 | return 0; | 3878 | return cap & PCI_EXP_DEVCAP_FLR; |
3879 | } | ||
3786 | 3880 | ||
3881 | /** | ||
3882 | * pcie_flr - initiate a PCIe function level reset | ||
3883 | * @dev: device to reset | ||
3884 | * | ||
3885 | * Initiate a function level reset on @dev. The caller should ensure the | ||
3886 | * device supports FLR before calling this function, e.g. by using the | ||
3887 | * pcie_has_flr() helper. | ||
3888 | */ | ||
3889 | void pcie_flr(struct pci_dev *dev) | ||
3890 | { | ||
3787 | if (!pci_wait_for_pending_transaction(dev)) | 3891 | if (!pci_wait_for_pending_transaction(dev)) |
3788 | dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); | 3892 | dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); |
3789 | 3893 | ||
3790 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); | 3894 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); |
3791 | pci_flr_wait(dev); | 3895 | pci_flr_wait(dev); |
3792 | return 0; | ||
3793 | } | 3896 | } |
3897 | EXPORT_SYMBOL_GPL(pcie_flr); | ||
3794 | 3898 | ||
3795 | static int pci_af_flr(struct pci_dev *dev, int probe) | 3899 | static int pci_af_flr(struct pci_dev *dev, int probe) |
3796 | { | 3900 | { |
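Since pcie_flr() is now exported but no longer performs the DEVCAP check itself, and pcie_has_flr() stays internal to the PCI core, a driver module calling it is expected to confirm FLR support on its own first. A hedged sketch of such a caller, with a hypothetical driver name:

	#include <linux/pci.h>

	static void mydrv_reset_function(struct pci_dev *pdev)
	{
		u32 cap;

		/* Mirror the check pcie_has_flr() performs inside the PCI core */
		pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP, &cap);
		if (!(cap & PCI_EXP_DEVCAP_FLR))
			return;

		pcie_flr(pdev);	/* waits for pending transactions, then resets */
	}
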
@@ -3801,6 +3905,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe) | |||
3801 | if (!pos) | 3905 | if (!pos) |
3802 | return -ENOTTY; | 3906 | return -ENOTTY; |
3803 | 3907 | ||
3908 | if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) | ||
3909 | return -ENOTTY; | ||
3910 | |||
3804 | pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); | 3911 | pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); |
3805 | if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) | 3912 | if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) |
3806 | return -ENOTTY; | 3913 | return -ENOTTY; |
@@ -3971,9 +4078,12 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe) | |||
3971 | if (rc != -ENOTTY) | 4078 | if (rc != -ENOTTY) |
3972 | goto done; | 4079 | goto done; |
3973 | 4080 | ||
3974 | rc = pcie_flr(dev, probe); | 4081 | if (pcie_has_flr(dev)) { |
3975 | if (rc != -ENOTTY) | 4082 | if (!probe) |
4083 | pcie_flr(dev); | ||
4084 | rc = 0; | ||
3976 | goto done; | 4085 | goto done; |
4086 | } | ||
3977 | 4087 | ||
3978 | rc = pci_af_flr(dev, probe); | 4088 | rc = pci_af_flr(dev, probe); |
3979 | if (rc != -ENOTTY) | 4089 | if (rc != -ENOTTY) |
@@ -4932,6 +5042,8 @@ bool pci_device_is_present(struct pci_dev *pdev) | |||
4932 | { | 5042 | { |
4933 | u32 v; | 5043 | u32 v; |
4934 | 5044 | ||
5045 | if (pci_dev_is_disconnected(pdev)) | ||
5046 | return false; | ||
4935 | return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); | 5047 | return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); |
4936 | } | 5048 | } |
4937 | EXPORT_SYMBOL_GPL(pci_device_is_present); | 5049 | EXPORT_SYMBOL_GPL(pci_device_is_present); |
@@ -4947,6 +5059,11 @@ void pci_ignore_hotplug(struct pci_dev *dev) | |||
4947 | } | 5059 | } |
4948 | EXPORT_SYMBOL_GPL(pci_ignore_hotplug); | 5060 | EXPORT_SYMBOL_GPL(pci_ignore_hotplug); |
4949 | 5061 | ||
5062 | resource_size_t __weak pcibios_default_alignment(void) | ||
5063 | { | ||
5064 | return 0; | ||
5065 | } | ||
5066 | |||
4950 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE | 5067 | #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE |
4951 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; | 5068 | static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; |
4952 | static DEFINE_SPINLOCK(resource_alignment_lock); | 5069 | static DEFINE_SPINLOCK(resource_alignment_lock); |
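pcibios_default_alignment() is a new weak hook, so an architecture can impose a minimum BAR alignment for every device without any pci=resource_alignment= argument; in that case the resources are realigned without being resized (see pci_request_resource_alignment() below). A hedged sketch of what such an arch override could look like:

	/* Hypothetical arch override: keep every BAR on its own page so
	 * individual BARs can be mapped to userspace or guests safely. */
	resource_size_t pcibios_default_alignment(void)
	{
		return PAGE_SIZE;
	}
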
@@ -4954,22 +5071,25 @@ static DEFINE_SPINLOCK(resource_alignment_lock); | |||
4954 | /** | 5071 | /** |
4955 | * pci_specified_resource_alignment - get resource alignment specified by user. | 5072 | * pci_specified_resource_alignment - get resource alignment specified by user. |
4956 | * @dev: the PCI device to get | 5073 | * @dev: the PCI device to get |
5074 | * @resize: whether or not to change resources' size when reassigning alignment | ||
4957 | * | 5075 | * |
4958 | * RETURNS: Resource alignment if it is specified. | 5076 | * RETURNS: Resource alignment if it is specified. |
4959 | * Zero if it is not specified. | 5077 | * Zero if it is not specified. |
4960 | */ | 5078 | */ |
4961 | static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) | 5079 | static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, |
5080 | bool *resize) | ||
4962 | { | 5081 | { |
4963 | int seg, bus, slot, func, align_order, count; | 5082 | int seg, bus, slot, func, align_order, count; |
4964 | unsigned short vendor, device, subsystem_vendor, subsystem_device; | 5083 | unsigned short vendor, device, subsystem_vendor, subsystem_device; |
4965 | resource_size_t align = 0; | 5084 | resource_size_t align = pcibios_default_alignment(); |
4966 | char *p; | 5085 | char *p; |
4967 | 5086 | ||
4968 | spin_lock(&resource_alignment_lock); | 5087 | spin_lock(&resource_alignment_lock); |
4969 | p = resource_alignment_param; | 5088 | p = resource_alignment_param; |
4970 | if (!*p) | 5089 | if (!*p && !align) |
4971 | goto out; | 5090 | goto out; |
4972 | if (pci_has_flag(PCI_PROBE_ONLY)) { | 5091 | if (pci_has_flag(PCI_PROBE_ONLY)) { |
5092 | align = 0; | ||
4973 | pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n"); | 5093 | pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n"); |
4974 | goto out; | 5094 | goto out; |
4975 | } | 5095 | } |
@@ -4999,6 +5119,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) | |||
4999 | (!device || (device == dev->device)) && | 5119 | (!device || (device == dev->device)) && |
5000 | (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) && | 5120 | (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) && |
5001 | (!subsystem_device || (subsystem_device == dev->subsystem_device))) { | 5121 | (!subsystem_device || (subsystem_device == dev->subsystem_device))) { |
5122 | *resize = true; | ||
5002 | if (align_order == -1) | 5123 | if (align_order == -1) |
5003 | align = PAGE_SIZE; | 5124 | align = PAGE_SIZE; |
5004 | else | 5125 | else |
@@ -5024,6 +5145,7 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev) | |||
5024 | bus == dev->bus->number && | 5145 | bus == dev->bus->number && |
5025 | slot == PCI_SLOT(dev->devfn) && | 5146 | slot == PCI_SLOT(dev->devfn) && |
5026 | func == PCI_FUNC(dev->devfn)) { | 5147 | func == PCI_FUNC(dev->devfn)) { |
5148 | *resize = true; | ||
5027 | if (align_order == -1) | 5149 | if (align_order == -1) |
5028 | align = PAGE_SIZE; | 5150 | align = PAGE_SIZE; |
5029 | else | 5151 | else |
@@ -5043,6 +5165,68 @@ out: | |||
5043 | return align; | 5165 | return align; |
5044 | } | 5166 | } |
5045 | 5167 | ||
5168 | static void pci_request_resource_alignment(struct pci_dev *dev, int bar, | ||
5169 | resource_size_t align, bool resize) | ||
5170 | { | ||
5171 | struct resource *r = &dev->resource[bar]; | ||
5172 | resource_size_t size; | ||
5173 | |||
5174 | if (!(r->flags & IORESOURCE_MEM)) | ||
5175 | return; | ||
5176 | |||
5177 | if (r->flags & IORESOURCE_PCI_FIXED) { | ||
5178 | dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n", | ||
5179 | bar, r, (unsigned long long)align); | ||
5180 | return; | ||
5181 | } | ||
5182 | |||
5183 | size = resource_size(r); | ||
5184 | if (size >= align) | ||
5185 | return; | ||
5186 | |||
5187 | /* | ||
5188 | * Increase the alignment of the resource. There are two ways we | ||
5189 | * can do this: | ||
5190 | * | ||
5191 | * 1) Increase the size of the resource. BARs are aligned on their | ||
5192 | * size, so when we reallocate space for this resource, we'll | ||
5193 | * allocate it with the larger alignment. This also prevents | ||
5194 | * assignment of any other BARs inside the alignment region, so | ||
5195 | * if we're requesting page alignment, this means no other BARs | ||
5196 | * will share the page. | ||
5197 | * | ||
5198 | * The disadvantage is that this makes the resource larger than | ||
5199 | * the hardware BAR, which may break drivers that compute things | ||
5200 | * based on the resource size, e.g., to find registers at a | ||
5201 | * fixed offset before the end of the BAR. | ||
5202 | * | ||
5203 | * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and | ||
5204 | * set r->start to the desired alignment. By itself this | ||
5205 | * doesn't prevent other BARs being put inside the alignment | ||
5206 | * region, but if we realign *every* resource of every device in | ||
5207 | * the system, none of them will share an alignment region. | ||
5208 | * | ||
5209 | * When the user has requested alignment for only some devices via | ||
5210 | * the "pci=resource_alignment" argument, "resize" is true and we | ||
5211 | * use the first method. Otherwise we assume we're aligning all | ||
5212 | * devices and we use the second. | ||
5213 | */ | ||
5214 | |||
5215 | dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n", | ||
5216 | bar, r, (unsigned long long)align); | ||
5217 | |||
5218 | if (resize) { | ||
5219 | r->start = 0; | ||
5220 | r->end = align - 1; | ||
5221 | } else { | ||
5222 | r->flags &= ~IORESOURCE_SIZEALIGN; | ||
5223 | r->flags |= IORESOURCE_STARTALIGN; | ||
5224 | r->start = align; | ||
5225 | r->end = r->start + size - 1; | ||
5226 | } | ||
5227 | r->flags |= IORESOURCE_UNSET; | ||
5228 | } | ||
5229 | |||
5046 | /* | 5230 | /* |
5047 | * This function disables memory decoding and releases memory resources | 5231 | * This function disables memory decoding and releases memory resources |
5048 | * of the device specified by kernel's boot parameter 'pci=resource_alignment='. | 5232 | * of the device specified by kernel's boot parameter 'pci=resource_alignment='. |
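To make the two strategies in pci_request_resource_alignment() concrete, take a 256-byte (0x100) memory BAR with a requested alignment of 4 KiB (0x1000); the numbers are purely illustrative:

	/* resize == true (user asked via pci=resource_alignment=) */
	r->start = 0x0;    r->end = 0xfff;   /* size grows to 0x1000 */

	/* resize == false (arch-wide default alignment) */
	r->start = 0x1000; r->end = 0x10ff;  /* size stays 0x100; r->start
						carries the alignment via
						IORESOURCE_STARTALIGN */

In both cases IORESOURCE_UNSET is also set, so the BAR is reassigned later with the requested alignment.
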
@@ -5054,8 +5238,9 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) | |||
5054 | { | 5238 | { |
5055 | int i; | 5239 | int i; |
5056 | struct resource *r; | 5240 | struct resource *r; |
5057 | resource_size_t align, size; | 5241 | resource_size_t align; |
5058 | u16 command; | 5242 | u16 command; |
5243 | bool resize = false; | ||
5059 | 5244 | ||
5060 | /* | 5245 | /* |
5061 | * VF BARs are read-only zero according to SR-IOV spec r1.1, sec | 5246 | * VF BARs are read-only zero according to SR-IOV spec r1.1, sec |
@@ -5067,7 +5252,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) | |||
5067 | return; | 5252 | return; |
5068 | 5253 | ||
5069 | /* check if specified PCI is target device to reassign */ | 5254 | /* check if specified PCI is target device to reassign */ |
5070 | align = pci_specified_resource_alignment(dev); | 5255 | align = pci_specified_resource_alignment(dev, &resize); |
5071 | if (!align) | 5256 | if (!align) |
5072 | return; | 5257 | return; |
5073 | 5258 | ||
@@ -5084,28 +5269,11 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) | |||
5084 | command &= ~PCI_COMMAND_MEMORY; | 5269 | command &= ~PCI_COMMAND_MEMORY; |
5085 | pci_write_config_word(dev, PCI_COMMAND, command); | 5270 | pci_write_config_word(dev, PCI_COMMAND, command); |
5086 | 5271 | ||
5087 | for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { | 5272 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) |
5088 | r = &dev->resource[i]; | 5273 | pci_request_resource_alignment(dev, i, align, resize); |
5089 | if (!(r->flags & IORESOURCE_MEM)) | ||
5090 | continue; | ||
5091 | if (r->flags & IORESOURCE_PCI_FIXED) { | ||
5092 | dev_info(&dev->dev, "Ignoring requested alignment for BAR%d: %pR\n", | ||
5093 | i, r); | ||
5094 | continue; | ||
5095 | } | ||
5096 | 5274 | ||
5097 | size = resource_size(r); | 5275 | /* |
5098 | if (size < align) { | 5276 | * Need to disable bridge's resource window, |
5099 | size = align; | ||
5100 | dev_info(&dev->dev, | ||
5101 | "Rounding up size of resource #%d to %#llx.\n", | ||
5102 | i, (unsigned long long)size); | ||
5103 | } | ||
5104 | r->flags |= IORESOURCE_UNSET; | ||
5105 | r->end = size - 1; | ||
5106 | r->start = 0; | ||
5107 | } | ||
5108 | /* Need to disable bridge's resource window, | ||
5109 | * to enable the kernel to reassign new resource | 5277 | * to enable the kernel to reassign new resource |
5110 | * window later on. | 5278 | * window later on. |
5111 | */ | 5279 | */ |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 4dbf9f96ae5b..f8113e5b9812 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -23,14 +23,14 @@ void pci_create_firmware_label_files(struct pci_dev *pdev); | |||
23 | void pci_remove_firmware_label_files(struct pci_dev *pdev); | 23 | void pci_remove_firmware_label_files(struct pci_dev *pdev); |
24 | #endif | 24 | #endif |
25 | void pci_cleanup_rom(struct pci_dev *dev); | 25 | void pci_cleanup_rom(struct pci_dev *dev); |
26 | #ifdef HAVE_PCI_MMAP | 26 | |
27 | enum pci_mmap_api { | 27 | enum pci_mmap_api { |
28 | PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ | 28 | PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ |
29 | PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ | 29 | PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ |
30 | }; | 30 | }; |
31 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai, | 31 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai, |
32 | enum pci_mmap_api mmap_api); | 32 | enum pci_mmap_api mmap_api); |
33 | #endif | 33 | |
34 | int pci_probe_reset_function(struct pci_dev *dev); | 34 | int pci_probe_reset_function(struct pci_dev *dev); |
35 | 35 | ||
36 | /** | 36 | /** |
@@ -274,8 +274,23 @@ struct pci_sriov { | |||
274 | struct pci_dev *self; /* this PF */ | 274 | struct pci_dev *self; /* this PF */ |
275 | struct mutex lock; /* lock for setting sriov_numvfs in sysfs */ | 275 | struct mutex lock; /* lock for setting sriov_numvfs in sysfs */ |
276 | resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ | 276 | resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ |
277 | bool drivers_autoprobe; /* auto probing of VFs by driver */ | ||
277 | }; | 278 | }; |
278 | 279 | ||
280 | /* pci_dev priv_flags */ | ||
281 | #define PCI_DEV_DISCONNECTED 0 | ||
282 | |||
283 | static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) | ||
284 | { | ||
285 | set_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags); | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) | ||
290 | { | ||
291 | return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags); | ||
292 | } | ||
293 | |||
279 | #ifdef CONFIG_PCI_ATS | 294 | #ifdef CONFIG_PCI_ATS |
280 | void pci_restore_ats_state(struct pci_dev *dev); | 295 | void pci_restore_ats_state(struct pci_dev *dev); |
281 | #else | 296 | #else |
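The new priv_flags bit gives the PCI core a cheap way to short-circuit accesses to devices that have been surprise-removed, as pci_device_is_present() and the DPC handler below now do. A minimal sketch of the intended pattern (these helpers live in drivers/pci/pci.h, so this only applies to code inside the PCI core; the function name is hypothetical):

	static int core_read_vendor(struct pci_dev *dev, u16 *vendor)
	{
		/* Don't touch config space of a device that is already gone */
		if (pci_dev_is_disconnected(dev))
			return -ENODEV;

		return pci_read_config_word(dev, PCI_VENDOR_ID, vendor);
	}
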
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c index d4d70ef4a2d7..77d2ca99d2ec 100644 --- a/drivers/pci/pcie/pcie-dpc.c +++ b/drivers/pci/pcie/pcie-dpc.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/pcieport_if.h> | 16 | #include <linux/pcieport_if.h> |
17 | #include "../pci.h" | ||
17 | 18 | ||
18 | struct dpc_dev { | 19 | struct dpc_dev { |
19 | struct pcie_device *dev; | 20 | struct pcie_device *dev; |
@@ -66,6 +67,10 @@ static void interrupt_event_handler(struct work_struct *work) | |||
66 | list_for_each_entry_safe_reverse(dev, temp, &parent->devices, | 67 | list_for_each_entry_safe_reverse(dev, temp, &parent->devices, |
67 | bus_list) { | 68 | bus_list) { |
68 | pci_dev_get(dev); | 69 | pci_dev_get(dev); |
70 | pci_dev_set_disconnected(dev, NULL); | ||
71 | if (pci_has_subordinate(dev)) | ||
72 | pci_walk_bus(dev->subordinate, | ||
73 | pci_dev_set_disconnected, NULL); | ||
69 | pci_stop_and_remove_bus_device(dev); | 74 | pci_stop_and_remove_bus_device(dev); |
70 | pci_dev_put(dev); | 75 | pci_dev_put(dev); |
71 | } | 76 | } |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 90592d424e9b..01eb8038fceb 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -175,7 +175,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) | |||
175 | int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | 175 | int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, |
176 | struct resource *res, unsigned int pos) | 176 | struct resource *res, unsigned int pos) |
177 | { | 177 | { |
178 | u32 l, sz, mask; | 178 | u32 l = 0, sz = 0, mask; |
179 | u64 l64, sz64, mask64; | 179 | u64 l64, sz64, mask64; |
180 | u16 orig_cmd; | 180 | u16 orig_cmd; |
181 | struct pci_bus_region region, inverted_region; | 181 | struct pci_bus_region region, inverted_region; |
@@ -231,7 +231,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
231 | res->flags |= IORESOURCE_ROM_ENABLE; | 231 | res->flags |= IORESOURCE_ROM_ENABLE; |
232 | l64 = l & PCI_ROM_ADDRESS_MASK; | 232 | l64 = l & PCI_ROM_ADDRESS_MASK; |
233 | sz64 = sz & PCI_ROM_ADDRESS_MASK; | 233 | sz64 = sz & PCI_ROM_ADDRESS_MASK; |
234 | mask64 = (u32)PCI_ROM_ADDRESS_MASK; | 234 | mask64 = PCI_ROM_ADDRESS_MASK; |
235 | } | 235 | } |
236 | 236 | ||
237 | if (res->flags & IORESOURCE_MEM_64) { | 237 | if (res->flags & IORESOURCE_MEM_64) { |
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index f82710a8694d..098360d7ff81 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
@@ -202,6 +202,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, | |||
202 | 202 | ||
203 | #ifdef HAVE_PCI_MMAP | 203 | #ifdef HAVE_PCI_MMAP |
204 | case PCIIOC_MMAP_IS_IO: | 204 | case PCIIOC_MMAP_IS_IO: |
205 | if (!arch_can_pci_mmap_io()) | ||
206 | return -EINVAL; | ||
205 | fpriv->mmap_state = pci_mmap_io; | 207 | fpriv->mmap_state = pci_mmap_io; |
206 | break; | 208 | break; |
207 | 209 | ||
@@ -210,14 +212,15 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, | |||
210 | break; | 212 | break; |
211 | 213 | ||
212 | case PCIIOC_WRITE_COMBINE: | 214 | case PCIIOC_WRITE_COMBINE: |
213 | if (arg) | 215 | if (arch_can_pci_mmap_wc()) { |
214 | fpriv->write_combine = 1; | 216 | if (arg) |
215 | else | 217 | fpriv->write_combine = 1; |
216 | fpriv->write_combine = 0; | 218 | else |
217 | break; | 219 | fpriv->write_combine = 0; |
218 | 220 | break; | |
221 | } | ||
222 | /* If arch decided it can't, fall through... */ | ||
219 | #endif /* HAVE_PCI_MMAP */ | 223 | #endif /* HAVE_PCI_MMAP */ |
220 | |||
221 | default: | 224 | default: |
222 | ret = -EINVAL; | 225 | ret = -EINVAL; |
223 | break; | 226 | break; |
@@ -231,25 +234,35 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) | |||
231 | { | 234 | { |
232 | struct pci_dev *dev = PDE_DATA(file_inode(file)); | 235 | struct pci_dev *dev = PDE_DATA(file_inode(file)); |
233 | struct pci_filp_private *fpriv = file->private_data; | 236 | struct pci_filp_private *fpriv = file->private_data; |
234 | int i, ret, write_combine; | 237 | int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM; |
235 | 238 | ||
236 | if (!capable(CAP_SYS_RAWIO)) | 239 | if (!capable(CAP_SYS_RAWIO)) |
237 | return -EPERM; | 240 | return -EPERM; |
238 | 241 | ||
242 | if (fpriv->mmap_state == pci_mmap_io) { | ||
243 | if (!arch_can_pci_mmap_io()) | ||
244 | return -EINVAL; | ||
245 | res_bit = IORESOURCE_IO; | ||
246 | } | ||
247 | |||
239 | /* Make sure the caller is mapping a real resource for this device */ | 248 | /* Make sure the caller is mapping a real resource for this device */ |
240 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { | 249 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { |
241 | if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) | 250 | if (dev->resource[i].flags & res_bit && |
251 | pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) | ||
242 | break; | 252 | break; |
243 | } | 253 | } |
244 | 254 | ||
245 | if (i >= PCI_ROM_RESOURCE) | 255 | if (i >= PCI_ROM_RESOURCE) |
246 | return -ENODEV; | 256 | return -ENODEV; |
247 | 257 | ||
248 | if (fpriv->mmap_state == pci_mmap_mem) | 258 | if (fpriv->mmap_state == pci_mmap_mem && |
249 | write_combine = fpriv->write_combine; | 259 | fpriv->write_combine) { |
250 | else | 260 | if (dev->resource[i].flags & IORESOURCE_PREFETCH) |
251 | write_combine = 0; | 261 | write_combine = 1; |
252 | ret = pci_mmap_page_range(dev, vma, | 262 | else |
263 | return -EINVAL; | ||
264 | } | ||
265 | ret = pci_mmap_page_range(dev, i, vma, | ||
253 | fpriv->mmap_state, write_combine); | 266 | fpriv->mmap_state, write_combine); |
254 | if (ret < 0) | 267 | if (ret < 0) |
255 | return ret; | 268 | return ret; |
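The /proc/bus/pci mmap path now honours write-combining only when the architecture allows it and the selected BAR is prefetchable. A rough userspace sketch of the ioctl flow (error handling trimmed; the device path and the mmap offset conventions are architecture-specific and left as placeholders):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/pci.h>		/* PCIIOC_* ioctls */

	int request_wc_mapping(const char *procpath)
	{
		int fd = open(procpath, O_RDWR);	/* e.g. /proc/bus/pci/... */

		if (fd < 0)
			return -1;

		ioctl(fd, PCIIOC_MMAP_IS_MEM);		/* map memory space */
		if (ioctl(fd, PCIIOC_WRITE_COMBINE, 1))
			return -1;	/* kernel refuses WC if the arch can't */

		/* mmap() of the BAR would follow; the offset encoding depends
		 * on the architecture's /proc/bus/pci conventions. */
		return fd;
	}
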
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 673683660b5c..085fb787aa9e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1685,6 +1685,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); | |||
1685 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); | 1685 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); |
1686 | 1686 | ||
1687 | #ifdef CONFIG_X86_IO_APIC | 1687 | #ifdef CONFIG_X86_IO_APIC |
1688 | static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) | ||
1689 | { | ||
1690 | noioapicreroute = 1; | ||
1691 | pr_info("%s detected: disable boot interrupt reroute\n", d->ident); | ||
1692 | |||
1693 | return 0; | ||
1694 | } | ||
1695 | |||
1696 | static struct dmi_system_id boot_interrupt_dmi_table[] = { | ||
1697 | /* | ||
1698 | * Systems to exclude from boot interrupt reroute quirks | ||
1699 | */ | ||
1700 | { | ||
1701 | .callback = dmi_disable_ioapicreroute, | ||
1702 | .ident = "ASUSTek Computer INC. M2N-LR", | ||
1703 | .matches = { | ||
1704 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."), | ||
1705 | DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"), | ||
1706 | }, | ||
1707 | }, | ||
1708 | {} | ||
1709 | }; | ||
1710 | |||
1688 | /* | 1711 | /* |
1689 | * Boot interrupts on some chipsets cannot be turned off. For these chipsets, | 1712 | * Boot interrupts on some chipsets cannot be turned off. For these chipsets, |
1690 | * remap the original interrupt in the linux kernel to the boot interrupt, so | 1713 | * remap the original interrupt in the linux kernel to the boot interrupt, so |
@@ -1693,6 +1716,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); | |||
1693 | */ | 1716 | */ |
1694 | static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) | 1717 | static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) |
1695 | { | 1718 | { |
1719 | dmi_check_system(boot_interrupt_dmi_table); | ||
1696 | if (noioapicquirk || noioapicreroute) | 1720 | if (noioapicquirk || noioapicreroute) |
1697 | return; | 1721 | return; |
1698 | 1722 | ||
@@ -3642,19 +3666,11 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) | |||
3642 | * | 3666 | * |
3643 | * The 82599 supports FLR on VFs, but FLR support is reported only | 3667 | * The 82599 supports FLR on VFs, but FLR support is reported only |
3644 | * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5). | 3668 | * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5). |
3645 | * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP. | 3669 | * Thus we must call pcie_flr() directly without first checking if it is |
3670 | * supported. | ||
3646 | */ | 3671 | */ |
3647 | 3672 | if (!probe) | |
3648 | if (probe) | 3673 | pcie_flr(dev); |
3649 | return 0; | ||
3650 | |||
3651 | if (!pci_wait_for_pending_transaction(dev)) | ||
3652 | dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); | ||
3653 | |||
3654 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); | ||
3655 | |||
3656 | msleep(100); | ||
3657 | |||
3658 | return 0; | 3674 | return 0; |
3659 | } | 3675 | } |
3660 | 3676 | ||
@@ -3759,20 +3775,7 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe) | |||
3759 | PCI_MSIX_FLAGS_ENABLE | | 3775 | PCI_MSIX_FLAGS_ENABLE | |
3760 | PCI_MSIX_FLAGS_MASKALL); | 3776 | PCI_MSIX_FLAGS_MASKALL); |
3761 | 3777 | ||
3762 | /* | 3778 | pcie_flr(dev); |
3763 | * Start of pcie_flr() code sequence. This reset code is a copy of | ||
3764 | * the guts of pcie_flr() because that's not an exported function. | ||
3765 | */ | ||
3766 | |||
3767 | if (!pci_wait_for_pending_transaction(dev)) | ||
3768 | dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); | ||
3769 | |||
3770 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); | ||
3771 | msleep(100); | ||
3772 | |||
3773 | /* | ||
3774 | * End of pcie_flr() code sequence. | ||
3775 | */ | ||
3776 | 3779 | ||
3777 | /* | 3780 | /* |
3778 | * Restore the configuration information (BAR values, etc.) including | 3781 | * Restore the configuration information (BAR values, etc.) including |
@@ -3939,6 +3942,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080, | |||
3939 | DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias); | 3942 | DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias); |
3940 | /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */ | 3943 | /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */ |
3941 | DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); | 3944 | DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); |
3945 | /* ITE 8893 has the same problem as the 8892 */ | ||
3946 | DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias); | ||
3942 | /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */ | 3947 | /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */ |
3943 | DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); | 3948 | DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); |
3944 | 3949 | ||
@@ -3958,6 +3963,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias); | |||
3958 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias); | 3963 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias); |
3959 | 3964 | ||
3960 | /* | 3965 | /* |
3966 | * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are | ||
3967 | * associated not at the root bus, but at a bridge below. This quirk avoids | ||
3968 | * generating invalid DMA aliases. | ||
3969 | */ | ||
3970 | static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev) | ||
3971 | { | ||
3972 | pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT; | ||
3973 | } | ||
3974 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000, | ||
3975 | quirk_bridge_cavm_thrx2_pcie_root); | ||
3976 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084, | ||
3977 | quirk_bridge_cavm_thrx2_pcie_root); | ||
3978 | |||
3979 | /* | ||
3961 | * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) | 3980 | * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) |
3962 | * class code. Fix it. | 3981 | * class code. Fix it. |
3963 | */ | 3982 | */ |
@@ -4095,6 +4114,9 @@ static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) | |||
4095 | acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | | 4114 | acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | |
4096 | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); | 4115 | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); |
4097 | 4116 | ||
4117 | if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff))) | ||
4118 | return -ENOTTY; | ||
4119 | |||
4098 | return acs_flags ? 0 : 1; | 4120 | return acs_flags ? 0 : 1; |
4099 | } | 4121 | } |
4100 | 4122 | ||
@@ -4634,3 +4656,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid); | |||
4634 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); | 4656 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid); |
4635 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); | 4657 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid); |
4636 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); | 4658 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); |
4659 | |||
4660 | /* FLR may cause some 82579 devices to hang. */ | ||
4661 | static void quirk_intel_no_flr(struct pci_dev *dev) | ||
4662 | { | ||
4663 | dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; | ||
4664 | } | ||
4665 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr); | ||
4666 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr); | ||
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 33e0f033a48e..4c6044ad7368 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
@@ -60,6 +60,10 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, | |||
60 | 60 | ||
61 | tmp = bus->self; | 61 | tmp = bus->self; |
62 | 62 | ||
63 | /* stop at bridge where translation unit is associated */ | ||
64 | if (tmp->dev_flags & PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT) | ||
65 | return ret; | ||
66 | |||
63 | /* | 67 | /* |
64 | * PCIe-to-PCI/X bridges alias transactions from downstream | 68 | * PCIe-to-PCI/X bridges alias transactions from downstream |
65 | * devices using the subordinate bus number (PCI Express to | 69 | * devices using the subordinate bus number (PCI Express to |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index cb389277df41..958da7db9033 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -1066,10 +1066,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
1066 | r->flags = 0; | 1066 | r->flags = 0; |
1067 | continue; | 1067 | continue; |
1068 | } | 1068 | } |
1069 | size += r_size; | 1069 | size += max(r_size, align); |
1070 | /* Exclude ranges with size > align from | 1070 | /* Exclude ranges with size > align from |
1071 | calculation of the alignment. */ | 1071 | calculation of the alignment. */ |
1072 | if (r_size == align) | 1072 | if (r_size <= align) |
1073 | aligns[order] += align; | 1073 | aligns[order] += align; |
1074 | if (order > max_order) | 1074 | if (order > max_order) |
1075 | max_order = order; | 1075 | max_order = order; |
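The pbus_size_mem() change matters once child BARs can be aligned beyond their size (see the resource-alignment changes above): a realigned resource must reserve its full alignment inside the bridge window, not just its size. Illustrative numbers only:

	r_size = 0x100, align = 0x1000
	old:  size += 0x100;   aligns[order] unchanged (0x100 != 0x1000)
	new:  size += 0x1000;  aligns[order] += 0x1000 (0x100 <= 0x1000)

so the window is sized and aligned as if the small BAR really occupied the whole 4 KiB slot it asked for.
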
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 4bc589ee78d0..85774b7a316a 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) | |||
63 | mask = (u32)PCI_BASE_ADDRESS_IO_MASK; | 63 | mask = (u32)PCI_BASE_ADDRESS_IO_MASK; |
64 | new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; | 64 | new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; |
65 | } else if (resno == PCI_ROM_RESOURCE) { | 65 | } else if (resno == PCI_ROM_RESOURCE) { |
66 | mask = (u32)PCI_ROM_ADDRESS_MASK; | 66 | mask = PCI_ROM_ADDRESS_MASK; |
67 | } else { | 67 | } else { |
68 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; | 68 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; |
69 | new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; | 69 | new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; |
diff --git a/drivers/pci/switch/Kconfig b/drivers/pci/switch/Kconfig new file mode 100644 index 000000000000..4c49648e0646 --- /dev/null +++ b/drivers/pci/switch/Kconfig | |||
@@ -0,0 +1,13 @@ | |||
1 | menu "PCI switch controller drivers" | ||
2 | depends on PCI | ||
3 | |||
4 | config PCI_SW_SWITCHTEC | ||
5 | tristate "MicroSemi Switchtec PCIe Switch Management Driver" | ||
6 | help | ||
7 | Enables support for the management interface for the MicroSemi | ||
8 | Switchtec series of PCIe switches. Supports userspace access | ||
9 | to submit MRPC commands to the switch via /dev/switchtecX | ||
10 | devices. See <file:Documentation/switchtec.txt> for more | ||
11 | information. | ||
12 | |||
13 | endmenu | ||
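The help text above points at the /dev/switchtecX char device; per Documentation/switchtec.txt, an MRPC command is submitted by writing a 4-byte command ID followed by its input payload, then reading back a 4-byte return code plus any output data. A rough userspace sketch with a hypothetical command ID and no input payload (error handling omitted):

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	#define CMD_ID	0x00	/* hypothetical MRPC command number */

	int run_mrpc(const char *dev)		/* e.g. /dev/switchtec0 */
	{
		uint8_t in[1028];
		uint32_t cmd = CMD_ID, ret;
		int fd = open(dev, O_RDWR);

		if (fd < 0)
			return -1;

		write(fd, &cmd, sizeof(cmd));	/* command ID (+ optional input) */
		read(fd, in, sizeof(in));	/* blocks until the command completes */
		memcpy(&ret, in, sizeof(ret));	/* first 4 bytes: return code */
		close(fd);

		return ret;
	}
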
diff --git a/drivers/pci/switch/Makefile b/drivers/pci/switch/Makefile new file mode 100644 index 000000000000..37d8cfb03f3f --- /dev/null +++ b/drivers/pci/switch/Makefile | |||
@@ -0,0 +1 @@ | |||
1 | obj-$(CONFIG_PCI_SW_SWITCHTEC) += switchtec.o | ||
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c new file mode 100644 index 000000000000..cc6e085008fb --- /dev/null +++ b/drivers/pci/switch/switchtec.c | |||
@@ -0,0 +1,1600 @@ | |||
1 | /* | ||
2 | * Microsemi Switchtec(tm) PCIe Management Driver | ||
3 | * Copyright (c) 2017, Microsemi Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/switchtec_ioctl.h> | ||
17 | |||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/poll.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/cdev.h> | ||
25 | #include <linux/wait.h> | ||
26 | |||
27 | MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); | ||
28 | MODULE_VERSION("0.1"); | ||
29 | MODULE_LICENSE("GPL"); | ||
30 | MODULE_AUTHOR("Microsemi Corporation"); | ||
31 | |||
32 | static int max_devices = 16; | ||
33 | module_param(max_devices, int, 0644); | ||
34 | MODULE_PARM_DESC(max_devices, "max number of switchtec device instances"); | ||
35 | |||
36 | static dev_t switchtec_devt; | ||
37 | static struct class *switchtec_class; | ||
38 | static DEFINE_IDA(switchtec_minor_ida); | ||
39 | |||
40 | #define MICROSEMI_VENDOR_ID 0x11f8 | ||
41 | #define MICROSEMI_NTB_CLASSCODE 0x068000 | ||
42 | #define MICROSEMI_MGMT_CLASSCODE 0x058000 | ||
43 | |||
44 | #define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024 | ||
45 | #define SWITCHTEC_MAX_PFF_CSR 48 | ||
46 | |||
47 | #define SWITCHTEC_EVENT_OCCURRED BIT(0) | ||
48 | #define SWITCHTEC_EVENT_CLEAR BIT(0) | ||
49 | #define SWITCHTEC_EVENT_EN_LOG BIT(1) | ||
50 | #define SWITCHTEC_EVENT_EN_CLI BIT(2) | ||
51 | #define SWITCHTEC_EVENT_EN_IRQ BIT(3) | ||
52 | #define SWITCHTEC_EVENT_FATAL BIT(4) | ||
53 | |||
54 | enum { | ||
55 | SWITCHTEC_GAS_MRPC_OFFSET = 0x0000, | ||
56 | SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000, | ||
57 | SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800, | ||
58 | SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000, | ||
59 | SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200, | ||
60 | SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000, | ||
61 | SWITCHTEC_GAS_NTB_OFFSET = 0x10000, | ||
62 | SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000, | ||
63 | }; | ||
64 | |||
65 | struct mrpc_regs { | ||
66 | u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; | ||
67 | u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; | ||
68 | u32 cmd; | ||
69 | u32 status; | ||
70 | u32 ret_value; | ||
71 | } __packed; | ||
72 | |||
73 | enum mrpc_status { | ||
74 | SWITCHTEC_MRPC_STATUS_INPROGRESS = 1, | ||
75 | SWITCHTEC_MRPC_STATUS_DONE = 2, | ||
76 | SWITCHTEC_MRPC_STATUS_ERROR = 0xFF, | ||
77 | SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100, | ||
78 | }; | ||
79 | |||
80 | struct sw_event_regs { | ||
81 | u64 event_report_ctrl; | ||
82 | u64 reserved1; | ||
83 | u64 part_event_bitmap; | ||
84 | u64 reserved2; | ||
85 | u32 global_summary; | ||
86 | u32 reserved3[3]; | ||
87 | u32 stack_error_event_hdr; | ||
88 | u32 stack_error_event_data; | ||
89 | u32 reserved4[4]; | ||
90 | u32 ppu_error_event_hdr; | ||
91 | u32 ppu_error_event_data; | ||
92 | u32 reserved5[4]; | ||
93 | u32 isp_error_event_hdr; | ||
94 | u32 isp_error_event_data; | ||
95 | u32 reserved6[4]; | ||
96 | u32 sys_reset_event_hdr; | ||
97 | u32 reserved7[5]; | ||
98 | u32 fw_exception_hdr; | ||
99 | u32 reserved8[5]; | ||
100 | u32 fw_nmi_hdr; | ||
101 | u32 reserved9[5]; | ||
102 | u32 fw_non_fatal_hdr; | ||
103 | u32 reserved10[5]; | ||
104 | u32 fw_fatal_hdr; | ||
105 | u32 reserved11[5]; | ||
106 | u32 twi_mrpc_comp_hdr; | ||
107 | u32 twi_mrpc_comp_data; | ||
108 | u32 reserved12[4]; | ||
109 | u32 twi_mrpc_comp_async_hdr; | ||
110 | u32 twi_mrpc_comp_async_data; | ||
111 | u32 reserved13[4]; | ||
112 | u32 cli_mrpc_comp_hdr; | ||
113 | u32 cli_mrpc_comp_data; | ||
114 | u32 reserved14[4]; | ||
115 | u32 cli_mrpc_comp_async_hdr; | ||
116 | u32 cli_mrpc_comp_async_data; | ||
117 | u32 reserved15[4]; | ||
118 | u32 gpio_interrupt_hdr; | ||
119 | u32 gpio_interrupt_data; | ||
120 | u32 reserved16[4]; | ||
121 | } __packed; | ||
122 | |||
123 | struct sys_info_regs { | ||
124 | u32 device_id; | ||
125 | u32 device_version; | ||
126 | u32 firmware_version; | ||
127 | u32 reserved1; | ||
128 | u32 vendor_table_revision; | ||
129 | u32 table_format_version; | ||
130 | u32 partition_id; | ||
131 | u32 cfg_file_fmt_version; | ||
132 | u32 reserved2[58]; | ||
133 | char vendor_id[8]; | ||
134 | char product_id[16]; | ||
135 | char product_revision[4]; | ||
136 | char component_vendor[8]; | ||
137 | u16 component_id; | ||
138 | u8 component_revision; | ||
139 | } __packed; | ||
140 | |||
141 | struct flash_info_regs { | ||
142 | u32 flash_part_map_upd_idx; | ||
143 | |||
144 | struct active_partition_info { | ||
145 | u32 address; | ||
146 | u32 build_version; | ||
147 | u32 build_string; | ||
148 | } active_img; | ||
149 | |||
150 | struct active_partition_info active_cfg; | ||
151 | struct active_partition_info inactive_img; | ||
152 | struct active_partition_info inactive_cfg; | ||
153 | |||
154 | u32 flash_length; | ||
155 | |||
156 | struct partition_info { | ||
157 | u32 address; | ||
158 | u32 length; | ||
159 | } cfg0; | ||
160 | |||
161 | struct partition_info cfg1; | ||
162 | struct partition_info img0; | ||
163 | struct partition_info img1; | ||
164 | struct partition_info nvlog; | ||
165 | struct partition_info vendor[8]; | ||
166 | }; | ||
167 | |||
168 | struct ntb_info_regs { | ||
169 | u8 partition_count; | ||
170 | u8 partition_id; | ||
171 | u16 reserved1; | ||
172 | u64 ep_map; | ||
173 | u16 requester_id; | ||
174 | } __packed; | ||
175 | |||
176 | struct part_cfg_regs { | ||
177 | u32 status; | ||
178 | u32 state; | ||
179 | u32 port_cnt; | ||
180 | u32 usp_port_mode; | ||
181 | u32 usp_pff_inst_id; | ||
182 | u32 vep_pff_inst_id; | ||
183 | u32 dsp_pff_inst_id[47]; | ||
184 | u32 reserved1[11]; | ||
185 | u16 vep_vector_number; | ||
186 | u16 usp_vector_number; | ||
187 | u32 port_event_bitmap; | ||
188 | u32 reserved2[3]; | ||
189 | u32 part_event_summary; | ||
190 | u32 reserved3[3]; | ||
191 | u32 part_reset_hdr; | ||
192 | u32 part_reset_data[5]; | ||
193 | u32 mrpc_comp_hdr; | ||
194 | u32 mrpc_comp_data[5]; | ||
195 | u32 mrpc_comp_async_hdr; | ||
196 | u32 mrpc_comp_async_data[5]; | ||
197 | u32 dyn_binding_hdr; | ||
198 | u32 dyn_binding_data[5]; | ||
199 | u32 reserved4[159]; | ||
200 | } __packed; | ||
201 | |||
202 | enum { | ||
203 | SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0, | ||
204 | SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1, | ||
205 | SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2, | ||
206 | SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3, | ||
207 | }; | ||
208 | |||
209 | struct pff_csr_regs { | ||
210 | u16 vendor_id; | ||
211 | u16 device_id; | ||
212 | u32 pci_cfg_header[15]; | ||
213 | u32 pci_cap_region[48]; | ||
214 | u32 pcie_cap_region[448]; | ||
215 | u32 indirect_gas_window[128]; | ||
216 | u32 indirect_gas_window_off; | ||
217 | u32 reserved[127]; | ||
218 | u32 pff_event_summary; | ||
219 | u32 reserved2[3]; | ||
220 | u32 aer_in_p2p_hdr; | ||
221 | u32 aer_in_p2p_data[5]; | ||
222 | u32 aer_in_vep_hdr; | ||
223 | u32 aer_in_vep_data[5]; | ||
224 | u32 dpc_hdr; | ||
225 | u32 dpc_data[5]; | ||
226 | u32 cts_hdr; | ||
227 | u32 cts_data[5]; | ||
228 | u32 reserved3[6]; | ||
229 | u32 hotplug_hdr; | ||
230 | u32 hotplug_data[5]; | ||
231 | u32 ier_hdr; | ||
232 | u32 ier_data[5]; | ||
233 | u32 threshold_hdr; | ||
234 | u32 threshold_data[5]; | ||
235 | u32 power_mgmt_hdr; | ||
236 | u32 power_mgmt_data[5]; | ||
237 | u32 tlp_throttling_hdr; | ||
238 | u32 tlp_throttling_data[5]; | ||
239 | u32 force_speed_hdr; | ||
240 | u32 force_speed_data[5]; | ||
241 | u32 credit_timeout_hdr; | ||
242 | u32 credit_timeout_data[5]; | ||
243 | u32 link_state_hdr; | ||
244 | u32 link_state_data[5]; | ||
245 | u32 reserved4[174]; | ||
246 | } __packed; | ||
247 | |||
248 | struct switchtec_dev { | ||
249 | struct pci_dev *pdev; | ||
250 | struct device dev; | ||
251 | struct cdev cdev; | ||
252 | |||
253 | int partition; | ||
254 | int partition_count; | ||
255 | int pff_csr_count; | ||
256 | char pff_local[SWITCHTEC_MAX_PFF_CSR]; | ||
257 | |||
258 | void __iomem *mmio; | ||
259 | struct mrpc_regs __iomem *mmio_mrpc; | ||
260 | struct sw_event_regs __iomem *mmio_sw_event; | ||
261 | struct sys_info_regs __iomem *mmio_sys_info; | ||
262 | struct flash_info_regs __iomem *mmio_flash_info; | ||
263 | struct ntb_info_regs __iomem *mmio_ntb; | ||
264 | struct part_cfg_regs __iomem *mmio_part_cfg; | ||
265 | struct part_cfg_regs __iomem *mmio_part_cfg_all; | ||
266 | struct pff_csr_regs __iomem *mmio_pff_csr; | ||
267 | |||
268 | /* | ||
269 | * The mrpc mutex must be held when accessing the other | ||
270 | * mrpc_ fields, alive flag and stuser->state field | ||
271 | */ | ||
272 | struct mutex mrpc_mutex; | ||
273 | struct list_head mrpc_queue; | ||
274 | int mrpc_busy; | ||
275 | struct work_struct mrpc_work; | ||
276 | struct delayed_work mrpc_timeout; | ||
277 | bool alive; | ||
278 | |||
279 | wait_queue_head_t event_wq; | ||
280 | atomic_t event_cnt; | ||
281 | }; | ||
282 | |||
283 | static struct switchtec_dev *to_stdev(struct device *dev) | ||
284 | { | ||
285 | return container_of(dev, struct switchtec_dev, dev); | ||
286 | } | ||
287 | |||
288 | enum mrpc_state { | ||
289 | MRPC_IDLE = 0, | ||
290 | MRPC_QUEUED, | ||
291 | MRPC_RUNNING, | ||
292 | MRPC_DONE, | ||
293 | }; | ||
294 | |||
295 | struct switchtec_user { | ||
296 | struct switchtec_dev *stdev; | ||
297 | |||
298 | enum mrpc_state state; | ||
299 | |||
300 | struct completion comp; | ||
301 | struct kref kref; | ||
302 | struct list_head list; | ||
303 | |||
304 | u32 cmd; | ||
305 | u32 status; | ||
306 | u32 return_code; | ||
307 | size_t data_len; | ||
308 | size_t read_len; | ||
309 | unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; | ||
310 | int event_cnt; | ||
311 | }; | ||
312 | |||
313 | static struct switchtec_user *stuser_create(struct switchtec_dev *stdev) | ||
314 | { | ||
315 | struct switchtec_user *stuser; | ||
316 | |||
317 | stuser = kzalloc(sizeof(*stuser), GFP_KERNEL); | ||
318 | if (!stuser) | ||
319 | return ERR_PTR(-ENOMEM); | ||
320 | |||
321 | get_device(&stdev->dev); | ||
322 | stuser->stdev = stdev; | ||
323 | kref_init(&stuser->kref); | ||
324 | INIT_LIST_HEAD(&stuser->list); | ||
325 | init_completion(&stuser->comp); | ||
326 | stuser->event_cnt = atomic_read(&stdev->event_cnt); | ||
327 | |||
328 | dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); | ||
329 | |||
330 | return stuser; | ||
331 | } | ||
332 | |||
333 | static void stuser_free(struct kref *kref) | ||
334 | { | ||
335 | struct switchtec_user *stuser; | ||
336 | |||
337 | stuser = container_of(kref, struct switchtec_user, kref); | ||
338 | |||
339 | dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser); | ||
340 | |||
341 | put_device(&stuser->stdev->dev); | ||
342 | kfree(stuser); | ||
343 | } | ||
344 | |||
345 | static void stuser_put(struct switchtec_user *stuser) | ||
346 | { | ||
347 | kref_put(&stuser->kref, stuser_free); | ||
348 | } | ||
349 | |||
350 | static void stuser_set_state(struct switchtec_user *stuser, | ||
351 | enum mrpc_state state) | ||
352 | { | ||
353 | /* requires the mrpc_mutex to already be held when called */ | ||
354 | |||
355 | const char * const state_names[] = { | ||
356 | [MRPC_IDLE] = "IDLE", | ||
357 | [MRPC_QUEUED] = "QUEUED", | ||
358 | [MRPC_RUNNING] = "RUNNING", | ||
359 | [MRPC_DONE] = "DONE", | ||
360 | }; | ||
361 | |||
362 | stuser->state = state; | ||
363 | |||
364 | dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s", | ||
365 | stuser, state_names[state]); | ||
366 | } | ||
367 | |||
368 | static void mrpc_complete_cmd(struct switchtec_dev *stdev); | ||
369 | |||
370 | static void mrpc_cmd_submit(struct switchtec_dev *stdev) | ||
371 | { | ||
372 | /* requires the mrpc_mutex to already be held when called */ | ||
373 | |||
374 | struct switchtec_user *stuser; | ||
375 | |||
376 | if (stdev->mrpc_busy) | ||
377 | return; | ||
378 | |||
379 | if (list_empty(&stdev->mrpc_queue)) | ||
380 | return; | ||
381 | |||
382 | stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user, | ||
383 | list); | ||
384 | |||
385 | stuser_set_state(stuser, MRPC_RUNNING); | ||
386 | stdev->mrpc_busy = 1; | ||
387 | memcpy_toio(&stdev->mmio_mrpc->input_data, | ||
388 | stuser->data, stuser->data_len); | ||
389 | iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd); | ||
390 | |||
391 | stuser->status = ioread32(&stdev->mmio_mrpc->status); | ||
392 | if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS) | ||
393 | mrpc_complete_cmd(stdev); | ||
394 | |||
395 | schedule_delayed_work(&stdev->mrpc_timeout, | ||
396 | msecs_to_jiffies(500)); | ||
397 | } | ||
398 | |||
399 | static int mrpc_queue_cmd(struct switchtec_user *stuser) | ||
400 | { | ||
401 | /* requires the mrpc_mutex to already be held when called */ | ||
402 | |||
403 | struct switchtec_dev *stdev = stuser->stdev; | ||
404 | |||
405 | kref_get(&stuser->kref); | ||
406 | stuser->read_len = sizeof(stuser->data); | ||
407 | stuser_set_state(stuser, MRPC_QUEUED); | ||
408 | init_completion(&stuser->comp); | ||
409 | list_add_tail(&stuser->list, &stdev->mrpc_queue); | ||
410 | |||
411 | mrpc_cmd_submit(stdev); | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static void mrpc_complete_cmd(struct switchtec_dev *stdev) | ||
417 | { | ||
418 | /* requires the mrpc_mutex to already be held when called */ | ||
419 | struct switchtec_user *stuser; | ||
420 | |||
421 | if (list_empty(&stdev->mrpc_queue)) | ||
422 | return; | ||
423 | |||
424 | stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user, | ||
425 | list); | ||
426 | |||
427 | stuser->status = ioread32(&stdev->mmio_mrpc->status); | ||
428 | if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS) | ||
429 | return; | ||
430 | |||
431 | stuser_set_state(stuser, MRPC_DONE); | ||
432 | stuser->return_code = 0; | ||
433 | |||
434 | if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE) | ||
435 | goto out; | ||
436 | |||
437 | stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value); | ||
438 | if (stuser->return_code != 0) | ||
439 | goto out; | ||
440 | |||
441 | memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data, | ||
442 | stuser->read_len); | ||
443 | |||
444 | out: | ||
445 | complete_all(&stuser->comp); | ||
446 | list_del_init(&stuser->list); | ||
447 | stuser_put(stuser); | ||
448 | stdev->mrpc_busy = 0; | ||
449 | |||
450 | mrpc_cmd_submit(stdev); | ||
451 | } | ||
452 | |||
453 | static void mrpc_event_work(struct work_struct *work) | ||
454 | { | ||
455 | struct switchtec_dev *stdev; | ||
456 | |||
457 | stdev = container_of(work, struct switchtec_dev, mrpc_work); | ||
458 | |||
459 | dev_dbg(&stdev->dev, "%s\n", __func__); | ||
460 | |||
461 | mutex_lock(&stdev->mrpc_mutex); | ||
462 | cancel_delayed_work(&stdev->mrpc_timeout); | ||
463 | mrpc_complete_cmd(stdev); | ||
464 | mutex_unlock(&stdev->mrpc_mutex); | ||
465 | } | ||
466 | |||
467 | static void mrpc_timeout_work(struct work_struct *work) | ||
468 | { | ||
469 | struct switchtec_dev *stdev; | ||
470 | u32 status; | ||
471 | |||
472 | stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work); | ||
473 | |||
474 | dev_dbg(&stdev->dev, "%s\n", __func__); | ||
475 | |||
476 | mutex_lock(&stdev->mrpc_mutex); | ||
477 | |||
478 | status = ioread32(&stdev->mmio_mrpc->status); | ||
479 | if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) { | ||
480 | schedule_delayed_work(&stdev->mrpc_timeout, | ||
481 | msecs_to_jiffies(500)); | ||
482 | goto out; | ||
483 | } | ||
484 | |||
485 | mrpc_complete_cmd(stdev); | ||
486 | |||
487 | out: | ||
488 | mutex_unlock(&stdev->mrpc_mutex); | ||
489 | } | ||
490 | |||
491 | static ssize_t device_version_show(struct device *dev, | ||
492 | struct device_attribute *attr, char *buf) | ||
493 | { | ||
494 | struct switchtec_dev *stdev = to_stdev(dev); | ||
495 | u32 ver; | ||
496 | |||
497 | ver = ioread32(&stdev->mmio_sys_info->device_version); | ||
498 | |||
499 | return sprintf(buf, "%x\n", ver); | ||
500 | } | ||
501 | static DEVICE_ATTR_RO(device_version); | ||
502 | |||
503 | static ssize_t fw_version_show(struct device *dev, | ||
504 | struct device_attribute *attr, char *buf) | ||
505 | { | ||
506 | struct switchtec_dev *stdev = to_stdev(dev); | ||
507 | u32 ver; | ||
508 | |||
509 | ver = ioread32(&stdev->mmio_sys_info->firmware_version); | ||
510 | |||
511 | return sprintf(buf, "%08x\n", ver); | ||
512 | } | ||
513 | static DEVICE_ATTR_RO(fw_version); | ||
514 | |||
515 | static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len) | ||
516 | { | ||
517 | int i; | ||
518 | |||
519 | memcpy_fromio(buf, attr, len); | ||
520 | buf[len] = '\n'; | ||
521 | buf[len + 1] = 0; | ||
522 | |||
523 | for (i = len - 1; i > 0; i--) { | ||
524 | if (buf[i] != ' ') | ||
525 | break; | ||
526 | buf[i] = '\n'; | ||
527 | buf[i + 1] = 0; | ||
528 | } | ||
529 | |||
530 | return strlen(buf); | ||
531 | } | ||
532 | |||
533 | #define DEVICE_ATTR_SYS_INFO_STR(field) \ | ||
534 | static ssize_t field ## _show(struct device *dev, \ | ||
535 | struct device_attribute *attr, char *buf) \ | ||
536 | { \ | ||
537 | struct switchtec_dev *stdev = to_stdev(dev); \ | ||
538 | return io_string_show(buf, &stdev->mmio_sys_info->field, \ | ||
539 | sizeof(stdev->mmio_sys_info->field)); \ | ||
540 | } \ | ||
541 | \ | ||
542 | static DEVICE_ATTR_RO(field) | ||
543 | |||
544 | DEVICE_ATTR_SYS_INFO_STR(vendor_id); | ||
545 | DEVICE_ATTR_SYS_INFO_STR(product_id); | ||
546 | DEVICE_ATTR_SYS_INFO_STR(product_revision); | ||
547 | DEVICE_ATTR_SYS_INFO_STR(component_vendor); | ||
548 | |||
549 | static ssize_t component_id_show(struct device *dev, | ||
550 | struct device_attribute *attr, char *buf) | ||
551 | { | ||
552 | struct switchtec_dev *stdev = to_stdev(dev); | ||
553 | int id = ioread16(&stdev->mmio_sys_info->component_id); | ||
554 | |||
555 | return sprintf(buf, "PM%04X\n", id); | ||
556 | } | ||
557 | static DEVICE_ATTR_RO(component_id); | ||
558 | |||
559 | static ssize_t component_revision_show(struct device *dev, | ||
560 | struct device_attribute *attr, char *buf) | ||
561 | { | ||
562 | struct switchtec_dev *stdev = to_stdev(dev); | ||
563 | int rev = ioread8(&stdev->mmio_sys_info->component_revision); | ||
564 | |||
565 | return sprintf(buf, "%d\n", rev); | ||
566 | } | ||
567 | static DEVICE_ATTR_RO(component_revision); | ||
568 | |||
569 | static ssize_t partition_show(struct device *dev, | ||
570 | struct device_attribute *attr, char *buf) | ||
571 | { | ||
572 | struct switchtec_dev *stdev = to_stdev(dev); | ||
573 | |||
574 | return sprintf(buf, "%d\n", stdev->partition); | ||
575 | } | ||
576 | static DEVICE_ATTR_RO(partition); | ||
577 | |||
578 | static ssize_t partition_count_show(struct device *dev, | ||
579 | struct device_attribute *attr, char *buf) | ||
580 | { | ||
581 | struct switchtec_dev *stdev = to_stdev(dev); | ||
582 | |||
583 | return sprintf(buf, "%d\n", stdev->partition_count); | ||
584 | } | ||
585 | static DEVICE_ATTR_RO(partition_count); | ||
586 | |||
587 | static struct attribute *switchtec_device_attrs[] = { | ||
588 | &dev_attr_device_version.attr, | ||
589 | &dev_attr_fw_version.attr, | ||
590 | &dev_attr_vendor_id.attr, | ||
591 | &dev_attr_product_id.attr, | ||
592 | &dev_attr_product_revision.attr, | ||
593 | &dev_attr_component_vendor.attr, | ||
594 | &dev_attr_component_id.attr, | ||
595 | &dev_attr_component_revision.attr, | ||
596 | &dev_attr_partition.attr, | ||
597 | &dev_attr_partition_count.attr, | ||
598 | NULL, | ||
599 | }; | ||
600 | |||
601 | ATTRIBUTE_GROUPS(switchtec_device); | ||
602 | |||
603 | static int switchtec_dev_open(struct inode *inode, struct file *filp) | ||
604 | { | ||
605 | struct switchtec_dev *stdev; | ||
606 | struct switchtec_user *stuser; | ||
607 | |||
608 | stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev); | ||
609 | |||
610 | stuser = stuser_create(stdev); | ||
611 | if (IS_ERR(stuser)) | ||
612 | return PTR_ERR(stuser); | ||
613 | |||
614 | filp->private_data = stuser; | ||
615 | nonseekable_open(inode, filp); | ||
616 | |||
617 | dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser); | ||
618 | |||
619 | return 0; | ||
620 | } | ||
621 | |||
622 | static int switchtec_dev_release(struct inode *inode, struct file *filp) | ||
623 | { | ||
624 | struct switchtec_user *stuser = filp->private_data; | ||
625 | |||
626 | stuser_put(stuser); | ||
627 | |||
628 | return 0; | ||
629 | } | ||
630 | |||
631 | static int lock_mutex_and_test_alive(struct switchtec_dev *stdev) | ||
632 | { | ||
633 | if (mutex_lock_interruptible(&stdev->mrpc_mutex)) | ||
634 | return -EINTR; | ||
635 | |||
636 | if (!stdev->alive) { | ||
637 | mutex_unlock(&stdev->mrpc_mutex); | ||
638 | return -ENODEV; | ||
639 | } | ||
640 | |||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | static ssize_t switchtec_dev_write(struct file *filp, const char __user *data, | ||
645 | size_t size, loff_t *off) | ||
646 | { | ||
647 | struct switchtec_user *stuser = filp->private_data; | ||
648 | struct switchtec_dev *stdev = stuser->stdev; | ||
649 | int rc; | ||
650 | |||
651 | if (size < sizeof(stuser->cmd) || | ||
652 | size > sizeof(stuser->cmd) + sizeof(stuser->data)) | ||
653 | return -EINVAL; | ||
654 | |||
655 | stuser->data_len = size - sizeof(stuser->cmd); | ||
656 | |||
657 | rc = lock_mutex_and_test_alive(stdev); | ||
658 | if (rc) | ||
659 | return rc; | ||
660 | |||
661 | if (stuser->state != MRPC_IDLE) { | ||
662 | rc = -EBADE; | ||
663 | goto out; | ||
664 | } | ||
665 | |||
666 | rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd)); | ||
667 | if (rc) { | ||
668 | rc = -EFAULT; | ||
669 | goto out; | ||
670 | } | ||
671 | |||
672 | data += sizeof(stuser->cmd); | ||
673 | rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd)); | ||
674 | if (rc) { | ||
675 | rc = -EFAULT; | ||
676 | goto out; | ||
677 | } | ||
678 | |||
679 | rc = mrpc_queue_cmd(stuser); | ||
680 | |||
681 | out: | ||
682 | mutex_unlock(&stdev->mrpc_mutex); | ||
683 | |||
684 | if (rc) | ||
685 | return rc; | ||
686 | |||
687 | return size; | ||
688 | } | ||
689 | |||
690 | static ssize_t switchtec_dev_read(struct file *filp, char __user *data, | ||
691 | size_t size, loff_t *off) | ||
692 | { | ||
693 | struct switchtec_user *stuser = filp->private_data; | ||
694 | struct switchtec_dev *stdev = stuser->stdev; | ||
695 | int rc; | ||
696 | |||
697 | if (size < sizeof(stuser->cmd) || | ||
698 | size > sizeof(stuser->cmd) + sizeof(stuser->data)) | ||
699 | return -EINVAL; | ||
700 | |||
701 | rc = lock_mutex_and_test_alive(stdev); | ||
702 | if (rc) | ||
703 | return rc; | ||
704 | |||
705 | if (stuser->state == MRPC_IDLE) { | ||
706 | mutex_unlock(&stdev->mrpc_mutex); | ||
707 | return -EBADE; | ||
708 | } | ||
709 | |||
710 | stuser->read_len = size - sizeof(stuser->return_code); | ||
711 | |||
712 | mutex_unlock(&stdev->mrpc_mutex); | ||
713 | |||
714 | if (filp->f_flags & O_NONBLOCK) { | ||
715 | if (!try_wait_for_completion(&stuser->comp)) | ||
716 | return -EAGAIN; | ||
717 | } else { | ||
718 | rc = wait_for_completion_interruptible(&stuser->comp); | ||
719 | if (rc < 0) | ||
720 | return rc; | ||
721 | } | ||
722 | |||
723 | rc = lock_mutex_and_test_alive(stdev); | ||
724 | if (rc) | ||
725 | return rc; | ||
726 | |||
727 | if (stuser->state != MRPC_DONE) { | ||
728 | mutex_unlock(&stdev->mrpc_mutex); | ||
729 | return -EBADE; | ||
730 | } | ||
731 | |||
732 | rc = copy_to_user(data, &stuser->return_code, | ||
733 | sizeof(stuser->return_code)); | ||
734 | if (rc) { | ||
735 | rc = -EFAULT; | ||
736 | goto out; | ||
737 | } | ||
738 | |||
739 | data += sizeof(stuser->return_code); | ||
740 | rc = copy_to_user(data, &stuser->data, | ||
741 | size - sizeof(stuser->return_code)); | ||
742 | if (rc) { | ||
743 | rc = -EFAULT; | ||
744 | goto out; | ||
745 | } | ||
746 | |||
747 | stuser_set_state(stuser, MRPC_IDLE); | ||
748 | |||
749 | out: | ||
750 | mutex_unlock(&stdev->mrpc_mutex); | ||
751 | |||
752 | if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE) | ||
753 | return size; | ||
754 | else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED) | ||
755 | return -ENXIO; | ||
756 | else | ||
757 | return -EBADMSG; | ||
758 | } | ||
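
Taken together, the write and read handlers above define the character-device protocol for MRPC commands: a write supplies a 32-bit command identifier immediately followed by the input payload, and a subsequent read blocks until the command completes, returning the 32-bit MRPC return code followed by the output data. The following is a minimal user-space sketch of that flow, assuming a /dev/switchtec0 node; the command number and payload sizes are placeholders, since real command IDs come from the switch firmware documentation rather than from this patch.

```c
/*
 * Minimal user-space sketch of the MRPC protocol.  The command number
 * and payload sizes below are placeholders, not defined by this patch.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint32_t cmd = 0x41;		/* placeholder MRPC command ID */
	uint8_t payload[4] = { 0 };	/* placeholder input data */
	uint8_t buf[4 + 64];		/* 32-bit return code + output */
	uint32_t ret_code;
	int fd;

	fd = open("/dev/switchtec0", O_RDWR);
	if (fd < 0)
		return 1;

	/* write(): 32-bit command ID immediately followed by input data */
	memcpy(buf, &cmd, sizeof(cmd));
	memcpy(buf + sizeof(cmd), payload, sizeof(payload));
	if (write(fd, buf, sizeof(cmd) + sizeof(payload)) < 0)
		return 1;

	/* read() blocks until completion: return code, then output data */
	if (read(fd, buf, sizeof(buf)) < 0)
		return 1;

	memcpy(&ret_code, buf, sizeof(ret_code));
	printf("MRPC return code: %u\n", ret_code);
	close(fd);
	return 0;
}
```

Matching the status handling at the end of switchtec_dev_read(), the read itself fails with -ENXIO if the firmware reports the command was interrupted and with -EBADMSG for any other non-DONE status.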
759 | |||
760 | static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait) | ||
761 | { | ||
762 | struct switchtec_user *stuser = filp->private_data; | ||
763 | struct switchtec_dev *stdev = stuser->stdev; | ||
764 | int ret = 0; | ||
765 | |||
766 | poll_wait(filp, &stuser->comp.wait, wait); | ||
767 | poll_wait(filp, &stdev->event_wq, wait); | ||
768 | |||
769 | if (lock_mutex_and_test_alive(stdev)) | ||
770 | return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP; | ||
771 | |||
772 | mutex_unlock(&stdev->mrpc_mutex); | ||
773 | |||
774 | if (try_wait_for_completion(&stuser->comp)) | ||
775 | ret |= POLLIN | POLLRDNORM; | ||
776 | |||
777 | if (stuser->event_cnt != atomic_read(&stdev->event_cnt)) | ||
778 | ret |= POLLPRI | POLLRDBAND; | ||
779 | |||
780 | return ret; | ||
781 | } | ||
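
The poll handler reports an MRPC completion as normal readable data (POLLIN | POLLRDNORM) and newly latched switch events as priority data (POLLPRI | POLLRDBAND), so a single poll() call can wait for either. A short sketch, assuming the descriptor was opened as in the previous example:

```c
#include <poll.h>

/*
 * Wait up to one second for either an MRPC completion (POLLIN) or a
 * newly latched switch event (POLLPRI) on an already-open switchtec fd.
 */
static int wait_for_switchtec(int fd, short *revents)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
	int rc = poll(&pfd, 1, 1000);

	if (rc > 0)
		*revents = pfd.revents;

	return rc;	/* 0 on timeout, <0 on error */
}
```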
782 | |||
783 | static int ioctl_flash_info(struct switchtec_dev *stdev, | ||
784 | struct switchtec_ioctl_flash_info __user *uinfo) | ||
785 | { | ||
786 | struct switchtec_ioctl_flash_info info = {0}; | ||
787 | struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; | ||
788 | |||
789 | info.flash_length = ioread32(&fi->flash_length); | ||
790 | info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS; | ||
791 | |||
792 | if (copy_to_user(uinfo, &info, sizeof(info))) | ||
793 | return -EFAULT; | ||
794 | |||
795 | return 0; | ||
796 | } | ||
797 | |||
798 | static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info, | ||
799 | struct partition_info __iomem *pi) | ||
800 | { | ||
801 | info->address = ioread32(&pi->address); | ||
802 | info->length = ioread32(&pi->length); | ||
803 | } | ||
804 | |||
805 | static int ioctl_flash_part_info(struct switchtec_dev *stdev, | ||
806 | struct switchtec_ioctl_flash_part_info __user *uinfo) | ||
807 | { | ||
808 | struct switchtec_ioctl_flash_part_info info = {0}; | ||
809 | struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; | ||
810 | u32 active_addr = -1; | ||
811 | |||
812 | if (copy_from_user(&info, uinfo, sizeof(info))) | ||
813 | return -EFAULT; | ||
814 | |||
815 | switch (info.flash_partition) { | ||
816 | case SWITCHTEC_IOCTL_PART_CFG0: | ||
817 | active_addr = ioread32(&fi->active_cfg); | ||
818 | set_fw_info_part(&info, &fi->cfg0); | ||
819 | break; | ||
820 | case SWITCHTEC_IOCTL_PART_CFG1: | ||
821 | active_addr = ioread32(&fi->active_cfg); | ||
822 | set_fw_info_part(&info, &fi->cfg1); | ||
823 | break; | ||
824 | case SWITCHTEC_IOCTL_PART_IMG0: | ||
825 | active_addr = ioread32(&fi->active_img); | ||
826 | set_fw_info_part(&info, &fi->img0); | ||
827 | break; | ||
828 | case SWITCHTEC_IOCTL_PART_IMG1: | ||
829 | active_addr = ioread32(&fi->active_img); | ||
830 | set_fw_info_part(&info, &fi->img1); | ||
831 | break; | ||
832 | case SWITCHTEC_IOCTL_PART_NVLOG: | ||
833 | set_fw_info_part(&info, &fi->nvlog); | ||
834 | break; | ||
835 | case SWITCHTEC_IOCTL_PART_VENDOR0: | ||
836 | set_fw_info_part(&info, &fi->vendor[0]); | ||
837 | break; | ||
838 | case SWITCHTEC_IOCTL_PART_VENDOR1: | ||
839 | set_fw_info_part(&info, &fi->vendor[1]); | ||
840 | break; | ||
841 | case SWITCHTEC_IOCTL_PART_VENDOR2: | ||
842 | set_fw_info_part(&info, &fi->vendor[2]); | ||
843 | break; | ||
844 | case SWITCHTEC_IOCTL_PART_VENDOR3: | ||
845 | set_fw_info_part(&info, &fi->vendor[3]); | ||
846 | break; | ||
847 | case SWITCHTEC_IOCTL_PART_VENDOR4: | ||
848 | set_fw_info_part(&info, &fi->vendor[4]); | ||
849 | break; | ||
850 | case SWITCHTEC_IOCTL_PART_VENDOR5: | ||
851 | set_fw_info_part(&info, &fi->vendor[5]); | ||
852 | break; | ||
853 | case SWITCHTEC_IOCTL_PART_VENDOR6: | ||
854 | set_fw_info_part(&info, &fi->vendor[6]); | ||
855 | break; | ||
856 | case SWITCHTEC_IOCTL_PART_VENDOR7: | ||
857 | set_fw_info_part(&info, &fi->vendor[7]); | ||
858 | break; | ||
859 | default: | ||
860 | return -EINVAL; | ||
861 | } | ||
862 | |||
863 | if (info.address == active_addr) | ||
864 | info.active = 1; | ||
865 | |||
866 | if (copy_to_user(uinfo, &info, sizeof(info))) | ||
867 | return -EFAULT; | ||
868 | |||
869 | return 0; | ||
870 | } | ||
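
Between them, ioctl_flash_info() and ioctl_flash_part_info() let user space discover the flash layout and which configuration and image partitions are currently active. A hedged sketch of finding the active firmware image follows; the ioctl numbers and structure layout come from the uapi header added elsewhere in this patch, and the include path is assumed.

```c
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/switchtec_ioctl.h>	/* uapi header added by this patch */

/* Report which of the two firmware image partitions is active. */
static int show_active_image(int fd)
{
	unsigned int parts[] = {
		SWITCHTEC_IOCTL_PART_IMG0,
		SWITCHTEC_IOCTL_PART_IMG1,
	};
	struct switchtec_ioctl_flash_part_info info;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		memset(&info, 0, sizeof(info));
		info.flash_partition = parts[i];

		if (ioctl(fd, SWITCHTEC_IOCTL_FLASH_PART_INFO, &info))
			return -1;

		printf("IMG%u: addr 0x%x len 0x%x%s\n", i,
		       (unsigned int)info.address,
		       (unsigned int)info.length,
		       info.active ? " (active)" : "");
	}

	return 0;
}
```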
871 | |||
872 | static int ioctl_event_summary(struct switchtec_dev *stdev, | ||
873 | struct switchtec_user *stuser, | ||
874 | struct switchtec_ioctl_event_summary __user *usum) | ||
875 | { | ||
876 | struct switchtec_ioctl_event_summary s = {0}; | ||
877 | int i; | ||
878 | u32 reg; | ||
879 | |||
880 | s.global = ioread32(&stdev->mmio_sw_event->global_summary); | ||
881 | s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap); | ||
882 | s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary); | ||
883 | |||
884 | for (i = 0; i < stdev->partition_count; i++) { | ||
885 | reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary); | ||
886 | s.part[i] = reg; | ||
887 | } | ||
888 | |||
889 | for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) { | ||
890 | reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id); | ||
891 | if (reg != MICROSEMI_VENDOR_ID) | ||
892 | break; | ||
893 | |||
894 | reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary); | ||
895 | s.pff[i] = reg; | ||
896 | } | ||
897 | |||
898 | if (copy_to_user(usum, &s, sizeof(s))) | ||
899 | return -EFAULT; | ||
900 | |||
901 | stuser->event_cnt = atomic_read(&stdev->event_cnt); | ||
902 | |||
903 | return 0; | ||
904 | } | ||
905 | |||
906 | static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev, | ||
907 | size_t offset, int index) | ||
908 | { | ||
909 | return (void __iomem *)stdev->mmio_sw_event + offset; | ||
910 | } | ||
911 | |||
912 | static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev, | ||
913 | size_t offset, int index) | ||
914 | { | ||
915 | return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset; | ||
916 | } | ||
917 | |||
918 | static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev, | ||
919 | size_t offset, int index) | ||
920 | { | ||
921 | return (void __iomem *)&stdev->mmio_pff_csr[index] + offset; | ||
922 | } | ||
923 | |||
924 | #define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg} | ||
925 | #define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg} | ||
926 | #define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg} | ||
927 | |||
928 | const struct event_reg { | ||
929 | size_t offset; | ||
930 | u32 __iomem *(*map_reg)(struct switchtec_dev *stdev, | ||
931 | size_t offset, int index); | ||
932 | } event_regs[] = { | ||
933 | EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr), | ||
934 | EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr), | ||
935 | EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr), | ||
936 | EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr), | ||
937 | EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr), | ||
938 | EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr), | ||
939 | EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr), | ||
940 | EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr), | ||
941 | EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr), | ||
942 | EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC, | ||
943 | twi_mrpc_comp_async_hdr), | ||
944 | EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr), | ||
945 | EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC, | ||
946 | cli_mrpc_comp_async_hdr), | ||
947 | EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr), | ||
948 | EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr), | ||
949 | EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr), | ||
950 | EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr), | ||
951 | EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr), | ||
952 | EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr), | ||
953 | EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr), | ||
954 | EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr), | ||
955 | EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr), | ||
956 | EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr), | ||
957 | EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr), | ||
958 | EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr), | ||
959 | EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr), | ||
960 | EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr), | ||
961 | EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr), | ||
962 | EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr), | ||
963 | EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr), | ||
964 | }; | ||
965 | |||
966 | static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev, | ||
967 | int event_id, int index) | ||
968 | { | ||
969 | size_t off; | ||
970 | |||
971 | if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS) | ||
972 | return ERR_PTR(-EINVAL); | ||
973 | |||
974 | off = event_regs[event_id].offset; | ||
975 | |||
976 | if (event_regs[event_id].map_reg == part_ev_reg) { | ||
977 | if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX) | ||
978 | index = stdev->partition; | ||
979 | else if (index < 0 || index >= stdev->partition_count) | ||
980 | return ERR_PTR(-EINVAL); | ||
981 | } else if (event_regs[event_id].map_reg == pff_ev_reg) { | ||
982 | if (index < 0 || index >= stdev->pff_csr_count) | ||
983 | return ERR_PTR(-EINVAL); | ||
984 | } | ||
985 | |||
986 | return event_regs[event_id].map_reg(stdev, off, index); | ||
987 | } | ||
988 | |||
989 | static int event_ctl(struct switchtec_dev *stdev, | ||
990 | struct switchtec_ioctl_event_ctl *ctl) | ||
991 | { | ||
992 | int i; | ||
993 | u32 __iomem *reg; | ||
994 | u32 hdr; | ||
995 | |||
996 | reg = event_hdr_addr(stdev, ctl->event_id, ctl->index); | ||
997 | if (IS_ERR(reg)) | ||
998 | return PTR_ERR(reg); | ||
999 | |||
1000 | hdr = ioread32(reg); | ||
1001 | for (i = 0; i < ARRAY_SIZE(ctl->data); i++) | ||
1002 | ctl->data[i] = ioread32(®[i + 1]); | ||
1003 | |||
1004 | ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED; | ||
1005 | ctl->count = (hdr >> 5) & 0xFF; | ||
1006 | |||
1007 | if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR)) | ||
1008 | hdr &= ~SWITCHTEC_EVENT_CLEAR; | ||
1009 | if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL) | ||
1010 | hdr |= SWITCHTEC_EVENT_EN_IRQ; | ||
1011 | if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL) | ||
1012 | hdr &= ~SWITCHTEC_EVENT_EN_IRQ; | ||
1013 | if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG) | ||
1014 | hdr |= SWITCHTEC_EVENT_EN_LOG; | ||
1015 | if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG) | ||
1016 | hdr &= ~SWITCHTEC_EVENT_EN_LOG; | ||
1017 | if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI) | ||
1018 | hdr |= SWITCHTEC_EVENT_EN_CLI; | ||
1019 | if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI) | ||
1020 | hdr &= ~SWITCHTEC_EVENT_EN_CLI; | ||
1021 | if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL) | ||
1022 | hdr |= SWITCHTEC_EVENT_FATAL; | ||
1023 | if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL) | ||
1024 | hdr &= ~SWITCHTEC_EVENT_FATAL; | ||
1025 | |||
1026 | if (ctl->flags) | ||
1027 | iowrite32(hdr, reg); | ||
1028 | |||
1029 | ctl->flags = 0; | ||
1030 | if (hdr & SWITCHTEC_EVENT_EN_IRQ) | ||
1031 | ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL; | ||
1032 | if (hdr & SWITCHTEC_EVENT_EN_LOG) | ||
1033 | ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG; | ||
1034 | if (hdr & SWITCHTEC_EVENT_EN_CLI) | ||
1035 | ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI; | ||
1036 | if (hdr & SWITCHTEC_EVENT_FATAL) | ||
1037 | ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL; | ||
1038 | |||
1039 | return 0; | ||
1040 | } | ||
1041 | |||
1042 | static int ioctl_event_ctl(struct switchtec_dev *stdev, | ||
1043 | struct switchtec_ioctl_event_ctl __user *uctl) | ||
1044 | { | ||
1045 | int ret; | ||
1046 | int nr_idxs; | ||
1047 | struct switchtec_ioctl_event_ctl ctl; | ||
1048 | |||
1049 | if (copy_from_user(&ctl, uctl, sizeof(ctl))) | ||
1050 | return -EFAULT; | ||
1051 | |||
1052 | if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS) | ||
1053 | return -EINVAL; | ||
1054 | |||
1055 | if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED) | ||
1056 | return -EINVAL; | ||
1057 | |||
1058 | if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) { | ||
1059 | if (event_regs[ctl.event_id].map_reg == global_ev_reg) | ||
1060 | nr_idxs = 1; | ||
1061 | else if (event_regs[ctl.event_id].map_reg == part_ev_reg) | ||
1062 | nr_idxs = stdev->partition_count; | ||
1063 | else if (event_regs[ctl.event_id].map_reg == pff_ev_reg) | ||
1064 | nr_idxs = stdev->pff_csr_count; | ||
1065 | else | ||
1066 | return -EINVAL; | ||
1067 | |||
1068 | for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) { | ||
1069 | ret = event_ctl(stdev, &ctl); | ||
1070 | if (ret < 0) | ||
1071 | return ret; | ||
1072 | } | ||
1073 | } else { | ||
1074 | ret = event_ctl(stdev, &ctl); | ||
1075 | if (ret < 0) | ||
1076 | return ret; | ||
1077 | } | ||
1078 | |||
1079 | if (copy_to_user(uctl, &ctl, sizeof(ctl))) | ||
1080 | return -EFAULT; | ||
1081 | |||
1082 | return 0; | ||
1083 | } | ||
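
The two event ioctls are meant to be used together: SWITCHTEC_IOCTL_EVENT_SUMMARY reports which global, partition and PFF events have fired, and SWITCHTEC_IOCTL_EVENT_CTL reads, clears or re-arms an individual event header. Below is a sketch of acknowledging one event and re-enabling its interrupt, again assuming the uapi definitions added elsewhere in this patch.

```c
#include <string.h>
#include <sys/ioctl.h>
#include <linux/switchtec_ioctl.h>	/* uapi header added by this patch */

/* Acknowledge a link-state event on one PFF and re-arm its interrupt. */
static int ack_link_state_event(int fd, int pff)
{
	struct switchtec_ioctl_event_ctl ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.event_id = SWITCHTEC_IOCTL_EVENT_LINK_STATE;
	ctl.index = pff;
	ctl.flags = SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR |
		    SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;

	return ioctl(fd, SWITCHTEC_IOCTL_EVENT_CTL, &ctl);
}
```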
1084 | |||
1085 | static int ioctl_pff_to_port(struct switchtec_dev *stdev, | ||
1086 | struct switchtec_ioctl_pff_port *up) | ||
1087 | { | ||
1088 | int i, part; | ||
1089 | u32 reg; | ||
1090 | struct part_cfg_regs *pcfg; | ||
1091 | struct switchtec_ioctl_pff_port p; | ||
1092 | |||
1093 | if (copy_from_user(&p, up, sizeof(p))) | ||
1094 | return -EFAULT; | ||
1095 | |||
1096 | p.port = -1; | ||
1097 | for (part = 0; part < stdev->partition_count; part++) { | ||
1098 | pcfg = &stdev->mmio_part_cfg_all[part]; | ||
1099 | p.partition = part; | ||
1100 | |||
1101 | reg = ioread32(&pcfg->usp_pff_inst_id); | ||
1102 | if (reg == p.pff) { | ||
1103 | p.port = 0; | ||
1104 | break; | ||
1105 | } | ||
1106 | |||
1107 | reg = ioread32(&pcfg->vep_pff_inst_id); | ||
1108 | if (reg == p.pff) { | ||
1109 | p.port = SWITCHTEC_IOCTL_PFF_VEP; | ||
1110 | break; | ||
1111 | } | ||
1112 | |||
1113 | for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) { | ||
1114 | reg = ioread32(&pcfg->dsp_pff_inst_id[i]); | ||
1115 | if (reg != p.pff) | ||
1116 | continue; | ||
1117 | |||
1118 | p.port = i + 1; | ||
1119 | break; | ||
1120 | } | ||
1121 | |||
1122 | if (p.port != -1) | ||
1123 | break; | ||
1124 | } | ||
1125 | |||
1126 | if (copy_to_user(up, &p, sizeof(p))) | ||
1127 | return -EFAULT; | ||
1128 | |||
1129 | return 0; | ||
1130 | } | ||
1131 | |||
1132 | static int ioctl_port_to_pff(struct switchtec_dev *stdev, | ||
1133 | struct switchtec_ioctl_pff_port *up) | ||
1134 | { | ||
1135 | struct switchtec_ioctl_pff_port p; | ||
1136 | struct part_cfg_regs *pcfg; | ||
1137 | |||
1138 | if (copy_from_user(&p, up, sizeof(p))) | ||
1139 | return -EFAULT; | ||
1140 | |||
1141 | if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX) | ||
1142 | pcfg = stdev->mmio_part_cfg; | ||
1143 | else if (p.partition < stdev->partition_count) | ||
1144 | pcfg = &stdev->mmio_part_cfg_all[p.partition]; | ||
1145 | else | ||
1146 | return -EINVAL; | ||
1147 | |||
1148 | switch (p.port) { | ||
1149 | case 0: | ||
1150 | p.pff = ioread32(&pcfg->usp_pff_inst_id); | ||
1151 | break; | ||
1152 | case SWITCHTEC_IOCTL_PFF_VEP: | ||
1153 | p.pff = ioread32(&pcfg->vep_pff_inst_id); | ||
1154 | break; | ||
1155 | default: | ||
1156 | if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id)) | ||
1157 | return -EINVAL; | ||
1158 | p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]); | ||
1159 | break; | ||
1160 | } | ||
1161 | |||
1162 | if (copy_to_user(up, &p, sizeof(p))) | ||
1163 | return -EFAULT; | ||
1164 | |||
1165 | return 0; | ||
1166 | } | ||
1167 | |||
1168 | static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd, | ||
1169 | unsigned long arg) | ||
1170 | { | ||
1171 | struct switchtec_user *stuser = filp->private_data; | ||
1172 | struct switchtec_dev *stdev = stuser->stdev; | ||
1173 | int rc; | ||
1174 | void __user *argp = (void __user *)arg; | ||
1175 | |||
1176 | rc = lock_mutex_and_test_alive(stdev); | ||
1177 | if (rc) | ||
1178 | return rc; | ||
1179 | |||
1180 | switch (cmd) { | ||
1181 | case SWITCHTEC_IOCTL_FLASH_INFO: | ||
1182 | rc = ioctl_flash_info(stdev, argp); | ||
1183 | break; | ||
1184 | case SWITCHTEC_IOCTL_FLASH_PART_INFO: | ||
1185 | rc = ioctl_flash_part_info(stdev, argp); | ||
1186 | break; | ||
1187 | case SWITCHTEC_IOCTL_EVENT_SUMMARY: | ||
1188 | rc = ioctl_event_summary(stdev, stuser, argp); | ||
1189 | break; | ||
1190 | case SWITCHTEC_IOCTL_EVENT_CTL: | ||
1191 | rc = ioctl_event_ctl(stdev, argp); | ||
1192 | break; | ||
1193 | case SWITCHTEC_IOCTL_PFF_TO_PORT: | ||
1194 | rc = ioctl_pff_to_port(stdev, argp); | ||
1195 | break; | ||
1196 | case SWITCHTEC_IOCTL_PORT_TO_PFF: | ||
1197 | rc = ioctl_port_to_pff(stdev, argp); | ||
1198 | break; | ||
1199 | default: | ||
1200 | rc = -ENOTTY; | ||
1201 | break; | ||
1202 | } | ||
1203 | |||
1204 | mutex_unlock(&stdev->mrpc_mutex); | ||
1205 | return rc; | ||
1206 | } | ||
1207 | |||
1208 | static const struct file_operations switchtec_fops = { | ||
1209 | .owner = THIS_MODULE, | ||
1210 | .open = switchtec_dev_open, | ||
1211 | .release = switchtec_dev_release, | ||
1212 | .write = switchtec_dev_write, | ||
1213 | .read = switchtec_dev_read, | ||
1214 | .poll = switchtec_dev_poll, | ||
1215 | .unlocked_ioctl = switchtec_dev_ioctl, | ||
1216 | .compat_ioctl = switchtec_dev_ioctl, | ||
1217 | }; | ||
1218 | |||
1219 | static void stdev_release(struct device *dev) | ||
1220 | { | ||
1221 | struct switchtec_dev *stdev = to_stdev(dev); | ||
1222 | |||
1223 | kfree(stdev); | ||
1224 | } | ||
1225 | |||
1226 | static void stdev_kill(struct switchtec_dev *stdev) | ||
1227 | { | ||
1228 | struct switchtec_user *stuser, *tmpuser; | ||
1229 | |||
1230 | pci_clear_master(stdev->pdev); | ||
1231 | |||
1232 | cancel_delayed_work_sync(&stdev->mrpc_timeout); | ||
1233 | |||
1234 | /* Mark the hardware as unavailable and complete all completions */ | ||
1235 | mutex_lock(&stdev->mrpc_mutex); | ||
1236 | stdev->alive = false; | ||
1237 | |||
1238 | /* Wake up and kill any users waiting on an MRPC request */ | ||
1239 | list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) { | ||
1240 | complete_all(&stuser->comp); | ||
1241 | list_del_init(&stuser->list); | ||
1242 | stuser_put(stuser); | ||
1243 | } | ||
1244 | |||
1245 | mutex_unlock(&stdev->mrpc_mutex); | ||
1246 | |||
1247 | /* Wake up any users waiting on event_wq */ | ||
1248 | wake_up_interruptible(&stdev->event_wq); | ||
1249 | } | ||
1250 | |||
1251 | static struct switchtec_dev *stdev_create(struct pci_dev *pdev) | ||
1252 | { | ||
1253 | struct switchtec_dev *stdev; | ||
1254 | int minor; | ||
1255 | struct device *dev; | ||
1256 | struct cdev *cdev; | ||
1257 | int rc; | ||
1258 | |||
1259 | stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL, | ||
1260 | dev_to_node(&pdev->dev)); | ||
1261 | if (!stdev) | ||
1262 | return ERR_PTR(-ENOMEM); | ||
1263 | |||
1264 | stdev->alive = true; | ||
1265 | stdev->pdev = pdev; | ||
1266 | INIT_LIST_HEAD(&stdev->mrpc_queue); | ||
1267 | mutex_init(&stdev->mrpc_mutex); | ||
1268 | stdev->mrpc_busy = 0; | ||
1269 | INIT_WORK(&stdev->mrpc_work, mrpc_event_work); | ||
1270 | INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work); | ||
1271 | init_waitqueue_head(&stdev->event_wq); | ||
1272 | atomic_set(&stdev->event_cnt, 0); | ||
1273 | |||
1274 | dev = &stdev->dev; | ||
1275 | device_initialize(dev); | ||
1276 | dev->class = switchtec_class; | ||
1277 | dev->parent = &pdev->dev; | ||
1278 | dev->groups = switchtec_device_groups; | ||
1279 | dev->release = stdev_release; | ||
1280 | |||
1281 | minor = ida_simple_get(&switchtec_minor_ida, 0, 0, | ||
1282 | GFP_KERNEL); | ||
1283 | if (minor < 0) { | ||
1284 | rc = minor; | ||
1285 | goto err_put; | ||
1286 | } | ||
1287 | |||
1288 | dev->devt = MKDEV(MAJOR(switchtec_devt), minor); | ||
1289 | dev_set_name(dev, "switchtec%d", minor); | ||
1290 | |||
1291 | cdev = &stdev->cdev; | ||
1292 | cdev_init(cdev, &switchtec_fops); | ||
1293 | cdev->owner = THIS_MODULE; | ||
1294 | cdev->kobj.parent = &dev->kobj; | ||
1295 | |||
1296 | return stdev; | ||
1297 | |||
1298 | err_put: | ||
1299 | put_device(&stdev->dev); | ||
1300 | return ERR_PTR(rc); | ||
1301 | } | ||
1302 | |||
1303 | static int mask_event(struct switchtec_dev *stdev, int eid, int idx) | ||
1304 | { | ||
1305 | size_t off = event_regs[eid].offset; | ||
1306 | u32 __iomem *hdr_reg; | ||
1307 | u32 hdr; | ||
1308 | |||
1309 | hdr_reg = event_regs[eid].map_reg(stdev, off, idx); | ||
1310 | hdr = ioread32(hdr_reg); | ||
1311 | |||
1312 | if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) | ||
1313 | return 0; | ||
1314 | |||
1315 | dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); | ||
1316 | hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED); | ||
1317 | iowrite32(hdr, hdr_reg); | ||
1318 | |||
1319 | return 1; | ||
1320 | } | ||
1321 | |||
1322 | static int mask_all_events(struct switchtec_dev *stdev, int eid) | ||
1323 | { | ||
1324 | int idx; | ||
1325 | int count = 0; | ||
1326 | |||
1327 | if (event_regs[eid].map_reg == part_ev_reg) { | ||
1328 | for (idx = 0; idx < stdev->partition_count; idx++) | ||
1329 | count += mask_event(stdev, eid, idx); | ||
1330 | } else if (event_regs[eid].map_reg == pff_ev_reg) { | ||
1331 | for (idx = 0; idx < stdev->pff_csr_count; idx++) { | ||
1332 | if (!stdev->pff_local[idx]) | ||
1333 | continue; | ||
1334 | count += mask_event(stdev, eid, idx); | ||
1335 | } | ||
1336 | } else { | ||
1337 | count += mask_event(stdev, eid, 0); | ||
1338 | } | ||
1339 | |||
1340 | return count; | ||
1341 | } | ||
1342 | |||
1343 | static irqreturn_t switchtec_event_isr(int irq, void *dev) | ||
1344 | { | ||
1345 | struct switchtec_dev *stdev = dev; | ||
1346 | u32 reg; | ||
1347 | irqreturn_t ret = IRQ_NONE; | ||
1348 | int eid, event_count = 0; | ||
1349 | |||
1350 | reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr); | ||
1351 | if (reg & SWITCHTEC_EVENT_OCCURRED) { | ||
1352 | dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__); | ||
1353 | ret = IRQ_HANDLED; | ||
1354 | schedule_work(&stdev->mrpc_work); | ||
1355 | iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr); | ||
1356 | } | ||
1357 | |||
1358 | for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) | ||
1359 | event_count += mask_all_events(stdev, eid); | ||
1360 | |||
1361 | if (event_count) { | ||
1362 | atomic_inc(&stdev->event_cnt); | ||
1363 | wake_up_interruptible(&stdev->event_wq); | ||
1364 | dev_dbg(&stdev->dev, "%s: %d events\n", __func__, | ||
1365 | event_count); | ||
1366 | return IRQ_HANDLED; | ||
1367 | } | ||
1368 | |||
1369 | return ret; | ||
1370 | } | ||
1371 | |||
1372 | static int switchtec_init_isr(struct switchtec_dev *stdev) | ||
1373 | { | ||
1374 | int nvecs; | ||
1375 | int event_irq; | ||
1376 | |||
1377 | nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4, | ||
1378 | PCI_IRQ_MSIX | PCI_IRQ_MSI); | ||
1379 | if (nvecs < 0) | ||
1380 | return nvecs; | ||
1381 | |||
1382 | event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number); | ||
1383 | if (event_irq < 0 || event_irq >= nvecs) | ||
1384 | return -EFAULT; | ||
1385 | |||
1386 | event_irq = pci_irq_vector(stdev->pdev, event_irq); | ||
1387 | if (event_irq < 0) | ||
1388 | return event_irq; | ||
1389 | |||
1390 | return devm_request_irq(&stdev->pdev->dev, event_irq, | ||
1391 | switchtec_event_isr, 0, | ||
1392 | KBUILD_MODNAME, stdev); | ||
1393 | } | ||
1394 | |||
1395 | static void init_pff(struct switchtec_dev *stdev) | ||
1396 | { | ||
1397 | int i; | ||
1398 | u32 reg; | ||
1399 | struct part_cfg_regs *pcfg = stdev->mmio_part_cfg; | ||
1400 | |||
1401 | for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) { | ||
1402 | reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id); | ||
1403 | if (reg != MICROSEMI_VENDOR_ID) | ||
1404 | break; | ||
1405 | } | ||
1406 | |||
1407 | stdev->pff_csr_count = i; | ||
1408 | |||
1409 | reg = ioread32(&pcfg->usp_pff_inst_id); | ||
1410 | if (reg < SWITCHTEC_MAX_PFF_CSR) | ||
1411 | stdev->pff_local[reg] = 1; | ||
1412 | |||
1413 | reg = ioread32(&pcfg->vep_pff_inst_id); | ||
1414 | if (reg < SWITCHTEC_MAX_PFF_CSR) | ||
1415 | stdev->pff_local[reg] = 1; | ||
1416 | |||
1417 | for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) { | ||
1418 | reg = ioread32(&pcfg->dsp_pff_inst_id[i]); | ||
1419 | if (reg < SWITCHTEC_MAX_PFF_CSR) | ||
1420 | stdev->pff_local[reg] = 1; | ||
1421 | } | ||
1422 | } | ||
1423 | |||
1424 | static int switchtec_init_pci(struct switchtec_dev *stdev, | ||
1425 | struct pci_dev *pdev) | ||
1426 | { | ||
1427 | int rc; | ||
1428 | |||
1429 | rc = pcim_enable_device(pdev); | ||
1430 | if (rc) | ||
1431 | return rc; | ||
1432 | |||
1433 | rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME); | ||
1434 | if (rc) | ||
1435 | return rc; | ||
1436 | |||
1437 | pci_set_master(pdev); | ||
1438 | |||
1439 | stdev->mmio = pcim_iomap_table(pdev)[0]; | ||
1440 | stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET; | ||
1441 | stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET; | ||
1442 | stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET; | ||
1443 | stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET; | ||
1444 | stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET; | ||
1445 | stdev->partition = ioread8(&stdev->mmio_ntb->partition_id); | ||
1446 | stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count); | ||
1447 | stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET; | ||
1448 | stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition]; | ||
1449 | stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET; | ||
1450 | |||
1451 | init_pff(stdev); | ||
1452 | |||
1453 | pci_set_drvdata(pdev, stdev); | ||
1454 | |||
1455 | return 0; | ||
1456 | } | ||
1457 | |||
1458 | static int switchtec_pci_probe(struct pci_dev *pdev, | ||
1459 | const struct pci_device_id *id) | ||
1460 | { | ||
1461 | struct switchtec_dev *stdev; | ||
1462 | int rc; | ||
1463 | |||
1464 | stdev = stdev_create(pdev); | ||
1465 | if (IS_ERR(stdev)) | ||
1466 | return PTR_ERR(stdev); | ||
1467 | |||
1468 | rc = switchtec_init_pci(stdev, pdev); | ||
1469 | if (rc) | ||
1470 | goto err_put; | ||
1471 | |||
1472 | rc = switchtec_init_isr(stdev); | ||
1473 | if (rc) { | ||
1474 | dev_err(&stdev->dev, "failed to init isr.\n"); | ||
1475 | goto err_put; | ||
1476 | } | ||
1477 | |||
1478 | iowrite32(SWITCHTEC_EVENT_CLEAR | | ||
1479 | SWITCHTEC_EVENT_EN_IRQ, | ||
1480 | &stdev->mmio_part_cfg->mrpc_comp_hdr); | ||
1481 | |||
1482 | rc = cdev_add(&stdev->cdev, stdev->dev.devt, 1); | ||
1483 | if (rc) | ||
1484 | goto err_put; | ||
1485 | |||
1486 | rc = device_add(&stdev->dev); | ||
1487 | if (rc) | ||
1488 | goto err_devadd; | ||
1489 | |||
1490 | dev_info(&stdev->dev, "Management device registered.\n"); | ||
1491 | |||
1492 | return 0; | ||
1493 | |||
1494 | err_devadd: | ||
1495 | cdev_del(&stdev->cdev); | ||
1496 | stdev_kill(stdev); | ||
1497 | err_put: | ||
1498 | ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); | ||
1499 | put_device(&stdev->dev); | ||
1500 | return rc; | ||
1501 | } | ||
1502 | |||
1503 | static void switchtec_pci_remove(struct pci_dev *pdev) | ||
1504 | { | ||
1505 | struct switchtec_dev *stdev = pci_get_drvdata(pdev); | ||
1506 | |||
1507 | pci_set_drvdata(pdev, NULL); | ||
1508 | |||
1509 | device_del(&stdev->dev); | ||
1510 | cdev_del(&stdev->cdev); | ||
1511 | ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); | ||
1512 | dev_info(&stdev->dev, "unregistered.\n"); | ||
1513 | |||
1514 | stdev_kill(stdev); | ||
1515 | put_device(&stdev->dev); | ||
1516 | } | ||
1517 | |||
1518 | #define SWITCHTEC_PCI_DEVICE(device_id) \ | ||
1519 | { \ | ||
1520 | .vendor = MICROSEMI_VENDOR_ID, \ | ||
1521 | .device = device_id, \ | ||
1522 | .subvendor = PCI_ANY_ID, \ | ||
1523 | .subdevice = PCI_ANY_ID, \ | ||
1524 | .class = MICROSEMI_MGMT_CLASSCODE, \ | ||
1525 | .class_mask = 0xFFFFFFFF, \ | ||
1526 | }, \ | ||
1527 | { \ | ||
1528 | .vendor = MICROSEMI_VENDOR_ID, \ | ||
1529 | .device = device_id, \ | ||
1530 | .subvendor = PCI_ANY_ID, \ | ||
1531 | .subdevice = PCI_ANY_ID, \ | ||
1532 | .class = MICROSEMI_NTB_CLASSCODE, \ | ||
1533 | .class_mask = 0xFFFFFFFF, \ | ||
1534 | } | ||
1535 | |||
1536 | static const struct pci_device_id switchtec_pci_tbl[] = { | ||
1537 | SWITCHTEC_PCI_DEVICE(0x8531), //PFX 24xG3 | ||
1538 | SWITCHTEC_PCI_DEVICE(0x8532), //PFX 32xG3 | ||
1539 | SWITCHTEC_PCI_DEVICE(0x8533), //PFX 48xG3 | ||
1540 | SWITCHTEC_PCI_DEVICE(0x8534), //PFX 64xG3 | ||
1541 | SWITCHTEC_PCI_DEVICE(0x8535), //PFX 80xG3 | ||
1542 | SWITCHTEC_PCI_DEVICE(0x8536), //PFX 96xG3 | ||
1543 | SWITCHTEC_PCI_DEVICE(0x8543), //PSX 48xG3 | ||
1544 | SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3 | ||
1545 | SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3 | ||
1546 | SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3 | ||
1547 | {0} | ||
1548 | }; | ||
1549 | MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl); | ||
1550 | |||
1551 | static struct pci_driver switchtec_pci_driver = { | ||
1552 | .name = KBUILD_MODNAME, | ||
1553 | .id_table = switchtec_pci_tbl, | ||
1554 | .probe = switchtec_pci_probe, | ||
1555 | .remove = switchtec_pci_remove, | ||
1556 | }; | ||
1557 | |||
1558 | static int __init switchtec_init(void) | ||
1559 | { | ||
1560 | int rc; | ||
1561 | |||
1562 | rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices, | ||
1563 | "switchtec"); | ||
1564 | if (rc) | ||
1565 | return rc; | ||
1566 | |||
1567 | switchtec_class = class_create(THIS_MODULE, "switchtec"); | ||
1568 | if (IS_ERR(switchtec_class)) { | ||
1569 | rc = PTR_ERR(switchtec_class); | ||
1570 | goto err_create_class; | ||
1571 | } | ||
1572 | |||
1573 | rc = pci_register_driver(&switchtec_pci_driver); | ||
1574 | if (rc) | ||
1575 | goto err_pci_register; | ||
1576 | |||
1577 | pr_info(KBUILD_MODNAME ": loaded.\n"); | ||
1578 | |||
1579 | return 0; | ||
1580 | |||
1581 | err_pci_register: | ||
1582 | class_destroy(switchtec_class); | ||
1583 | |||
1584 | err_create_class: | ||
1585 | unregister_chrdev_region(switchtec_devt, max_devices); | ||
1586 | |||
1587 | return rc; | ||
1588 | } | ||
1589 | module_init(switchtec_init); | ||
1590 | |||
1591 | static void __exit switchtec_exit(void) | ||
1592 | { | ||
1593 | pci_unregister_driver(&switchtec_pci_driver); | ||
1594 | class_destroy(switchtec_class); | ||
1595 | unregister_chrdev_region(switchtec_devt, max_devices); | ||
1596 | ida_destroy(&switchtec_minor_ida); | ||
1597 | |||
1598 | pr_info(KBUILD_MODNAME ": unloaded.\n"); | ||
1599 | } | ||
1600 | module_exit(switchtec_exit); | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 53144e78a369..a6fba4804672 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -155,7 +155,7 @@ extern int __must_check | |||
155 | request_percpu_irq(unsigned int irq, irq_handler_t handler, | 155 | request_percpu_irq(unsigned int irq, irq_handler_t handler, |
156 | const char *devname, void __percpu *percpu_dev_id); | 156 | const char *devname, void __percpu *percpu_dev_id); |
157 | 157 | ||
158 | extern void free_irq(unsigned int, void *); | 158 | extern const void *free_irq(unsigned int, void *); |
159 | extern void free_percpu_irq(unsigned int, void __percpu *); | 159 | extern void free_percpu_irq(unsigned int, void __percpu *); |
160 | 160 | ||
161 | struct device; | 161 | struct device; |
diff --git a/include/linux/io.h b/include/linux/io.h index 82ef36eac8a1..2195d9ea4aaa 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
@@ -90,6 +90,27 @@ void devm_memunmap(struct device *dev, void *addr); | |||
90 | 90 | ||
91 | void *__devm_memremap_pages(struct device *dev, struct resource *res); | 91 | void *__devm_memremap_pages(struct device *dev, struct resource *res); |
92 | 92 | ||
93 | #ifdef CONFIG_PCI | ||
94 | /* | ||
95 | * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and | ||
96 | * Posting") mandate non-posted configuration transactions. There is | ||
97 | * no ioremap API in the kernel that can guarantee non-posted write | ||
98 | * semantics across arches so provide a default implementation for | ||
99 | * mapping PCI config space that defaults to ioremap_nocache(); arches | ||
100 | * should override it if they have memory mapping implementations that | ||
101 | * guarantee non-posted write semantics to make the memory mapping | ||
102 | * compliant with the PCI specification. | ||
103 | */ | ||
104 | #ifndef pci_remap_cfgspace | ||
105 | #define pci_remap_cfgspace pci_remap_cfgspace | ||
106 | static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset, | ||
107 | size_t size) | ||
108 | { | ||
109 | return ioremap_nocache(offset, size); | ||
110 | } | ||
111 | #endif | ||
112 | #endif | ||
113 | |||
93 | /* | 114 | /* |
94 | * Some systems do not have legacy ISA devices. | 115 | * Some systems do not have legacy ISA devices. |
95 | * /dev/port is not a valid interface on these systems. | 116 | * /dev/port is not a valid interface on these systems. |
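
As the comment above explains, configuration accesses must be non-posted, and pci_remap_cfgspace() gives host-controller drivers a single call for that instead of open-coding ioremap_nocache(). A hedged sketch of how a driver might use it when mapping its configuration window; the resource name and probe context are illustrative, not mandated by this change.

```c
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Sketch: map a config-space window with non-posted write semantics. */
static void __iomem *map_cfg_window(struct platform_device *pdev)
{
	struct resource *res;

	/* "config" is an illustrative resource name, not mandated here */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (!res)
		return NULL;

	return pci_remap_cfgspace(res->start, resource_size(res));
}
```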
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h index 4585d6105d68..abbd52466573 100644 --- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h | |||
@@ -44,4 +44,8 @@ | |||
44 | 44 | ||
45 | #define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4) | 45 | #define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4) |
46 | 46 | ||
47 | #define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5) | ||
48 | |||
49 | #define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31) | ||
50 | |||
47 | #endif /* __LINUX_IMX7_IOMUXC_GPR_H */ | 51 | #endif /* __LINUX_IMX7_IOMUXC_GPR_H */ |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 8850fcaf50db..566fda587fcf 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -428,6 +428,16 @@ struct i2c_device_id { | |||
428 | kernel_ulong_t driver_data; /* Data private to the driver */ | 428 | kernel_ulong_t driver_data; /* Data private to the driver */ |
429 | }; | 429 | }; |
430 | 430 | ||
431 | /* pci_epf */ | ||
432 | |||
433 | #define PCI_EPF_NAME_SIZE 20 | ||
434 | #define PCI_EPF_MODULE_PREFIX "pci_epf:" | ||
435 | |||
436 | struct pci_epf_device_id { | ||
437 | char name[PCI_EPF_NAME_SIZE]; | ||
438 | kernel_ulong_t driver_data; | ||
439 | }; | ||
440 | |||
431 | /* spi */ | 441 | /* spi */ |
432 | 442 | ||
433 | #define SPI_NAME_SIZE 32 | 443 | #define SPI_NAME_SIZE 32 |
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 0e0974eceb80..518c8d20647a 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
@@ -85,15 +85,4 @@ static inline int of_pci_get_host_bridge_resources(struct device_node *dev, | |||
85 | } | 85 | } |
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) | ||
89 | int of_pci_msi_chip_add(struct msi_controller *chip); | ||
90 | void of_pci_msi_chip_remove(struct msi_controller *chip); | ||
91 | struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node); | ||
92 | #else | ||
93 | static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; } | ||
94 | static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { } | ||
95 | static inline struct msi_controller * | ||
96 | of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } | ||
97 | #endif | ||
98 | |||
99 | #endif | 88 | #endif |
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index f0d2b9451270..809c2f1873ac 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #ifndef DRIVERS_PCI_ECAM_H | 16 | #ifndef DRIVERS_PCI_ECAM_H |
17 | #define DRIVERS_PCI_ECAM_H | 17 | #define DRIVERS_PCI_ECAM_H |
18 | 18 | ||
19 | #include <linux/pci.h> | ||
19 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | 22 | ||
@@ -68,7 +69,7 @@ extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ | |||
68 | extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ | 69 | extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ |
69 | #endif | 70 | #endif |
70 | 71 | ||
71 | #ifdef CONFIG_PCI_HOST_GENERIC | 72 | #ifdef CONFIG_PCI_HOST_COMMON |
72 | /* for DT-based PCI controllers that support ECAM */ | 73 | /* for DT-based PCI controllers that support ECAM */ |
73 | int pci_host_common_probe(struct platform_device *pdev, | 74 | int pci_host_common_probe(struct platform_device *pdev, |
74 | struct pci_ecam_ops *ops); | 75 | struct pci_ecam_ops *ops); |
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h new file mode 100644 index 000000000000..263b89ea5705 --- /dev/null +++ b/include/linux/pci-ep-cfs.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /** | ||
2 | * PCI Endpoint ConfigFS header file | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_PCI_EP_CFS_H | ||
13 | #define __LINUX_PCI_EP_CFS_H | ||
14 | |||
15 | #include <linux/configfs.h> | ||
16 | |||
17 | #ifdef CONFIG_PCI_ENDPOINT_CONFIGFS | ||
18 | struct config_group *pci_ep_cfs_add_epc_group(const char *name); | ||
19 | void pci_ep_cfs_remove_epc_group(struct config_group *group); | ||
20 | struct config_group *pci_ep_cfs_add_epf_group(const char *name); | ||
21 | void pci_ep_cfs_remove_epf_group(struct config_group *group); | ||
22 | #else | ||
23 | static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name) | ||
24 | { | ||
25 | return NULL; | ||
26 | } | ||
27 | |||
28 | static inline void pci_ep_cfs_remove_epc_group(struct config_group *group) | ||
29 | { | ||
30 | } | ||
31 | |||
32 | static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name) | ||
33 | { | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | static inline void pci_ep_cfs_remove_epf_group(struct config_group *group) | ||
38 | { | ||
39 | } | ||
40 | #endif | ||
41 | #endif /* __LINUX_PCI_EP_CFS_H */ | ||
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h new file mode 100644 index 000000000000..af5edbf3eea3 --- /dev/null +++ b/include/linux/pci-epc.h | |||
@@ -0,0 +1,144 @@ | |||
1 | /** | ||
2 | * PCI Endpoint *Controller* (EPC) header file | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_PCI_EPC_H | ||
13 | #define __LINUX_PCI_EPC_H | ||
14 | |||
15 | #include <linux/pci-epf.h> | ||
16 | |||
17 | struct pci_epc; | ||
18 | |||
19 | enum pci_epc_irq_type { | ||
20 | PCI_EPC_IRQ_UNKNOWN, | ||
21 | PCI_EPC_IRQ_LEGACY, | ||
22 | PCI_EPC_IRQ_MSI, | ||
23 | }; | ||
24 | |||
25 | /** | ||
26 | * struct pci_epc_ops - set of function pointers for performing EPC operations | ||
27 | * @write_header: ops to populate configuration space header | ||
28 | * @set_bar: ops to configure the BAR | ||
29 | * @clear_bar: ops to reset the BAR | ||
30 | * @map_addr: ops to map CPU address to PCI address | ||
31 | * @unmap_addr: ops to unmap the CPU address previously mapped to a PCI address | ||
32 | * @set_msi: ops to set the requested number of MSI interrupts in the MSI | ||
33 | * capability register | ||
34 | * @get_msi: ops to get the number of MSI interrupts allocated by the RC from | ||
35 | * the MSI capability register | ||
36 | * @raise_irq: ops to raise a legacy or MSI interrupt | ||
37 | * @start: ops to start the PCI link | ||
38 | * @stop: ops to stop the PCI link | ||
39 | * @owner: the module owner containing the ops | ||
40 | */ | ||
41 | struct pci_epc_ops { | ||
42 | int (*write_header)(struct pci_epc *pci_epc, | ||
43 | struct pci_epf_header *hdr); | ||
44 | int (*set_bar)(struct pci_epc *epc, enum pci_barno bar, | ||
45 | dma_addr_t bar_phys, size_t size, int flags); | ||
46 | void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar); | ||
47 | int (*map_addr)(struct pci_epc *epc, phys_addr_t addr, | ||
48 | u64 pci_addr, size_t size); | ||
49 | void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr); | ||
50 | int (*set_msi)(struct pci_epc *epc, u8 interrupts); | ||
51 | int (*get_msi)(struct pci_epc *epc); | ||
52 | int (*raise_irq)(struct pci_epc *pci_epc, | ||
53 | enum pci_epc_irq_type type, u8 interrupt_num); | ||
54 | int (*start)(struct pci_epc *epc); | ||
55 | void (*stop)(struct pci_epc *epc); | ||
56 | struct module *owner; | ||
57 | }; | ||
58 | |||
59 | /** | ||
60 | * struct pci_epc_mem - address space of the endpoint controller | ||
61 | * @phys_base: physical base address of the PCI address space | ||
62 | * @size: the size of the PCI address space | ||
63 | * @bitmap: bitmap to manage the PCI address space | ||
64 | * @pages: number of bits representing the address region | ||
65 | */ | ||
66 | struct pci_epc_mem { | ||
67 | phys_addr_t phys_base; | ||
68 | size_t size; | ||
69 | unsigned long *bitmap; | ||
70 | int pages; | ||
71 | }; | ||
72 | |||
73 | /** | ||
74 | * struct pci_epc - represents the PCI EPC device | ||
75 | * @dev: PCI EPC device | ||
76 | * @pci_epf: list of endpoint functions present in this EPC device | ||
77 | * @ops: function pointers for performing endpoint operations | ||
78 | * @mem: address space of the endpoint controller | ||
79 | * @max_functions: max number of functions that can be configured in this EPC | ||
80 | * @group: configfs group representing the PCI EPC device | ||
81 | * @lock: spinlock to protect pci_epc ops | ||
82 | */ | ||
83 | struct pci_epc { | ||
84 | struct device dev; | ||
85 | struct list_head pci_epf; | ||
86 | const struct pci_epc_ops *ops; | ||
87 | struct pci_epc_mem *mem; | ||
88 | u8 max_functions; | ||
89 | struct config_group *group; | ||
90 | /* spinlock to protect against concurrent access of EP controller */ | ||
91 | spinlock_t lock; | ||
92 | }; | ||
93 | |||
94 | #define to_pci_epc(device) container_of((device), struct pci_epc, dev) | ||
95 | |||
96 | #define pci_epc_create(dev, ops) \ | ||
97 | __pci_epc_create((dev), (ops), THIS_MODULE) | ||
98 | #define devm_pci_epc_create(dev, ops) \ | ||
99 | __devm_pci_epc_create((dev), (ops), THIS_MODULE) | ||
100 | |||
101 | static inline void epc_set_drvdata(struct pci_epc *epc, void *data) | ||
102 | { | ||
103 | dev_set_drvdata(&epc->dev, data); | ||
104 | } | ||
105 | |||
106 | static inline void *epc_get_drvdata(struct pci_epc *epc) | ||
107 | { | ||
108 | return dev_get_drvdata(&epc->dev); | ||
109 | } | ||
110 | |||
111 | struct pci_epc * | ||
112 | __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, | ||
113 | struct module *owner); | ||
114 | struct pci_epc * | ||
115 | __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, | ||
116 | struct module *owner); | ||
117 | void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc); | ||
118 | void pci_epc_destroy(struct pci_epc *epc); | ||
119 | int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); | ||
120 | void pci_epc_linkup(struct pci_epc *epc); | ||
121 | void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); | ||
122 | int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr); | ||
123 | int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar, | ||
124 | dma_addr_t bar_phys, size_t size, int flags); | ||
125 | void pci_epc_clear_bar(struct pci_epc *epc, int bar); | ||
126 | int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr, | ||
127 | u64 pci_addr, size_t size); | ||
128 | void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr); | ||
129 | int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts); | ||
130 | int pci_epc_get_msi(struct pci_epc *epc); | ||
131 | int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type, | ||
132 | u8 interrupt_num); | ||
133 | int pci_epc_start(struct pci_epc *epc); | ||
134 | void pci_epc_stop(struct pci_epc *epc); | ||
135 | struct pci_epc *pci_epc_get(const char *epc_name); | ||
136 | void pci_epc_put(struct pci_epc *epc); | ||
137 | |||
138 | int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size); | ||
139 | void pci_epc_mem_exit(struct pci_epc *epc); | ||
140 | void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, | ||
141 | phys_addr_t *phys_addr, size_t size); | ||
142 | void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr, | ||
143 | void __iomem *virt_addr, size_t size); | ||
144 | #endif /* __LINUX_PCI_EPC_H */ | ||
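
The ops table and creation helpers above are what a platform's endpoint-controller driver implements. A heavily trimmed, hedged sketch of the registration flow follows; the callback bodies, addresses and names are placeholders rather than anything defined by this header, and the probe function would be wired into a platform_driver in a real controller.

```c
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/pci-epc.h>

/* Placeholder callbacks: a real driver programs controller registers here. */
static int my_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr)
{
	return 0;
}

static int my_epc_start(struct pci_epc *epc)
{
	return 0;
}

static const struct pci_epc_ops my_epc_ops = {
	.write_header	= my_epc_write_header,
	.start		= my_epc_start,
	.owner		= THIS_MODULE,
};

/* Would be hooked up as a platform_driver probe in a real controller. */
static int my_epc_probe(struct platform_device *pdev)
{
	struct pci_epc *epc;

	epc = devm_pci_epc_create(&pdev->dev, &my_epc_ops);
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	/* Illustrative outbound window; a real driver reads this from DT. */
	return pci_epc_mem_init(epc, 0x20000000, SZ_64M);
}
```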
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h new file mode 100644 index 000000000000..0d529cb90143 --- /dev/null +++ b/include/linux/pci-epf.h | |||
@@ -0,0 +1,162 @@ | |||
1 | /** | ||
2 | * PCI Endpoint *Function* (EPF) header file | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_PCI_EPF_H | ||
13 | #define __LINUX_PCI_EPF_H | ||
14 | |||
15 | #include <linux/device.h> | ||
16 | #include <linux/mod_devicetable.h> | ||
17 | |||
18 | struct pci_epf; | ||
19 | |||
20 | enum pci_interrupt_pin { | ||
21 | PCI_INTERRUPT_UNKNOWN, | ||
22 | PCI_INTERRUPT_INTA, | ||
23 | PCI_INTERRUPT_INTB, | ||
24 | PCI_INTERRUPT_INTC, | ||
25 | PCI_INTERRUPT_INTD, | ||
26 | }; | ||
27 | |||
28 | enum pci_barno { | ||
29 | BAR_0, | ||
30 | BAR_1, | ||
31 | BAR_2, | ||
32 | BAR_3, | ||
33 | BAR_4, | ||
34 | BAR_5, | ||
35 | }; | ||
36 | |||
37 | /** | ||
38 | * struct pci_epf_header - represents standard configuration header | ||
39 | * @vendorid: identifies device manufacturer | ||
40 | * @deviceid: identifies a particular device | ||
41 | * @revid: specifies a device-specific revision identifier | ||
42 | * @progif_code: identifies a specific register-level programming interface | ||
43 | * @subclass_code: identifies more specifically the function of the device | ||
44 | * @baseclass_code: broadly classifies the type of function the device performs | ||
45 | * @cache_line_size: specifies the system cacheline size in units of DWORDs | ||
46 | * @subsys_vendor_id: vendor of the add-in card or subsystem | ||
47 | * @subsys_id: id specific to vendor | ||
48 | * @interrupt_pin: interrupt pin the device (or device function) uses | ||
49 | */ | ||
50 | struct pci_epf_header { | ||
51 | u16 vendorid; | ||
52 | u16 deviceid; | ||
53 | u8 revid; | ||
54 | u8 progif_code; | ||
55 | u8 subclass_code; | ||
56 | u8 baseclass_code; | ||
57 | u8 cache_line_size; | ||
58 | u16 subsys_vendor_id; | ||
59 | u16 subsys_id; | ||
60 | enum pci_interrupt_pin interrupt_pin; | ||
61 | }; | ||
62 | |||
63 | /** | ||
64 | * struct pci_epf_ops - set of function pointers for performing EPF operations | ||
65 | * @bind: ops to perform when an EPC device has been bound to an EPF device | ||
66 | * @unbind: ops to perform when a binding has been lost between an EPC device | ||
67 | * and an EPF device | ||
68 | * @linkup: ops to perform when the EPC device has established a connection with | ||
69 | * a host system | ||
70 | */ | ||
71 | struct pci_epf_ops { | ||
72 | int (*bind)(struct pci_epf *epf); | ||
73 | void (*unbind)(struct pci_epf *epf); | ||
74 | void (*linkup)(struct pci_epf *epf); | ||
75 | }; | ||
76 | |||
77 | /** | ||
78 | * struct pci_epf_driver - represents the PCI EPF driver | ||
79 | * @probe: ops to perform when a new EPF device has been bound to the EPF driver | ||
80 | * @remove: ops to perform when the binding between the EPF device and EPF | ||
81 | * driver is broken | ||
82 | * @driver: PCI EPF driver | ||
83 | * @ops: set of function pointers for performing EPF operations | ||
84 | * @owner: the owner of the module that registers the PCI EPF driver | ||
85 | * @group: configfs group corresponding to the PCI EPF driver | ||
86 | * @id_table: identifies EPF devices for probing | ||
87 | */ | ||
88 | struct pci_epf_driver { | ||
89 | int (*probe)(struct pci_epf *epf); | ||
90 | int (*remove)(struct pci_epf *epf); | ||
91 | |||
92 | struct device_driver driver; | ||
93 | struct pci_epf_ops *ops; | ||
94 | struct module *owner; | ||
95 | struct config_group *group; | ||
96 | const struct pci_epf_device_id *id_table; | ||
97 | }; | ||
98 | |||
99 | #define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \ | ||
100 | driver)) | ||
101 | |||
102 | /** | ||
103 | * struct pci_epf_bar - represents the BAR of EPF device | ||
104 | * @phys_addr: physical address that should be mapped to the BAR | ||
105 | * @size: the size of the address space present in BAR | ||
106 | */ | ||
107 | struct pci_epf_bar { | ||
108 | dma_addr_t phys_addr; | ||
109 | size_t size; | ||
110 | }; | ||
111 | |||
112 | /** | ||
113 | * struct pci_epf - represents the PCI EPF device | ||
114 | * @dev: the PCI EPF device | ||
115 | * @name: the name of the PCI EPF device | ||
116 | * @header: represents standard configuration header | ||
117 | * @bar: represents the BAR of EPF device | ||
118 | * @msi_interrupts: number of MSI interrupts required by this function | ||
119 | * @func_no: unique function number within this endpoint device | ||
120 | * @epc: the EPC device to which this EPF device is bound | ||
121 | * @driver: the EPF driver to which this EPF device is bound | ||
122 | * @list: links this pci_epf into the pci_epc list of PCI endpoint functions | ||
123 | */ | ||
124 | struct pci_epf { | ||
125 | struct device dev; | ||
126 | const char *name; | ||
127 | struct pci_epf_header *header; | ||
128 | struct pci_epf_bar bar[6]; | ||
129 | u8 msi_interrupts; | ||
130 | u8 func_no; | ||
131 | |||
132 | struct pci_epc *epc; | ||
133 | struct pci_epf_driver *driver; | ||
134 | struct list_head list; | ||
135 | }; | ||
136 | |||
137 | #define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev) | ||
138 | |||
139 | #define pci_epf_register_driver(driver) \ | ||
140 | __pci_epf_register_driver((driver), THIS_MODULE) | ||
141 | |||
142 | static inline void epf_set_drvdata(struct pci_epf *epf, void *data) | ||
143 | { | ||
144 | dev_set_drvdata(&epf->dev, data); | ||
145 | } | ||
146 | |||
147 | static inline void *epf_get_drvdata(struct pci_epf *epf) | ||
148 | { | ||
149 | return dev_get_drvdata(&epf->dev); | ||
150 | } | ||
151 | |||
152 | struct pci_epf *pci_epf_create(const char *name); | ||
153 | void pci_epf_destroy(struct pci_epf *epf); | ||
154 | int __pci_epf_register_driver(struct pci_epf_driver *driver, | ||
155 | struct module *owner); | ||
156 | void pci_epf_unregister_driver(struct pci_epf_driver *driver); | ||
157 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar); | ||
158 | void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); | ||
159 | int pci_epf_bind(struct pci_epf *epf); | ||
160 | void pci_epf_unbind(struct pci_epf *epf); | ||
161 | void pci_epf_linkup(struct pci_epf *epf); | ||
162 | #endif /* __LINUX_PCI_EPF_H */ | ||
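
To make the endpoint-function API above more concrete, here is a minimal sketch of an EPF driver built against this header. It is illustrative only: the "dummy" names and the empty callback bodies are assumptions, not part of this patch. Only the types and helpers (struct pci_epf_ops, struct pci_epf_driver, pci_epf_register_driver(), pci_epf_unregister_driver(), epf_set_drvdata()) come from the header shown above, and the id-table entry assumes the pci_epf_device_id name field from mod_devicetable.h.

	#include <linux/module.h>
	#include <linux/mod_devicetable.h>
	#include <linux/pci-epf.h>

	static int dummy_epf_bind(struct pci_epf *epf)
	{
		/* allocate BAR space and program the configuration header here */
		return 0;
	}

	static void dummy_epf_unbind(struct pci_epf *epf)
	{
		/* undo whatever bind() set up */
	}

	static void dummy_epf_linkup(struct pci_epf *epf)
	{
		/* the EPC signalled that the link to the host is up */
	}

	static struct pci_epf_ops dummy_epf_ops = {
		.bind	= dummy_epf_bind,
		.unbind	= dummy_epf_unbind,
		.linkup	= dummy_epf_linkup,
	};

	static const struct pci_epf_device_id dummy_epf_ids[] = {
		{ .name = "pci_epf_dummy" },	/* hypothetical function name */
		{ }
	};

	static int dummy_epf_probe(struct pci_epf *epf)
	{
		epf_set_drvdata(epf, NULL);	/* stash per-function state here */
		return 0;
	}

	static struct pci_epf_driver dummy_epf_driver = {
		.driver.name	= "pci_epf_dummy",
		.probe		= dummy_epf_probe,
		.id_table	= dummy_epf_ids,
		.ops		= &dummy_epf_ops,
		.owner		= THIS_MODULE,
	};

	static int __init dummy_epf_init(void)
	{
		return pci_epf_register_driver(&dummy_epf_driver);
	}
	module_init(dummy_epf_init);

	static void __exit dummy_epf_exit(void)
	{
		pci_epf_unregister_driver(&dummy_epf_driver);
	}
	module_exit(dummy_epf_exit);

	MODULE_LICENSE("GPL v2");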
diff --git a/include/linux/pci.h b/include/linux/pci.h index f27be8432e82..33c2b0b77429 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/kobject.h> | 28 | #include <linux/kobject.h> |
29 | #include <linux/atomic.h> | 29 | #include <linux/atomic.h> |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/interrupt.h> | ||
31 | #include <linux/io.h> | 32 | #include <linux/io.h> |
32 | #include <linux/resource_ext.h> | 33 | #include <linux/resource_ext.h> |
33 | #include <uapi/linux/pci.h> | 34 | #include <uapi/linux/pci.h> |
@@ -178,6 +179,10 @@ enum pci_dev_flags { | |||
178 | PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), | 179 | PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), |
179 | /* Get VPD from function 0 VPD */ | 180 | /* Get VPD from function 0 VPD */ |
180 | PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), | 181 | PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), |
182 | /* a non-root bridge where translation occurs, stop alias search here */ | ||
183 | PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), | ||
184 | /* Do not use FLR even if device advertises PCI_AF_CAP */ | ||
185 | PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), | ||
181 | }; | 186 | }; |
182 | 187 | ||
183 | enum pci_irq_reroute_variant { | 188 | enum pci_irq_reroute_variant { |
@@ -397,6 +402,8 @@ struct pci_dev { | |||
397 | phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ | 402 | phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ |
398 | size_t romlen; /* Length of ROM if it's not from the BAR */ | 403 | size_t romlen; /* Length of ROM if it's not from the BAR */ |
399 | char *driver_override; /* Driver name to force a match */ | 404 | char *driver_override; /* Driver name to force a match */ |
405 | |||
406 | unsigned long priv_flags; /* Private flags for the pci driver */ | ||
400 | }; | 407 | }; |
401 | 408 | ||
402 | static inline struct pci_dev *pci_physfn(struct pci_dev *dev) | 409 | static inline struct pci_dev *pci_physfn(struct pci_dev *dev) |
@@ -941,32 +948,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, | |||
941 | 948 | ||
942 | struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); | 949 | struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); |
943 | 950 | ||
944 | static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) | 951 | int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val); |
945 | { | 952 | int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val); |
946 | return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); | 953 | int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); |
947 | } | 954 | int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); |
948 | static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) | 955 | int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); |
949 | { | 956 | int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); |
950 | return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); | ||
951 | } | ||
952 | static inline int pci_read_config_dword(const struct pci_dev *dev, int where, | ||
953 | u32 *val) | ||
954 | { | ||
955 | return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); | ||
956 | } | ||
957 | static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) | ||
958 | { | ||
959 | return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); | ||
960 | } | ||
961 | static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) | ||
962 | { | ||
963 | return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); | ||
964 | } | ||
965 | static inline int pci_write_config_dword(const struct pci_dev *dev, int where, | ||
966 | u32 val) | ||
967 | { | ||
968 | return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); | ||
969 | } | ||
970 | 957 | ||
971 | int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); | 958 | int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); |
972 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); | 959 | int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); |
@@ -1053,6 +1040,7 @@ int pcie_get_mps(struct pci_dev *dev); | |||
1053 | int pcie_set_mps(struct pci_dev *dev, int mps); | 1040 | int pcie_set_mps(struct pci_dev *dev, int mps); |
1054 | int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, | 1041 | int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, |
1055 | enum pcie_link_width *width); | 1042 | enum pcie_link_width *width); |
1043 | void pcie_flr(struct pci_dev *dev); | ||
1056 | int __pci_reset_function(struct pci_dev *dev); | 1044 | int __pci_reset_function(struct pci_dev *dev); |
1057 | int __pci_reset_function_locked(struct pci_dev *dev); | 1045 | int __pci_reset_function_locked(struct pci_dev *dev); |
1058 | int pci_reset_function(struct pci_dev *dev); | 1046 | int pci_reset_function(struct pci_dev *dev); |
@@ -1073,6 +1061,11 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags); | |||
1073 | bool pci_device_is_present(struct pci_dev *pdev); | 1061 | bool pci_device_is_present(struct pci_dev *pdev); |
1074 | void pci_ignore_hotplug(struct pci_dev *dev); | 1062 | void pci_ignore_hotplug(struct pci_dev *dev); |
1075 | 1063 | ||
1064 | int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr, | ||
1065 | irq_handler_t handler, irq_handler_t thread_fn, void *dev_id, | ||
1066 | const char *fmt, ...); | ||
1067 | void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id); | ||
1068 | |||
1076 | /* ROM control related routines */ | 1069 | /* ROM control related routines */ |
1077 | int pci_enable_rom(struct pci_dev *pdev); | 1070 | int pci_enable_rom(struct pci_dev *pdev); |
1078 | void pci_disable_rom(struct pci_dev *pdev); | 1071 | void pci_disable_rom(struct pci_dev *pdev); |
@@ -1200,6 +1193,11 @@ unsigned long pci_address_to_pio(phys_addr_t addr); | |||
1200 | phys_addr_t pci_pio_to_address(unsigned long pio); | 1193 | phys_addr_t pci_pio_to_address(unsigned long pio); |
1201 | int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); | 1194 | int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); |
1202 | void pci_unmap_iospace(struct resource *res); | 1195 | void pci_unmap_iospace(struct resource *res); |
1196 | void __iomem *devm_pci_remap_cfgspace(struct device *dev, | ||
1197 | resource_size_t offset, | ||
1198 | resource_size_t size); | ||
1199 | void __iomem *devm_pci_remap_cfg_resource(struct device *dev, | ||
1200 | struct resource *res); | ||
1203 | 1201 | ||
1204 | static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) | 1202 | static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) |
1205 | { | 1203 | { |
@@ -1298,10 +1296,8 @@ struct msix_entry { | |||
1298 | 1296 | ||
1299 | #ifdef CONFIG_PCI_MSI | 1297 | #ifdef CONFIG_PCI_MSI |
1300 | int pci_msi_vec_count(struct pci_dev *dev); | 1298 | int pci_msi_vec_count(struct pci_dev *dev); |
1301 | void pci_msi_shutdown(struct pci_dev *dev); | ||
1302 | void pci_disable_msi(struct pci_dev *dev); | 1299 | void pci_disable_msi(struct pci_dev *dev); |
1303 | int pci_msix_vec_count(struct pci_dev *dev); | 1300 | int pci_msix_vec_count(struct pci_dev *dev); |
1304 | void pci_msix_shutdown(struct pci_dev *dev); | ||
1305 | void pci_disable_msix(struct pci_dev *dev); | 1301 | void pci_disable_msix(struct pci_dev *dev); |
1306 | void pci_restore_msi_state(struct pci_dev *dev); | 1302 | void pci_restore_msi_state(struct pci_dev *dev); |
1307 | int pci_msi_enabled(void); | 1303 | int pci_msi_enabled(void); |
@@ -1327,10 +1323,8 @@ int pci_irq_get_node(struct pci_dev *pdev, int vec); | |||
1327 | 1323 | ||
1328 | #else | 1324 | #else |
1329 | static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } | 1325 | static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } |
1330 | static inline void pci_msi_shutdown(struct pci_dev *dev) { } | ||
1331 | static inline void pci_disable_msi(struct pci_dev *dev) { } | 1326 | static inline void pci_disable_msi(struct pci_dev *dev) { } |
1332 | static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } | 1327 | static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } |
1333 | static inline void pci_msix_shutdown(struct pci_dev *dev) { } | ||
1334 | static inline void pci_disable_msix(struct pci_dev *dev) { } | 1328 | static inline void pci_disable_msix(struct pci_dev *dev) { } |
1335 | static inline void pci_restore_msi_state(struct pci_dev *dev) { } | 1329 | static inline void pci_restore_msi_state(struct pci_dev *dev) { } |
1336 | static inline int pci_msi_enabled(void) { return 0; } | 1330 | static inline int pci_msi_enabled(void) { return 0; } |
@@ -1623,6 +1617,36 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } | |||
1623 | 1617 | ||
1624 | #include <asm/pci.h> | 1618 | #include <asm/pci.h> |
1625 | 1619 | ||
1620 | /* These two functions provide almost identical functionality. Depending | ||
1621 | * on the architecture, one will be implemented as a wrapper around the | ||
1622 | * other (in drivers/pci/mmap.c). | ||
1623 | * | ||
1624 | * pci_mmap_resource_range() maps a specific BAR, and vma->vm_pgoff | ||
1625 | * is expected to be an offset within that region. | ||
1626 | * | ||
1627 | * pci_mmap_page_range() is the legacy architecture-specific interface, | ||
1628 | * which accepts a "user visible" resource address converted by | ||
1629 | * pci_resource_to_user(), as used in the legacy mmap() interface in | ||
1630 | * /proc/bus/pci/. | ||
1631 | */ | ||
1632 | int pci_mmap_resource_range(struct pci_dev *dev, int bar, | ||
1633 | struct vm_area_struct *vma, | ||
1634 | enum pci_mmap_state mmap_state, int write_combine); | ||
1635 | int pci_mmap_page_range(struct pci_dev *pdev, int bar, | ||
1636 | struct vm_area_struct *vma, | ||
1637 | enum pci_mmap_state mmap_state, int write_combine); | ||
1638 | |||
1639 | #ifndef arch_can_pci_mmap_wc | ||
1640 | #define arch_can_pci_mmap_wc() 0 | ||
1641 | #endif | ||
1642 | |||
1643 | #ifndef arch_can_pci_mmap_io | ||
1644 | #define arch_can_pci_mmap_io() 0 | ||
1645 | #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL) | ||
1646 | #else | ||
1647 | int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); | ||
1648 | #endif | ||
1649 | |||
1626 | #ifndef pci_root_bus_fwnode | 1650 | #ifndef pci_root_bus_fwnode |
1627 | #define pci_root_bus_fwnode(bus) NULL | 1651 | #define pci_root_bus_fwnode(bus) NULL |
1628 | #endif | 1652 | #endif |
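
The new pci_request_irq()/pci_free_irq() declarations above pair naturally with the existing pci_alloc_irq_vectors() interface. The sketch below shows one plausible way a driver could use them; the foo_* names and the private structure are hypothetical, and only the PCI helpers and their signatures come from this patch and the established IRQ-vector API.

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	struct foo_priv {			/* hypothetical per-device state */
		void __iomem *regs;
	};

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;

		/* acknowledge the device, schedule further work, ... */
		return IRQ_HANDLED;
	}

	static int foo_setup_irqs(struct pci_dev *pdev, struct foo_priv *priv)
	{
		int nvec, i, ret;

		nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			/* the format string gives each vector a distinct devname */
			ret = pci_request_irq(pdev, i, foo_irq, NULL, priv,
					      "foo-%s-%d", pci_name(pdev), i);
			if (ret)
				goto err;
		}
		return 0;

	err:
		while (--i >= 0)
			pci_free_irq(pdev, i, priv);
		pci_free_irq_vectors(pdev);
		return ret;
	}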
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index a4f77feecbb0..5f6b71d15393 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -862,6 +862,8 @@ | |||
862 | #define PCI_DEVICE_ID_TI_X620 0xac8d | 862 | #define PCI_DEVICE_ID_TI_X620 0xac8d |
863 | #define PCI_DEVICE_ID_TI_X420 0xac8e | 863 | #define PCI_DEVICE_ID_TI_X420 0xac8e |
864 | #define PCI_DEVICE_ID_TI_XX20_FM 0xac8f | 864 | #define PCI_DEVICE_ID_TI_XX20_FM 0xac8f |
865 | #define PCI_DEVICE_ID_TI_DRA74x 0xb500 | ||
866 | #define PCI_DEVICE_ID_TI_DRA72x 0xb501 | ||
865 | 867 | ||
866 | #define PCI_VENDOR_ID_SONY 0x104d | 868 | #define PCI_VENDOR_ID_SONY 0x104d |
867 | 869 | ||
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 6b0e2758585f..662c592b74dd 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
@@ -333,6 +333,7 @@ header-y += parport.h | |||
333 | header-y += patchkey.h | 333 | header-y += patchkey.h |
334 | header-y += pci.h | 334 | header-y += pci.h |
335 | header-y += pci_regs.h | 335 | header-y += pci_regs.h |
336 | header-y += pcitest.h | ||
336 | header-y += perf_event.h | 337 | header-y += perf_event.h |
337 | header-y += personality.h | 338 | header-y += personality.h |
338 | header-y += pfkeyv2.h | 339 | header-y += pfkeyv2.h |
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 18a26c16bd80..d56bb0051009 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h | |||
@@ -114,7 +114,7 @@ | |||
114 | #define PCI_SUBSYSTEM_ID 0x2e | 114 | #define PCI_SUBSYSTEM_ID 0x2e |
115 | #define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ | 115 | #define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ |
116 | #define PCI_ROM_ADDRESS_ENABLE 0x01 | 116 | #define PCI_ROM_ADDRESS_ENABLE 0x01 |
117 | #define PCI_ROM_ADDRESS_MASK (~0x7ffUL) | 117 | #define PCI_ROM_ADDRESS_MASK (~0x7ffU) |
118 | 118 | ||
119 | #define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ | 119 | #define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ |
120 | 120 | ||
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h new file mode 100644 index 000000000000..a6aa10c45ad1 --- /dev/null +++ b/include/uapi/linux/pcitest.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /** | ||
2 | * pcitest.h - PCI test uapi defines | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #ifndef __UAPI_LINUX_PCITEST_H | ||
10 | #define __UAPI_LINUX_PCITEST_H | ||
11 | |||
12 | #define PCITEST_BAR _IO('P', 0x1) | ||
13 | #define PCITEST_LEGACY_IRQ _IO('P', 0x2) | ||
14 | #define PCITEST_MSI _IOW('P', 0x3, int) | ||
15 | #define PCITEST_WRITE _IOW('P', 0x4, unsigned long) | ||
16 | #define PCITEST_READ _IOW('P', 0x5, unsigned long) | ||
17 | #define PCITEST_COPY _IOW('P', 0x6, unsigned long) | ||
18 | |||
19 | #endif /* __UAPI_LINUX_PCITEST_H */ | ||
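
These ioctl numbers are exercised by the pcitest tool added later in this patch; as a quick illustration, a minimal user-space consumer might look like the sketch below. The /dev/pci-endpoint-test.0 node name is the default used by that tool; everything else here is an assumption.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include <linux/pcitest.h>

	int main(void)
	{
		long ret;
		int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		ret = ioctl(fd, PCITEST_BAR, 0);	/* check BAR0 */
		printf("BAR0: %s\n", ret == 1 ? "OKAY" : "NOT OKAY");

		close(fd);
		return 0;
	}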
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h new file mode 100644 index 000000000000..3e824e1a6495 --- /dev/null +++ b/include/uapi/linux/switchtec_ioctl.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* | ||
2 | * Microsemi Switchtec PCIe Driver | ||
3 | * Copyright (c) 2017, Microsemi Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _UAPI_LINUX_SWITCHTEC_IOCTL_H | ||
17 | #define _UAPI_LINUX_SWITCHTEC_IOCTL_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | |||
21 | #define SWITCHTEC_IOCTL_PART_CFG0 0 | ||
22 | #define SWITCHTEC_IOCTL_PART_CFG1 1 | ||
23 | #define SWITCHTEC_IOCTL_PART_IMG0 2 | ||
24 | #define SWITCHTEC_IOCTL_PART_IMG1 3 | ||
25 | #define SWITCHTEC_IOCTL_PART_NVLOG 4 | ||
26 | #define SWITCHTEC_IOCTL_PART_VENDOR0 5 | ||
27 | #define SWITCHTEC_IOCTL_PART_VENDOR1 6 | ||
28 | #define SWITCHTEC_IOCTL_PART_VENDOR2 7 | ||
29 | #define SWITCHTEC_IOCTL_PART_VENDOR3 8 | ||
30 | #define SWITCHTEC_IOCTL_PART_VENDOR4 9 | ||
31 | #define SWITCHTEC_IOCTL_PART_VENDOR5 10 | ||
32 | #define SWITCHTEC_IOCTL_PART_VENDOR6 11 | ||
33 | #define SWITCHTEC_IOCTL_PART_VENDOR7 12 | ||
34 | #define SWITCHTEC_IOCTL_NUM_PARTITIONS 13 | ||
35 | |||
36 | struct switchtec_ioctl_flash_info { | ||
37 | __u64 flash_length; | ||
38 | __u32 num_partitions; | ||
39 | __u32 padding; | ||
40 | }; | ||
41 | |||
42 | struct switchtec_ioctl_flash_part_info { | ||
43 | __u32 flash_partition; | ||
44 | __u32 address; | ||
45 | __u32 length; | ||
46 | __u32 active; | ||
47 | }; | ||
48 | |||
49 | struct switchtec_ioctl_event_summary { | ||
50 | __u64 global; | ||
51 | __u64 part_bitmap; | ||
52 | __u32 local_part; | ||
53 | __u32 padding; | ||
54 | __u32 part[48]; | ||
55 | __u32 pff[48]; | ||
56 | }; | ||
57 | |||
58 | #define SWITCHTEC_IOCTL_EVENT_STACK_ERROR 0 | ||
59 | #define SWITCHTEC_IOCTL_EVENT_PPU_ERROR 1 | ||
60 | #define SWITCHTEC_IOCTL_EVENT_ISP_ERROR 2 | ||
61 | #define SWITCHTEC_IOCTL_EVENT_SYS_RESET 3 | ||
62 | #define SWITCHTEC_IOCTL_EVENT_FW_EXC 4 | ||
63 | #define SWITCHTEC_IOCTL_EVENT_FW_NMI 5 | ||
64 | #define SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL 6 | ||
65 | #define SWITCHTEC_IOCTL_EVENT_FW_FATAL 7 | ||
66 | #define SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP 8 | ||
67 | #define SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC 9 | ||
68 | #define SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP 10 | ||
69 | #define SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC 11 | ||
70 | #define SWITCHTEC_IOCTL_EVENT_GPIO_INT 12 | ||
71 | #define SWITCHTEC_IOCTL_EVENT_PART_RESET 13 | ||
72 | #define SWITCHTEC_IOCTL_EVENT_MRPC_COMP 14 | ||
73 | #define SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC 15 | ||
74 | #define SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP 16 | ||
75 | #define SWITCHTEC_IOCTL_EVENT_AER_IN_P2P 17 | ||
76 | #define SWITCHTEC_IOCTL_EVENT_AER_IN_VEP 18 | ||
77 | #define SWITCHTEC_IOCTL_EVENT_DPC 19 | ||
78 | #define SWITCHTEC_IOCTL_EVENT_CTS 20 | ||
79 | #define SWITCHTEC_IOCTL_EVENT_HOTPLUG 21 | ||
80 | #define SWITCHTEC_IOCTL_EVENT_IER 22 | ||
81 | #define SWITCHTEC_IOCTL_EVENT_THRESH 23 | ||
82 | #define SWITCHTEC_IOCTL_EVENT_POWER_MGMT 24 | ||
83 | #define SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING 25 | ||
84 | #define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26 | ||
85 | #define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27 | ||
86 | #define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28 | ||
87 | #define SWITCHTEC_IOCTL_MAX_EVENTS 29 | ||
88 | |||
89 | #define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1 | ||
90 | #define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2 | ||
91 | |||
92 | #define SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR (1 << 0) | ||
93 | #define SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL (1 << 1) | ||
94 | #define SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG (1 << 2) | ||
95 | #define SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI (1 << 3) | ||
96 | #define SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL (1 << 4) | ||
97 | #define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL (1 << 5) | ||
98 | #define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG (1 << 6) | ||
99 | #define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI (1 << 7) | ||
100 | #define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL (1 << 8) | ||
101 | #define SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED (~0x1ff) | ||
102 | |||
103 | struct switchtec_ioctl_event_ctl { | ||
104 | __u32 event_id; | ||
105 | __s32 index; | ||
106 | __u32 flags; | ||
107 | __u32 occurred; | ||
108 | __u32 count; | ||
109 | __u32 data[5]; | ||
110 | }; | ||
111 | |||
112 | #define SWITCHTEC_IOCTL_PFF_VEP 100 | ||
113 | struct switchtec_ioctl_pff_port { | ||
114 | __u32 pff; | ||
115 | __u32 partition; | ||
116 | __u32 port; | ||
117 | }; | ||
118 | |||
119 | #define SWITCHTEC_IOCTL_FLASH_INFO \ | ||
120 | _IOR('W', 0x40, struct switchtec_ioctl_flash_info) | ||
121 | #define SWITCHTEC_IOCTL_FLASH_PART_INFO \ | ||
122 | _IOWR('W', 0x41, struct switchtec_ioctl_flash_part_info) | ||
123 | #define SWITCHTEC_IOCTL_EVENT_SUMMARY \ | ||
124 | _IOR('W', 0x42, struct switchtec_ioctl_event_summary) | ||
125 | #define SWITCHTEC_IOCTL_EVENT_CTL \ | ||
126 | _IOWR('W', 0x43, struct switchtec_ioctl_event_ctl) | ||
127 | #define SWITCHTEC_IOCTL_PFF_TO_PORT \ | ||
128 | _IOWR('W', 0x44, struct switchtec_ioctl_pff_port) | ||
129 | #define SWITCHTEC_IOCTL_PORT_TO_PFF \ | ||
130 | _IOWR('W', 0x45, struct switchtec_ioctl_pff_port) | ||
131 | |||
132 | #endif | ||
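
For context, a user-space consumer of this ioctl ABI could query the flash layout roughly as follows. This is a sketch only; it assumes the driver exposes a /dev/switchtec0 character device (see Documentation/switchtec.txt), and the error handling is minimal.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include <linux/switchtec_ioctl.h>

	int main(void)
	{
		struct switchtec_ioctl_flash_info info;
		int fd = open("/dev/switchtec0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		if (ioctl(fd, SWITCHTEC_IOCTL_FLASH_INFO, &info)) {
			perror("SWITCHTEC_IOCTL_FLASH_INFO");
			close(fd);
			return 1;
		}

		printf("flash: %llu bytes, %u partitions\n",
		       (unsigned long long)info.flash_length, info.num_partitions);

		close(fd);
		return 0;
	}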
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ae1c90f20381..070be980c37a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -1559,7 +1559,7 @@ void remove_irq(unsigned int irq, struct irqaction *act) | |||
1559 | struct irq_desc *desc = irq_to_desc(irq); | 1559 | struct irq_desc *desc = irq_to_desc(irq); |
1560 | 1560 | ||
1561 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1561 | if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1562 | __free_irq(irq, act->dev_id); | 1562 | __free_irq(irq, act->dev_id); |
1563 | } | 1563 | } |
1564 | EXPORT_SYMBOL_GPL(remove_irq); | 1564 | EXPORT_SYMBOL_GPL(remove_irq); |
1565 | 1565 | ||
@@ -1576,20 +1576,27 @@ EXPORT_SYMBOL_GPL(remove_irq); | |||
1576 | * have completed. | 1576 | * have completed. |
1577 | * | 1577 | * |
1578 | * This function must not be called from interrupt context. | 1578 | * This function must not be called from interrupt context. |
1579 | * | ||
1580 | * Returns the devname argument passed to request_irq. | ||
1579 | */ | 1581 | */ |
1580 | void free_irq(unsigned int irq, void *dev_id) | 1582 | const void *free_irq(unsigned int irq, void *dev_id) |
1581 | { | 1583 | { |
1582 | struct irq_desc *desc = irq_to_desc(irq); | 1584 | struct irq_desc *desc = irq_to_desc(irq); |
1585 | struct irqaction *action; | ||
1586 | const char *devname; | ||
1583 | 1587 | ||
1584 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) | 1588 | if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) |
1585 | return; | 1589 | return NULL; |
1586 | 1590 | ||
1587 | #ifdef CONFIG_SMP | 1591 | #ifdef CONFIG_SMP |
1588 | if (WARN_ON(desc->affinity_notify)) | 1592 | if (WARN_ON(desc->affinity_notify)) |
1589 | desc->affinity_notify = NULL; | 1593 | desc->affinity_notify = NULL; |
1590 | #endif | 1594 | #endif |
1591 | 1595 | ||
1592 | kfree(__free_irq(irq, dev_id)); | 1596 | action = __free_irq(irq, dev_id); |
1597 | devname = action->name; | ||
1598 | kfree(action); | ||
1599 | return devname; | ||
1593 | } | 1600 | } |
1594 | EXPORT_SYMBOL(free_irq); | 1601 | EXPORT_SYMBOL(free_irq); |
1595 | 1602 | ||
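
The point of returning the devname is that a caller which built the name dynamically for request_irq() can now release it when the IRQ is freed, without keeping a separate pointer around. A hedged sketch of that pattern (the foo_* helpers are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>

	static int foo_request(unsigned int irq, irq_handler_t handler, void *priv,
			       int queue)
	{
		int ret;
		char *name = kasprintf(GFP_KERNEL, "foo-queue%d", queue);

		if (!name)
			return -ENOMEM;

		ret = request_irq(irq, handler, 0, name, priv);
		if (ret)
			kfree(name);
		return ret;
	}

	static void foo_free(unsigned int irq, void *priv)
	{
		/* free_irq() hands back the devname, so the kasprintf() buffer
		 * allocated in foo_request() can be released here */
		kfree(free_irq(irq, priv));
	}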
diff --git a/lib/devres.c b/lib/devres.c index cb1464c411a2..78eca713b1d9 100644 --- a/lib/devres.c +++ b/lib/devres.c | |||
@@ -17,7 +17,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data) | |||
17 | /** | 17 | /** |
18 | * devm_ioremap - Managed ioremap() | 18 | * devm_ioremap - Managed ioremap() |
19 | * @dev: Generic device to remap IO address for | 19 | * @dev: Generic device to remap IO address for |
20 | * @offset: BUS offset to map | 20 | * @offset: Resource address to map |
21 | * @size: Size of map | 21 | * @size: Size of map |
22 | * | 22 | * |
23 | * Managed ioremap(). Map is automatically unmapped on driver detach. | 23 | * Managed ioremap(). Map is automatically unmapped on driver detach. |
@@ -45,7 +45,7 @@ EXPORT_SYMBOL(devm_ioremap); | |||
45 | /** | 45 | /** |
46 | * devm_ioremap_nocache - Managed ioremap_nocache() | 46 | * devm_ioremap_nocache - Managed ioremap_nocache() |
47 | * @dev: Generic device to remap IO address for | 47 | * @dev: Generic device to remap IO address for |
48 | * @offset: BUS offset to map | 48 | * @offset: Resource address to map |
49 | * @size: Size of map | 49 | * @size: Size of map |
50 | * | 50 | * |
51 | * Managed ioremap_nocache(). Map is automatically unmapped on driver | 51 | * Managed ioremap_nocache(). Map is automatically unmapped on driver |
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache); | |||
74 | /** | 74 | /** |
75 | * devm_ioremap_wc - Managed ioremap_wc() | 75 | * devm_ioremap_wc - Managed ioremap_wc() |
76 | * @dev: Generic device to remap IO address for | 76 | * @dev: Generic device to remap IO address for |
77 | * @offset: BUS offset to map | 77 | * @offset: Resource address to map |
78 | * @size: Size of map | 78 | * @size: Size of map |
79 | * | 79 | * |
80 | * Managed ioremap_wc(). Map is automatically unmapped on driver detach. | 80 | * Managed ioremap_wc(). Map is automatically unmapped on driver detach. |
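
The reworded kernel-doc above is a useful reminder that @offset is a CPU/resource address, not a bus address. A typical caller passes the start of a struct resource straight through; the probe function below is a hypothetical sketch, not part of this patch.

	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		/* res->start is the resource address the updated comment refers to;
		 * the mapping is released automatically on driver detach */
		base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!base)
			return -ENOMEM;

		return 0;
	}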
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c new file mode 100644 index 000000000000..ad54a58d7dda --- /dev/null +++ b/tools/pci/pcitest.c | |||
@@ -0,0 +1,186 @@ | |||
1 | /** | ||
2 | * Userspace PCI Endpoint Test Module | ||
3 | * | ||
4 | * Copyright (C) 2017 Texas Instruments | ||
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <errno.h> | ||
21 | #include <fcntl.h> | ||
22 | #include <stdbool.h> | ||
23 | #include <stdio.h> | ||
24 | #include <stdlib.h> | ||
25 | #include <sys/ioctl.h> | ||
26 | #include <time.h> | ||
27 | #include <unistd.h> | ||
28 | |||
29 | #include <linux/pcitest.h> | ||
30 | |||
31 | #define BILLION 1E9 | ||
32 | |||
33 | static char *result[] = { "NOT OKAY", "OKAY" }; | ||
34 | |||
35 | struct pci_test { | ||
36 | char *device; | ||
37 | char barnum; | ||
38 | bool legacyirq; | ||
39 | unsigned int msinum; | ||
40 | bool read; | ||
41 | bool write; | ||
42 | bool copy; | ||
43 | unsigned long size; | ||
44 | }; | ||
45 | |||
46 | static int run_test(struct pci_test *test) | ||
47 | { | ||
48 | long ret; | ||
49 | int fd; | ||
50 | struct timespec start, end; | ||
51 | double time; | ||
52 | |||
53 | fd = open(test->device, O_RDWR); | ||
54 | if (fd < 0) { | ||
55 | perror("can't open PCI Endpoint Test device"); | ||
56 | return fd; | ||
57 | } | ||
58 | |||
59 | if (test->barnum >= 0 && test->barnum <= 5) { | ||
60 | ret = ioctl(fd, PCITEST_BAR, test->barnum); | ||
61 | fprintf(stdout, "BAR%d:\t\t", test->barnum); | ||
62 | if (ret < 0) | ||
63 | fprintf(stdout, "TEST FAILED\n"); | ||
64 | else | ||
65 | fprintf(stdout, "%s\n", result[ret]); | ||
66 | } | ||
67 | |||
68 | if (test->legacyirq) { | ||
69 | ret = ioctl(fd, PCITEST_LEGACY_IRQ, 0); | ||
70 | fprintf(stdout, "LEGACY IRQ:\t"); | ||
71 | if (ret < 0) | ||
72 | fprintf(stdout, "TEST FAILED\n"); | ||
73 | else | ||
74 | fprintf(stdout, "%s\n", result[ret]); | ||
75 | } | ||
76 | |||
77 | if (test->msinum > 0 && test->msinum <= 32) { | ||
78 | ret = ioctl(fd, PCITEST_MSI, test->msinum); | ||
79 | fprintf(stdout, "MSI%d:\t\t", test->msinum); | ||
80 | if (ret < 0) | ||
81 | fprintf(stdout, "TEST FAILED\n"); | ||
82 | else | ||
83 | fprintf(stdout, "%s\n", result[ret]); | ||
84 | } | ||
85 | |||
86 | if (test->write) { | ||
87 | ret = ioctl(fd, PCITEST_WRITE, test->size); | ||
88 | fprintf(stdout, "WRITE (%7ld bytes):\t\t", test->size); | ||
89 | if (ret < 0) | ||
90 | fprintf(stdout, "TEST FAILED\n"); | ||
91 | else | ||
92 | fprintf(stdout, "%s\n", result[ret]); | ||
93 | } | ||
94 | |||
95 | if (test->read) { | ||
96 | ret = ioctl(fd, PCITEST_READ, test->size); | ||
97 | fprintf(stdout, "READ (%7ld bytes):\t\t", test->size); | ||
98 | if (ret < 0) | ||
99 | fprintf(stdout, "TEST FAILED\n"); | ||
100 | else | ||
101 | fprintf(stdout, "%s\n", result[ret]); | ||
102 | } | ||
103 | |||
104 | if (test->copy) { | ||
105 | ret = ioctl(fd, PCITEST_COPY, test->size); | ||
106 | fprintf(stdout, "COPY (%7ld bytes):\t\t", test->size); | ||
107 | if (ret < 0) | ||
108 | fprintf(stdout, "TEST FAILED\n"); | ||
109 | else | ||
110 | fprintf(stdout, "%s\n", result[ret]); | ||
111 | } | ||
112 | |||
113 | fflush(stdout); | ||
114 | } | ||
115 | |||
116 | int main(int argc, char **argv) | ||
117 | { | ||
118 | int c; | ||
119 | struct pci_test *test; | ||
120 | |||
121 | test = calloc(1, sizeof(*test)); | ||
122 | if (!test) { | ||
123 | perror("Failed to allocate memory for pci_test"); | ||
124 | return -ENOMEM; | ||
125 | } | ||
126 | |||
127 | /* since '0' is a valid BAR number, initialize it to -1 */ | ||
128 | test->barnum = -1; | ||
129 | |||
130 | /* set default size as 100KB */ | ||
131 | test->size = 0x19000; | ||
132 | |||
133 | /* set default endpoint device */ | ||
134 | test->device = "/dev/pci-endpoint-test.0"; | ||
135 | |||
136 | while ((c = getopt(argc, argv, "D:b:m:lrwcs:")) != EOF) | ||
137 | switch (c) { | ||
138 | case 'D': | ||
139 | test->device = optarg; | ||
140 | continue; | ||
141 | case 'b': | ||
142 | test->barnum = atoi(optarg); | ||
143 | if (test->barnum < 0 || test->barnum > 5) | ||
144 | goto usage; | ||
145 | continue; | ||
146 | case 'l': | ||
147 | test->legacyirq = true; | ||
148 | continue; | ||
149 | case 'm': | ||
150 | test->msinum = atoi(optarg); | ||
151 | if (test->msinum < 1 || test->msinum > 32) | ||
152 | goto usage; | ||
153 | continue; | ||
154 | case 'r': | ||
155 | test->read = true; | ||
156 | continue; | ||
157 | case 'w': | ||
158 | test->write = true; | ||
159 | continue; | ||
160 | case 'c': | ||
161 | test->copy = true; | ||
162 | continue; | ||
163 | case 's': | ||
164 | test->size = strtoul(optarg, NULL, 0); | ||
165 | continue; | ||
166 | case '?': | ||
167 | case 'h': | ||
168 | default: | ||
169 | usage: | ||
170 | fprintf(stderr, | ||
171 | "usage: %s [options]\n" | ||
172 | "Options:\n" | ||
173 | "\t-D <dev> PCI endpoint test device {default: /dev/pci-endpoint-test.0}\n" | ||
174 | "\t-b <bar num> BAR test (bar number between 0..5)\n" | ||
175 | "\t-m <msi num> MSI test (msi number between 1..32)\n" | ||
176 | "\t-r Read buffer test\n" | ||
177 | "\t-w Write buffer test\n" | ||
178 | "\t-c Copy buffer test\n" | ||
179 | "\t-s <size> Size of buffer {default: 100KB}\n", | ||
180 | argv[0]); | ||
181 | return -EINVAL; | ||
182 | } | ||
183 | |||
184 | run_test(test); | ||
185 | return 0; | ||
186 | } | ||
diff --git a/tools/pci/pcitest.sh b/tools/pci/pcitest.sh new file mode 100644 index 000000000000..5442bbea4c22 --- /dev/null +++ b/tools/pci/pcitest.sh | |||
@@ -0,0 +1,56 @@ | |||
1 | #!/bin/sh | ||
2 | |||
3 | echo "BAR tests" | ||
4 | echo | ||
5 | |||
6 | bar=0 | ||
7 | |||
8 | while [ $bar -lt 6 ] | ||
9 | do | ||
10 | pcitest -b $bar | ||
11 | bar=`expr $bar + 1` | ||
12 | done | ||
13 | echo | ||
14 | |||
15 | echo "Interrupt tests" | ||
16 | echo | ||
17 | |||
18 | pcitest -l | ||
19 | msi=1 | ||
20 | |||
21 | while [ $msi -lt 33 ] | ||
22 | do | ||
23 | pcitest -m $msi | ||
24 | msi=`expr $msi + 1` | ||
25 | done | ||
26 | echo | ||
27 | |||
28 | echo "Read Tests" | ||
29 | echo | ||
30 | |||
31 | pcitest -r -s 1 | ||
32 | pcitest -r -s 1024 | ||
33 | pcitest -r -s 1025 | ||
34 | pcitest -r -s 1024000 | ||
35 | pcitest -r -s 1024001 | ||
36 | echo | ||
37 | |||
38 | echo "Write Tests" | ||
39 | echo | ||
40 | |||
41 | pcitest -w -s 1 | ||
42 | pcitest -w -s 1024 | ||
43 | pcitest -w -s 1025 | ||
44 | pcitest -w -s 1024000 | ||
45 | pcitest -w -s 1024001 | ||
46 | echo | ||
47 | |||
48 | echo "Copy Tests" | ||
49 | echo | ||
50 | |||
51 | pcitest -c -s 1 | ||
52 | pcitest -c -s 1024 | ||
53 | pcitest -c -s 1025 | ||
54 | pcitest -c -s 1024000 | ||
55 | pcitest -c -s 1024001 | ||
56 | echo | ||