96 files changed, 6410 insertions, 3766 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10 b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10
index 4b8d6ec92e2b..b5f526081711 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etb10
@@ -6,13 +6,6 @@ Description: (RW) Add/remove a sink from a trace path. There can be multiple
 source for a single sink.
 ex: echo 1 > /sys/bus/coresight/devices/20010000.etb/enable_sink
 
-What: /sys/bus/coresight/devices/<memory_map>.etb/status
-Date: November 2014
-KernelVersion: 3.19
-Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
-Description: (R) List various control and status registers. The specific
-layout and content is driver specific.
-
 What: /sys/bus/coresight/devices/<memory_map>.etb/trigger_cntr
 Date: November 2014
 KernelVersion: 3.19
@@ -22,3 +15,65 @@ Description: (RW) Disables write access to the Trace RAM by stopping the
 following the trigger event. The number of 32-bit words written
 into the Trace RAM following the trigger event is equal to the
 value stored in this register+1 (from ARM ETB-TRM).
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/mgmt/rdp
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Defines the depth, in words, of the trace RAM in powers of
+2. The value is read directly from HW register RDP, 0x004.
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/mgmt/sts
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the ETB status register. The value
+is read directly from HW register STS, 0x00C.
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/mgmt/rrp
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the ETB RAM Read Pointer register
+that is used to read entries from the Trace RAM over the APB
+interface. The value is read directly from HW register RRP,
+0x014.
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/mgmt/rwp
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the ETB RAM Write Pointer register
+that is used to set the write pointer to write entries from
+the CoreSight bus into the Trace RAM. The value is read directly
+from HW register RWP, 0x018.
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/mgmt/trg
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Similar to "trigger_cntr" above except that this value is
+read directly from HW register TRG, 0x01C.
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/mgmt/ctl
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the ETB Control register. The value
+is read directly from HW register CTL, 0x020.
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/mgmt/ffsr
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the ETB Formatter and Flush Status
+register. The value is read directly from HW register FFSR,
+0x300.
+
+What: /sys/bus/coresight/devices/<memory_map>.etb/mgmt/ffcr
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the ETB Formatter and Flush Control
+register. The value is read directly from HW register FFCR,
+0x304.
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
index 2355ed8ae31f..36258bc1b473 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
@@ -359,6 +359,19 @@ Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
 Description: (R) Print the content of the Peripheral ID3 Register
 (0xFEC). The value is taken directly from the HW.
 
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trcconfig
+Date: February 2016
+KernelVersion: 4.07
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the trace configuration register
+(0x010) as currently set by SW.
+
+What: /sys/bus/coresight/devices/<memory_map>.etm/mgmt/trctraceid
+Date: February 2016
+KernelVersion: 4.07
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Print the content of the trace ID register (0x040).
+
 What: /sys/bus/coresight/devices/<memory_map>.etm/trcidr/trcidr0
 Date: April 2015
 KernelVersion: 4.01
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-stm b/Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
new file mode 100644
index 000000000000..1dffabe7f48d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
@@ -0,0 +1,53 @@
+What: /sys/bus/coresight/devices/<memory_map>.stm/enable_source
+Date: April 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Enable/disable tracing on this specific trace macrocell.
+Enabling the trace macrocell implies it has been configured
+properly and a sink has been identified for it. The path
+of coresight components linking the source to the sink is
+configured and managed automatically by the coresight framework.
+
+What: /sys/bus/coresight/devices/<memory_map>.stm/hwevent_enable
+Date: April 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Provides access to the HW event enable register, used in
+conjunction with the HW event bank select register.
+
+What: /sys/bus/coresight/devices/<memory_map>.stm/hwevent_select
+Date: April 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Gives access to the HW event block select register
+(STMHEBSR) in order to configure up to 256 channels. Used in
+conjunction with the "hwevent_enable" register as described above.
+
+What: /sys/bus/coresight/devices/<memory_map>.stm/port_enable
+Date: April 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Provides access to the stimulus port enable register
+(STMSPER). Used in conjunction with "port_select" described
+below.
+
+What: /sys/bus/coresight/devices/<memory_map>.stm/port_select
+Date: April 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Used to determine which bank of stimulus ports the bits
+in register STMSPER (see above) apply to.
+
+What: /sys/bus/coresight/devices/<memory_map>.stm/status
+Date: April 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) List various control and status registers. The specific
+layout and content is driver specific.
+
+What: /sys/bus/coresight/devices/<memory_map>.stm/traceid
+Date: April 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (RW) Holds the trace ID that will appear in the trace stream
+coming from this trace entity.
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
index f38cded5fa22..4fe677ed1305 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
@@ -6,3 +6,80 @@ Description: (RW) Disables write access to the Trace RAM by stopping the
 formatter after a defined number of words have been stored
 following the trigger event. Additional interface for this
 driver are expected to be added as it matures.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/rsz
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Defines the size, in 32-bit words, of the local RAM buffer.
+The value is read directly from HW register RSZ, 0x004.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/sts
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the TMC status register. The value
+is read directly from HW register STS, 0x00C.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/rrp
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the TMC RAM Read Pointer register
+that is used to read entries from the Trace RAM over the APB
+interface. The value is read directly from HW register RRP,
+0x014.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/rwp
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the TMC RAM Write Pointer register
+that is used to set the write pointer to write entries from
+the CoreSight bus into the Trace RAM. The value is read directly
+from HW register RWP, 0x018.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/trg
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Similar to "trigger_cntr" above except that this value is
+read directly from HW register TRG, 0x01C.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/ctl
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the TMC Control register. The value
+is read directly from HW register CTL, 0x020.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/ffsr
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the TMC Formatter and Flush Status
+register. The value is read directly from HW register FFSR,
+0x300.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/ffcr
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the TMC Formatter and Flush Control
+register. The value is read directly from HW register FFCR,
+0x304.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/mode
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Shows the value held by the TMC Mode register, which
+indicates the mode the device has been configured to enact.
+The value is read directly from the MODE register, 0x028.
+
+What: /sys/bus/coresight/devices/<memory_map>.tmc/mgmt/devid
+Date: March 2016
+KernelVersion: 4.7
+Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
+Description: (R) Indicates the capabilities of the Coresight TMC.
+The value is read directly from the DEVID register, 0xFC8.
diff --git a/Documentation/ABI/testing/sysfs-bus-mcb b/Documentation/ABI/testing/sysfs-bus-mcb
new file mode 100644
index 000000000000..77947c509796
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-mcb
@@ -0,0 +1,29 @@
+What: /sys/bus/mcb/devices/mcb:X
+Date: March 2016
+KernelVersion: 4.7
+Contact: Johannes Thumshirn <jth@kernel.org>
+Description: Hardware chip or device hosting the MEN chameleon bus
+
+What: /sys/bus/mcb/devices/mcb:X/revision
+Date: March 2016
+KernelVersion: 4.7
+Contact: Johannes Thumshirn <jth@kernel.org>
+Description: The FPGA's revision number
+
+What: /sys/bus/mcb/devices/mcb:X/minor
+Date: March 2016
+KernelVersion: 4.7
+Contact: Johannes Thumshirn <jth@kernel.org>
+Description: The FPGA's minor number
+
+What: /sys/bus/mcb/devices/mcb:X/model
+Date: March 2016
+KernelVersion: 4.7
+Contact: Johannes Thumshirn <jth@kernel.org>
+Description: The FPGA's model number
+
+What: /sys/bus/mcb/devices/mcb:X/name
+Date: March 2016
+KernelVersion: 4.7
+Contact: Johannes Thumshirn <jth@kernel.org>
+Description: The FPGA's name
diff --git a/Documentation/ABI/testing/sysfs-class-stm b/Documentation/ABI/testing/sysfs-class-stm
index c9aa4f3fc9a7..77ed3da0f68e 100644
--- a/Documentation/ABI/testing/sysfs-class-stm
+++ b/Documentation/ABI/testing/sysfs-class-stm
@@ -12,3 +12,13 @@ KernelVersion: 4.3
 Contact: Alexander Shishkin <alexander.shishkin@linux.intel.com>
 Description:
 Shows the number of channels per master on this STM device.
+
+What: /sys/class/stm/<stm>/hw_override
+Date: March 2016
+KernelVersion: 4.7
+Contact: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:
+Reads as 0 if master numbers in the STP stream produced by
+this stm device will match the master numbers assigned by
+the software or 1 if the stm hardware overrides software
+assigned masters.
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 62938eb9697f..93147c0c8a0e 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -19,6 +19,7 @@ its hardware characteristcs.
 	- "arm,coresight-etm3x", "arm,primecell";
 	- "arm,coresight-etm4x", "arm,primecell";
 	- "qcom,coresight-replicator1x", "arm,primecell";
+	- "arm,coresight-stm", "arm,primecell"; [1]
 
 	* reg: physical base address and length of the register
 	  set(s) of the component.
@@ -36,6 +37,14 @@ its hardware characteristcs.
 	  layout using the generic DT graph presentation found in
 	  "bindings/graph.txt".
 
+* Additional required properties for System Trace Macrocells (STM):
+	* reg: along with the physical base address and length of the register
+	  set as described above, another entry is required to describe the
+	  mapping of the extended stimulus port area.
+
+	* reg-names: the only acceptable values are "stm-base" and
+	  "stm-stimulus-base", each corresponding to the areas defined in "reg".
+
 * Required properties for devices that don't show up on the AMBA bus, such as
   non-configurable replicators:
 
@@ -202,3 +211,22 @@ Example:
 		};
 	};
 };
+
+4. STM
+	stm@20100000 {
+		compatible = "arm,coresight-stm", "arm,primecell";
+		reg = <0 0x20100000 0 0x1000>,
+		      <0 0x28000000 0 0x180000>;
+		reg-names = "stm-base", "stm-stimulus-base";
+
+		clocks = <&soc_smc50mhz>;
+		clock-names = "apb_pclk";
+		port {
+			stm_out_port: endpoint {
+				remote-endpoint = <&main_funnel_in_port2>;
+			};
+		};
+	};
+
+[1]. There are currently two versions of STM: STM32 and STM500. Both
+have the same HW interface and as such don't need an explicit binding name.
diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt
index 0a5c3290e732..a33c88cd5d1d 100644
--- a/Documentation/trace/coresight.txt
+++ b/Documentation/trace/coresight.txt
@@ -190,8 +190,8 @@ expected to be accessed and controlled using those entries.
 Last but not least, "struct module *owner" is expected to be set to reflect
 the information carried in "THIS_MODULE".
 
-How to use
-----------
+How to use the tracer modules
+-----------------------------
 
 Before trace collection can start, a coresight sink needs to be identify.
 There is no limit on the amount of sinks (nor sources) that can be enabled at
@@ -297,3 +297,36 @@ Info Tracing enabled
 Instruction 13570831 0x8026B584 E28DD00C false ADD sp,sp,#0xc
 Instruction 0 0x8026B588 E8BD8000 true LDM sp!,{pc}
 Timestamp Timestamp: 17107041535
+
+How to use the STM module
+-------------------------
+
+Using the System Trace Macrocell module is the same as the tracers - the only
+difference is that clients are driving the trace capture rather
+than the program flow through the code.
+
+As with any other CoreSight component, specifics about the STM tracer can be
+found in sysfs with more information on each entry being found in [1]:
+
+root@genericarmv8:~# ls /sys/bus/coresight/devices/20100000.stm
+enable_source   hwevent_select  port_enable     subsystem       uevent
+hwevent_enable  mgmt            port_select     traceid
+root@genericarmv8:~#
+
+Like any other source, a sink needs to be identified and the STM enabled before
+being used:
+
+root@genericarmv8:~# echo 1 > /sys/bus/coresight/devices/20010000.etf/enable_sink
+root@genericarmv8:~# echo 1 > /sys/bus/coresight/devices/20100000.stm/enable_source
+
+From there user space applications can request and use channels using the devfs
+interface provided for that purpose by the generic STM API:
+
+root@genericarmv8:~# ls -l /dev/20100000.stm
+crw-------    1 root     root       10,  61 Jan  3 18:11 /dev/20100000.stm
+root@genericarmv8:~#
+
+Details on how to use the generic STM API can be found here [2].
+
+[1]. Documentation/ABI/testing/sysfs-bus-coresight-devices-stm
+[2]. Documentation/trace/stm.txt
diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm
index 13411fe52f7f..d1f93af36f38 100644
--- a/Documentation/w1/slaves/w1_therm
+++ b/Documentation/w1/slaves/w1_therm
@@ -33,7 +33,15 @@ temperature conversion at a time. If none of the devices are parasite
 powered it would be possible to convert all the devices at the same
 time and then go back to read individual sensors. That isn't
 currently supported. The driver also doesn't support reduced
-precision (which would also reduce the conversion time).
+precision (which would also reduce the conversion time) when reading values.
+
+Writing a value between 9 and 12 to the sysfs w1_slave file will change the
+precision of the sensor for the next readings. This value is in (volatile)
+SRAM, so it is reset when the sensor gets power-cycled.
+
+To store the current precision configuration into EEPROM, the value 0
+has to be written to the sysfs w1_slave file. Since the EEPROM has a limited
+number of write cycles (>50k), this command should be used wisely.
 
 The module parameter strong_pullup can be set to 0 to disable the
 strong pullup, 1 to enable autodetection or 2 to force strong pullup.
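
The precision interface in the w1_therm hunk above is driven entirely through
the sysfs w1_slave file. A minimal user-space sketch follows; the sensor id
(28-000005e2fdc3) and the surrounding program are hypothetical, and only the
w1_slave file name and the accepted values (9..12, or 0 to save the setting to
EEPROM) come from the documentation being added:

/* Illustrative only: set a w1_therm sensor to 10-bit precision. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical DS18B20 id; real ids appear under /sys/bus/w1/devices. */
	const char *path = "/sys/bus/w1/devices/28-000005e2fdc3/w1_slave";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "10\n");	/* 9..12 sets precision; 0 would store it in EEPROM */
	fclose(f);
	return 0;
}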
diff --git a/MAINTAINERS b/MAINTAINERS
index 832f070cbf0f..3dc5d894713b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9843,6 +9843,7 @@ F: drivers/mmc/host/dw_mmc*
 SYSTEM TRACE MODULE CLASS
 M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
 S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git
 F: Documentation/trace/stm.txt
 F: drivers/hwtracing/stm/
 F: include/linux/stm.h
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3ec0766ed5e9..601f64fcc890 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -279,8 +279,7 @@ if RTC_LIB=n
 
 config RTC
 	tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
-	depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
-			&& !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN && !UML
+	depends on ALPHA || (MIPS && MACH_LOONGSON64) || MN10300
 	---help---
 	  If you say Y here and create a character special file /dev/rtc with
 	  major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -585,7 +584,6 @@ config TELCLOCK
 
 config DEVPORT
 	bool
-	depends on !M68K
 	depends on ISA || PCI
 	default y
 
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 781865084dc1..78a492f5acfb 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -81,7 +81,6 @@ static int xilly_map_single_of(struct xilly_endpoint *ep,
 {
 	dma_addr_t addr;
 	struct xilly_mapping *this;
-	int rc;
 
 	this = kzalloc(sizeof(*this), GFP_KERNEL);
 	if (!this)
@@ -101,15 +100,7 @@ static int xilly_map_single_of(struct xilly_endpoint *ep,
 
 	*ret_dma_handle = addr;
 
-	rc = devm_add_action(ep->dev, xilly_of_unmap, this);
-
-	if (rc) {
-		dma_unmap_single(ep->dev, addr, size, direction);
-		kfree(this);
-		return rc;
-	}
-
-	return 0;
+	return devm_add_action_or_reset(ep->dev, xilly_of_unmap, this);
 }
 
 static struct xilly_endpoint_hardware of_hw = {
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index 9418300214e9..dff2d1538164 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -98,7 +98,6 @@ static int xilly_map_single_pci(struct xilly_endpoint *ep,
 	int pci_direction;
 	dma_addr_t addr;
 	struct xilly_mapping *this;
-	int rc;
 
 	this = kzalloc(sizeof(*this), GFP_KERNEL);
 	if (!this)
@@ -120,14 +119,7 @@ static int xilly_map_single_pci(struct xilly_endpoint *ep,
 
 	*ret_dma_handle = addr;
 
-	rc = devm_add_action(ep->dev, xilly_pci_unmap, this);
-	if (rc) {
-		pci_unmap_single(ep->pdev, addr, size, pci_direction);
-		kfree(this);
-		return rc;
-	}
-
-	return 0;
+	return devm_add_action_or_reset(ep->dev, xilly_pci_unmap, this);
 }
 
 static struct xilly_endpoint_hardware pci_hw = {
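
Both xillybus hunks above replace the open-coded "register a cleanup action,
then undo the mapping and free on failure" sequence with
devm_add_action_or_reset(), which invokes the action itself when registration
fails. A small stand-alone sketch of that semantic, with toy names standing in
for the devres machinery (this is a model of the behaviour, not the kernel
implementation):

#include <stdio.h>
#include <stdlib.h>

struct mapping { int addr; };

static void unmap_action(void *data)
{
	struct mapping *m = data;

	printf("cleanup: unmapping %d\n", m->addr);
	free(m);
}

/* Pretend the devres registration fails when no slots are left. */
static int add_action(void (*action)(void *), void *data, int slots_left)
{
	(void)action;
	(void)data;
	if (slots_left <= 0)
		return -12;	/* mimic -ENOMEM */
	return 0;
}

static int add_action_or_reset(void (*action)(void *), void *data,
			       int slots_left)
{
	int rc = add_action(action, data, slots_left);

	if (rc)
		action(data);	/* undo immediately, exactly once */
	return rc;
}

int main(void)
{
	struct mapping *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->addr = 42;
	/* The caller can now simply "return add_action_or_reset(...);". */
	return add_action_or_reset(unmap_action, m, 0) ? 1 : 0;
}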
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 38b682bab85a..b6c1211b4df7 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -597,27 +597,55 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
 
 static void vmbus_wait_for_unload(void)
 {
-	int cpu = smp_processor_id();
-	void *page_addr = hv_context.synic_message_page[cpu];
-	struct hv_message *msg = (struct hv_message *)page_addr +
-				 VMBUS_MESSAGE_SINT;
+	int cpu;
+	void *page_addr;
+	struct hv_message *msg;
 	struct vmbus_channel_message_header *hdr;
-	bool unloaded = false;
+	u32 message_type;
 
+	/*
+	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
+	 * used for initial contact or to CPU0 depending on host version. When
+	 * we're crashing on a different CPU let's hope that IRQ handler on
+	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
+	 * functional and vmbus_unload_response() will complete
+	 * vmbus_connection.unload_event. If not, the last thing we can do is
+	 * read message pages for all CPUs directly.
+	 */
 	while (1) {
-		if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) {
-			mdelay(10);
-			continue;
-		}
+		if (completion_done(&vmbus_connection.unload_event))
+			break;
 
-		hdr = (struct vmbus_channel_message_header *)msg->u.payload;
-		if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
-			unloaded = true;
+		for_each_online_cpu(cpu) {
+			page_addr = hv_context.synic_message_page[cpu];
+			msg = (struct hv_message *)page_addr +
+				VMBUS_MESSAGE_SINT;
 
-		vmbus_signal_eom(msg);
+			message_type = READ_ONCE(msg->header.message_type);
+			if (message_type == HVMSG_NONE)
+				continue;
 
-		if (unloaded)
-			break;
+			hdr = (struct vmbus_channel_message_header *)
+				msg->u.payload;
+
+			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+				complete(&vmbus_connection.unload_event);
+
+			vmbus_signal_eom(msg, message_type);
+		}
+
+		mdelay(10);
+	}
+
+	/*
+	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
+	 * maybe-pending messages on all CPUs to be able to receive new
+	 * messages after we reconnect.
+	 */
+	for_each_online_cpu(cpu) {
+		page_addr = hv_context.synic_message_page[cpu];
+		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+		msg->header.message_type = HVMSG_NONE;
 	}
 }
 
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index d02f1373dd98..fcf8a02dc0ea 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -495,3 +495,4 @@ void vmbus_set_event(struct vmbus_channel *channel)
 
 	hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
 }
+EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b853b4b083bd..df35fb7ed5df 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -714,7 +714,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 		 * If the pfn range we are dealing with is not in the current
 		 * "hot add block", move on.
 		 */
-		if ((start_pfn >= has->end_pfn))
+		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
 			continue;
 		/*
 		 * If the current hot add-request extends beyond
@@ -768,7 +768,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
 		 * If the pfn range we are dealing with is not in the current
 		 * "hot add block", move on.
 		 */
-		if ((start_pfn >= has->end_pfn))
+		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
 			continue;
 
 		old_covered_state = has->covered_end_pfn;
@@ -1400,6 +1400,7 @@ static void balloon_onchannelcallback(void *context)
 			 * This is a normal hot-add request specifying
 			 * hot-add memory.
 			 */
+			dm->host_specified_ha_region = false;
 			ha_pg_range = &ha_msg->range;
 			dm->ha_wrk.ha_page_range = *ha_pg_range;
 			dm->ha_wrk.ha_region_range.page_range = 0;
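
The first two hv_balloon hunks turn a one-sided bound check into a proper
half-open interval test, so a pfn below the current hot-add block is no longer
treated as belonging to it. A tiny sketch with made-up pfn values illustrating
the difference (toy types, not the driver's structures):

#include <assert.h>

struct hot_add_region { unsigned long start_pfn, end_pfn; };

/* New check: pfn is outside [start_pfn, end_pfn). */
static int outside(unsigned long pfn, const struct hot_add_region *has)
{
	return pfn < has->start_pfn || pfn >= has->end_pfn;
}

/* Old check only tested the upper bound. */
static int outside_old(unsigned long pfn, const struct hot_add_region *has)
{
	return pfn >= has->end_pfn;
}

int main(void)
{
	struct hot_add_region has = { .start_pfn = 0x1000, .end_pfn = 0x2000 };

	assert(outside(0x0800, &has));		/* below the region: skipped */
	assert(!outside_old(0x0800, &has));	/* old check wrongly kept it */
	assert(!outside(0x1800, &has));		/* inside the region */
	return 0;
}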
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 9b9b370fe22a..cb1a9160aab1 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -78,9 +78,11 @@ static void kvp_send_key(struct work_struct *dummy);
 
 static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
 static void kvp_timeout_func(struct work_struct *dummy);
+static void kvp_host_handshake_func(struct work_struct *dummy);
 static void kvp_register(int);
 
 static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
+static DECLARE_DELAYED_WORK(kvp_host_handshake_work, kvp_host_handshake_func);
 static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
 
 static const char kvp_devname[] = "vmbus/hv_kvp";
@@ -130,6 +132,11 @@ static void kvp_timeout_func(struct work_struct *dummy)
 	hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
 }
 
+static void kvp_host_handshake_func(struct work_struct *dummy)
+{
+	hv_poll_channel(kvp_transaction.recv_channel, hv_kvp_onchannelcallback);
+}
+
 static int kvp_handle_handshake(struct hv_kvp_msg *msg)
 {
 	switch (msg->kvp_hdr.operation) {
@@ -154,6 +161,12 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
 	pr_debug("KVP: userspace daemon ver. %d registered\n",
 		 KVP_OP_REGISTER);
 	kvp_register(dm_reg_value);
+
+	/*
+	 * If we're still negotiating with the host cancel the timeout
+	 * work to not poll the channel twice.
+	 */
+	cancel_delayed_work_sync(&kvp_host_handshake_work);
 	hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
 
 	return 0;
@@ -594,7 +607,22 @@ void hv_kvp_onchannelcallback(void *context)
 	struct icmsg_negotiate *negop = NULL;
 	int util_fw_version;
 	int kvp_srv_version;
+	static enum {NEGO_NOT_STARTED,
+		     NEGO_IN_PROGRESS,
+		     NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
 
+	if (host_negotiatied == NEGO_NOT_STARTED &&
+	    kvp_transaction.state < HVUTIL_READY) {
+		/*
+		 * If userspace daemon is not connected and host is asking
+		 * us to negotiate we need to delay to not lose messages.
+		 * This is important for Failover IP setting.
+		 */
+		host_negotiatied = NEGO_IN_PROGRESS;
+		schedule_delayed_work(&kvp_host_handshake_work,
+				      HV_UTIL_NEGO_TIMEOUT * HZ);
+		return;
+	}
 	if (kvp_transaction.state > HVUTIL_READY)
 		return;
 
@@ -672,6 +700,8 @@ void hv_kvp_onchannelcallback(void *context)
 		vmbus_sendpacket(channel, recv_buffer,
 				 recvlen, requestid,
 				 VM_PKT_DATA_INBAND, 0);
+
+		host_negotiatied = NEGO_FINISHED;
 	}
 
 }
@@ -708,6 +738,7 @@ hv_kvp_init(struct hv_util_service *srv)
 void hv_kvp_deinit(void)
 {
 	kvp_transaction.state = HVUTIL_DEVICE_DYING;
+	cancel_delayed_work_sync(&kvp_host_handshake_work);
 	cancel_delayed_work_sync(&kvp_timeout_work);
 	cancel_work_sync(&kvp_sendkey_work);
 	hvutil_transport_destroy(hvt);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 12321b93a756..718b5c72f0c8 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -36,6 +36,11 @@
 #define HV_UTIL_TIMEOUT 30
 
 /*
+ * Timeout for guest-host handshake for services.
+ */
+#define HV_UTIL_NEGO_TIMEOUT 60
+
+/*
  * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
  * is set by CPUID(HVCPUID_VERSION_FEATURES).
  */
@@ -620,9 +625,21 @@ extern struct vmbus_channel_message_table_entry
 	channel_message_table[CHANNELMSG_COUNT];
 
 /* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg)
+static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
 {
-	msg->header.message_type = HVMSG_NONE;
+	/*
+	 * On crash we're reading some other CPU's message page and we need
+	 * to be careful: this other CPU may already had cleared the header
+	 * and the host may already had delivered some other message there.
+	 * In case we blindly write msg->header.message_type we're going
+	 * to lose it. We can still lose a message of the same type but
+	 * we count on the fact that there can only be one
+	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
+	 * on crash.
+	 */
+	if (cmpxchg(&msg->header.message_type, old_msg_type,
+		    HVMSG_NONE) != old_msg_type)
+		return;
 
 	/*
 	 * Make sure the write to MessageType (ie set to
@@ -667,8 +684,6 @@ void vmbus_disconnect(void);
 
 int vmbus_post_msg(void *buffer, size_t buflen);
 
-void vmbus_set_event(struct vmbus_channel *channel);
-
 void vmbus_on_event(unsigned long data);
 void vmbus_on_msg_dpc(unsigned long data);
 
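
vmbus_signal_eom() above now clears the message slot only if it still holds
the type that was originally read, so a message the host delivered to the slot
in the meantime (for example while another CPU scans the pages during a crash)
is not lost. A user-space model of that compare-and-swap guard, using toy
values rather than the Hyper-V message layout:

#include <stdatomic.h>
#include <stdio.h>

#define HVMSG_NONE 0u

static _Atomic unsigned int slot = 0x1;	/* message type sampled earlier */

static void signal_eom(unsigned int old_type)
{
	unsigned int expected = old_type;

	/* Model of: if (cmpxchg(&msg->header.message_type, old, NONE) != old) return; */
	if (!atomic_compare_exchange_strong(&slot, &expected, HVMSG_NONE))
		printf("slot changed under us (now %#x), leaving it alone\n",
		       expected);
	else
		printf("slot cleared\n");
}

int main(void)
{
	signal_eom(0x1);		/* type still matches: slot is cleared */
	atomic_store(&slot, 0x2);	/* "host" delivers a new message */
	signal_eom(0x1);		/* stale type: new message is preserved */
	return 0;
}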
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index a40a73a7b71d..fe586bf74e17 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -33,25 +33,21 @@
 void hv_begin_read(struct hv_ring_buffer_info *rbi)
 {
 	rbi->ring_buffer->interrupt_mask = 1;
-	mb();
+	virt_mb();
 }
 
 u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 {
-	u32 read;
-	u32 write;
 
 	rbi->ring_buffer->interrupt_mask = 0;
-	mb();
+	virt_mb();
 
 	/*
 	 * Now check to see if the ring buffer is still empty.
 	 * If it is not, we raced and we need to process new
 	 * incoming messages.
 	 */
-	hv_get_ringbuffer_availbytes(rbi, &read, &write);
-
-	return read;
+	return hv_get_bytes_to_read(rbi);
 }
 
 /*
@@ -72,69 +68,17 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 
 static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
 {
-	mb();
-	if (rbi->ring_buffer->interrupt_mask)
+	virt_mb();
+	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
 		return false;
 
 	/* check interrupt_mask before read_index */
-	rmb();
+	virt_rmb();
 	/*
 	 * This is the only case we need to signal when the
 	 * ring transitions from being empty to non-empty.
 	 */
-	if (old_write == rbi->ring_buffer->read_index)
-		return true;
-
-	return false;
-}
-
-/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- *    producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- *    the consumer of the ring will signal when the ring
- *    state transitions from being full to a state where
- *    there is room for the producer to send the pending packet.
- */
-
-static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
-{
-	u32 cur_write_sz;
-	u32 r_size;
-	u32 write_loc;
-	u32 read_loc = rbi->ring_buffer->read_index;
-	u32 pending_sz;
-
-	/*
-	 * Issue a full memory barrier before making the signaling decision.
-	 * Here is the reason for having this barrier:
-	 * If the reading of the pend_sz (in this function)
-	 * were to be reordered and read before we commit the new read
-	 * index (in the calling function) we could
-	 * have a problem. If the host were to set the pending_sz after we
-	 * have sampled pending_sz and go to sleep before we commit the
-	 * read index, we could miss sending the interrupt. Issue a full
-	 * memory barrier to address this.
-	 */
-	mb();
-
-	pending_sz = rbi->ring_buffer->pending_send_sz;
-	write_loc = rbi->ring_buffer->write_index;
-	/* If the other end is not blocked on write don't bother. */
-	if (pending_sz == 0)
-		return false;
-
-	r_size = rbi->ring_datasize;
-	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
-		read_loc - write_loc;
-
-	if (cur_write_sz >= pending_sz)
+	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
 		return true;
 
 	return false;
@@ -188,17 +132,9 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
 		    u32 next_read_location)
 {
 	ring_info->ring_buffer->read_index = next_read_location;
+	ring_info->priv_read_index = next_read_location;
 }
 
-
-/* Get the start of the ring buffer. */
-static inline void *
-hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
-{
-	return (void *)ring_info->ring_buffer->buffer;
-}
-
-
 /* Get the size of the ring buffer. */
 static inline u32
 hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
@@ -332,7 +268,6 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
-	u32 bytes_avail_toread;
 	u32 totalbytes_towrite = 0;
 
 	u32 next_write_location;
@@ -348,9 +283,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	if (lock)
 		spin_lock_irqsave(&outring_info->ring_lock, flags);
 
-	hv_get_ringbuffer_availbytes(outring_info,
-				     &bytes_avail_toread,
-				     &bytes_avail_towrite);
+	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
 
 	/*
 	 * If there is only room for the packet, assume it is full.
@@ -384,7 +317,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 		       sizeof(u64));
 
 	/* Issue a full memory barrier before updating the write index */
-	mb();
+	virt_mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
@@ -401,7 +334,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
 		       u64 *requestid, bool *signal, bool raw)
 {
-	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
@@ -417,10 +349,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	*buffer_actual_len = 0;
 	*requestid = 0;
 
-	hv_get_ringbuffer_availbytes(inring_info,
-				     &bytes_avail_toread,
-				     &bytes_avail_towrite);
-
+	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
 	/* Make sure there is something to read */
 	if (bytes_avail_toread < sizeof(desc)) {
 		/*
@@ -464,7 +393,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	 * the writer may start writing to the read area once the read index
 	 * is updated.
 	 */
-	mb();
+	virt_mb();
 
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
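
The ring_buffer.c hunks above drop hv_get_ringbuffer_availbytes() in favour of
the single-purpose hv_get_bytes_to_read()/hv_get_bytes_to_write() helpers. The
underlying arithmetic is the usual circular-buffer calculation; the sketch
below uses simplified stand-in names for the hv_ring_buffer_info fields and is
not the kernel helpers themselves:

#include <assert.h>

struct ring {
	unsigned int size;		/* stand-in for ring_datasize */
	unsigned int read_index;
	unsigned int write_index;
};

static unsigned int bytes_to_read(const struct ring *r)
{
	return (r->write_index >= r->read_index) ?
		r->write_index - r->read_index :
		r->size - r->read_index + r->write_index;
}

static unsigned int bytes_to_write(const struct ring *r)
{
	/* Everything not yet read is unavailable for writing. */
	return r->size - bytes_to_read(r);
}

int main(void)
{
	struct ring r = { .size = 4096, .read_index = 100, .write_index = 612 };

	assert(bytes_to_read(&r) == 512);
	assert(bytes_to_write(&r) == 4096 - 512);

	r.write_index = 20;	/* writer wrapped around */
	assert(bytes_to_read(&r) == 4096 - 100 + 20);
	return 0;
}

Note that the write path in hv_ringbuffer_write() still treats "exactly enough
room" as full, so the fullness policy lives in the caller, not in the helper.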
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 64713ff47e36..952f20fdc7e3 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -41,6 +41,7 @@
 #include <linux/ptrace.h>
 #include <linux/screen_info.h>
 #include <linux/kdebug.h>
+#include <linux/efi.h>
 #include "hyperv_vmbus.h"
 
 static struct acpi_device *hv_acpi_dev;
@@ -101,7 +102,10 @@ static struct notifier_block hyperv_panic_block = {
 	.notifier_call = hyperv_panic_event,
 };
 
+static const char *fb_mmio_name = "fb_range";
+static struct resource *fb_mmio;
 struct resource *hyperv_mmio;
+DEFINE_SEMAPHORE(hyperv_mmio_lock);
 
 static int vmbus_exists(void)
 {
@@ -708,7 +712,7 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
 	if (dev->event_handler)
 		dev->event_handler(dev);
 
-	vmbus_signal_eom(msg);
+	vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
 }
 
 void vmbus_on_msg_dpc(unsigned long data)
@@ -720,8 +724,9 @@ void vmbus_on_msg_dpc(unsigned long data)
 	struct vmbus_channel_message_header *hdr;
 	struct vmbus_channel_message_table_entry *entry;
 	struct onmessage_work_context *ctx;
+	u32 message_type = msg->header.message_type;
 
-	if (msg->header.message_type == HVMSG_NONE)
+	if (message_type == HVMSG_NONE)
 		/* no msg */
 		return;
 
@@ -746,7 +751,7 @@ void vmbus_on_msg_dpc(unsigned long data)
 		entry->message_handler(hdr);
 
 msg_handled:
-	vmbus_signal_eom(msg);
+	vmbus_signal_eom(msg, message_type);
 }
 
 static void vmbus_isr(void)
@@ -1048,7 +1053,6 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
 	new_res->end = end;
 
 	/*
-	 * Stick ranges from higher in address space at the front of the list.
 	 * If two ranges are adjacent, merge them.
 	 */
 	do {
@@ -1069,7 +1073,7 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
 			break;
 		}
 
-		if ((*old_res)->end < new_res->start) {
+		if ((*old_res)->start > new_res->end) {
 			new_res->sibling = *old_res;
 			if (prev_res)
 				(*prev_res)->sibling = new_res;
@@ -1091,6 +1095,12 @@ static int vmbus_acpi_remove(struct acpi_device *device)
 	struct resource *next_res;
 
 	if (hyperv_mmio) {
+		if (fb_mmio) {
+			__release_region(hyperv_mmio, fb_mmio->start,
+					 resource_size(fb_mmio));
+			fb_mmio = NULL;
+		}
+
 		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
 			next_res = cur_res->sibling;
 			kfree(cur_res);
@@ -1100,6 +1110,30 @@ static int vmbus_acpi_remove(struct acpi_device *device)
 	return 0;
 }
 
+static void vmbus_reserve_fb(void)
+{
+	int size;
+	/*
+	 * Make a claim for the frame buffer in the resource tree under the
+	 * first node, which will be the one below 4GB.  The length seems to
+	 * be underreported, particularly in a Generation 1 VM.  So start out
+	 * reserving a larger area and make it smaller until it succeeds.
+	 */
+
+	if (screen_info.lfb_base) {
+		if (efi_enabled(EFI_BOOT))
+			size = max_t(__u32, screen_info.lfb_size, 0x800000);
+		else
+			size = max_t(__u32, screen_info.lfb_size, 0x4000000);
+
+		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
+			fb_mmio = __request_region(hyperv_mmio,
+						   screen_info.lfb_base, size,
+						   fb_mmio_name, 0);
+		}
+	}
+}
+
 /**
  * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
  * @new:		If successful, supplied a pointer to the
@@ -1128,11 +1162,33 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
 			resource_size_t size, resource_size_t align,
 			bool fb_overlap_ok)
 {
-	struct resource *iter;
-	resource_size_t range_min, range_max, start, local_min, local_max;
+	struct resource *iter, *shadow;
+	resource_size_t range_min, range_max, start;
 	const char *dev_n = dev_name(&device_obj->device);
-	u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
-	int i;
+	int retval;
+
+	retval = -ENXIO;
+	down(&hyperv_mmio_lock);
+
+	/*
+	 * If overlaps with frame buffers are allowed, then first attempt to
+	 * make the allocation from within the reserved region.  Because it
+	 * is already reserved, no shadow allocation is necessary.
+	 */
+	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
+	    !(max < fb_mmio->start)) {
+
+		range_min = fb_mmio->start;
+		range_max = fb_mmio->end;
+		start = (range_min + align - 1) & ~(align - 1);
+		for (; start + size - 1 <= range_max; start += align) {
+			*new = request_mem_region_exclusive(start, size, dev_n);
+			if (*new) {
+				retval = 0;
+				goto exit;
+			}
+		}
+	}
 
 	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
 		if ((iter->start >= max) || (iter->end <= min))
@@ -1140,46 +1196,56 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
 
 		range_min = iter->start;
 		range_max = iter->end;
-
-		/* If this range overlaps the frame buffer, split it into
-		   two tries. */
-		for (i = 0; i < 2; i++) {
-			local_min = range_min;
-			local_max = range_max;
-			if (fb_overlap_ok || (range_min >= fb_end) ||
-			    (range_max <= screen_info.lfb_base)) {
-				i++;
-			} else {
-				if ((range_min <= screen_info.lfb_base) &&
-				    (range_max >= screen_info.lfb_base)) {
-					/*
-					 * The frame buffer is in this window,
-					 * so trim this into the part that
-					 * preceeds the frame buffer.
-					 */
-					local_max = screen_info.lfb_base - 1;
-					range_min = fb_end;
-				} else {
-					range_min = fb_end;
-					continue;
-				}
-			}
+		start = (range_min + align - 1) & ~(align - 1);
+		for (; start + size - 1 <= range_max; start += align) {
+			shadow = __request_region(iter, start, size, NULL,
+						  IORESOURCE_BUSY);
+			if (!shadow)
+				continue;
+
+			*new = request_mem_region_exclusive(start, size, dev_n);
+			if (*new) {
+				shadow->name = (char *)*new;
+				retval = 0;
+				goto exit;
 			}
| 1167 | 1212 | ||
| 1168 | start = (local_min + align - 1) & ~(align - 1); | 1213 | __release_region(iter, start, size); |
| 1169 | for (; start + size - 1 <= local_max; start += align) { | ||
| 1170 | *new = request_mem_region_exclusive(start, size, | ||
| 1171 | dev_n); | ||
| 1172 | if (*new) | ||
| 1173 | return 0; | ||
| 1174 | } | ||
| 1175 | } | 1214 | } |
| 1176 | } | 1215 | } |
| 1177 | 1216 | ||
| 1178 | return -ENXIO; | 1217 | exit: |
| 1218 | up(&hyperv_mmio_lock); | ||
| 1219 | return retval; | ||
| 1179 | } | 1220 | } |
| 1180 | EXPORT_SYMBOL_GPL(vmbus_allocate_mmio); | 1221 | EXPORT_SYMBOL_GPL(vmbus_allocate_mmio); |
| 1181 | 1222 | ||
| 1182 | /** | 1223 | /** |
| 1224 | * vmbus_free_mmio() - Free a memory-mapped I/O range. | ||
| 1225 | * @start: Base address of region to release. | ||
| 1226 | * @size: Size of the range to be released | ||
| 1227 | * | ||
| 1228 | * This function releases anything requested by | ||
| 1229 | * vmbus_allocate_mmio(). | ||
| 1230 | */ | ||
| 1231 | void vmbus_free_mmio(resource_size_t start, resource_size_t size) | ||
| 1232 | { | ||
| 1233 | struct resource *iter; | ||
| 1234 | |||
| 1235 | down(&hyperv_mmio_lock); | ||
| 1236 | for (iter = hyperv_mmio; iter; iter = iter->sibling) { | ||
| 1237 | if ((iter->start >= start + size) || (iter->end <= start)) | ||
| 1238 | continue; | ||
| 1239 | |||
| 1240 | __release_region(iter, start, size); | ||
| 1241 | } | ||
| 1242 | release_mem_region(start, size); | ||
| 1243 | up(&hyperv_mmio_lock); | ||
| 1244 | |||
| 1245 | } | ||
| 1246 | EXPORT_SYMBOL_GPL(vmbus_free_mmio); | ||
| 1247 | |||
| 1248 | /** | ||
| 1183 | * vmbus_cpu_number_to_vp_number() - Map CPU to VP. | 1249 | * vmbus_cpu_number_to_vp_number() - Map CPU to VP. |
| 1184 | * @cpu_number: CPU number in Linux terms | 1250 | * @cpu_number: CPU number in Linux terms |
| 1185 | * | 1251 | * |
| @@ -1219,8 +1285,10 @@ static int vmbus_acpi_add(struct acpi_device *device) | |||
| 1219 | 1285 | ||
| 1220 | if (ACPI_FAILURE(result)) | 1286 | if (ACPI_FAILURE(result)) |
| 1221 | continue; | 1287 | continue; |
| 1222 | if (hyperv_mmio) | 1288 | if (hyperv_mmio) { |
| 1289 | vmbus_reserve_fb(); | ||
| 1223 | break; | 1290 | break; |
| 1291 | } | ||
| 1224 | } | 1292 | } |
| 1225 | ret_val = 0; | 1293 | ret_val = 0; |
| 1226 | 1294 | ||
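Taken together, the vmbus_drv.c hunks serialize MMIO handling behind hyperv_mmio_lock and pair vmbus_allocate_mmio() with the new vmbus_free_mmio(). A minimal usage sketch for a child driver follows; only the two function signatures are taken from the patch, while the hv_dev pointer and the size/alignment values are illustrative:

	struct resource *res;
	int ret;

	/* claim 1 MiB of MMIO space anywhere in the parent's ranges,
	 * 4 KiB aligned, with no overlap of the frame buffer allowed */
	ret = vmbus_allocate_mmio(&res, hv_dev, 0, -1, 0x100000, 0x1000, false);
	if (ret)
		return ret;

	/* ... ioremap and use the range ... */

	/* releases both the shadow claim and the exclusive region */
	vmbus_free_mmio(res->start, resource_size(res));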
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index db0541031c72..130cb2114059 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig | |||
| @@ -78,4 +78,15 @@ config CORESIGHT_QCOM_REPLICATOR | |||
| 78 | programmable ATB replicator sends the ATB trace stream from the | 78 | programmable ATB replicator sends the ATB trace stream from the |
| 79 | ETB/ETF to the TPIUi and ETR. | 79 | ETB/ETF to the TPIUi and ETR. |
| 80 | 80 | ||
| 81 | config CORESIGHT_STM | ||
| 82 | bool "CoreSight System Trace Macrocell driver" | ||
| 83 | depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64 | ||
| 84 | select CORESIGHT_LINKS_AND_SINKS | ||
| 85 | select STM | ||
| 86 | help | ||
| 87 | This driver provides support for hardware-assisted software | ||
| 88 | instrumentation based tracing. This is primarily used for | ||
| 89 | logging useful software events or data coming from various entities | ||
| 90 | in the system, possibly running different OSs. | ||
| 91 | |||
| 81 | endif | 92 | endif |
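For completeness, a config fragment that would enable the new driver; CONFIG_CORESIGHT_STM and the symbols it depends on or selects are taken from the entry above, and enabling any further CoreSight sink/link drivers is left to the platform:

	CONFIG_CORESIGHT=y
	CONFIG_CORESIGHT_LINKS_AND_SINKS=y
	CONFIG_STM=y
	CONFIG_CORESIGHT_STM=y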
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index cf8c6d689747..af480d9c1441 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile | |||
| @@ -1,15 +1,18 @@ | |||
| 1 | # | 1 | # |
| 2 | # Makefile for CoreSight drivers. | 2 | # Makefile for CoreSight drivers. |
| 3 | # | 3 | # |
| 4 | obj-$(CONFIG_CORESIGHT) += coresight.o | 4 | obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o |
| 5 | obj-$(CONFIG_OF) += of_coresight.o | 5 | obj-$(CONFIG_OF) += of_coresight.o |
| 6 | obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o | 6 | obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \ |
| 7 | coresight-tmc-etf.o \ | ||
| 8 | coresight-tmc-etr.o | ||
| 7 | obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o | 9 | obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o |
| 8 | obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o | 10 | obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o |
| 9 | obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ | 11 | obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ |
| 10 | coresight-replicator.o | 12 | coresight-replicator.o |
| 11 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \ | 13 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \ |
| 12 | coresight-etm3x-sysfs.o \ | 14 | coresight-etm3x-sysfs.o |
| 13 | coresight-etm-perf.o | 15 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \ |
| 14 | obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o | 16 | coresight-etm4x-sysfs.o |
| 15 | obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o | 17 | obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o |
| 18 | obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o | ||
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index acbce79934d6..4d20b0be0c0b 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c | |||
| @@ -71,26 +71,6 @@ | |||
| 71 | #define ETB_FRAME_SIZE_WORDS 4 | 71 | #define ETB_FRAME_SIZE_WORDS 4 |
| 72 | 72 | ||
| 73 | /** | 73 | /** |
| 74 | * struct cs_buffer - keep track of a recording session' specifics | ||
| 75 | * @cur: index of the current buffer | ||
| 76 | * @nr_pages: max number of pages granted to us | ||
| 77 | * @offset: offset within the current buffer | ||
| 78 | * @data_size: how much we collected in this run | ||
| 79 | * @lost: other than zero if we had a HW buffer wrap around | ||
| 80 | * @snapshot: is this run in snapshot mode | ||
| 81 | * @data_pages: a handle the ring buffer | ||
| 82 | */ | ||
| 83 | struct cs_buffers { | ||
| 84 | unsigned int cur; | ||
| 85 | unsigned int nr_pages; | ||
| 86 | unsigned long offset; | ||
| 87 | local_t data_size; | ||
| 88 | local_t lost; | ||
| 89 | bool snapshot; | ||
| 90 | void **data_pages; | ||
| 91 | }; | ||
| 92 | |||
| 93 | /** | ||
| 94 | * struct etb_drvdata - specifics associated to an ETB component | 74 | * struct etb_drvdata - specifics associated to an ETB component |
| 95 | * @base: memory mapped base address for this component. | 75 | * @base: memory mapped base address for this component. |
| 96 | * @dev: the device entity associated to this component. | 76 | * @dev: the device entity associated to this component. |
| @@ -440,7 +420,7 @@ static void etb_update_buffer(struct coresight_device *csdev, | |||
| 440 | u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1); | 420 | u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1); |
| 441 | 421 | ||
| 442 | /* The new read pointer must be frame size aligned */ | 422 | /* The new read pointer must be frame size aligned */ |
| 443 | to_read -= handle->size & mask; | 423 | to_read = handle->size & mask; |
| 444 | /* | 424 | /* |
| 445 | * Move the RAM read pointer up, keeping in mind that | 425 | * Move the RAM read pointer up, keeping in mind that |
| 446 | * everything is in frame size units. | 426 | * everything is in frame size units. |
| @@ -448,7 +428,8 @@ static void etb_update_buffer(struct coresight_device *csdev, | |||
| 448 | read_ptr = (write_ptr + drvdata->buffer_depth) - | 428 | read_ptr = (write_ptr + drvdata->buffer_depth) - |
| 449 | to_read / ETB_FRAME_SIZE_WORDS; | 429 | to_read / ETB_FRAME_SIZE_WORDS; |
| 450 | /* Wrap around if need be */ | 430 | /* Wrap around if need be */ |
| 451 | read_ptr &= ~(drvdata->buffer_depth - 1); | 431 | if (read_ptr > (drvdata->buffer_depth - 1)) |
| 432 | read_ptr -= drvdata->buffer_depth; | ||
| 452 | /* let the decoder know we've skipped ahead */ | 433 | /* let the decoder know we've skipped ahead */ |
| 453 | local_inc(&buf->lost); | 434 | local_inc(&buf->lost); |
| 454 | } | 435 | } |
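Worked example for the wrap-around fix above, with illustrative values: take buffer_depth = 0x2000 words, write_ptr = 0x10 and to_read / ETB_FRAME_SIZE_WORDS = 0x8. The intermediate read_ptr is 0x10 + 0x2000 - 0x8 = 0x2008, one frame beyond the RAM. Masking with ~(buffer_depth - 1) as the old code did produces 0x2000, which is still out of range, whereas the conditional subtraction of buffer_depth yields the expected wrapped pointer 0x8.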
| @@ -579,47 +560,29 @@ static const struct file_operations etb_fops = { | |||
| 579 | .llseek = no_llseek, | 560 | .llseek = no_llseek, |
| 580 | }; | 561 | }; |
| 581 | 562 | ||
| 582 | static ssize_t status_show(struct device *dev, | 563 | #define coresight_etb10_simple_func(name, offset) \ |
| 583 | struct device_attribute *attr, char *buf) | 564 | coresight_simple_func(struct etb_drvdata, name, offset) |
| 584 | { | 565 | |
| 585 | unsigned long flags; | 566 | coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG); |
| 586 | u32 etb_rdr, etb_sr, etb_rrp, etb_rwp; | 567 | coresight_etb10_simple_func(sts, ETB_STATUS_REG); |
| 587 | u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr; | 568 | coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER); |
| 588 | struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); | 569 | coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER); |
| 589 | 570 | coresight_etb10_simple_func(trg, ETB_TRG); | |
| 590 | pm_runtime_get_sync(drvdata->dev); | 571 | coresight_etb10_simple_func(ctl, ETB_CTL_REG); |
| 591 | spin_lock_irqsave(&drvdata->spinlock, flags); | 572 | coresight_etb10_simple_func(ffsr, ETB_FFSR); |
| 592 | CS_UNLOCK(drvdata->base); | 573 | coresight_etb10_simple_func(ffcr, ETB_FFCR); |
| 593 | 574 | ||
| 594 | etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG); | 575 | static struct attribute *coresight_etb_mgmt_attrs[] = { |
| 595 | etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG); | 576 | &dev_attr_rdp.attr, |
| 596 | etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); | 577 | &dev_attr_sts.attr, |
| 597 | etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); | 578 | &dev_attr_rrp.attr, |
| 598 | etb_trg = readl_relaxed(drvdata->base + ETB_TRG); | 579 | &dev_attr_rwp.attr, |
| 599 | etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG); | 580 | &dev_attr_trg.attr, |
| 600 | etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR); | 581 | &dev_attr_ctl.attr, |
| 601 | etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR); | 582 | &dev_attr_ffsr.attr, |
| 602 | 583 | &dev_attr_ffcr.attr, | |
| 603 | CS_LOCK(drvdata->base); | 584 | NULL, |
| 604 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | 585 | }; |
| 605 | |||
| 606 | pm_runtime_put(drvdata->dev); | ||
| 607 | |||
| 608 | return sprintf(buf, | ||
| 609 | "Depth:\t\t0x%x\n" | ||
| 610 | "Status:\t\t0x%x\n" | ||
| 611 | "RAM read ptr:\t0x%x\n" | ||
| 612 | "RAM wrt ptr:\t0x%x\n" | ||
| 613 | "Trigger cnt:\t0x%x\n" | ||
| 614 | "Control:\t0x%x\n" | ||
| 615 | "Flush status:\t0x%x\n" | ||
| 616 | "Flush ctrl:\t0x%x\n", | ||
| 617 | etb_rdr, etb_sr, etb_rrp, etb_rwp, | ||
| 618 | etb_trg, etb_cr, etb_ffsr, etb_ffcr); | ||
| 619 | |||
| 620 | return -EINVAL; | ||
| 621 | } | ||
| 622 | static DEVICE_ATTR_RO(status); | ||
| 623 | 586 | ||
| 624 | static ssize_t trigger_cntr_show(struct device *dev, | 587 | static ssize_t trigger_cntr_show(struct device *dev, |
| 625 | struct device_attribute *attr, char *buf) | 588 | struct device_attribute *attr, char *buf) |
| @@ -649,10 +612,23 @@ static DEVICE_ATTR_RW(trigger_cntr); | |||
| 649 | 612 | ||
| 650 | static struct attribute *coresight_etb_attrs[] = { | 613 | static struct attribute *coresight_etb_attrs[] = { |
| 651 | &dev_attr_trigger_cntr.attr, | 614 | &dev_attr_trigger_cntr.attr, |
| 652 | &dev_attr_status.attr, | ||
| 653 | NULL, | 615 | NULL, |
| 654 | }; | 616 | }; |
| 655 | ATTRIBUTE_GROUPS(coresight_etb); | 617 | |
| 618 | static const struct attribute_group coresight_etb_group = { | ||
| 619 | .attrs = coresight_etb_attrs, | ||
| 620 | }; | ||
| 621 | |||
| 622 | static const struct attribute_group coresight_etb_mgmt_group = { | ||
| 623 | .attrs = coresight_etb_mgmt_attrs, | ||
| 624 | .name = "mgmt", | ||
| 625 | }; | ||
| 626 | |||
| 627 | const struct attribute_group *coresight_etb_groups[] = { | ||
| 628 | &coresight_etb_group, | ||
| 629 | &coresight_etb_mgmt_group, | ||
| 630 | NULL, | ||
| 631 | }; | ||
| 656 | 632 | ||
| 657 | static int etb_probe(struct amba_device *adev, const struct amba_id *id) | 633 | static int etb_probe(struct amba_device *adev, const struct amba_id *id) |
| 658 | { | 634 | { |
| @@ -729,7 +705,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 729 | if (ret) | 705 | if (ret) |
| 730 | goto err_misc_register; | 706 | goto err_misc_register; |
| 731 | 707 | ||
| 732 | dev_info(dev, "ETB initialized\n"); | ||
| 733 | return 0; | 708 | return 0; |
| 734 | 709 | ||
| 735 | err_misc_register: | 710 | err_misc_register: |
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c index cbb4046c1070..02d4b629891f 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c | |||
| @@ -1221,26 +1221,19 @@ static struct attribute *coresight_etm_attrs[] = { | |||
| 1221 | NULL, | 1221 | NULL, |
| 1222 | }; | 1222 | }; |
| 1223 | 1223 | ||
| 1224 | #define coresight_simple_func(name, offset) \ | 1224 | #define coresight_etm3x_simple_func(name, offset) \ |
| 1225 | static ssize_t name##_show(struct device *_dev, \ | 1225 | coresight_simple_func(struct etm_drvdata, name, offset) |
| 1226 | struct device_attribute *attr, char *buf) \ | 1226 | |
| 1227 | { \ | 1227 | coresight_etm3x_simple_func(etmccr, ETMCCR); |
| 1228 | struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ | 1228 | coresight_etm3x_simple_func(etmccer, ETMCCER); |
| 1229 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ | 1229 | coresight_etm3x_simple_func(etmscr, ETMSCR); |
| 1230 | readl_relaxed(drvdata->base + offset)); \ | 1230 | coresight_etm3x_simple_func(etmidr, ETMIDR); |
| 1231 | } \ | 1231 | coresight_etm3x_simple_func(etmcr, ETMCR); |
| 1232 | DEVICE_ATTR_RO(name) | 1232 | coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR); |
| 1233 | 1233 | coresight_etm3x_simple_func(etmteevr, ETMTEEVR); | |
| 1234 | coresight_simple_func(etmccr, ETMCCR); | 1234 | coresight_etm3x_simple_func(etmtssvr, ETMTSSCR); |
| 1235 | coresight_simple_func(etmccer, ETMCCER); | 1235 | coresight_etm3x_simple_func(etmtecr1, ETMTECR1); |
| 1236 | coresight_simple_func(etmscr, ETMSCR); | 1236 | coresight_etm3x_simple_func(etmtecr2, ETMTECR2); |
| 1237 | coresight_simple_func(etmidr, ETMIDR); | ||
| 1238 | coresight_simple_func(etmcr, ETMCR); | ||
| 1239 | coresight_simple_func(etmtraceidr, ETMTRACEIDR); | ||
| 1240 | coresight_simple_func(etmteevr, ETMTEEVR); | ||
| 1241 | coresight_simple_func(etmtssvr, ETMTSSCR); | ||
| 1242 | coresight_simple_func(etmtecr1, ETMTECR1); | ||
| 1243 | coresight_simple_func(etmtecr2, ETMTECR2); | ||
| 1244 | 1237 | ||
| 1245 | static struct attribute *coresight_etm_mgmt_attrs[] = { | 1238 | static struct attribute *coresight_etm_mgmt_attrs[] = { |
| 1246 | &dev_attr_etmccr.attr, | 1239 | &dev_attr_etmccr.attr, |
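Both the etb10 and etm3x hunks above now rely on a shared coresight_simple_func(type, name, offset) macro that takes the drvdata type as a parameter. Its definition is not part of this excerpt; presumably it lives in a common header (coresight-priv.h is assumed here) and, following the per-driver macro removed above, expands roughly to:

	#define coresight_simple_func(type, name, offset)			\
	static ssize_t name##_show(struct device *_dev,			\
				   struct device_attribute *attr, char *buf)	\
	{									\
		type *drvdata = dev_get_drvdata(_dev->parent);		\
		return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
				 readl_relaxed(drvdata->base + offset));	\
	}									\
	static DEVICE_ATTR_RO(name)

With this, coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG) generates the read-only rdp attribute placed in the etb10 mgmt group.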
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c new file mode 100644 index 000000000000..7c84308c5564 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c | |||
| @@ -0,0 +1,2126 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
| 3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/pm_runtime.h> | ||
| 19 | #include <linux/sysfs.h> | ||
| 20 | #include "coresight-etm4x.h" | ||
| 21 | |||
| 22 | static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) | ||
| 23 | { | ||
| 24 | u8 idx; | ||
| 25 | struct etmv4_config *config = &drvdata->config; | ||
| 26 | |||
| 27 | idx = config->addr_idx; | ||
| 28 | |||
| 29 | /* | ||
| 30 | * TRCACATRn.TYPE bit[1:0]: type of comparison | ||
| 31 | * the trace unit performs | ||
| 32 | */ | ||
| 33 | if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) { | ||
| 34 | if (idx % 2 != 0) | ||
| 35 | return -EINVAL; | ||
| 36 | |||
| 37 | /* | ||
| 38 | * We are performing instruction address comparison. Set the | ||
| 39 | * relevant bit of ViewInst Include/Exclude Control register | ||
| 40 | * for corresponding address comparator pair. | ||
| 41 | */ | ||
| 42 | if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE || | ||
| 43 | config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE) | ||
| 44 | return -EINVAL; | ||
| 45 | |||
| 46 | if (exclude == true) { | ||
| 47 | /* | ||
| 48 | * Set exclude bit and unset the include bit | ||
| 49 | * corresponding to comparator pair | ||
| 50 | */ | ||
| 51 | config->viiectlr |= BIT(idx / 2 + 16); | ||
| 52 | config->viiectlr &= ~BIT(idx / 2); | ||
| 53 | } else { | ||
| 54 | /* | ||
| 55 | * Set include bit and unset exclude bit | ||
| 56 | * corresponding to comparator pair | ||
| 57 | */ | ||
| 58 | config->viiectlr |= BIT(idx / 2); | ||
| 59 | config->viiectlr &= ~BIT(idx / 2 + 16); | ||
| 60 | } | ||
| 61 | } | ||
| 62 | return 0; | ||
| 63 | } | ||
| 64 | |||
| 65 | static ssize_t nr_pe_cmp_show(struct device *dev, | ||
| 66 | struct device_attribute *attr, | ||
| 67 | char *buf) | ||
| 68 | { | ||
| 69 | unsigned long val; | ||
| 70 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 71 | |||
| 72 | val = drvdata->nr_pe_cmp; | ||
| 73 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 74 | } | ||
| 75 | static DEVICE_ATTR_RO(nr_pe_cmp); | ||
| 76 | |||
| 77 | static ssize_t nr_addr_cmp_show(struct device *dev, | ||
| 78 | struct device_attribute *attr, | ||
| 79 | char *buf) | ||
| 80 | { | ||
| 81 | unsigned long val; | ||
| 82 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 83 | |||
| 84 | val = drvdata->nr_addr_cmp; | ||
| 85 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 86 | } | ||
| 87 | static DEVICE_ATTR_RO(nr_addr_cmp); | ||
| 88 | |||
| 89 | static ssize_t nr_cntr_show(struct device *dev, | ||
| 90 | struct device_attribute *attr, | ||
| 91 | char *buf) | ||
| 92 | { | ||
| 93 | unsigned long val; | ||
| 94 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 95 | |||
| 96 | val = drvdata->nr_cntr; | ||
| 97 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 98 | } | ||
| 99 | static DEVICE_ATTR_RO(nr_cntr); | ||
| 100 | |||
| 101 | static ssize_t nr_ext_inp_show(struct device *dev, | ||
| 102 | struct device_attribute *attr, | ||
| 103 | char *buf) | ||
| 104 | { | ||
| 105 | unsigned long val; | ||
| 106 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 107 | |||
| 108 | val = drvdata->nr_ext_inp; | ||
| 109 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 110 | } | ||
| 111 | static DEVICE_ATTR_RO(nr_ext_inp); | ||
| 112 | |||
| 113 | static ssize_t numcidc_show(struct device *dev, | ||
| 114 | struct device_attribute *attr, | ||
| 115 | char *buf) | ||
| 116 | { | ||
| 117 | unsigned long val; | ||
| 118 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 119 | |||
| 120 | val = drvdata->numcidc; | ||
| 121 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 122 | } | ||
| 123 | static DEVICE_ATTR_RO(numcidc); | ||
| 124 | |||
| 125 | static ssize_t numvmidc_show(struct device *dev, | ||
| 126 | struct device_attribute *attr, | ||
| 127 | char *buf) | ||
| 128 | { | ||
| 129 | unsigned long val; | ||
| 130 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 131 | |||
| 132 | val = drvdata->numvmidc; | ||
| 133 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 134 | } | ||
| 135 | static DEVICE_ATTR_RO(numvmidc); | ||
| 136 | |||
| 137 | static ssize_t nrseqstate_show(struct device *dev, | ||
| 138 | struct device_attribute *attr, | ||
| 139 | char *buf) | ||
| 140 | { | ||
| 141 | unsigned long val; | ||
| 142 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 143 | |||
| 144 | val = drvdata->nrseqstate; | ||
| 145 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 146 | } | ||
| 147 | static DEVICE_ATTR_RO(nrseqstate); | ||
| 148 | |||
| 149 | static ssize_t nr_resource_show(struct device *dev, | ||
| 150 | struct device_attribute *attr, | ||
| 151 | char *buf) | ||
| 152 | { | ||
| 153 | unsigned long val; | ||
| 154 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 155 | |||
| 156 | val = drvdata->nr_resource; | ||
| 157 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 158 | } | ||
| 159 | static DEVICE_ATTR_RO(nr_resource); | ||
| 160 | |||
| 161 | static ssize_t nr_ss_cmp_show(struct device *dev, | ||
| 162 | struct device_attribute *attr, | ||
| 163 | char *buf) | ||
| 164 | { | ||
| 165 | unsigned long val; | ||
| 166 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 167 | |||
| 168 | val = drvdata->nr_ss_cmp; | ||
| 169 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 170 | } | ||
| 171 | static DEVICE_ATTR_RO(nr_ss_cmp); | ||
| 172 | |||
| 173 | static ssize_t reset_store(struct device *dev, | ||
| 174 | struct device_attribute *attr, | ||
| 175 | const char *buf, size_t size) | ||
| 176 | { | ||
| 177 | int i; | ||
| 178 | unsigned long val; | ||
| 179 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 180 | struct etmv4_config *config = &drvdata->config; | ||
| 181 | |||
| 182 | if (kstrtoul(buf, 16, &val)) | ||
| 183 | return -EINVAL; | ||
| 184 | |||
| 185 | spin_lock(&drvdata->spinlock); | ||
| 186 | if (val) | ||
| 187 | config->mode = 0x0; | ||
| 188 | |||
| 189 | /* Disable data tracing: do not trace load and store data transfers */ | ||
| 190 | config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE); | ||
| 191 | config->cfg &= ~(BIT(1) | BIT(2)); | ||
| 192 | |||
| 193 | /* Disable data value and data address tracing */ | ||
| 194 | config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR | | ||
| 195 | ETM_MODE_DATA_TRACE_VAL); | ||
| 196 | config->cfg &= ~(BIT(16) | BIT(17)); | ||
| 197 | |||
| 198 | /* Disable all events tracing */ | ||
| 199 | config->eventctrl0 = 0x0; | ||
| 200 | config->eventctrl1 = 0x0; | ||
| 201 | |||
| 202 | /* Disable timestamp event */ | ||
| 203 | config->ts_ctrl = 0x0; | ||
| 204 | |||
| 205 | /* Disable stalling */ | ||
| 206 | config->stall_ctrl = 0x0; | ||
| 207 | |||
| 208 | /* Reset trace synchronization period to 2^8 = 256 bytes */ | ||
| 209 | if (drvdata->syncpr == false) | ||
| 210 | config->syncfreq = 0x8; | ||
| 211 | |||
| 212 | /* | ||
| 213 | * Enable ViewInst to trace everything with start-stop logic in | ||
| 214 | * started state. ARM recommends start-stop logic is set before | ||
| 215 | * each trace run. | ||
| 216 | */ | ||
| 217 | config->vinst_ctrl |= BIT(0); | ||
| 218 | if (drvdata->nr_addr_cmp == true) { | ||
| 219 | config->mode |= ETM_MODE_VIEWINST_STARTSTOP; | ||
| 220 | /* SSSTATUS, bit[9] */ | ||
| 221 | config->vinst_ctrl |= BIT(9); | ||
| 222 | } | ||
| 223 | |||
| 224 | /* No address range filtering for ViewInst */ | ||
| 225 | config->viiectlr = 0x0; | ||
| 226 | |||
| 227 | /* No start-stop filtering for ViewInst */ | ||
| 228 | config->vissctlr = 0x0; | ||
| 229 | |||
| 230 | /* Disable seq events */ | ||
| 231 | for (i = 0; i < drvdata->nrseqstate-1; i++) | ||
| 232 | config->seq_ctrl[i] = 0x0; | ||
| 233 | config->seq_rst = 0x0; | ||
| 234 | config->seq_state = 0x0; | ||
| 235 | |||
| 236 | /* Disable external input events */ | ||
| 237 | config->ext_inp = 0x0; | ||
| 238 | |||
| 239 | config->cntr_idx = 0x0; | ||
| 240 | for (i = 0; i < drvdata->nr_cntr; i++) { | ||
| 241 | config->cntrldvr[i] = 0x0; | ||
| 242 | config->cntr_ctrl[i] = 0x0; | ||
| 243 | config->cntr_val[i] = 0x0; | ||
| 244 | } | ||
| 245 | |||
| 246 | config->res_idx = 0x0; | ||
| 247 | for (i = 0; i < drvdata->nr_resource; i++) | ||
| 248 | config->res_ctrl[i] = 0x0; | ||
| 249 | |||
| 250 | for (i = 0; i < drvdata->nr_ss_cmp; i++) { | ||
| 251 | config->ss_ctrl[i] = 0x0; | ||
| 252 | config->ss_pe_cmp[i] = 0x0; | ||
| 253 | } | ||
| 254 | |||
| 255 | config->addr_idx = 0x0; | ||
| 256 | for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { | ||
| 257 | config->addr_val[i] = 0x0; | ||
| 258 | config->addr_acc[i] = 0x0; | ||
| 259 | config->addr_type[i] = ETM_ADDR_TYPE_NONE; | ||
| 260 | } | ||
| 261 | |||
| 262 | config->ctxid_idx = 0x0; | ||
| 263 | for (i = 0; i < drvdata->numcidc; i++) { | ||
| 264 | config->ctxid_pid[i] = 0x0; | ||
| 265 | config->ctxid_vpid[i] = 0x0; | ||
| 266 | } | ||
| 267 | |||
| 268 | config->ctxid_mask0 = 0x0; | ||
| 269 | config->ctxid_mask1 = 0x0; | ||
| 270 | |||
| 271 | config->vmid_idx = 0x0; | ||
| 272 | for (i = 0; i < drvdata->numvmidc; i++) | ||
| 273 | config->vmid_val[i] = 0x0; | ||
| 274 | config->vmid_mask0 = 0x0; | ||
| 275 | config->vmid_mask1 = 0x0; | ||
| 276 | |||
| 277 | drvdata->trcid = drvdata->cpu + 1; | ||
| 278 | |||
| 279 | spin_unlock(&drvdata->spinlock); | ||
| 280 | |||
| 281 | return size; | ||
| 282 | } | ||
| 283 | static DEVICE_ATTR_WO(reset); | ||
| 284 | |||
| 285 | static ssize_t mode_show(struct device *dev, | ||
| 286 | struct device_attribute *attr, | ||
| 287 | char *buf) | ||
| 288 | { | ||
| 289 | unsigned long val; | ||
| 290 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 291 | struct etmv4_config *config = &drvdata->config; | ||
| 292 | |||
| 293 | val = config->mode; | ||
| 294 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 295 | } | ||
| 296 | |||
| 297 | static ssize_t mode_store(struct device *dev, | ||
| 298 | struct device_attribute *attr, | ||
| 299 | const char *buf, size_t size) | ||
| 300 | { | ||
| 301 | unsigned long val, mode; | ||
| 302 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 303 | struct etmv4_config *config = &drvdata->config; | ||
| 304 | |||
| 305 | if (kstrtoul(buf, 16, &val)) | ||
| 306 | return -EINVAL; | ||
| 307 | |||
| 308 | spin_lock(&drvdata->spinlock); | ||
| 309 | config->mode = val & ETMv4_MODE_ALL; | ||
| 310 | |||
| 311 | if (config->mode & ETM_MODE_EXCLUDE) | ||
| 312 | etm4_set_mode_exclude(drvdata, true); | ||
| 313 | else | ||
| 314 | etm4_set_mode_exclude(drvdata, false); | ||
| 315 | |||
| 316 | if (drvdata->instrp0 == true) { | ||
| 317 | /* start by clearing instruction P0 field */ | ||
| 318 | config->cfg &= ~(BIT(1) | BIT(2)); | ||
| 319 | if (config->mode & ETM_MODE_LOAD) | ||
| 320 | /* 0b01 Trace load instructions as P0 instructions */ | ||
| 321 | config->cfg |= BIT(1); | ||
| 322 | if (config->mode & ETM_MODE_STORE) | ||
| 323 | /* 0b10 Trace store instructions as P0 instructions */ | ||
| 324 | config->cfg |= BIT(2); | ||
| 325 | if (config->mode & ETM_MODE_LOAD_STORE) | ||
| 326 | /* | ||
| 327 | * 0b11 Trace load and store instructions | ||
| 328 | * as P0 instructions | ||
| 329 | */ | ||
| 330 | config->cfg |= BIT(1) | BIT(2); | ||
| 331 | } | ||
| 332 | |||
| 333 | /* bit[3], Branch broadcast mode */ | ||
| 334 | if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true)) | ||
| 335 | config->cfg |= BIT(3); | ||
| 336 | else | ||
| 337 | config->cfg &= ~BIT(3); | ||
| 338 | |||
| 339 | /* bit[4], Cycle counting instruction trace bit */ | ||
| 340 | if ((config->mode & ETMv4_MODE_CYCACC) && | ||
| 341 | (drvdata->trccci == true)) | ||
| 342 | config->cfg |= BIT(4); | ||
| 343 | else | ||
| 344 | config->cfg &= ~BIT(4); | ||
| 345 | |||
| 346 | /* bit[6], Context ID tracing bit */ | ||
| 347 | if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size)) | ||
| 348 | config->cfg |= BIT(6); | ||
| 349 | else | ||
| 350 | config->cfg &= ~BIT(6); | ||
| 351 | |||
| 352 | if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size)) | ||
| 353 | config->cfg |= BIT(7); | ||
| 354 | else | ||
| 355 | config->cfg &= ~BIT(7); | ||
| 356 | |||
| 357 | /* bits[10:8], Conditional instruction tracing bit */ | ||
| 358 | mode = ETM_MODE_COND(config->mode); | ||
| 359 | if (drvdata->trccond == true) { | ||
| 360 | config->cfg &= ~(BIT(8) | BIT(9) | BIT(10)); | ||
| 361 | config->cfg |= mode << 8; | ||
| 362 | } | ||
| 363 | |||
| 364 | /* bit[11], Global timestamp tracing bit */ | ||
| 365 | if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size)) | ||
| 366 | config->cfg |= BIT(11); | ||
| 367 | else | ||
| 368 | config->cfg &= ~BIT(11); | ||
| 369 | |||
| 370 | /* bit[12], Return stack enable bit */ | ||
| 371 | if ((config->mode & ETM_MODE_RETURNSTACK) && | ||
| 372 | (drvdata->retstack == true)) | ||
| 373 | config->cfg |= BIT(12); | ||
| 374 | else | ||
| 375 | config->cfg &= ~BIT(12); | ||
| 376 | |||
| 377 | /* bits[14:13], Q element enable field */ | ||
| 378 | mode = ETM_MODE_QELEM(config->mode); | ||
| 379 | /* start by clearing QE bits */ | ||
| 380 | config->cfg &= ~(BIT(13) | BIT(14)); | ||
| 381 | /* if supported, Q elements with instruction counts are enabled */ | ||
| 382 | if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) | ||
| 383 | config->cfg |= BIT(13); | ||
| 384 | /* | ||
| 385 | * if supported, Q elements with and without instruction | ||
| 386 | * counts are enabled | ||
| 387 | */ | ||
| 388 | if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) | ||
| 389 | config->cfg |= BIT(14); | ||
| 390 | |||
| 391 | /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ | ||
| 392 | if ((config->mode & ETM_MODE_ATB_TRIGGER) && | ||
| 393 | (drvdata->atbtrig == true)) | ||
| 394 | config->eventctrl1 |= BIT(11); | ||
| 395 | else | ||
| 396 | config->eventctrl1 &= ~BIT(11); | ||
| 397 | |||
| 398 | /* bit[12], Low-power state behavior override bit */ | ||
| 399 | if ((config->mode & ETM_MODE_LPOVERRIDE) && | ||
| 400 | (drvdata->lpoverride == true)) | ||
| 401 | config->eventctrl1 |= BIT(12); | ||
| 402 | else | ||
| 403 | config->eventctrl1 &= ~BIT(12); | ||
| 404 | |||
| 405 | /* bit[8], Instruction stall bit */ | ||
| 406 | if (config->mode & ETM_MODE_ISTALL_EN) | ||
| 407 | config->stall_ctrl |= BIT(8); | ||
| 408 | else | ||
| 409 | config->stall_ctrl &= ~BIT(8); | ||
| 410 | |||
| 411 | /* bit[10], Prioritize instruction trace bit */ | ||
| 412 | if (config->mode & ETM_MODE_INSTPRIO) | ||
| 413 | config->stall_ctrl |= BIT(10); | ||
| 414 | else | ||
| 415 | config->stall_ctrl &= ~BIT(10); | ||
| 416 | |||
| 417 | /* bit[13], Trace overflow prevention bit */ | ||
| 418 | if ((config->mode & ETM_MODE_NOOVERFLOW) && | ||
| 419 | (drvdata->nooverflow == true)) | ||
| 420 | config->stall_ctrl |= BIT(13); | ||
| 421 | else | ||
| 422 | config->stall_ctrl &= ~BIT(13); | ||
| 423 | |||
| 424 | /* bit[9] Start/stop logic control bit */ | ||
| 425 | if (config->mode & ETM_MODE_VIEWINST_STARTSTOP) | ||
| 426 | config->vinst_ctrl |= BIT(9); | ||
| 427 | else | ||
| 428 | config->vinst_ctrl &= ~BIT(9); | ||
| 429 | |||
| 430 | /* bit[10], Whether a trace unit must trace a Reset exception */ | ||
| 431 | if (config->mode & ETM_MODE_TRACE_RESET) | ||
| 432 | config->vinst_ctrl |= BIT(10); | ||
| 433 | else | ||
| 434 | config->vinst_ctrl &= ~BIT(10); | ||
| 435 | |||
| 436 | /* bit[11], Whether a trace unit must trace a system error exception */ | ||
| 437 | if ((config->mode & ETM_MODE_TRACE_ERR) && | ||
| 438 | (drvdata->trc_error == true)) | ||
| 439 | config->vinst_ctrl |= BIT(11); | ||
| 440 | else | ||
| 441 | config->vinst_ctrl &= ~BIT(11); | ||
| 442 | |||
| 443 | if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) | ||
| 444 | etm4_config_trace_mode(config); | ||
| 445 | |||
| 446 | spin_unlock(&drvdata->spinlock); | ||
| 447 | |||
| 448 | return size; | ||
| 449 | } | ||
| 450 | static DEVICE_ATTR_RW(mode); | ||
| 451 | |||
| 452 | static ssize_t pe_show(struct device *dev, | ||
| 453 | struct device_attribute *attr, | ||
| 454 | char *buf) | ||
| 455 | { | ||
| 456 | unsigned long val; | ||
| 457 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 458 | struct etmv4_config *config = &drvdata->config; | ||
| 459 | |||
| 460 | val = config->pe_sel; | ||
| 461 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 462 | } | ||
| 463 | |||
| 464 | static ssize_t pe_store(struct device *dev, | ||
| 465 | struct device_attribute *attr, | ||
| 466 | const char *buf, size_t size) | ||
| 467 | { | ||
| 468 | unsigned long val; | ||
| 469 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 470 | struct etmv4_config *config = &drvdata->config; | ||
| 471 | |||
| 472 | if (kstrtoul(buf, 16, &val)) | ||
| 473 | return -EINVAL; | ||
| 474 | |||
| 475 | spin_lock(&drvdata->spinlock); | ||
| 476 | if (val > drvdata->nr_pe) { | ||
| 477 | spin_unlock(&drvdata->spinlock); | ||
| 478 | return -EINVAL; | ||
| 479 | } | ||
| 480 | |||
| 481 | config->pe_sel = val; | ||
| 482 | spin_unlock(&drvdata->spinlock); | ||
| 483 | return size; | ||
| 484 | } | ||
| 485 | static DEVICE_ATTR_RW(pe); | ||
| 486 | |||
| 487 | static ssize_t event_show(struct device *dev, | ||
| 488 | struct device_attribute *attr, | ||
| 489 | char *buf) | ||
| 490 | { | ||
| 491 | unsigned long val; | ||
| 492 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 493 | struct etmv4_config *config = &drvdata->config; | ||
| 494 | |||
| 495 | val = config->eventctrl0; | ||
| 496 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 497 | } | ||
| 498 | |||
| 499 | static ssize_t event_store(struct device *dev, | ||
| 500 | struct device_attribute *attr, | ||
| 501 | const char *buf, size_t size) | ||
| 502 | { | ||
| 503 | unsigned long val; | ||
| 504 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 505 | struct etmv4_config *config = &drvdata->config; | ||
| 506 | |||
| 507 | if (kstrtoul(buf, 16, &val)) | ||
| 508 | return -EINVAL; | ||
| 509 | |||
| 510 | spin_lock(&drvdata->spinlock); | ||
| 511 | switch (drvdata->nr_event) { | ||
| 512 | case 0x0: | ||
| 513 | /* EVENT0, bits[7:0] */ | ||
| 514 | config->eventctrl0 = val & 0xFF; | ||
| 515 | break; | ||
| 516 | case 0x1: | ||
| 517 | /* EVENT1, bits[15:8] */ | ||
| 518 | config->eventctrl0 = val & 0xFFFF; | ||
| 519 | break; | ||
| 520 | case 0x2: | ||
| 521 | /* EVENT2, bits[23:16] */ | ||
| 522 | config->eventctrl0 = val & 0xFFFFFF; | ||
| 523 | break; | ||
| 524 | case 0x3: | ||
| 525 | /* EVENT3, bits[31:24] */ | ||
| 526 | config->eventctrl0 = val; | ||
| 527 | break; | ||
| 528 | default: | ||
| 529 | break; | ||
| 530 | } | ||
| 531 | spin_unlock(&drvdata->spinlock); | ||
| 532 | return size; | ||
| 533 | } | ||
| 534 | static DEVICE_ATTR_RW(event); | ||
| 535 | |||
| 536 | static ssize_t event_instren_show(struct device *dev, | ||
| 537 | struct device_attribute *attr, | ||
| 538 | char *buf) | ||
| 539 | { | ||
| 540 | unsigned long val; | ||
| 541 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 542 | struct etmv4_config *config = &drvdata->config; | ||
| 543 | |||
| 544 | val = BMVAL(config->eventctrl1, 0, 3); | ||
| 545 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 546 | } | ||
| 547 | |||
| 548 | static ssize_t event_instren_store(struct device *dev, | ||
| 549 | struct device_attribute *attr, | ||
| 550 | const char *buf, size_t size) | ||
| 551 | { | ||
| 552 | unsigned long val; | ||
| 553 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 554 | struct etmv4_config *config = &drvdata->config; | ||
| 555 | |||
| 556 | if (kstrtoul(buf, 16, &val)) | ||
| 557 | return -EINVAL; | ||
| 558 | |||
| 559 | spin_lock(&drvdata->spinlock); | ||
| 560 | /* start by clearing all instruction event enable bits */ | ||
| 561 | config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); | ||
| 562 | switch (drvdata->nr_event) { | ||
| 563 | case 0x0: | ||
| 564 | /* generate Event element for event 1 */ | ||
| 565 | config->eventctrl1 |= val & BIT(1); | ||
| 566 | break; | ||
| 567 | case 0x1: | ||
| 568 | /* generate Event element for event 1 and 2 */ | ||
| 569 | config->eventctrl1 |= val & (BIT(0) | BIT(1)); | ||
| 570 | break; | ||
| 571 | case 0x2: | ||
| 572 | /* generate Event element for event 1, 2 and 3 */ | ||
| 573 | config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2)); | ||
| 574 | break; | ||
| 575 | case 0x3: | ||
| 576 | /* generate Event element for all 4 events */ | ||
| 577 | config->eventctrl1 |= val & 0xF; | ||
| 578 | break; | ||
| 579 | default: | ||
| 580 | break; | ||
| 581 | } | ||
| 582 | spin_unlock(&drvdata->spinlock); | ||
| 583 | return size; | ||
| 584 | } | ||
| 585 | static DEVICE_ATTR_RW(event_instren); | ||
| 586 | |||
| 587 | static ssize_t event_ts_show(struct device *dev, | ||
| 588 | struct device_attribute *attr, | ||
| 589 | char *buf) | ||
| 590 | { | ||
| 591 | unsigned long val; | ||
| 592 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 593 | struct etmv4_config *config = &drvdata->config; | ||
| 594 | |||
| 595 | val = config->ts_ctrl; | ||
| 596 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 597 | } | ||
| 598 | |||
| 599 | static ssize_t event_ts_store(struct device *dev, | ||
| 600 | struct device_attribute *attr, | ||
| 601 | const char *buf, size_t size) | ||
| 602 | { | ||
| 603 | unsigned long val; | ||
| 604 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 605 | struct etmv4_config *config = &drvdata->config; | ||
| 606 | |||
| 607 | if (kstrtoul(buf, 16, &val)) | ||
| 608 | return -EINVAL; | ||
| 609 | if (!drvdata->ts_size) | ||
| 610 | return -EINVAL; | ||
| 611 | |||
| 612 | config->ts_ctrl = val & ETMv4_EVENT_MASK; | ||
| 613 | return size; | ||
| 614 | } | ||
| 615 | static DEVICE_ATTR_RW(event_ts); | ||
| 616 | |||
| 617 | static ssize_t syncfreq_show(struct device *dev, | ||
| 618 | struct device_attribute *attr, | ||
| 619 | char *buf) | ||
| 620 | { | ||
| 621 | unsigned long val; | ||
| 622 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 623 | struct etmv4_config *config = &drvdata->config; | ||
| 624 | |||
| 625 | val = config->syncfreq; | ||
| 626 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 627 | } | ||
| 628 | |||
| 629 | static ssize_t syncfreq_store(struct device *dev, | ||
| 630 | struct device_attribute *attr, | ||
| 631 | const char *buf, size_t size) | ||
| 632 | { | ||
| 633 | unsigned long val; | ||
| 634 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 635 | struct etmv4_config *config = &drvdata->config; | ||
| 636 | |||
| 637 | if (kstrtoul(buf, 16, &val)) | ||
| 638 | return -EINVAL; | ||
| 639 | if (drvdata->syncpr == true) | ||
| 640 | return -EINVAL; | ||
| 641 | |||
| 642 | config->syncfreq = val & ETMv4_SYNC_MASK; | ||
| 643 | return size; | ||
| 644 | } | ||
| 645 | static DEVICE_ATTR_RW(syncfreq); | ||
| 646 | |||
| 647 | static ssize_t cyc_threshold_show(struct device *dev, | ||
| 648 | struct device_attribute *attr, | ||
| 649 | char *buf) | ||
| 650 | { | ||
| 651 | unsigned long val; | ||
| 652 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 653 | struct etmv4_config *config = &drvdata->config; | ||
| 654 | |||
| 655 | val = config->ccctlr; | ||
| 656 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 657 | } | ||
| 658 | |||
| 659 | static ssize_t cyc_threshold_store(struct device *dev, | ||
| 660 | struct device_attribute *attr, | ||
| 661 | const char *buf, size_t size) | ||
| 662 | { | ||
| 663 | unsigned long val; | ||
| 664 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 665 | struct etmv4_config *config = &drvdata->config; | ||
| 666 | |||
| 667 | if (kstrtoul(buf, 16, &val)) | ||
| 668 | return -EINVAL; | ||
| 669 | if (val < drvdata->ccitmin) | ||
| 670 | return -EINVAL; | ||
| 671 | |||
| 672 | config->ccctlr = val & ETM_CYC_THRESHOLD_MASK; | ||
| 673 | return size; | ||
| 674 | } | ||
| 675 | static DEVICE_ATTR_RW(cyc_threshold); | ||
| 676 | |||
| 677 | static ssize_t bb_ctrl_show(struct device *dev, | ||
| 678 | struct device_attribute *attr, | ||
| 679 | char *buf) | ||
| 680 | { | ||
| 681 | unsigned long val; | ||
| 682 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 683 | struct etmv4_config *config = &drvdata->config; | ||
| 684 | |||
| 685 | val = config->bb_ctrl; | ||
| 686 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 687 | } | ||
| 688 | |||
| 689 | static ssize_t bb_ctrl_store(struct device *dev, | ||
| 690 | struct device_attribute *attr, | ||
| 691 | const char *buf, size_t size) | ||
| 692 | { | ||
| 693 | unsigned long val; | ||
| 694 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 695 | struct etmv4_config *config = &drvdata->config; | ||
| 696 | |||
| 697 | if (kstrtoul(buf, 16, &val)) | ||
| 698 | return -EINVAL; | ||
| 699 | if (drvdata->trcbb == false) | ||
| 700 | return -EINVAL; | ||
| 701 | if (!drvdata->nr_addr_cmp) | ||
| 702 | return -EINVAL; | ||
| 703 | /* | ||
| 704 | * Bit[7:0] selects which address range comparator is used for | ||
| 705 | * branch broadcast control. | ||
| 706 | */ | ||
| 707 | if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) | ||
| 708 | return -EINVAL; | ||
| 709 | |||
| 710 | config->bb_ctrl = val; | ||
| 711 | return size; | ||
| 712 | } | ||
| 713 | static DEVICE_ATTR_RW(bb_ctrl); | ||
| 714 | |||
| 715 | static ssize_t event_vinst_show(struct device *dev, | ||
| 716 | struct device_attribute *attr, | ||
| 717 | char *buf) | ||
| 718 | { | ||
| 719 | unsigned long val; | ||
| 720 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 721 | struct etmv4_config *config = &drvdata->config; | ||
| 722 | |||
| 723 | val = config->vinst_ctrl & ETMv4_EVENT_MASK; | ||
| 724 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 725 | } | ||
| 726 | |||
| 727 | static ssize_t event_vinst_store(struct device *dev, | ||
| 728 | struct device_attribute *attr, | ||
| 729 | const char *buf, size_t size) | ||
| 730 | { | ||
| 731 | unsigned long val; | ||
| 732 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 733 | struct etmv4_config *config = &drvdata->config; | ||
| 734 | |||
| 735 | if (kstrtoul(buf, 16, &val)) | ||
| 736 | return -EINVAL; | ||
| 737 | |||
| 738 | spin_lock(&drvdata->spinlock); | ||
| 739 | val &= ETMv4_EVENT_MASK; | ||
| 740 | config->vinst_ctrl &= ~ETMv4_EVENT_MASK; | ||
| 741 | config->vinst_ctrl |= val; | ||
| 742 | spin_unlock(&drvdata->spinlock); | ||
| 743 | return size; | ||
| 744 | } | ||
| 745 | static DEVICE_ATTR_RW(event_vinst); | ||
| 746 | |||
| 747 | static ssize_t s_exlevel_vinst_show(struct device *dev, | ||
| 748 | struct device_attribute *attr, | ||
| 749 | char *buf) | ||
| 750 | { | ||
| 751 | unsigned long val; | ||
| 752 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 753 | struct etmv4_config *config = &drvdata->config; | ||
| 754 | |||
| 755 | val = BMVAL(config->vinst_ctrl, 16, 19); | ||
| 756 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 757 | } | ||
| 758 | |||
| 759 | static ssize_t s_exlevel_vinst_store(struct device *dev, | ||
| 760 | struct device_attribute *attr, | ||
| 761 | const char *buf, size_t size) | ||
| 762 | { | ||
| 763 | unsigned long val; | ||
| 764 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 765 | struct etmv4_config *config = &drvdata->config; | ||
| 766 | |||
| 767 | if (kstrtoul(buf, 16, &val)) | ||
| 768 | return -EINVAL; | ||
| 769 | |||
| 770 | spin_lock(&drvdata->spinlock); | ||
| 771 | /* clear all EXLEVEL_S bits (bit[18] is never implemented) */ | ||
| 772 | config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19)); | ||
| 773 | /* enable instruction tracing for corresponding exception level */ | ||
| 774 | val &= drvdata->s_ex_level; | ||
| 775 | config->vinst_ctrl |= (val << 16); | ||
| 776 | spin_unlock(&drvdata->spinlock); | ||
| 777 | return size; | ||
| 778 | } | ||
| 779 | static DEVICE_ATTR_RW(s_exlevel_vinst); | ||
| 780 | |||
| 781 | static ssize_t ns_exlevel_vinst_show(struct device *dev, | ||
| 782 | struct device_attribute *attr, | ||
| 783 | char *buf) | ||
| 784 | { | ||
| 785 | unsigned long val; | ||
| 786 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 787 | struct etmv4_config *config = &drvdata->config; | ||
| 788 | |||
| 789 | /* EXLEVEL_NS, bits[23:20] */ | ||
| 790 | val = BMVAL(config->vinst_ctrl, 20, 23); | ||
| 791 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 792 | } | ||
| 793 | |||
| 794 | static ssize_t ns_exlevel_vinst_store(struct device *dev, | ||
| 795 | struct device_attribute *attr, | ||
| 796 | const char *buf, size_t size) | ||
| 797 | { | ||
| 798 | unsigned long val; | ||
| 799 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 800 | struct etmv4_config *config = &drvdata->config; | ||
| 801 | |||
| 802 | if (kstrtoul(buf, 16, &val)) | ||
| 803 | return -EINVAL; | ||
| 804 | |||
| 805 | spin_lock(&drvdata->spinlock); | ||
| 806 | /* clear EXLEVEL_NS bits (bit[23] is never implemented) */ | ||
| 807 | config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22)); | ||
| 808 | /* enable instruction tracing for corresponding exception level */ | ||
| 809 | val &= drvdata->ns_ex_level; | ||
| 810 | config->vinst_ctrl |= (val << 20); | ||
| 811 | spin_unlock(&drvdata->spinlock); | ||
| 812 | return size; | ||
| 813 | } | ||
| 814 | static DEVICE_ATTR_RW(ns_exlevel_vinst); | ||
| 815 | |||
| 816 | static ssize_t addr_idx_show(struct device *dev, | ||
| 817 | struct device_attribute *attr, | ||
| 818 | char *buf) | ||
| 819 | { | ||
| 820 | unsigned long val; | ||
| 821 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 822 | struct etmv4_config *config = &drvdata->config; | ||
| 823 | |||
| 824 | val = config->addr_idx; | ||
| 825 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 826 | } | ||
| 827 | |||
| 828 | static ssize_t addr_idx_store(struct device *dev, | ||
| 829 | struct device_attribute *attr, | ||
| 830 | const char *buf, size_t size) | ||
| 831 | { | ||
| 832 | unsigned long val; | ||
| 833 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 834 | struct etmv4_config *config = &drvdata->config; | ||
| 835 | |||
| 836 | if (kstrtoul(buf, 16, &val)) | ||
| 837 | return -EINVAL; | ||
| 838 | if (val >= drvdata->nr_addr_cmp * 2) | ||
| 839 | return -EINVAL; | ||
| 840 | |||
| 841 | /* | ||
| 842 | * Use spinlock to ensure index doesn't change while it gets | ||
| 843 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 844 | */ | ||
| 845 | spin_lock(&drvdata->spinlock); | ||
| 846 | config->addr_idx = val; | ||
| 847 | spin_unlock(&drvdata->spinlock); | ||
| 848 | return size; | ||
| 849 | } | ||
| 850 | static DEVICE_ATTR_RW(addr_idx); | ||
| 851 | |||
| 852 | static ssize_t addr_instdatatype_show(struct device *dev, | ||
| 853 | struct device_attribute *attr, | ||
| 854 | char *buf) | ||
| 855 | { | ||
| 856 | ssize_t len; | ||
| 857 | u8 val, idx; | ||
| 858 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 859 | struct etmv4_config *config = &drvdata->config; | ||
| 860 | |||
| 861 | spin_lock(&drvdata->spinlock); | ||
| 862 | idx = config->addr_idx; | ||
| 863 | val = BMVAL(config->addr_acc[idx], 0, 1); | ||
| 864 | len = scnprintf(buf, PAGE_SIZE, "%s\n", | ||
| 865 | val == ETM_INSTR_ADDR ? "instr" : | ||
| 866 | (val == ETM_DATA_LOAD_ADDR ? "data_load" : | ||
| 867 | (val == ETM_DATA_STORE_ADDR ? "data_store" : | ||
| 868 | "data_load_store"))); | ||
| 869 | spin_unlock(&drvdata->spinlock); | ||
| 870 | return len; | ||
| 871 | } | ||
| 872 | |||
| 873 | static ssize_t addr_instdatatype_store(struct device *dev, | ||
| 874 | struct device_attribute *attr, | ||
| 875 | const char *buf, size_t size) | ||
| 876 | { | ||
| 877 | u8 idx; | ||
| 878 | char str[20] = ""; | ||
| 879 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 880 | struct etmv4_config *config = &drvdata->config; | ||
| 881 | |||
| 882 | if (strlen(buf) >= 20) | ||
| 883 | return -EINVAL; | ||
| 884 | if (sscanf(buf, "%s", str) != 1) | ||
| 885 | return -EINVAL; | ||
| 886 | |||
| 887 | spin_lock(&drvdata->spinlock); | ||
| 888 | idx = config->addr_idx; | ||
| 889 | if (!strcmp(str, "instr")) | ||
| 890 | /* TYPE, bits[1:0] */ | ||
| 891 | config->addr_acc[idx] &= ~(BIT(0) | BIT(1)); | ||
| 892 | |||
| 893 | spin_unlock(&drvdata->spinlock); | ||
| 894 | return size; | ||
| 895 | } | ||
| 896 | static DEVICE_ATTR_RW(addr_instdatatype); | ||
| 897 | |||
| 898 | static ssize_t addr_single_show(struct device *dev, | ||
| 899 | struct device_attribute *attr, | ||
| 900 | char *buf) | ||
| 901 | { | ||
| 902 | u8 idx; | ||
| 903 | unsigned long val; | ||
| 904 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 905 | struct etmv4_config *config = &drvdata->config; | ||
| 906 | |||
| 907 | idx = config->addr_idx; | ||
| 908 | spin_lock(&drvdata->spinlock); | ||
| 909 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 910 | config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
| 911 | spin_unlock(&drvdata->spinlock); | ||
| 912 | return -EPERM; | ||
| 913 | } | ||
| 914 | val = (unsigned long)config->addr_val[idx]; | ||
| 915 | spin_unlock(&drvdata->spinlock); | ||
| 916 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 917 | } | ||
| 918 | |||
| 919 | static ssize_t addr_single_store(struct device *dev, | ||
| 920 | struct device_attribute *attr, | ||
| 921 | const char *buf, size_t size) | ||
| 922 | { | ||
| 923 | u8 idx; | ||
| 924 | unsigned long val; | ||
| 925 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 926 | struct etmv4_config *config = &drvdata->config; | ||
| 927 | |||
| 928 | if (kstrtoul(buf, 16, &val)) | ||
| 929 | return -EINVAL; | ||
| 930 | |||
| 931 | spin_lock(&drvdata->spinlock); | ||
| 932 | idx = config->addr_idx; | ||
| 933 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 934 | config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
| 935 | spin_unlock(&drvdata->spinlock); | ||
| 936 | return -EPERM; | ||
| 937 | } | ||
| 938 | |||
| 939 | config->addr_val[idx] = (u64)val; | ||
| 940 | config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; | ||
| 941 | spin_unlock(&drvdata->spinlock); | ||
| 942 | return size; | ||
| 943 | } | ||
| 944 | static DEVICE_ATTR_RW(addr_single); | ||
| 945 | |||
| 946 | static ssize_t addr_range_show(struct device *dev, | ||
| 947 | struct device_attribute *attr, | ||
| 948 | char *buf) | ||
| 949 | { | ||
| 950 | u8 idx; | ||
| 951 | unsigned long val1, val2; | ||
| 952 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 953 | struct etmv4_config *config = &drvdata->config; | ||
| 954 | |||
| 955 | spin_lock(&drvdata->spinlock); | ||
| 956 | idx = config->addr_idx; | ||
| 957 | if (idx % 2 != 0) { | ||
| 958 | spin_unlock(&drvdata->spinlock); | ||
| 959 | return -EPERM; | ||
| 960 | } | ||
| 961 | if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
| 962 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
| 963 | (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
| 964 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
| 965 | spin_unlock(&drvdata->spinlock); | ||
| 966 | return -EPERM; | ||
| 967 | } | ||
| 968 | |||
| 969 | val1 = (unsigned long)config->addr_val[idx]; | ||
| 970 | val2 = (unsigned long)config->addr_val[idx + 1]; | ||
| 971 | spin_unlock(&drvdata->spinlock); | ||
| 972 | return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); | ||
| 973 | } | ||
| 974 | |||
| 975 | static ssize_t addr_range_store(struct device *dev, | ||
| 976 | struct device_attribute *attr, | ||
| 977 | const char *buf, size_t size) | ||
| 978 | { | ||
| 979 | u8 idx; | ||
| 980 | unsigned long val1, val2; | ||
| 981 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 982 | struct etmv4_config *config = &drvdata->config; | ||
| 983 | |||
| 984 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
| 985 | return -EINVAL; | ||
| 986 | /* lower address comparator cannot have a higher address value */ | ||
| 987 | if (val1 > val2) | ||
| 988 | return -EINVAL; | ||
| 989 | |||
| 990 | spin_lock(&drvdata->spinlock); | ||
| 991 | idx = config->addr_idx; | ||
| 992 | if (idx % 2 != 0) { | ||
| 993 | spin_unlock(&drvdata->spinlock); | ||
| 994 | return -EPERM; | ||
| 995 | } | ||
| 996 | |||
| 997 | if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
| 998 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
| 999 | (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
| 1000 | config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
| 1001 | spin_unlock(&drvdata->spinlock); | ||
| 1002 | return -EPERM; | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | config->addr_val[idx] = (u64)val1; | ||
| 1006 | config->addr_type[idx] = ETM_ADDR_TYPE_RANGE; | ||
| 1007 | config->addr_val[idx + 1] = (u64)val2; | ||
| 1008 | config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; | ||
| 1009 | /* | ||
| 1010 | * Program include or exclude control bits for vinst or vdata | ||
| 1011 | * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE | ||
| 1012 | */ | ||
| 1013 | if (config->mode & ETM_MODE_EXCLUDE) | ||
| 1014 | etm4_set_mode_exclude(drvdata, true); | ||
| 1015 | else | ||
| 1016 | etm4_set_mode_exclude(drvdata, false); | ||
| 1017 | |||
| 1018 | spin_unlock(&drvdata->spinlock); | ||
| 1019 | return size; | ||
| 1020 | } | ||
| 1021 | static DEVICE_ATTR_RW(addr_range); | ||
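
The addr_idx and addr_range attributes above are meant to be driven from user space. A minimal sketch of that flow follows; the device node 20040000.etm is an assumption, substitute whatever <memory_map>.etm entry exists under /sys/bus/coresight/devices on the target. addr_range only accepts an even comparator index and two hex addresses, low address first, exactly as parsed by the sscanf("%lx %lx") in addr_range_store().

        /* Sketch only, not part of the patch: program address comparator pair 0. */
        #include <stdio.h>

        #define ETM_DIR "/sys/bus/coresight/devices/20040000.etm"   /* assumed node */

        static int etm_write(const char *attr, const char *val)
        {
                char path[256];
                FILE *f;

                snprintf(path, sizeof(path), "%s/%s", ETM_DIR, attr);
                f = fopen(path, "w");
                if (!f)
                        return -1;
                fprintf(f, "%s\n", val);
                return fclose(f);
        }

        int main(void)
        {
                /* select address comparator pair 0 (must be even for a range) */
                if (etm_write("addr_idx", "0"))
                        return 1;
                /* two hex values, low address first */
                return etm_write("addr_range",
                                 "ffffff8008080000 ffffff80080a0000") ? 1 : 0;
        }
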
| 1022 | |||
| 1023 | static ssize_t addr_start_show(struct device *dev, | ||
| 1024 | struct device_attribute *attr, | ||
| 1025 | char *buf) | ||
| 1026 | { | ||
| 1027 | u8 idx; | ||
| 1028 | unsigned long val; | ||
| 1029 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1030 | struct etmv4_config *config = &drvdata->config; | ||
| 1031 | |||
| 1032 | spin_lock(&drvdata->spinlock); | ||
| 1033 | idx = config->addr_idx; | ||
| 1034 | |||
| 1035 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1036 | config->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
| 1037 | spin_unlock(&drvdata->spinlock); | ||
| 1038 | return -EPERM; | ||
| 1039 | } | ||
| 1040 | |||
| 1041 | val = (unsigned long)config->addr_val[idx]; | ||
| 1042 | spin_unlock(&drvdata->spinlock); | ||
| 1043 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | static ssize_t addr_start_store(struct device *dev, | ||
| 1047 | struct device_attribute *attr, | ||
| 1048 | const char *buf, size_t size) | ||
| 1049 | { | ||
| 1050 | u8 idx; | ||
| 1051 | unsigned long val; | ||
| 1052 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1053 | struct etmv4_config *config = &drvdata->config; | ||
| 1054 | |||
| 1055 | if (kstrtoul(buf, 16, &val)) | ||
| 1056 | return -EINVAL; | ||
| 1057 | |||
| 1058 | spin_lock(&drvdata->spinlock); | ||
| 1059 | idx = config->addr_idx; | ||
| 1060 | if (!drvdata->nr_addr_cmp) { | ||
| 1061 | spin_unlock(&drvdata->spinlock); | ||
| 1062 | return -EINVAL; | ||
| 1063 | } | ||
| 1064 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1065 | config->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
| 1066 | spin_unlock(&drvdata->spinlock); | ||
| 1067 | return -EPERM; | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | config->addr_val[idx] = (u64)val; | ||
| 1071 | config->addr_type[idx] = ETM_ADDR_TYPE_START; | ||
| 1072 | config->vissctlr |= BIT(idx); | ||
| 1073 | /* SSSTATUS, bit[9] - turn on start/stop logic */ | ||
| 1074 | config->vinst_ctrl |= BIT(9); | ||
| 1075 | spin_unlock(&drvdata->spinlock); | ||
| 1076 | return size; | ||
| 1077 | } | ||
| 1078 | static DEVICE_ATTR_RW(addr_start); | ||
| 1079 | |||
| 1080 | static ssize_t addr_stop_show(struct device *dev, | ||
| 1081 | struct device_attribute *attr, | ||
| 1082 | char *buf) | ||
| 1083 | { | ||
| 1084 | u8 idx; | ||
| 1085 | unsigned long val; | ||
| 1086 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1087 | struct etmv4_config *config = &drvdata->config; | ||
| 1088 | |||
| 1089 | spin_lock(&drvdata->spinlock); | ||
| 1090 | idx = config->addr_idx; | ||
| 1091 | |||
| 1092 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1093 | config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
| 1094 | spin_unlock(&drvdata->spinlock); | ||
| 1095 | return -EPERM; | ||
| 1096 | } | ||
| 1097 | |||
| 1098 | val = (unsigned long)config->addr_val[idx]; | ||
| 1099 | spin_unlock(&drvdata->spinlock); | ||
| 1100 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | static ssize_t addr_stop_store(struct device *dev, | ||
| 1104 | struct device_attribute *attr, | ||
| 1105 | const char *buf, size_t size) | ||
| 1106 | { | ||
| 1107 | u8 idx; | ||
| 1108 | unsigned long val; | ||
| 1109 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1110 | struct etmv4_config *config = &drvdata->config; | ||
| 1111 | |||
| 1112 | if (kstrtoul(buf, 16, &val)) | ||
| 1113 | return -EINVAL; | ||
| 1114 | |||
| 1115 | spin_lock(&drvdata->spinlock); | ||
| 1116 | idx = config->addr_idx; | ||
| 1117 | if (!drvdata->nr_addr_cmp) { | ||
| 1118 | spin_unlock(&drvdata->spinlock); | ||
| 1119 | return -EINVAL; | ||
| 1120 | } | ||
| 1121 | if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1122 | config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
| 1123 | spin_unlock(&drvdata->spinlock); | ||
| 1124 | return -EPERM; | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | config->addr_val[idx] = (u64)val; | ||
| 1128 | config->addr_type[idx] = ETM_ADDR_TYPE_STOP; | ||
| 1129 | config->vissctlr |= BIT(idx + 16); | ||
| 1130 | /* SSSTATUS, bit[9] - turn on start/stop logic */ | ||
| 1131 | config->vinst_ctrl |= BIT(9); | ||
| 1132 | spin_unlock(&drvdata->spinlock); | ||
| 1133 | return size; | ||
| 1134 | } | ||
| 1135 | static DEVICE_ATTR_RW(addr_stop); | ||
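
addr_start and addr_stop follow the same flow, but their stores additionally arm the ViewInst start/stop logic (SSSTATUS, TRCVICTLR bit[9]) as seen above. A compact sketch, again assuming the hypothetical 20040000.etm node, that starts tracing at one instruction address and stops at another:

        /* Sketch only: addresses and device node are placeholders. */
        #include <stdio.h>

        static void etm_put(const char *attr, const char *val)
        {
                char path[256];
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/bus/coresight/devices/20040000.etm/%s", attr);
                f = fopen(path, "w");
                if (f) {
                        fprintf(f, "%s\n", val);
                        fclose(f);
                }
        }

        int main(void)
        {
                etm_put("addr_idx", "2");                 /* comparator 2: start address */
                etm_put("addr_start", "ffffff8008081000");
                etm_put("addr_idx", "3");                 /* comparator 3: stop address  */
                etm_put("addr_stop", "ffffff8008082000");
                return 0;
        }
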
| 1136 | |||
| 1137 | static ssize_t addr_ctxtype_show(struct device *dev, | ||
| 1138 | struct device_attribute *attr, | ||
| 1139 | char *buf) | ||
| 1140 | { | ||
| 1141 | ssize_t len; | ||
| 1142 | u8 idx, val; | ||
| 1143 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1144 | struct etmv4_config *config = &drvdata->config; | ||
| 1145 | |||
| 1146 | spin_lock(&drvdata->spinlock); | ||
| 1147 | idx = config->addr_idx; | ||
| 1148 | /* CONTEXTTYPE, bits[3:2] */ | ||
| 1149 | val = BMVAL(config->addr_acc[idx], 2, 3); | ||
| 1150 | len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" : | ||
| 1151 | (val == ETM_CTX_CTXID ? "ctxid" : | ||
| 1152 | (val == ETM_CTX_VMID ? "vmid" : "all"))); | ||
| 1153 | spin_unlock(&drvdata->spinlock); | ||
| 1154 | return len; | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | static ssize_t addr_ctxtype_store(struct device *dev, | ||
| 1158 | struct device_attribute *attr, | ||
| 1159 | const char *buf, size_t size) | ||
| 1160 | { | ||
| 1161 | u8 idx; | ||
| 1162 | char str[10] = ""; | ||
| 1163 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1164 | struct etmv4_config *config = &drvdata->config; | ||
| 1165 | |||
| 1166 | if (strlen(buf) >= 10) | ||
| 1167 | return -EINVAL; | ||
| 1168 | if (sscanf(buf, "%s", str) != 1) | ||
| 1169 | return -EINVAL; | ||
| 1170 | |||
| 1171 | spin_lock(&drvdata->spinlock); | ||
| 1172 | idx = config->addr_idx; | ||
| 1173 | if (!strcmp(str, "none")) | ||
| 1174 | /* start by clearing context type bits */ | ||
| 1175 | config->addr_acc[idx] &= ~(BIT(2) | BIT(3)); | ||
| 1176 | else if (!strcmp(str, "ctxid")) { | ||
| 1177 | /* 0b01 The trace unit performs a Context ID comparison */ | ||
| 1178 | if (drvdata->numcidc) { | ||
| 1179 | config->addr_acc[idx] |= BIT(2); | ||
| 1180 | config->addr_acc[idx] &= ~BIT(3); | ||
| 1181 | } | ||
| 1182 | } else if (!strcmp(str, "vmid")) { | ||
| 1183 | /* 0b10 The trace unit performs a VMID comparison */ | ||
| 1184 | if (drvdata->numvmidc) { | ||
| 1185 | config->addr_acc[idx] &= ~BIT(2); | ||
| 1186 | config->addr_acc[idx] |= BIT(3); | ||
| 1187 | } | ||
| 1188 | } else if (!strcmp(str, "all")) { | ||
| 1189 | /* | ||
| 1190 | * 0b11 The trace unit performs a Context ID | ||
| 1191 | * comparison and a VMID comparison | ||
| 1192 | */ | ||
| 1193 | if (drvdata->numcidc) | ||
| 1194 | config->addr_acc[idx] |= BIT(2); | ||
| 1195 | if (drvdata->numvmidc) | ||
| 1196 | config->addr_acc[idx] |= BIT(3); | ||
| 1197 | } | ||
| 1198 | spin_unlock(&drvdata->spinlock); | ||
| 1199 | return size; | ||
| 1200 | } | ||
| 1201 | static DEVICE_ATTR_RW(addr_ctxtype); | ||
| 1202 | |||
| 1203 | static ssize_t addr_context_show(struct device *dev, | ||
| 1204 | struct device_attribute *attr, | ||
| 1205 | char *buf) | ||
| 1206 | { | ||
| 1207 | u8 idx; | ||
| 1208 | unsigned long val; | ||
| 1209 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1210 | struct etmv4_config *config = &drvdata->config; | ||
| 1211 | |||
| 1212 | spin_lock(&drvdata->spinlock); | ||
| 1213 | idx = config->addr_idx; | ||
| 1214 | /* context ID comparator bits[6:4] */ | ||
| 1215 | val = BMVAL(config->addr_acc[idx], 4, 6); | ||
| 1216 | spin_unlock(&drvdata->spinlock); | ||
| 1217 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1218 | } | ||
| 1219 | |||
| 1220 | static ssize_t addr_context_store(struct device *dev, | ||
| 1221 | struct device_attribute *attr, | ||
| 1222 | const char *buf, size_t size) | ||
| 1223 | { | ||
| 1224 | u8 idx; | ||
| 1225 | unsigned long val; | ||
| 1226 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1227 | struct etmv4_config *config = &drvdata->config; | ||
| 1228 | |||
| 1229 | if (kstrtoul(buf, 16, &val)) | ||
| 1230 | return -EINVAL; | ||
| 1231 | if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1)) | ||
| 1232 | return -EINVAL; | ||
| 1233 | if (val >= (drvdata->numcidc >= drvdata->numvmidc ? | ||
| 1234 | drvdata->numcidc : drvdata->numvmidc)) | ||
| 1235 | return -EINVAL; | ||
| 1236 | |||
| 1237 | spin_lock(&drvdata->spinlock); | ||
| 1238 | idx = config->addr_idx; | ||
| 1239 | /* clear context ID comparator bits[6:4] */ | ||
| 1240 | config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6)); | ||
| 1241 | config->addr_acc[idx] |= (val << 4); | ||
| 1242 | spin_unlock(&drvdata->spinlock); | ||
| 1243 | return size; | ||
| 1244 | } | ||
| 1245 | static DEVICE_ATTR_RW(addr_context); | ||
| 1246 | |||
| 1247 | static ssize_t seq_idx_show(struct device *dev, | ||
| 1248 | struct device_attribute *attr, | ||
| 1249 | char *buf) | ||
| 1250 | { | ||
| 1251 | unsigned long val; | ||
| 1252 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1253 | struct etmv4_config *config = &drvdata->config; | ||
| 1254 | |||
| 1255 | val = config->seq_idx; | ||
| 1256 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | static ssize_t seq_idx_store(struct device *dev, | ||
| 1260 | struct device_attribute *attr, | ||
| 1261 | const char *buf, size_t size) | ||
| 1262 | { | ||
| 1263 | unsigned long val; | ||
| 1264 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1265 | struct etmv4_config *config = &drvdata->config; | ||
| 1266 | |||
| 1267 | if (kstrtoul(buf, 16, &val)) | ||
| 1268 | return -EINVAL; | ||
| 1269 | if (val >= drvdata->nrseqstate - 1) | ||
| 1270 | return -EINVAL; | ||
| 1271 | |||
| 1272 | /* | ||
| 1273 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1274 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1275 | */ | ||
| 1276 | spin_lock(&drvdata->spinlock); | ||
| 1277 | config->seq_idx = val; | ||
| 1278 | spin_unlock(&drvdata->spinlock); | ||
| 1279 | return size; | ||
| 1280 | } | ||
| 1281 | static DEVICE_ATTR_RW(seq_idx); | ||
| 1282 | |||
| 1283 | static ssize_t seq_state_show(struct device *dev, | ||
| 1284 | struct device_attribute *attr, | ||
| 1285 | char *buf) | ||
| 1286 | { | ||
| 1287 | unsigned long val; | ||
| 1288 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1289 | struct etmv4_config *config = &drvdata->config; | ||
| 1290 | |||
| 1291 | val = config->seq_state; | ||
| 1292 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1293 | } | ||
| 1294 | |||
| 1295 | static ssize_t seq_state_store(struct device *dev, | ||
| 1296 | struct device_attribute *attr, | ||
| 1297 | const char *buf, size_t size) | ||
| 1298 | { | ||
| 1299 | unsigned long val; | ||
| 1300 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1301 | struct etmv4_config *config = &drvdata->config; | ||
| 1302 | |||
| 1303 | if (kstrtoul(buf, 16, &val)) | ||
| 1304 | return -EINVAL; | ||
| 1305 | if (val >= drvdata->nrseqstate) | ||
| 1306 | return -EINVAL; | ||
| 1307 | |||
| 1308 | config->seq_state = val; | ||
| 1309 | return size; | ||
| 1310 | } | ||
| 1311 | static DEVICE_ATTR_RW(seq_state); | ||
| 1312 | |||
| 1313 | static ssize_t seq_event_show(struct device *dev, | ||
| 1314 | struct device_attribute *attr, | ||
| 1315 | char *buf) | ||
| 1316 | { | ||
| 1317 | u8 idx; | ||
| 1318 | unsigned long val; | ||
| 1319 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1320 | struct etmv4_config *config = &drvdata->config; | ||
| 1321 | |||
| 1322 | spin_lock(&drvdata->spinlock); | ||
| 1323 | idx = config->seq_idx; | ||
| 1324 | val = config->seq_ctrl[idx]; | ||
| 1325 | spin_unlock(&drvdata->spinlock); | ||
| 1326 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1327 | } | ||
| 1328 | |||
| 1329 | static ssize_t seq_event_store(struct device *dev, | ||
| 1330 | struct device_attribute *attr, | ||
| 1331 | const char *buf, size_t size) | ||
| 1332 | { | ||
| 1333 | u8 idx; | ||
| 1334 | unsigned long val; | ||
| 1335 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1336 | struct etmv4_config *config = &drvdata->config; | ||
| 1337 | |||
| 1338 | if (kstrtoul(buf, 16, &val)) | ||
| 1339 | return -EINVAL; | ||
| 1340 | |||
| 1341 | spin_lock(&drvdata->spinlock); | ||
| 1342 | idx = config->seq_idx; | ||
| 1343 | /* RST, bits[7:0] */ | ||
| 1344 | config->seq_ctrl[idx] = val & 0xFF; | ||
| 1345 | spin_unlock(&drvdata->spinlock); | ||
| 1346 | return size; | ||
| 1347 | } | ||
| 1348 | static DEVICE_ATTR_RW(seq_event); | ||
| 1349 | |||
| 1350 | static ssize_t seq_reset_event_show(struct device *dev, | ||
| 1351 | struct device_attribute *attr, | ||
| 1352 | char *buf) | ||
| 1353 | { | ||
| 1354 | unsigned long val; | ||
| 1355 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1356 | struct etmv4_config *config = &drvdata->config; | ||
| 1357 | |||
| 1358 | val = config->seq_rst; | ||
| 1359 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1360 | } | ||
| 1361 | |||
| 1362 | static ssize_t seq_reset_event_store(struct device *dev, | ||
| 1363 | struct device_attribute *attr, | ||
| 1364 | const char *buf, size_t size) | ||
| 1365 | { | ||
| 1366 | unsigned long val; | ||
| 1367 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1368 | struct etmv4_config *config = &drvdata->config; | ||
| 1369 | |||
| 1370 | if (kstrtoul(buf, 16, &val)) | ||
| 1371 | return -EINVAL; | ||
| 1372 | if (!(drvdata->nrseqstate)) | ||
| 1373 | return -EINVAL; | ||
| 1374 | |||
| 1375 | config->seq_rst = val & ETMv4_EVENT_MASK; | ||
| 1376 | return size; | ||
| 1377 | } | ||
| 1378 | static DEVICE_ATTR_RW(seq_reset_event); | ||
| 1379 | |||
| 1380 | static ssize_t cntr_idx_show(struct device *dev, | ||
| 1381 | struct device_attribute *attr, | ||
| 1382 | char *buf) | ||
| 1383 | { | ||
| 1384 | unsigned long val; | ||
| 1385 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1386 | struct etmv4_config *config = &drvdata->config; | ||
| 1387 | |||
| 1388 | val = config->cntr_idx; | ||
| 1389 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1390 | } | ||
| 1391 | |||
| 1392 | static ssize_t cntr_idx_store(struct device *dev, | ||
| 1393 | struct device_attribute *attr, | ||
| 1394 | const char *buf, size_t size) | ||
| 1395 | { | ||
| 1396 | unsigned long val; | ||
| 1397 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1398 | struct etmv4_config *config = &drvdata->config; | ||
| 1399 | |||
| 1400 | if (kstrtoul(buf, 16, &val)) | ||
| 1401 | return -EINVAL; | ||
| 1402 | if (val >= drvdata->nr_cntr) | ||
| 1403 | return -EINVAL; | ||
| 1404 | |||
| 1405 | /* | ||
| 1406 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1407 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1408 | */ | ||
| 1409 | spin_lock(&drvdata->spinlock); | ||
| 1410 | config->cntr_idx = val; | ||
| 1411 | spin_unlock(&drvdata->spinlock); | ||
| 1412 | return size; | ||
| 1413 | } | ||
| 1414 | static DEVICE_ATTR_RW(cntr_idx); | ||
| 1415 | |||
| 1416 | static ssize_t cntrldvr_show(struct device *dev, | ||
| 1417 | struct device_attribute *attr, | ||
| 1418 | char *buf) | ||
| 1419 | { | ||
| 1420 | u8 idx; | ||
| 1421 | unsigned long val; | ||
| 1422 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1423 | struct etmv4_config *config = &drvdata->config; | ||
| 1424 | |||
| 1425 | spin_lock(&drvdata->spinlock); | ||
| 1426 | idx = config->cntr_idx; | ||
| 1427 | val = config->cntrldvr[idx]; | ||
| 1428 | spin_unlock(&drvdata->spinlock); | ||
| 1429 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1430 | } | ||
| 1431 | |||
| 1432 | static ssize_t cntrldvr_store(struct device *dev, | ||
| 1433 | struct device_attribute *attr, | ||
| 1434 | const char *buf, size_t size) | ||
| 1435 | { | ||
| 1436 | u8 idx; | ||
| 1437 | unsigned long val; | ||
| 1438 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1439 | struct etmv4_config *config = &drvdata->config; | ||
| 1440 | |||
| 1441 | if (kstrtoul(buf, 16, &val)) | ||
| 1442 | return -EINVAL; | ||
| 1443 | if (val > ETM_CNTR_MAX_VAL) | ||
| 1444 | return -EINVAL; | ||
| 1445 | |||
| 1446 | spin_lock(&drvdata->spinlock); | ||
| 1447 | idx = config->cntr_idx; | ||
| 1448 | config->cntrldvr[idx] = val; | ||
| 1449 | spin_unlock(&drvdata->spinlock); | ||
| 1450 | return size; | ||
| 1451 | } | ||
| 1452 | static DEVICE_ATTR_RW(cntrldvr); | ||
| 1453 | |||
| 1454 | static ssize_t cntr_val_show(struct device *dev, | ||
| 1455 | struct device_attribute *attr, | ||
| 1456 | char *buf) | ||
| 1457 | { | ||
| 1458 | u8 idx; | ||
| 1459 | unsigned long val; | ||
| 1460 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1461 | struct etmv4_config *config = &drvdata->config; | ||
| 1462 | |||
| 1463 | spin_lock(&drvdata->spinlock); | ||
| 1464 | idx = config->cntr_idx; | ||
| 1465 | val = config->cntr_val[idx]; | ||
| 1466 | spin_unlock(&drvdata->spinlock); | ||
| 1467 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1468 | } | ||
| 1469 | |||
| 1470 | static ssize_t cntr_val_store(struct device *dev, | ||
| 1471 | struct device_attribute *attr, | ||
| 1472 | const char *buf, size_t size) | ||
| 1473 | { | ||
| 1474 | u8 idx; | ||
| 1475 | unsigned long val; | ||
| 1476 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1477 | struct etmv4_config *config = &drvdata->config; | ||
| 1478 | |||
| 1479 | if (kstrtoul(buf, 16, &val)) | ||
| 1480 | return -EINVAL; | ||
| 1481 | if (val > ETM_CNTR_MAX_VAL) | ||
| 1482 | return -EINVAL; | ||
| 1483 | |||
| 1484 | spin_lock(&drvdata->spinlock); | ||
| 1485 | idx = config->cntr_idx; | ||
| 1486 | config->cntr_val[idx] = val; | ||
| 1487 | spin_unlock(&drvdata->spinlock); | ||
| 1488 | return size; | ||
| 1489 | } | ||
| 1490 | static DEVICE_ATTR_RW(cntr_val); | ||
| 1491 | |||
| 1492 | static ssize_t cntr_ctrl_show(struct device *dev, | ||
| 1493 | struct device_attribute *attr, | ||
| 1494 | char *buf) | ||
| 1495 | { | ||
| 1496 | u8 idx; | ||
| 1497 | unsigned long val; | ||
| 1498 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1499 | struct etmv4_config *config = &drvdata->config; | ||
| 1500 | |||
| 1501 | spin_lock(&drvdata->spinlock); | ||
| 1502 | idx = config->cntr_idx; | ||
| 1503 | val = config->cntr_ctrl[idx]; | ||
| 1504 | spin_unlock(&drvdata->spinlock); | ||
| 1505 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | static ssize_t cntr_ctrl_store(struct device *dev, | ||
| 1509 | struct device_attribute *attr, | ||
| 1510 | const char *buf, size_t size) | ||
| 1511 | { | ||
| 1512 | u8 idx; | ||
| 1513 | unsigned long val; | ||
| 1514 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1515 | struct etmv4_config *config = &drvdata->config; | ||
| 1516 | |||
| 1517 | if (kstrtoul(buf, 16, &val)) | ||
| 1518 | return -EINVAL; | ||
| 1519 | |||
| 1520 | spin_lock(&drvdata->spinlock); | ||
| 1521 | idx = config->cntr_idx; | ||
| 1522 | config->cntr_ctrl[idx] = val; | ||
| 1523 | spin_unlock(&drvdata->spinlock); | ||
| 1524 | return size; | ||
| 1525 | } | ||
| 1526 | static DEVICE_ATTR_RW(cntr_ctrl); | ||
| 1527 | |||
| 1528 | static ssize_t res_idx_show(struct device *dev, | ||
| 1529 | struct device_attribute *attr, | ||
| 1530 | char *buf) | ||
| 1531 | { | ||
| 1532 | unsigned long val; | ||
| 1533 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1534 | struct etmv4_config *config = &drvdata->config; | ||
| 1535 | |||
| 1536 | val = config->res_idx; | ||
| 1537 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1538 | } | ||
| 1539 | |||
| 1540 | static ssize_t res_idx_store(struct device *dev, | ||
| 1541 | struct device_attribute *attr, | ||
| 1542 | const char *buf, size_t size) | ||
| 1543 | { | ||
| 1544 | unsigned long val; | ||
| 1545 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1546 | struct etmv4_config *config = &drvdata->config; | ||
| 1547 | |||
| 1548 | if (kstrtoul(buf, 16, &val)) | ||
| 1549 | return -EINVAL; | ||
| 1550 | /* Resource selector pair 0 is always implemented and reserved */ | ||
| 1551 | if ((val == 0) || (val >= drvdata->nr_resource)) | ||
| 1552 | return -EINVAL; | ||
| 1553 | |||
| 1554 | /* | ||
| 1555 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1556 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1557 | */ | ||
| 1558 | spin_lock(&drvdata->spinlock); | ||
| 1559 | config->res_idx = val; | ||
| 1560 | spin_unlock(&drvdata->spinlock); | ||
| 1561 | return size; | ||
| 1562 | } | ||
| 1563 | static DEVICE_ATTR_RW(res_idx); | ||
| 1564 | |||
| 1565 | static ssize_t res_ctrl_show(struct device *dev, | ||
| 1566 | struct device_attribute *attr, | ||
| 1567 | char *buf) | ||
| 1568 | { | ||
| 1569 | u8 idx; | ||
| 1570 | unsigned long val; | ||
| 1571 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1572 | struct etmv4_config *config = &drvdata->config; | ||
| 1573 | |||
| 1574 | spin_lock(&drvdata->spinlock); | ||
| 1575 | idx = config->res_idx; | ||
| 1576 | val = config->res_ctrl[idx]; | ||
| 1577 | spin_unlock(&drvdata->spinlock); | ||
| 1578 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1579 | } | ||
| 1580 | |||
| 1581 | static ssize_t res_ctrl_store(struct device *dev, | ||
| 1582 | struct device_attribute *attr, | ||
| 1583 | const char *buf, size_t size) | ||
| 1584 | { | ||
| 1585 | u8 idx; | ||
| 1586 | unsigned long val; | ||
| 1587 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1588 | struct etmv4_config *config = &drvdata->config; | ||
| 1589 | |||
| 1590 | if (kstrtoul(buf, 16, &val)) | ||
| 1591 | return -EINVAL; | ||
| 1592 | |||
| 1593 | spin_lock(&drvdata->spinlock); | ||
| 1594 | idx = config->res_idx; | ||
| 1595 | /* For an odd idx the pair inversion (PAIRINV) bit is RES0 */ | ||
| 1596 | if (idx % 2 != 0) | ||
| 1597 | /* PAIRINV, bit[21] */ | ||
| 1598 | val &= ~BIT(21); | ||
| 1599 | config->res_ctrl[idx] = val; | ||
| 1600 | spin_unlock(&drvdata->spinlock); | ||
| 1601 | return size; | ||
| 1602 | } | ||
| 1603 | static DEVICE_ATTR_RW(res_ctrl); | ||
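
The store above silently clears PAIRINV (bit[21]) for odd-numbered resource selectors, since that bit is only defined on the even selector of a pair. The same masking in isolation, as a standalone illustration rather than kernel code:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t val = (1u << 21) | 0x10003;    /* PAIRINV plus some select bits */
                unsigned int idx = 1;                   /* odd selector of a pair */

                if (idx % 2 != 0)
                        val &= ~(1u << 21);             /* PAIRINV is RES0 here */

                printf("%#x\n", val);                   /* prints 0x10003 */
                return 0;
        }
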
| 1604 | |||
| 1605 | static ssize_t ctxid_idx_show(struct device *dev, | ||
| 1606 | struct device_attribute *attr, | ||
| 1607 | char *buf) | ||
| 1608 | { | ||
| 1609 | unsigned long val; | ||
| 1610 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1611 | struct etmv4_config *config = &drvdata->config; | ||
| 1612 | |||
| 1613 | val = config->ctxid_idx; | ||
| 1614 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1615 | } | ||
| 1616 | |||
| 1617 | static ssize_t ctxid_idx_store(struct device *dev, | ||
| 1618 | struct device_attribute *attr, | ||
| 1619 | const char *buf, size_t size) | ||
| 1620 | { | ||
| 1621 | unsigned long val; | ||
| 1622 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1623 | struct etmv4_config *config = &drvdata->config; | ||
| 1624 | |||
| 1625 | if (kstrtoul(buf, 16, &val)) | ||
| 1626 | return -EINVAL; | ||
| 1627 | if (val >= drvdata->numcidc) | ||
| 1628 | return -EINVAL; | ||
| 1629 | |||
| 1630 | /* | ||
| 1631 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1632 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1633 | */ | ||
| 1634 | spin_lock(&drvdata->spinlock); | ||
| 1635 | config->ctxid_idx = val; | ||
| 1636 | spin_unlock(&drvdata->spinlock); | ||
| 1637 | return size; | ||
| 1638 | } | ||
| 1639 | static DEVICE_ATTR_RW(ctxid_idx); | ||
| 1640 | |||
| 1641 | static ssize_t ctxid_pid_show(struct device *dev, | ||
| 1642 | struct device_attribute *attr, | ||
| 1643 | char *buf) | ||
| 1644 | { | ||
| 1645 | u8 idx; | ||
| 1646 | unsigned long val; | ||
| 1647 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1648 | struct etmv4_config *config = &drvdata->config; | ||
| 1649 | |||
| 1650 | spin_lock(&drvdata->spinlock); | ||
| 1651 | idx = config->ctxid_idx; | ||
| 1652 | val = (unsigned long)config->ctxid_vpid[idx]; | ||
| 1653 | spin_unlock(&drvdata->spinlock); | ||
| 1654 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1655 | } | ||
| 1656 | |||
| 1657 | static ssize_t ctxid_pid_store(struct device *dev, | ||
| 1658 | struct device_attribute *attr, | ||
| 1659 | const char *buf, size_t size) | ||
| 1660 | { | ||
| 1661 | u8 idx; | ||
| 1662 | unsigned long vpid, pid; | ||
| 1663 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1664 | struct etmv4_config *config = &drvdata->config; | ||
| 1665 | |||
| 1666 | /* | ||
| 1667 | * only implemented when ctxid tracing is enabled, i.e. at least one | ||
| 1668 | * ctxid comparator is implemented and the context ID size is greater | ||
| 1669 | * than 0 bits | ||
| 1670 | */ | ||
| 1671 | if (!drvdata->ctxid_size || !drvdata->numcidc) | ||
| 1672 | return -EINVAL; | ||
| 1673 | if (kstrtoul(buf, 16, &vpid)) | ||
| 1674 | return -EINVAL; | ||
| 1675 | |||
| 1676 | pid = coresight_vpid_to_pid(vpid); | ||
| 1677 | |||
| 1678 | spin_lock(&drvdata->spinlock); | ||
| 1679 | idx = config->ctxid_idx; | ||
| 1680 | config->ctxid_pid[idx] = (u64)pid; | ||
| 1681 | config->ctxid_vpid[idx] = (u64)vpid; | ||
| 1682 | spin_unlock(&drvdata->spinlock); | ||
| 1683 | return size; | ||
| 1684 | } | ||
| 1685 | static DEVICE_ATTR_RW(ctxid_pid); | ||
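
ctxid_pid_store() runs the written value through coresight_vpid_to_pid(), so a caller inside a PID namespace simply writes the PID it sees. A hedged sketch that points context ID comparator 0 at the calling process; the device node is assumed, and the value is written in hex because the store uses kstrtoul(buf, 16, ...):

        #include <stdio.h>
        #include <unistd.h>

        #define ETM_DIR "/sys/bus/coresight/devices/20040000.etm"   /* assumed node */

        int main(void)
        {
                char path[256];
                FILE *f;

                /* select context ID comparator 0 */
                snprintf(path, sizeof(path), "%s/ctxid_idx", ETM_DIR);
                f = fopen(path, "w");
                if (!f)
                        return 1;
                fprintf(f, "0\n");
                fclose(f);

                /* match only this process; the store expects hex */
                snprintf(path, sizeof(path), "%s/ctxid_pid", ETM_DIR);
                f = fopen(path, "w");
                if (!f)
                        return 1;
                fprintf(f, "%lx\n", (unsigned long)getpid());
                fclose(f);
                return 0;
        }
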
| 1686 | |||
| 1687 | static ssize_t ctxid_masks_show(struct device *dev, | ||
| 1688 | struct device_attribute *attr, | ||
| 1689 | char *buf) | ||
| 1690 | { | ||
| 1691 | unsigned long val1, val2; | ||
| 1692 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1693 | struct etmv4_config *config = &drvdata->config; | ||
| 1694 | |||
| 1695 | spin_lock(&drvdata->spinlock); | ||
| 1696 | val1 = config->ctxid_mask0; | ||
| 1697 | val2 = config->ctxid_mask1; | ||
| 1698 | spin_unlock(&drvdata->spinlock); | ||
| 1699 | return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); | ||
| 1700 | } | ||
| 1701 | |||
| 1702 | static ssize_t ctxid_masks_store(struct device *dev, | ||
| 1703 | struct device_attribute *attr, | ||
| 1704 | const char *buf, size_t size) | ||
| 1705 | { | ||
| 1706 | u8 i, j, maskbyte; | ||
| 1707 | unsigned long val1, val2, mask; | ||
| 1708 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1709 | struct etmv4_config *config = &drvdata->config; | ||
| 1710 | |||
| 1711 | /* | ||
| 1712 | * only implemented when ctxid tracing is enabled, i.e. at least one | ||
| 1713 | * ctxid comparator is implemented and the context ID size is greater | ||
| 1714 | * than 0 bits | ||
| 1715 | */ | ||
| 1716 | if (!drvdata->ctxid_size || !drvdata->numcidc) | ||
| 1717 | return -EINVAL; | ||
| 1718 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
| 1719 | return -EINVAL; | ||
| 1720 | |||
| 1721 | spin_lock(&drvdata->spinlock); | ||
| 1722 | /* | ||
| 1723 | * each byte[0..3] controls mask value applied to ctxid | ||
| 1724 | * comparator[0..3] | ||
| 1725 | */ | ||
| 1726 | switch (drvdata->numcidc) { | ||
| 1727 | case 0x1: | ||
| 1728 | /* COMP0, bits[7:0] */ | ||
| 1729 | config->ctxid_mask0 = val1 & 0xFF; | ||
| 1730 | break; | ||
| 1731 | case 0x2: | ||
| 1732 | /* COMP1, bits[15:8] */ | ||
| 1733 | config->ctxid_mask0 = val1 & 0xFFFF; | ||
| 1734 | break; | ||
| 1735 | case 0x3: | ||
| 1736 | /* COMP2, bits[23:16] */ | ||
| 1737 | config->ctxid_mask0 = val1 & 0xFFFFFF; | ||
| 1738 | break; | ||
| 1739 | case 0x4: | ||
| 1740 | /* COMP3, bits[31:24] */ | ||
| 1741 | config->ctxid_mask0 = val1; | ||
| 1742 | break; | ||
| 1743 | case 0x5: | ||
| 1744 | /* COMP4, bits[7:0] */ | ||
| 1745 | config->ctxid_mask0 = val1; | ||
| 1746 | config->ctxid_mask1 = val2 & 0xFF; | ||
| 1747 | break; | ||
| 1748 | case 0x6: | ||
| 1749 | /* COMP5, bits[15:8] */ | ||
| 1750 | config->ctxid_mask0 = val1; | ||
| 1751 | config->ctxid_mask1 = val2 & 0xFFFF; | ||
| 1752 | break; | ||
| 1753 | case 0x7: | ||
| 1754 | /* COMP6, bits[23:16] */ | ||
| 1755 | config->ctxid_mask0 = val1; | ||
| 1756 | config->ctxid_mask1 = val2 & 0xFFFFFF; | ||
| 1757 | break; | ||
| 1758 | case 0x8: | ||
| 1759 | /* COMP7, bits[31:24] */ | ||
| 1760 | config->ctxid_mask0 = val1; | ||
| 1761 | config->ctxid_mask1 = val2; | ||
| 1762 | break; | ||
| 1763 | default: | ||
| 1764 | break; | ||
| 1765 | } | ||
| 1766 | /* | ||
| 1767 | * If software sets a mask bit to 1, it must program the relevant byte | ||
| 1768 | * of the ctxid comparator value to 0x0, otherwise behavior is unpredictable. | ||
| 1769 | * For example, if bit[3] of ctxid_mask0 (the mask byte for comparator 0) | ||
| 1770 | * is 1, bits[31:24] of the ctxid comparator 0 value register must be cleared. | ||
| 1771 | */ | ||
| 1772 | mask = config->ctxid_mask0; | ||
| 1773 | for (i = 0; i < drvdata->numcidc; i++) { | ||
| 1774 | /* mask value of corresponding ctxid comparator */ | ||
| 1775 | maskbyte = mask & ETMv4_EVENT_MASK; | ||
| 1776 | /* | ||
| 1777 | * each bit corresponds to a byte of respective ctxid comparator | ||
| 1778 | * value register | ||
| 1779 | */ | ||
| 1780 | for (j = 0; j < 8; j++) { | ||
| 1781 | if (maskbyte & 1) | ||
| 1782 | config->ctxid_pid[i] &= ~(0xFF << (j * 8)); | ||
| 1783 | maskbyte >>= 1; | ||
| 1784 | } | ||
| 1785 | /* Select the next ctxid comparator mask value */ | ||
| 1786 | if (i == 3) | ||
| 1787 | /* ctxid comparators[4-7] */ | ||
| 1788 | mask = config->ctxid_mask1; | ||
| 1789 | else | ||
| 1790 | mask >>= 0x8; | ||
| 1791 | } | ||
| 1792 | |||
| 1793 | spin_unlock(&drvdata->spinlock); | ||
| 1794 | return size; | ||
| 1795 | } | ||
| 1796 | static DEVICE_ATTR_RW(ctxid_masks); | ||
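
The loop above applies the rule spelled out in the comment: byte i of ctxid_mask0/ctxid_mask1 is the mask for comparator i, and every set bit inside that byte zeroes the corresponding byte of the comparator value. The same arithmetic in isolation, as an illustration only (not kernel code):

        #include <stdint.h>
        #include <stdio.h>

        /* Clear every byte of 'pid' whose bit is set in 'maskbyte'. */
        static uint64_t apply_mask(uint64_t pid, uint8_t maskbyte)
        {
                int j;

                for (j = 0; j < 8; j++) {
                        if (maskbyte & 1)
                                pid &= ~(0xffULL << (j * 8));
                        maskbyte >>= 1;
                }
                return pid;
        }

        int main(void)
        {
                /* bit[0] of the mask byte set: byte 0 of the value is cleared */
                printf("%#llx\n",
                       (unsigned long long)apply_mask(0x12345678, 0x01)); /* 0x12345600 */
                return 0;
        }
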
| 1797 | |||
| 1798 | static ssize_t vmid_idx_show(struct device *dev, | ||
| 1799 | struct device_attribute *attr, | ||
| 1800 | char *buf) | ||
| 1801 | { | ||
| 1802 | unsigned long val; | ||
| 1803 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1804 | struct etmv4_config *config = &drvdata->config; | ||
| 1805 | |||
| 1806 | val = config->vmid_idx; | ||
| 1807 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1808 | } | ||
| 1809 | |||
| 1810 | static ssize_t vmid_idx_store(struct device *dev, | ||
| 1811 | struct device_attribute *attr, | ||
| 1812 | const char *buf, size_t size) | ||
| 1813 | { | ||
| 1814 | unsigned long val; | ||
| 1815 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1816 | struct etmv4_config *config = &drvdata->config; | ||
| 1817 | |||
| 1818 | if (kstrtoul(buf, 16, &val)) | ||
| 1819 | return -EINVAL; | ||
| 1820 | if (val >= drvdata->numvmidc) | ||
| 1821 | return -EINVAL; | ||
| 1822 | |||
| 1823 | /* | ||
| 1824 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1825 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1826 | */ | ||
| 1827 | spin_lock(&drvdata->spinlock); | ||
| 1828 | config->vmid_idx = val; | ||
| 1829 | spin_unlock(&drvdata->spinlock); | ||
| 1830 | return size; | ||
| 1831 | } | ||
| 1832 | static DEVICE_ATTR_RW(vmid_idx); | ||
| 1833 | |||
| 1834 | static ssize_t vmid_val_show(struct device *dev, | ||
| 1835 | struct device_attribute *attr, | ||
| 1836 | char *buf) | ||
| 1837 | { | ||
| 1838 | unsigned long val; | ||
| 1839 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1840 | struct etmv4_config *config = &drvdata->config; | ||
| 1841 | |||
| 1842 | val = (unsigned long)config->vmid_val[config->vmid_idx]; | ||
| 1843 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1844 | } | ||
| 1845 | |||
| 1846 | static ssize_t vmid_val_store(struct device *dev, | ||
| 1847 | struct device_attribute *attr, | ||
| 1848 | const char *buf, size_t size) | ||
| 1849 | { | ||
| 1850 | unsigned long val; | ||
| 1851 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1852 | struct etmv4_config *config = &drvdata->config; | ||
| 1853 | |||
| 1854 | /* | ||
| 1855 | * only implemented when vmid tracing is enabled, i.e. at least one | ||
| 1856 | * vmid comparator is implemented and at least 8 bit vmid size | ||
| 1857 | */ | ||
| 1858 | if (!drvdata->vmid_size || !drvdata->numvmidc) | ||
| 1859 | return -EINVAL; | ||
| 1860 | if (kstrtoul(buf, 16, &val)) | ||
| 1861 | return -EINVAL; | ||
| 1862 | |||
| 1863 | spin_lock(&drvdata->spinlock); | ||
| 1864 | config->vmid_val[config->vmid_idx] = (u64)val; | ||
| 1865 | spin_unlock(&drvdata->spinlock); | ||
| 1866 | return size; | ||
| 1867 | } | ||
| 1868 | static DEVICE_ATTR_RW(vmid_val); | ||
| 1869 | |||
| 1870 | static ssize_t vmid_masks_show(struct device *dev, | ||
| 1871 | struct device_attribute *attr, char *buf) | ||
| 1872 | { | ||
| 1873 | unsigned long val1, val2; | ||
| 1874 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1875 | struct etmv4_config *config = &drvdata->config; | ||
| 1876 | |||
| 1877 | spin_lock(&drvdata->spinlock); | ||
| 1878 | val1 = config->vmid_mask0; | ||
| 1879 | val2 = config->vmid_mask1; | ||
| 1880 | spin_unlock(&drvdata->spinlock); | ||
| 1881 | return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); | ||
| 1882 | } | ||
| 1883 | |||
| 1884 | static ssize_t vmid_masks_store(struct device *dev, | ||
| 1885 | struct device_attribute *attr, | ||
| 1886 | const char *buf, size_t size) | ||
| 1887 | { | ||
| 1888 | u8 i, j, maskbyte; | ||
| 1889 | unsigned long val1, val2, mask; | ||
| 1890 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1891 | struct etmv4_config *config = &drvdata->config; | ||
| 1892 | |||
| 1893 | /* | ||
| 1894 | * only implemented when vmid tracing is enabled, i.e. at least one | ||
| 1895 | * vmid comparator is implemented and at least 8 bit vmid size | ||
| 1896 | */ | ||
| 1897 | if (!drvdata->vmid_size || !drvdata->numvmidc) | ||
| 1898 | return -EINVAL; | ||
| 1899 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
| 1900 | return -EINVAL; | ||
| 1901 | |||
| 1902 | spin_lock(&drvdata->spinlock); | ||
| 1903 | |||
| 1904 | /* | ||
| 1905 | * each byte[0..3] controls mask value applied to vmid | ||
| 1906 | * comparator[0..3] | ||
| 1907 | */ | ||
| 1908 | switch (drvdata->numvmidc) { | ||
| 1909 | case 0x1: | ||
| 1910 | /* COMP0, bits[7:0] */ | ||
| 1911 | config->vmid_mask0 = val1 & 0xFF; | ||
| 1912 | break; | ||
| 1913 | case 0x2: | ||
| 1914 | /* COMP1, bits[15:8] */ | ||
| 1915 | config->vmid_mask0 = val1 & 0xFFFF; | ||
| 1916 | break; | ||
| 1917 | case 0x3: | ||
| 1918 | /* COMP2, bits[23:16] */ | ||
| 1919 | config->vmid_mask0 = val1 & 0xFFFFFF; | ||
| 1920 | break; | ||
| 1921 | case 0x4: | ||
| 1922 | /* COMP3, bits[31:24] */ | ||
| 1923 | config->vmid_mask0 = val1; | ||
| 1924 | break; | ||
| 1925 | case 0x5: | ||
| 1926 | /* COMP4, bits[7:0] */ | ||
| 1927 | config->vmid_mask0 = val1; | ||
| 1928 | config->vmid_mask1 = val2 & 0xFF; | ||
| 1929 | break; | ||
| 1930 | case 0x6: | ||
| 1931 | /* COMP5, bits[15:8] */ | ||
| 1932 | config->vmid_mask0 = val1; | ||
| 1933 | config->vmid_mask1 = val2 & 0xFFFF; | ||
| 1934 | break; | ||
| 1935 | case 0x7: | ||
| 1936 | /* COMP6, bits[23:16] */ | ||
| 1937 | config->vmid_mask0 = val1; | ||
| 1938 | config->vmid_mask1 = val2 & 0xFFFFFF; | ||
| 1939 | break; | ||
| 1940 | case 0x8: | ||
| 1941 | /* COMP7, bits[31:24] */ | ||
| 1942 | config->vmid_mask0 = val1; | ||
| 1943 | config->vmid_mask1 = val2; | ||
| 1944 | break; | ||
| 1945 | default: | ||
| 1946 | break; | ||
| 1947 | } | ||
| 1948 | |||
| 1949 | /* | ||
| 1950 | * If software sets a mask bit to 1, it must program the relevant byte | ||
| 1951 | * of the vmid comparator value to 0x0, otherwise behavior is unpredictable. | ||
| 1952 | * For example, if bit[3] of vmid_mask0 (the mask byte for comparator 0) | ||
| 1953 | * is 1, bits[31:24] of the vmid comparator 0 value register must be cleared. | ||
| 1954 | */ | ||
| 1955 | mask = config->vmid_mask0; | ||
| 1956 | for (i = 0; i < drvdata->numvmidc; i++) { | ||
| 1957 | /* mask value of corresponding vmid comparator */ | ||
| 1958 | maskbyte = mask & ETMv4_EVENT_MASK; | ||
| 1959 | /* | ||
| 1960 | * each bit corresponds to a byte of respective vmid comparator | ||
| 1961 | * value register | ||
| 1962 | */ | ||
| 1963 | for (j = 0; j < 8; j++) { | ||
| 1964 | if (maskbyte & 1) | ||
| 1965 | config->vmid_val[i] &= ~(0xFF << (j * 8)); | ||
| 1966 | maskbyte >>= 1; | ||
| 1967 | } | ||
| 1968 | /* Select the next vmid comparator mask value */ | ||
| 1969 | if (i == 3) | ||
| 1970 | /* vmid comparators[4-7] */ | ||
| 1971 | mask = config->vmid_mask1; | ||
| 1972 | else | ||
| 1973 | mask >>= 0x8; | ||
| 1974 | } | ||
| 1975 | spin_unlock(&drvdata->spinlock); | ||
| 1976 | return size; | ||
| 1977 | } | ||
| 1978 | static DEVICE_ATTR_RW(vmid_masks); | ||
| 1979 | |||
| 1980 | static ssize_t cpu_show(struct device *dev, | ||
| 1981 | struct device_attribute *attr, char *buf) | ||
| 1982 | { | ||
| 1983 | int val; | ||
| 1984 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1985 | |||
| 1986 | val = drvdata->cpu; | ||
| 1987 | return scnprintf(buf, PAGE_SIZE, "%d\n", val); | ||
| 1988 | |||
| 1989 | } | ||
| 1990 | static DEVICE_ATTR_RO(cpu); | ||
| 1991 | |||
| 1992 | static struct attribute *coresight_etmv4_attrs[] = { | ||
| 1993 | &dev_attr_nr_pe_cmp.attr, | ||
| 1994 | &dev_attr_nr_addr_cmp.attr, | ||
| 1995 | &dev_attr_nr_cntr.attr, | ||
| 1996 | &dev_attr_nr_ext_inp.attr, | ||
| 1997 | &dev_attr_numcidc.attr, | ||
| 1998 | &dev_attr_numvmidc.attr, | ||
| 1999 | &dev_attr_nrseqstate.attr, | ||
| 2000 | &dev_attr_nr_resource.attr, | ||
| 2001 | &dev_attr_nr_ss_cmp.attr, | ||
| 2002 | &dev_attr_reset.attr, | ||
| 2003 | &dev_attr_mode.attr, | ||
| 2004 | &dev_attr_pe.attr, | ||
| 2005 | &dev_attr_event.attr, | ||
| 2006 | &dev_attr_event_instren.attr, | ||
| 2007 | &dev_attr_event_ts.attr, | ||
| 2008 | &dev_attr_syncfreq.attr, | ||
| 2009 | &dev_attr_cyc_threshold.attr, | ||
| 2010 | &dev_attr_bb_ctrl.attr, | ||
| 2011 | &dev_attr_event_vinst.attr, | ||
| 2012 | &dev_attr_s_exlevel_vinst.attr, | ||
| 2013 | &dev_attr_ns_exlevel_vinst.attr, | ||
| 2014 | &dev_attr_addr_idx.attr, | ||
| 2015 | &dev_attr_addr_instdatatype.attr, | ||
| 2016 | &dev_attr_addr_single.attr, | ||
| 2017 | &dev_attr_addr_range.attr, | ||
| 2018 | &dev_attr_addr_start.attr, | ||
| 2019 | &dev_attr_addr_stop.attr, | ||
| 2020 | &dev_attr_addr_ctxtype.attr, | ||
| 2021 | &dev_attr_addr_context.attr, | ||
| 2022 | &dev_attr_seq_idx.attr, | ||
| 2023 | &dev_attr_seq_state.attr, | ||
| 2024 | &dev_attr_seq_event.attr, | ||
| 2025 | &dev_attr_seq_reset_event.attr, | ||
| 2026 | &dev_attr_cntr_idx.attr, | ||
| 2027 | &dev_attr_cntrldvr.attr, | ||
| 2028 | &dev_attr_cntr_val.attr, | ||
| 2029 | &dev_attr_cntr_ctrl.attr, | ||
| 2030 | &dev_attr_res_idx.attr, | ||
| 2031 | &dev_attr_res_ctrl.attr, | ||
| 2032 | &dev_attr_ctxid_idx.attr, | ||
| 2033 | &dev_attr_ctxid_pid.attr, | ||
| 2034 | &dev_attr_ctxid_masks.attr, | ||
| 2035 | &dev_attr_vmid_idx.attr, | ||
| 2036 | &dev_attr_vmid_val.attr, | ||
| 2037 | &dev_attr_vmid_masks.attr, | ||
| 2038 | &dev_attr_cpu.attr, | ||
| 2039 | NULL, | ||
| 2040 | }; | ||
| 2041 | |||
| 2042 | #define coresight_etm4x_simple_func(name, offset) \ | ||
| 2043 | coresight_simple_func(struct etmv4_drvdata, name, offset) | ||
| 2044 | |||
| 2045 | coresight_etm4x_simple_func(trcoslsr, TRCOSLSR); | ||
| 2046 | coresight_etm4x_simple_func(trcpdcr, TRCPDCR); | ||
| 2047 | coresight_etm4x_simple_func(trcpdsr, TRCPDSR); | ||
| 2048 | coresight_etm4x_simple_func(trclsr, TRCLSR); | ||
| 2049 | coresight_etm4x_simple_func(trcconfig, TRCCONFIGR); | ||
| 2050 | coresight_etm4x_simple_func(trctraceid, TRCTRACEIDR); | ||
| 2051 | coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS); | ||
| 2052 | coresight_etm4x_simple_func(trcdevid, TRCDEVID); | ||
| 2053 | coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE); | ||
| 2054 | coresight_etm4x_simple_func(trcpidr0, TRCPIDR0); | ||
| 2055 | coresight_etm4x_simple_func(trcpidr1, TRCPIDR1); | ||
| 2056 | coresight_etm4x_simple_func(trcpidr2, TRCPIDR2); | ||
| 2057 | coresight_etm4x_simple_func(trcpidr3, TRCPIDR3); | ||
| 2058 | |||
| 2059 | static struct attribute *coresight_etmv4_mgmt_attrs[] = { | ||
| 2060 | &dev_attr_trcoslsr.attr, | ||
| 2061 | &dev_attr_trcpdcr.attr, | ||
| 2062 | &dev_attr_trcpdsr.attr, | ||
| 2063 | &dev_attr_trclsr.attr, | ||
| 2064 | &dev_attr_trcconfig.attr, | ||
| 2065 | &dev_attr_trctraceid.attr, | ||
| 2066 | &dev_attr_trcauthstatus.attr, | ||
| 2067 | &dev_attr_trcdevid.attr, | ||
| 2068 | &dev_attr_trcdevtype.attr, | ||
| 2069 | &dev_attr_trcpidr0.attr, | ||
| 2070 | &dev_attr_trcpidr1.attr, | ||
| 2071 | &dev_attr_trcpidr2.attr, | ||
| 2072 | &dev_attr_trcpidr3.attr, | ||
| 2073 | NULL, | ||
| 2074 | }; | ||
| 2075 | |||
| 2076 | coresight_etm4x_simple_func(trcidr0, TRCIDR0); | ||
| 2077 | coresight_etm4x_simple_func(trcidr1, TRCIDR1); | ||
| 2078 | coresight_etm4x_simple_func(trcidr2, TRCIDR2); | ||
| 2079 | coresight_etm4x_simple_func(trcidr3, TRCIDR3); | ||
| 2080 | coresight_etm4x_simple_func(trcidr4, TRCIDR4); | ||
| 2081 | coresight_etm4x_simple_func(trcidr5, TRCIDR5); | ||
| 2082 | /* trcidr[6,7] are reserved */ | ||
| 2083 | coresight_etm4x_simple_func(trcidr8, TRCIDR8); | ||
| 2084 | coresight_etm4x_simple_func(trcidr9, TRCIDR9); | ||
| 2085 | coresight_etm4x_simple_func(trcidr10, TRCIDR10); | ||
| 2086 | coresight_etm4x_simple_func(trcidr11, TRCIDR11); | ||
| 2087 | coresight_etm4x_simple_func(trcidr12, TRCIDR12); | ||
| 2088 | coresight_etm4x_simple_func(trcidr13, TRCIDR13); | ||
| 2089 | |||
| 2090 | static struct attribute *coresight_etmv4_trcidr_attrs[] = { | ||
| 2091 | &dev_attr_trcidr0.attr, | ||
| 2092 | &dev_attr_trcidr1.attr, | ||
| 2093 | &dev_attr_trcidr2.attr, | ||
| 2094 | &dev_attr_trcidr3.attr, | ||
| 2095 | &dev_attr_trcidr4.attr, | ||
| 2096 | &dev_attr_trcidr5.attr, | ||
| 2097 | /* trcidr[6,7] are reserved */ | ||
| 2098 | &dev_attr_trcidr8.attr, | ||
| 2099 | &dev_attr_trcidr9.attr, | ||
| 2100 | &dev_attr_trcidr10.attr, | ||
| 2101 | &dev_attr_trcidr11.attr, | ||
| 2102 | &dev_attr_trcidr12.attr, | ||
| 2103 | &dev_attr_trcidr13.attr, | ||
| 2104 | NULL, | ||
| 2105 | }; | ||
| 2106 | |||
| 2107 | static const struct attribute_group coresight_etmv4_group = { | ||
| 2108 | .attrs = coresight_etmv4_attrs, | ||
| 2109 | }; | ||
| 2110 | |||
| 2111 | static const struct attribute_group coresight_etmv4_mgmt_group = { | ||
| 2112 | .attrs = coresight_etmv4_mgmt_attrs, | ||
| 2113 | .name = "mgmt", | ||
| 2114 | }; | ||
| 2115 | |||
| 2116 | static const struct attribute_group coresight_etmv4_trcidr_group = { | ||
| 2117 | .attrs = coresight_etmv4_trcidr_attrs, | ||
| 2118 | .name = "trcidr", | ||
| 2119 | }; | ||
| 2120 | |||
| 2121 | const struct attribute_group *coresight_etmv4_groups[] = { | ||
| 2122 | &coresight_etmv4_group, | ||
| 2123 | &coresight_etmv4_mgmt_group, | ||
| 2124 | &coresight_etmv4_trcidr_group, | ||
| 2125 | NULL, | ||
| 2126 | }; | ||
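
The three groups registered above produce the flat attribute set plus the mgmt/ and trcidr/ subdirectories under each ETM's sysfs node. A trivial read sketch; the device name is again an assumption and must match the node on the target:

        #include <stdio.h>

        int main(void)
        {
                char buf[32];
                FILE *f = fopen("/sys/bus/coresight/devices/20040000.etm/mgmt/trctraceid",
                                "r");

                if (!f)
                        return 1;
                if (fgets(buf, sizeof(buf), f))
                        printf("trace id: %s", buf);
                fclose(f);
                return 0;
        }
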
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 1c59bd36834c..462f0dc15757 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c | |||
| @@ -26,15 +26,19 @@ | |||
| 26 | #include <linux/clk.h> | 26 | #include <linux/clk.h> |
| 27 | #include <linux/cpu.h> | 27 | #include <linux/cpu.h> |
| 28 | #include <linux/coresight.h> | 28 | #include <linux/coresight.h> |
| 29 | #include <linux/coresight-pmu.h> | ||
| 29 | #include <linux/pm_wakeup.h> | 30 | #include <linux/pm_wakeup.h> |
| 30 | #include <linux/amba/bus.h> | 31 | #include <linux/amba/bus.h> |
| 31 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
| 32 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
| 34 | #include <linux/perf_event.h> | ||
| 33 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
| 34 | #include <linux/perf_event.h> | 36 | #include <linux/perf_event.h> |
| 35 | #include <asm/sections.h> | 37 | #include <asm/sections.h> |
| 38 | #include <asm/local.h> | ||
| 36 | 39 | ||
| 37 | #include "coresight-etm4x.h" | 40 | #include "coresight-etm4x.h" |
| 41 | #include "coresight-etm-perf.h" | ||
| 38 | 42 | ||
| 39 | static int boot_enable; | 43 | static int boot_enable; |
| 40 | module_param_named(boot_enable, boot_enable, int, S_IRUGO); | 44 | module_param_named(boot_enable, boot_enable, int, S_IRUGO); |
| @@ -42,13 +46,13 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); | |||
| 42 | /* The number of ETMv4 currently registered */ | 46 | /* The number of ETMv4 currently registered */ |
| 43 | static int etm4_count; | 47 | static int etm4_count; |
| 44 | static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; | 48 | static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; |
| 49 | static void etm4_set_default(struct etmv4_config *config); | ||
| 45 | 50 | ||
| 46 | static void etm4_os_unlock(void *info) | 51 | static void etm4_os_unlock(struct etmv4_drvdata *drvdata) |
| 47 | { | 52 | { |
| 48 | struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info; | ||
| 49 | |||
| 50 | /* Writing any value to ETMOSLAR unlocks the trace registers */ | 53 | /* Writing any value to ETMOSLAR unlocks the trace registers */ |
| 51 | writel_relaxed(0x0, drvdata->base + TRCOSLAR); | 54 | writel_relaxed(0x0, drvdata->base + TRCOSLAR); |
| 55 | drvdata->os_unlock = true; | ||
| 52 | isb(); | 56 | isb(); |
| 53 | } | 57 | } |
| 54 | 58 | ||
| @@ -76,7 +80,7 @@ static int etm4_trace_id(struct coresight_device *csdev) | |||
| 76 | unsigned long flags; | 80 | unsigned long flags; |
| 77 | int trace_id = -1; | 81 | int trace_id = -1; |
| 78 | 82 | ||
| 79 | if (!drvdata->enable) | 83 | if (!local_read(&drvdata->mode)) |
| 80 | return drvdata->trcid; | 84 | return drvdata->trcid; |
| 81 | 85 | ||
| 82 | spin_lock_irqsave(&drvdata->spinlock, flags); | 86 | spin_lock_irqsave(&drvdata->spinlock, flags); |
| @@ -95,6 +99,7 @@ static void etm4_enable_hw(void *info) | |||
| 95 | { | 99 | { |
| 96 | int i; | 100 | int i; |
| 97 | struct etmv4_drvdata *drvdata = info; | 101 | struct etmv4_drvdata *drvdata = info; |
| 102 | struct etmv4_config *config = &drvdata->config; | ||
| 98 | 103 | ||
| 99 | CS_UNLOCK(drvdata->base); | 104 | CS_UNLOCK(drvdata->base); |
| 100 | 105 | ||
| @@ -109,69 +114,69 @@ static void etm4_enable_hw(void *info) | |||
| 109 | "timeout observed when probing at offset %#x\n", | 114 | "timeout observed when probing at offset %#x\n", |
| 110 | TRCSTATR); | 115 | TRCSTATR); |
| 111 | 116 | ||
| 112 | writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR); | 117 | writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR); |
| 113 | writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR); | 118 | writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR); |
| 114 | /* nothing specific implemented */ | 119 | /* nothing specific implemented */ |
| 115 | writel_relaxed(0x0, drvdata->base + TRCAUXCTLR); | 120 | writel_relaxed(0x0, drvdata->base + TRCAUXCTLR); |
| 116 | writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R); | 121 | writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R); |
| 117 | writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R); | 122 | writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R); |
| 118 | writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR); | 123 | writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR); |
| 119 | writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR); | 124 | writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR); |
| 120 | writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR); | 125 | writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR); |
| 121 | writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR); | 126 | writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR); |
| 122 | writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR); | 127 | writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR); |
| 123 | writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR); | 128 | writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR); |
| 124 | writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR); | 129 | writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR); |
| 125 | writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR); | 130 | writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR); |
| 126 | writel_relaxed(drvdata->vissctlr, | 131 | writel_relaxed(config->vissctlr, |
| 127 | drvdata->base + TRCVISSCTLR); | 132 | drvdata->base + TRCVISSCTLR); |
| 128 | writel_relaxed(drvdata->vipcssctlr, | 133 | writel_relaxed(config->vipcssctlr, |
| 129 | drvdata->base + TRCVIPCSSCTLR); | 134 | drvdata->base + TRCVIPCSSCTLR); |
| 130 | for (i = 0; i < drvdata->nrseqstate - 1; i++) | 135 | for (i = 0; i < drvdata->nrseqstate - 1; i++) |
| 131 | writel_relaxed(drvdata->seq_ctrl[i], | 136 | writel_relaxed(config->seq_ctrl[i], |
| 132 | drvdata->base + TRCSEQEVRn(i)); | 137 | drvdata->base + TRCSEQEVRn(i)); |
| 133 | writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR); | 138 | writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR); |
| 134 | writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR); | 139 | writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR); |
| 135 | writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR); | 140 | writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR); |
| 136 | for (i = 0; i < drvdata->nr_cntr; i++) { | 141 | for (i = 0; i < drvdata->nr_cntr; i++) { |
| 137 | writel_relaxed(drvdata->cntrldvr[i], | 142 | writel_relaxed(config->cntrldvr[i], |
| 138 | drvdata->base + TRCCNTRLDVRn(i)); | 143 | drvdata->base + TRCCNTRLDVRn(i)); |
| 139 | writel_relaxed(drvdata->cntr_ctrl[i], | 144 | writel_relaxed(config->cntr_ctrl[i], |
| 140 | drvdata->base + TRCCNTCTLRn(i)); | 145 | drvdata->base + TRCCNTCTLRn(i)); |
| 141 | writel_relaxed(drvdata->cntr_val[i], | 146 | writel_relaxed(config->cntr_val[i], |
| 142 | drvdata->base + TRCCNTVRn(i)); | 147 | drvdata->base + TRCCNTVRn(i)); |
| 143 | } | 148 | } |
| 144 | 149 | ||
| 145 | /* Resource selector pair 0 is always implemented and reserved */ | 150 | /* Resource selector pair 0 is always implemented and reserved */ |
| 146 | for (i = 2; i < drvdata->nr_resource * 2; i++) | 151 | for (i = 0; i < drvdata->nr_resource * 2; i++) |
| 147 | writel_relaxed(drvdata->res_ctrl[i], | 152 | writel_relaxed(config->res_ctrl[i], |
| 148 | drvdata->base + TRCRSCTLRn(i)); | 153 | drvdata->base + TRCRSCTLRn(i)); |
| 149 | 154 | ||
| 150 | for (i = 0; i < drvdata->nr_ss_cmp; i++) { | 155 | for (i = 0; i < drvdata->nr_ss_cmp; i++) { |
| 151 | writel_relaxed(drvdata->ss_ctrl[i], | 156 | writel_relaxed(config->ss_ctrl[i], |
| 152 | drvdata->base + TRCSSCCRn(i)); | 157 | drvdata->base + TRCSSCCRn(i)); |
| 153 | writel_relaxed(drvdata->ss_status[i], | 158 | writel_relaxed(config->ss_status[i], |
| 154 | drvdata->base + TRCSSCSRn(i)); | 159 | drvdata->base + TRCSSCSRn(i)); |
| 155 | writel_relaxed(drvdata->ss_pe_cmp[i], | 160 | writel_relaxed(config->ss_pe_cmp[i], |
| 156 | drvdata->base + TRCSSPCICRn(i)); | 161 | drvdata->base + TRCSSPCICRn(i)); |
| 157 | } | 162 | } |
| 158 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { | 163 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { |
| 159 | writeq_relaxed(drvdata->addr_val[i], | 164 | writeq_relaxed(config->addr_val[i], |
| 160 | drvdata->base + TRCACVRn(i)); | 165 | drvdata->base + TRCACVRn(i)); |
| 161 | writeq_relaxed(drvdata->addr_acc[i], | 166 | writeq_relaxed(config->addr_acc[i], |
| 162 | drvdata->base + TRCACATRn(i)); | 167 | drvdata->base + TRCACATRn(i)); |
| 163 | } | 168 | } |
| 164 | for (i = 0; i < drvdata->numcidc; i++) | 169 | for (i = 0; i < drvdata->numcidc; i++) |
| 165 | writeq_relaxed(drvdata->ctxid_pid[i], | 170 | writeq_relaxed(config->ctxid_pid[i], |
| 166 | drvdata->base + TRCCIDCVRn(i)); | 171 | drvdata->base + TRCCIDCVRn(i)); |
| 167 | writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); | 172 | writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); |
| 168 | writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); | 173 | writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); |
| 169 | 174 | ||
| 170 | for (i = 0; i < drvdata->numvmidc; i++) | 175 | for (i = 0; i < drvdata->numvmidc; i++) |
| 171 | writeq_relaxed(drvdata->vmid_val[i], | 176 | writeq_relaxed(config->vmid_val[i], |
| 172 | drvdata->base + TRCVMIDCVRn(i)); | 177 | drvdata->base + TRCVMIDCVRn(i)); |
| 173 | writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0); | 178 | writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0); |
| 174 | writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); | 179 | writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); |
| 175 | 180 | ||
| 176 | /* Enable the trace unit */ | 181 | /* Enable the trace unit */ |
| 177 | writel_relaxed(1, drvdata->base + TRCPRGCTLR); | 182 | writel_relaxed(1, drvdata->base + TRCPRGCTLR); |
| @@ -187,2120 +192,210 @@ static void etm4_enable_hw(void *info) | |||
| 187 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); | 192 | dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); |
| 188 | } | 193 | } |
| 189 | 194 | ||
| 190 | static int etm4_enable(struct coresight_device *csdev, | 195 | static int etm4_parse_event_config(struct etmv4_drvdata *drvdata, |
| 191 | struct perf_event_attr *attr, u32 mode) | 196 | struct perf_event_attr *attr) |
| 192 | { | 197 | { |
| 193 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 198 | struct etmv4_config *config = &drvdata->config; |
| 194 | int ret; | ||
| 195 | |||
| 196 | spin_lock(&drvdata->spinlock); | ||
| 197 | |||
| 198 | /* | ||
| 199 | * Executing etm4_enable_hw on the cpu whose ETM is being enabled | ||
| 200 | * ensures that register writes occur when cpu is powered. | ||
| 201 | */ | ||
| 202 | ret = smp_call_function_single(drvdata->cpu, | ||
| 203 | etm4_enable_hw, drvdata, 1); | ||
| 204 | if (ret) | ||
| 205 | goto err; | ||
| 206 | drvdata->enable = true; | ||
| 207 | drvdata->sticky_enable = true; | ||
| 208 | |||
| 209 | spin_unlock(&drvdata->spinlock); | ||
| 210 | 199 | ||
| 211 | dev_info(drvdata->dev, "ETM tracing enabled\n"); | 200 | if (!attr) |
| 212 | return 0; | 201 | return -EINVAL; |
| 213 | err: | ||
| 214 | spin_unlock(&drvdata->spinlock); | ||
| 215 | return ret; | ||
| 216 | } | ||
| 217 | 202 | ||
| 218 | static void etm4_disable_hw(void *info) | 203 | /* Clear configuration from previous run */ |
| 219 | { | 204 | memset(config, 0, sizeof(struct etmv4_config)); |
| 220 | u32 control; | ||
| 221 | struct etmv4_drvdata *drvdata = info; | ||
| 222 | 205 | ||
| 223 | CS_UNLOCK(drvdata->base); | 206 | if (attr->exclude_kernel) |
| 207 | config->mode = ETM_MODE_EXCL_KERN; | ||
| 224 | 208 | ||
| 225 | control = readl_relaxed(drvdata->base + TRCPRGCTLR); | 209 | if (attr->exclude_user) |
| 210 | config->mode = ETM_MODE_EXCL_USER; | ||
| 226 | 211 | ||
| 227 | /* EN, bit[0] Trace unit enable bit */ | 212 | /* Always start from the default config */ |
| 228 | control &= ~0x1; | 213 | etm4_set_default(config); |
| 229 | |||
| 230 | /* make sure everything completes before disabling */ | ||
| 231 | mb(); | ||
| 232 | isb(); | ||
| 233 | writel_relaxed(control, drvdata->base + TRCPRGCTLR); | ||
| 234 | |||
| 235 | CS_LOCK(drvdata->base); | ||
| 236 | |||
| 237 | dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); | ||
| 238 | } | ||
| 239 | |||
| 240 | static void etm4_disable(struct coresight_device *csdev) | ||
| 241 | { | ||
| 242 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 243 | 214 | ||
| 244 | /* | 215 | /* |
| 245 | * Taking hotplug lock here protects from clocks getting disabled | 216 | * By default the tracers are configured to trace the whole address |
| 246 | * with tracing being left on (crash scenario) if user disable occurs | 217 | * range. Narrow the field only if requested by user space. |
| 247 | * after cpu online mask indicates the cpu is offline but before the | ||
| 248 | * DYING hotplug callback is serviced by the ETM driver. | ||
| 249 | */ | 218 | */ |
| 250 | get_online_cpus(); | 219 | if (config->mode) |
| 251 | spin_lock(&drvdata->spinlock); | 220 | etm4_config_trace_mode(config); |
| 252 | |||
| 253 | /* | ||
| 254 | * Executing etm4_disable_hw on the cpu whose ETM is being disabled | ||
| 255 | * ensures that register writes occur when cpu is powered. | ||
| 256 | */ | ||
| 257 | smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1); | ||
| 258 | drvdata->enable = false; | ||
| 259 | |||
| 260 | spin_unlock(&drvdata->spinlock); | ||
| 261 | put_online_cpus(); | ||
| 262 | |||
| 263 | dev_info(drvdata->dev, "ETM tracing disabled\n"); | ||
| 264 | } | ||
| 265 | |||
| 266 | static const struct coresight_ops_source etm4_source_ops = { | ||
| 267 | .cpu_id = etm4_cpu_id, | ||
| 268 | .trace_id = etm4_trace_id, | ||
| 269 | .enable = etm4_enable, | ||
| 270 | .disable = etm4_disable, | ||
| 271 | }; | ||
| 272 | |||
| 273 | static const struct coresight_ops etm4_cs_ops = { | ||
| 274 | .source_ops = &etm4_source_ops, | ||
| 275 | }; | ||
| 276 | 221 | ||
| 277 | static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) | 222 | /* Go from generic option to ETMv4 specifics */ |
| 278 | { | 223 | if (attr->config & BIT(ETM_OPT_CYCACC)) |
| 279 | u8 idx = drvdata->addr_idx; | 224 | config->cfg |= ETMv4_MODE_CYCACC; |
| 225 | if (attr->config & BIT(ETM_OPT_TS)) | ||
| 226 | config->cfg |= ETMv4_MODE_TIMESTAMP; | ||
| 280 | 227 | ||
| 281 | /* | ||
| 282 | * TRCACATRn.TYPE bit[1:0]: type of comparison | ||
| 283 | * the trace unit performs | ||
| 284 | */ | ||
| 285 | if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) { | ||
| 286 | if (idx % 2 != 0) | ||
| 287 | return -EINVAL; | ||
| 288 | |||
| 289 | /* | ||
| 290 | * We are performing instruction address comparison. Set the | ||
| 291 | * relevant bit of ViewInst Include/Exclude Control register | ||
| 292 | * for corresponding address comparator pair. | ||
| 293 | */ | ||
| 294 | if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE || | ||
| 295 | drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE) | ||
| 296 | return -EINVAL; | ||
| 297 | |||
| 298 | if (exclude == true) { | ||
| 299 | /* | ||
| 300 | * Set exclude bit and unset the include bit | ||
| 301 | * corresponding to comparator pair | ||
| 302 | */ | ||
| 303 | drvdata->viiectlr |= BIT(idx / 2 + 16); | ||
| 304 | drvdata->viiectlr &= ~BIT(idx / 2); | ||
| 305 | } else { | ||
| 306 | /* | ||
| 307 | * Set include bit and unset exclude bit | ||
| 308 | * corresponding to comparator pair | ||
| 309 | */ | ||
| 310 | drvdata->viiectlr |= BIT(idx / 2); | ||
| 311 | drvdata->viiectlr &= ~BIT(idx / 2 + 16); | ||
| 312 | } | ||
| 313 | } | ||
| 314 | return 0; | 228 | return 0; |
| 315 | } | 229 | } |
| 316 | 230 | ||
| 317 | static ssize_t nr_pe_cmp_show(struct device *dev, | 231 | static int etm4_enable_perf(struct coresight_device *csdev, |
| 318 | struct device_attribute *attr, | 232 | struct perf_event_attr *attr) |
| 319 | char *buf) | ||
| 320 | { | 233 | { |
| 321 | unsigned long val; | 234 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 322 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 323 | |||
| 324 | val = drvdata->nr_pe_cmp; | ||
| 325 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 326 | } | ||
| 327 | static DEVICE_ATTR_RO(nr_pe_cmp); | ||
| 328 | |||
| 329 | static ssize_t nr_addr_cmp_show(struct device *dev, | ||
| 330 | struct device_attribute *attr, | ||
| 331 | char *buf) | ||
| 332 | { | ||
| 333 | unsigned long val; | ||
| 334 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 335 | |||
| 336 | val = drvdata->nr_addr_cmp; | ||
| 337 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 338 | } | ||
| 339 | static DEVICE_ATTR_RO(nr_addr_cmp); | ||
| 340 | |||
| 341 | static ssize_t nr_cntr_show(struct device *dev, | ||
| 342 | struct device_attribute *attr, | ||
| 343 | char *buf) | ||
| 344 | { | ||
| 345 | unsigned long val; | ||
| 346 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 347 | |||
| 348 | val = drvdata->nr_cntr; | ||
| 349 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 350 | } | ||
| 351 | static DEVICE_ATTR_RO(nr_cntr); | ||
| 352 | |||
| 353 | static ssize_t nr_ext_inp_show(struct device *dev, | ||
| 354 | struct device_attribute *attr, | ||
| 355 | char *buf) | ||
| 356 | { | ||
| 357 | unsigned long val; | ||
| 358 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 359 | |||
| 360 | val = drvdata->nr_ext_inp; | ||
| 361 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 362 | } | ||
| 363 | static DEVICE_ATTR_RO(nr_ext_inp); | ||
| 364 | |||
| 365 | static ssize_t numcidc_show(struct device *dev, | ||
| 366 | struct device_attribute *attr, | ||
| 367 | char *buf) | ||
| 368 | { | ||
| 369 | unsigned long val; | ||
| 370 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 371 | |||
| 372 | val = drvdata->numcidc; | ||
| 373 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 374 | } | ||
| 375 | static DEVICE_ATTR_RO(numcidc); | ||
| 376 | |||
| 377 | static ssize_t numvmidc_show(struct device *dev, | ||
| 378 | struct device_attribute *attr, | ||
| 379 | char *buf) | ||
| 380 | { | ||
| 381 | unsigned long val; | ||
| 382 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 383 | |||
| 384 | val = drvdata->numvmidc; | ||
| 385 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 386 | } | ||
| 387 | static DEVICE_ATTR_RO(numvmidc); | ||
| 388 | |||
| 389 | static ssize_t nrseqstate_show(struct device *dev, | ||
| 390 | struct device_attribute *attr, | ||
| 391 | char *buf) | ||
| 392 | { | ||
| 393 | unsigned long val; | ||
| 394 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 395 | |||
| 396 | val = drvdata->nrseqstate; | ||
| 397 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 398 | } | ||
| 399 | static DEVICE_ATTR_RO(nrseqstate); | ||
| 400 | |||
| 401 | static ssize_t nr_resource_show(struct device *dev, | ||
| 402 | struct device_attribute *attr, | ||
| 403 | char *buf) | ||
| 404 | { | ||
| 405 | unsigned long val; | ||
| 406 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 407 | |||
| 408 | val = drvdata->nr_resource; | ||
| 409 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 410 | } | ||
| 411 | static DEVICE_ATTR_RO(nr_resource); | ||
| 412 | |||
| 413 | static ssize_t nr_ss_cmp_show(struct device *dev, | ||
| 414 | struct device_attribute *attr, | ||
| 415 | char *buf) | ||
| 416 | { | ||
| 417 | unsigned long val; | ||
| 418 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 419 | |||
| 420 | val = drvdata->nr_ss_cmp; | ||
| 421 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 422 | } | ||
| 423 | static DEVICE_ATTR_RO(nr_ss_cmp); | ||
| 424 | |||
| 425 | static ssize_t reset_store(struct device *dev, | ||
| 426 | struct device_attribute *attr, | ||
| 427 | const char *buf, size_t size) | ||
| 428 | { | ||
| 429 | int i; | ||
| 430 | unsigned long val; | ||
| 431 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 432 | 235 | ||
| 433 | if (kstrtoul(buf, 16, &val)) | 236 | if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) |
| 434 | return -EINVAL; | 237 | return -EINVAL; |
| 435 | 238 | ||
| 436 | spin_lock(&drvdata->spinlock); | 239 | /* Configure the tracer based on the session's specifics */ |
| 437 | if (val) | 240 | etm4_parse_event_config(drvdata, attr); |
| 438 | drvdata->mode = 0x0; | 241 | /* And enable it */ |
| 439 | 242 | etm4_enable_hw(drvdata); | |
| 440 | /* Disable data tracing: do not trace load and store data transfers */ | ||
| 441 | drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE); | ||
| 442 | drvdata->cfg &= ~(BIT(1) | BIT(2)); | ||
| 443 | |||
| 444 | /* Disable data value and data address tracing */ | ||
| 445 | drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR | | ||
| 446 | ETM_MODE_DATA_TRACE_VAL); | ||
| 447 | drvdata->cfg &= ~(BIT(16) | BIT(17)); | ||
| 448 | |||
| 449 | /* Disable all events tracing */ | ||
| 450 | drvdata->eventctrl0 = 0x0; | ||
| 451 | drvdata->eventctrl1 = 0x0; | ||
| 452 | |||
| 453 | /* Disable timestamp event */ | ||
| 454 | drvdata->ts_ctrl = 0x0; | ||
| 455 | |||
| 456 | /* Disable stalling */ | ||
| 457 | drvdata->stall_ctrl = 0x0; | ||
| 458 | |||
| 459 | /* Reset trace synchronization period to 2^8 = 256 bytes */ | ||
| 460 | if (drvdata->syncpr == false) | ||
| 461 | drvdata->syncfreq = 0x8; | ||
| 462 | |||
| 463 | /* | ||
| 464 | * Enable ViewInst to trace everything with start-stop logic in | ||
| 465 | * started state. ARM recommends start-stop logic is set before | ||
| 466 | * each trace run. | ||
| 467 | */ | ||
| 468 | drvdata->vinst_ctrl |= BIT(0); | ||
| 469 | if (drvdata->nr_addr_cmp == true) { | ||
| 470 | drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP; | ||
| 471 | /* SSSTATUS, bit[9] */ | ||
| 472 | drvdata->vinst_ctrl |= BIT(9); | ||
| 473 | } | ||
| 474 | |||
| 475 | /* No address range filtering for ViewInst */ | ||
| 476 | drvdata->viiectlr = 0x0; | ||
| 477 | |||
| 478 | /* No start-stop filtering for ViewInst */ | ||
| 479 | drvdata->vissctlr = 0x0; | ||
| 480 | |||
| 481 | /* Disable seq events */ | ||
| 482 | for (i = 0; i < drvdata->nrseqstate-1; i++) | ||
| 483 | drvdata->seq_ctrl[i] = 0x0; | ||
| 484 | drvdata->seq_rst = 0x0; | ||
| 485 | drvdata->seq_state = 0x0; | ||
| 486 | |||
| 487 | /* Disable external input events */ | ||
| 488 | drvdata->ext_inp = 0x0; | ||
| 489 | |||
| 490 | drvdata->cntr_idx = 0x0; | ||
| 491 | for (i = 0; i < drvdata->nr_cntr; i++) { | ||
| 492 | drvdata->cntrldvr[i] = 0x0; | ||
| 493 | drvdata->cntr_ctrl[i] = 0x0; | ||
| 494 | drvdata->cntr_val[i] = 0x0; | ||
| 495 | } | ||
| 496 | |||
| 497 | /* Resource selector pair 0 is always implemented and reserved */ | ||
| 498 | drvdata->res_idx = 0x2; | ||
| 499 | for (i = 2; i < drvdata->nr_resource * 2; i++) | ||
| 500 | drvdata->res_ctrl[i] = 0x0; | ||
| 501 | |||
| 502 | for (i = 0; i < drvdata->nr_ss_cmp; i++) { | ||
| 503 | drvdata->ss_ctrl[i] = 0x0; | ||
| 504 | drvdata->ss_pe_cmp[i] = 0x0; | ||
| 505 | } | ||
| 506 | |||
| 507 | drvdata->addr_idx = 0x0; | ||
| 508 | for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { | ||
| 509 | drvdata->addr_val[i] = 0x0; | ||
| 510 | drvdata->addr_acc[i] = 0x0; | ||
| 511 | drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE; | ||
| 512 | } | ||
| 513 | |||
| 514 | drvdata->ctxid_idx = 0x0; | ||
| 515 | for (i = 0; i < drvdata->numcidc; i++) { | ||
| 516 | drvdata->ctxid_pid[i] = 0x0; | ||
| 517 | drvdata->ctxid_vpid[i] = 0x0; | ||
| 518 | } | ||
| 519 | |||
| 520 | drvdata->ctxid_mask0 = 0x0; | ||
| 521 | drvdata->ctxid_mask1 = 0x0; | ||
| 522 | |||
| 523 | drvdata->vmid_idx = 0x0; | ||
| 524 | for (i = 0; i < drvdata->numvmidc; i++) | ||
| 525 | drvdata->vmid_val[i] = 0x0; | ||
| 526 | drvdata->vmid_mask0 = 0x0; | ||
| 527 | drvdata->vmid_mask1 = 0x0; | ||
| 528 | |||
| 529 | drvdata->trcid = drvdata->cpu + 1; | ||
| 530 | spin_unlock(&drvdata->spinlock); | ||
| 531 | return size; | ||
| 532 | } | ||
| 533 | static DEVICE_ATTR_WO(reset); | ||
| 534 | 243 | ||
| 535 | static ssize_t mode_show(struct device *dev, | 244 | return 0; |
| 536 | struct device_attribute *attr, | ||
| 537 | char *buf) | ||
| 538 | { | ||
| 539 | unsigned long val; | ||
| 540 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 541 | |||
| 542 | val = drvdata->mode; | ||
| 543 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 544 | } | 245 | } |
| 545 | 246 | ||
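etm4_enable_perf() relies on the perf core calling it on the CPU that owns the tracer, so it can program the hardware directly instead of cross-calling; the WARN_ON_ONCE() guard documents that contract. A small userspace analogy of such an "owner CPU only" check, assuming sched_getcpu() as the stand-in for smp_processor_id() and a hypothetical demo_ctx in place of drvdata:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Hypothetical per-CPU context; the driver keys this off drvdata->cpu */
struct demo_ctx {
	int cpu;
};

static int enable_on_owner_cpu(const struct demo_ctx *ctx)
{
	/* Per-CPU hardware may only be programmed from its own CPU */
	if (ctx->cpu != sched_getcpu())
		return -1;

	/* ... program the per-CPU trace unit here ... */
	return 0;
}

int main(void)
{
	struct demo_ctx ctx = { .cpu = sched_getcpu() };

	printf("enable: %d\n", enable_on_owner_cpu(&ctx));
	return 0;
}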
| 546 | static ssize_t mode_store(struct device *dev, | 247 | static int etm4_enable_sysfs(struct coresight_device *csdev) |
| 547 | struct device_attribute *attr, | ||
| 548 | const char *buf, size_t size) | ||
| 549 | { | 248 | { |
| 550 | unsigned long val, mode; | 249 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 551 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | 250 | int ret; |
| 552 | |||
| 553 | if (kstrtoul(buf, 16, &val)) | ||
| 554 | return -EINVAL; | ||
| 555 | 251 | ||
| 556 | spin_lock(&drvdata->spinlock); | 252 | spin_lock(&drvdata->spinlock); |
| 557 | drvdata->mode = val & ETMv4_MODE_ALL; | ||
| 558 | |||
| 559 | if (drvdata->mode & ETM_MODE_EXCLUDE) | ||
| 560 | etm4_set_mode_exclude(drvdata, true); | ||
| 561 | else | ||
| 562 | etm4_set_mode_exclude(drvdata, false); | ||
| 563 | |||
| 564 | if (drvdata->instrp0 == true) { | ||
| 565 | /* start by clearing instruction P0 field */ | ||
| 566 | drvdata->cfg &= ~(BIT(1) | BIT(2)); | ||
| 567 | if (drvdata->mode & ETM_MODE_LOAD) | ||
| 568 | /* 0b01 Trace load instructions as P0 instructions */ | ||
| 569 | drvdata->cfg |= BIT(1); | ||
| 570 | if (drvdata->mode & ETM_MODE_STORE) | ||
| 571 | /* 0b10 Trace store instructions as P0 instructions */ | ||
| 572 | drvdata->cfg |= BIT(2); | ||
| 573 | if (drvdata->mode & ETM_MODE_LOAD_STORE) | ||
| 574 | /* | ||
| 575 | * 0b11 Trace load and store instructions | ||
| 576 | * as P0 instructions | ||
| 577 | */ | ||
| 578 | drvdata->cfg |= BIT(1) | BIT(2); | ||
| 579 | } | ||
| 580 | |||
| 581 | /* bit[3], Branch broadcast mode */ | ||
| 582 | if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true)) | ||
| 583 | drvdata->cfg |= BIT(3); | ||
| 584 | else | ||
| 585 | drvdata->cfg &= ~BIT(3); | ||
| 586 | |||
| 587 | /* bit[4], Cycle counting instruction trace bit */ | ||
| 588 | if ((drvdata->mode & ETMv4_MODE_CYCACC) && | ||
| 589 | (drvdata->trccci == true)) | ||
| 590 | drvdata->cfg |= BIT(4); | ||
| 591 | else | ||
| 592 | drvdata->cfg &= ~BIT(4); | ||
| 593 | |||
| 594 | /* bit[6], Context ID tracing bit */ | ||
| 595 | if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size)) | ||
| 596 | drvdata->cfg |= BIT(6); | ||
| 597 | else | ||
| 598 | drvdata->cfg &= ~BIT(6); | ||
| 599 | |||
| 600 | if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size)) | ||
| 601 | drvdata->cfg |= BIT(7); | ||
| 602 | else | ||
| 603 | drvdata->cfg &= ~BIT(7); | ||
| 604 | |||
| 605 | /* bits[10:8], Conditional instruction tracing bit */ | ||
| 606 | mode = ETM_MODE_COND(drvdata->mode); | ||
| 607 | if (drvdata->trccond == true) { | ||
| 608 | drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10)); | ||
| 609 | drvdata->cfg |= mode << 8; | ||
| 610 | } | ||
| 611 | |||
| 612 | /* bit[11], Global timestamp tracing bit */ | ||
| 613 | if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size)) | ||
| 614 | drvdata->cfg |= BIT(11); | ||
| 615 | else | ||
| 616 | drvdata->cfg &= ~BIT(11); | ||
| 617 | 253 | ||
| 618 | /* bit[12], Return stack enable bit */ | ||
| 619 | if ((drvdata->mode & ETM_MODE_RETURNSTACK) && | ||
| 620 | (drvdata->retstack == true)) | ||
| 621 | drvdata->cfg |= BIT(12); | ||
| 622 | else | ||
| 623 | drvdata->cfg &= ~BIT(12); | ||
| 624 | |||
| 625 | /* bits[14:13], Q element enable field */ | ||
| 626 | mode = ETM_MODE_QELEM(drvdata->mode); | ||
| 627 | /* start by clearing QE bits */ | ||
| 628 | drvdata->cfg &= ~(BIT(13) | BIT(14)); | ||
| 629 | /* if supported, Q elements with instruction counts are enabled */ | ||
| 630 | if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) | ||
| 631 | drvdata->cfg |= BIT(13); | ||
| 632 | /* | 254 | /* |
| 633 | * if supported, Q elements with and without instruction | 255 | * Executing etm4_enable_hw on the cpu whose ETM is being enabled |
| 634 | * counts are enabled | 256 | * ensures that register writes occur when cpu is powered. |
| 635 | */ | 257 | */ |
| 636 | if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) | 258 | ret = smp_call_function_single(drvdata->cpu, |
| 637 | drvdata->cfg |= BIT(14); | 259 | etm4_enable_hw, drvdata, 1); |
| 638 | 260 | if (ret) | |
| 639 | /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ | 261 | goto err; |
| 640 | if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) && | ||
| 641 | (drvdata->atbtrig == true)) | ||
| 642 | drvdata->eventctrl1 |= BIT(11); | ||
| 643 | else | ||
| 644 | drvdata->eventctrl1 &= ~BIT(11); | ||
| 645 | |||
| 646 | /* bit[12], Low-power state behavior override bit */ | ||
| 647 | if ((drvdata->mode & ETM_MODE_LPOVERRIDE) && | ||
| 648 | (drvdata->lpoverride == true)) | ||
| 649 | drvdata->eventctrl1 |= BIT(12); | ||
| 650 | else | ||
| 651 | drvdata->eventctrl1 &= ~BIT(12); | ||
| 652 | |||
| 653 | /* bit[8], Instruction stall bit */ | ||
| 654 | if (drvdata->mode & ETM_MODE_ISTALL_EN) | ||
| 655 | drvdata->stall_ctrl |= BIT(8); | ||
| 656 | else | ||
| 657 | drvdata->stall_ctrl &= ~BIT(8); | ||
| 658 | |||
| 659 | /* bit[10], Prioritize instruction trace bit */ | ||
| 660 | if (drvdata->mode & ETM_MODE_INSTPRIO) | ||
| 661 | drvdata->stall_ctrl |= BIT(10); | ||
| 662 | else | ||
| 663 | drvdata->stall_ctrl &= ~BIT(10); | ||
| 664 | |||
| 665 | /* bit[13], Trace overflow prevention bit */ | ||
| 666 | if ((drvdata->mode & ETM_MODE_NOOVERFLOW) && | ||
| 667 | (drvdata->nooverflow == true)) | ||
| 668 | drvdata->stall_ctrl |= BIT(13); | ||
| 669 | else | ||
| 670 | drvdata->stall_ctrl &= ~BIT(13); | ||
| 671 | |||
| 672 | /* bit[9] Start/stop logic control bit */ | ||
| 673 | if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP) | ||
| 674 | drvdata->vinst_ctrl |= BIT(9); | ||
| 675 | else | ||
| 676 | drvdata->vinst_ctrl &= ~BIT(9); | ||
| 677 | |||
| 678 | /* bit[10], Whether a trace unit must trace a Reset exception */ | ||
| 679 | if (drvdata->mode & ETM_MODE_TRACE_RESET) | ||
| 680 | drvdata->vinst_ctrl |= BIT(10); | ||
| 681 | else | ||
| 682 | drvdata->vinst_ctrl &= ~BIT(10); | ||
| 683 | |||
| 684 | /* bit[11], Whether a trace unit must trace a system error exception */ | ||
| 685 | if ((drvdata->mode & ETM_MODE_TRACE_ERR) && | ||
| 686 | (drvdata->trc_error == true)) | ||
| 687 | drvdata->vinst_ctrl |= BIT(11); | ||
| 688 | else | ||
| 689 | drvdata->vinst_ctrl &= ~BIT(11); | ||
| 690 | |||
| 691 | spin_unlock(&drvdata->spinlock); | ||
| 692 | return size; | ||
| 693 | } | ||
| 694 | static DEVICE_ATTR_RW(mode); | ||
| 695 | |||
| 696 | static ssize_t pe_show(struct device *dev, | ||
| 697 | struct device_attribute *attr, | ||
| 698 | char *buf) | ||
| 699 | { | ||
| 700 | unsigned long val; | ||
| 701 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 702 | |||
| 703 | val = drvdata->pe_sel; | ||
| 704 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 705 | } | ||
| 706 | |||
| 707 | static ssize_t pe_store(struct device *dev, | ||
| 708 | struct device_attribute *attr, | ||
| 709 | const char *buf, size_t size) | ||
| 710 | { | ||
| 711 | unsigned long val; | ||
| 712 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 713 | |||
| 714 | if (kstrtoul(buf, 16, &val)) | ||
| 715 | return -EINVAL; | ||
| 716 | |||
| 717 | spin_lock(&drvdata->spinlock); | ||
| 718 | if (val > drvdata->nr_pe) { | ||
| 719 | spin_unlock(&drvdata->spinlock); | ||
| 720 | return -EINVAL; | ||
| 721 | } | ||
| 722 | 262 | ||
| 723 | drvdata->pe_sel = val; | 263 | drvdata->sticky_enable = true; |
| 724 | spin_unlock(&drvdata->spinlock); | 264 | spin_unlock(&drvdata->spinlock); |
| 725 | return size; | ||
| 726 | } | ||
| 727 | static DEVICE_ATTR_RW(pe); | ||
| 728 | |||
| 729 | static ssize_t event_show(struct device *dev, | ||
| 730 | struct device_attribute *attr, | ||
| 731 | char *buf) | ||
| 732 | { | ||
| 733 | unsigned long val; | ||
| 734 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 735 | |||
| 736 | val = drvdata->eventctrl0; | ||
| 737 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 738 | } | ||
| 739 | 265 | ||
| 740 | static ssize_t event_store(struct device *dev, | 266 | dev_info(drvdata->dev, "ETM tracing enabled\n"); |
| 741 | struct device_attribute *attr, | 267 | return 0; |
| 742 | const char *buf, size_t size) | ||
| 743 | { | ||
| 744 | unsigned long val; | ||
| 745 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 746 | |||
| 747 | if (kstrtoul(buf, 16, &val)) | ||
| 748 | return -EINVAL; | ||
| 749 | 268 | ||
| 750 | spin_lock(&drvdata->spinlock); | 269 | err: |
| 751 | switch (drvdata->nr_event) { | ||
| 752 | case 0x0: | ||
| 753 | /* EVENT0, bits[7:0] */ | ||
| 754 | drvdata->eventctrl0 = val & 0xFF; | ||
| 755 | break; | ||
| 756 | case 0x1: | ||
| 757 | /* EVENT1, bits[15:8] */ | ||
| 758 | drvdata->eventctrl0 = val & 0xFFFF; | ||
| 759 | break; | ||
| 760 | case 0x2: | ||
| 761 | /* EVENT2, bits[23:16] */ | ||
| 762 | drvdata->eventctrl0 = val & 0xFFFFFF; | ||
| 763 | break; | ||
| 764 | case 0x3: | ||
| 765 | /* EVENT3, bits[31:24] */ | ||
| 766 | drvdata->eventctrl0 = val; | ||
| 767 | break; | ||
| 768 | default: | ||
| 769 | break; | ||
| 770 | } | ||
| 771 | spin_unlock(&drvdata->spinlock); | 270 | spin_unlock(&drvdata->spinlock); |
| 772 | return size; | 271 | return ret; |
| 773 | } | 272 | } |
| 774 | static DEVICE_ATTR_RW(event); | ||
| 775 | 273 | ||
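The sysfs enable path, by contrast, can be invoked from any CPU, so etm4_enable_sysfs() uses smp_call_function_single() under the spinlock to run etm4_enable_hw() on the tracer's CPU. A rough userspace analogy of "do the work on a chosen CPU" via thread affinity; the kernel primitive has no direct userspace equivalent, and CPU 0 here is just an example target:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *do_enable(void *arg)
{
	(void)arg;
	/* Runs pinned to the target CPU, as the register writes must */
	printf("programming tracer from CPU %d\n", sched_getcpu());
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_attr_t a;
	cpu_set_t set;
	int target_cpu = 0;	/* would be drvdata->cpu in the driver */

	CPU_ZERO(&set);
	CPU_SET(target_cpu, &set);
	pthread_attr_init(&a);
	pthread_attr_setaffinity_np(&a, sizeof(set), &set);
	pthread_create(&t, &a, do_enable, NULL);
	pthread_join(t, NULL);
	return 0;
}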
| 776 | static ssize_t event_instren_show(struct device *dev, | 274 | static int etm4_enable(struct coresight_device *csdev, |
| 777 | struct device_attribute *attr, | 275 | struct perf_event_attr *attr, u32 mode) |
| 778 | char *buf) | ||
| 779 | { | 276 | { |
| 780 | unsigned long val; | 277 | int ret; |
| 781 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | 278 | u32 val; |
| 782 | 279 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | |
| 783 | val = BMVAL(drvdata->eventctrl1, 0, 3); | ||
| 784 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 785 | } | ||
| 786 | 280 | ||
| 787 | static ssize_t event_instren_store(struct device *dev, | 281 | val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); |
| 788 | struct device_attribute *attr, | ||
| 789 | const char *buf, size_t size) | ||
| 790 | { | ||
| 791 | unsigned long val; | ||
| 792 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 793 | 282 | ||
| 794 | if (kstrtoul(buf, 16, &val)) | 283 | /* Someone is already using the tracer */ |
| 795 | return -EINVAL; | 284 | if (val) |
| 285 | return -EBUSY; | ||
| 796 | 286 | ||
| 797 | spin_lock(&drvdata->spinlock); | 287 | switch (mode) { |
| 798 | /* start by clearing all instruction event enable bits */ | 288 | case CS_MODE_SYSFS: |
| 799 | drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); | 289 | ret = etm4_enable_sysfs(csdev); |
| 800 | switch (drvdata->nr_event) { | ||
| 801 | case 0x0: | ||
| 802 | /* generate Event element for event 1 */ | ||
| 803 | drvdata->eventctrl1 |= val & BIT(1); | ||
| 804 | break; | 290 | break; |
| 805 | case 0x1: | 291 | case CS_MODE_PERF: |
| 806 | /* generate Event element for event 1 and 2 */ | 292 | ret = etm4_enable_perf(csdev, attr); |
| 807 | drvdata->eventctrl1 |= val & (BIT(0) | BIT(1)); | ||
| 808 | break; | ||
| 809 | case 0x2: | ||
| 810 | /* generate Event element for event 1, 2 and 3 */ | ||
| 811 | drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2)); | ||
| 812 | break; | ||
| 813 | case 0x3: | ||
| 814 | /* generate Event element for all 4 events */ | ||
| 815 | drvdata->eventctrl1 |= val & 0xF; | ||
| 816 | break; | 293 | break; |
| 817 | default: | 294 | default: |
| 818 | break; | 295 | ret = -EINVAL; |
| 819 | } | ||
| 820 | spin_unlock(&drvdata->spinlock); | ||
| 821 | return size; | ||
| 822 | } | ||
| 823 | static DEVICE_ATTR_RW(event_instren); | ||
| 824 | |||
| 825 | static ssize_t event_ts_show(struct device *dev, | ||
| 826 | struct device_attribute *attr, | ||
| 827 | char *buf) | ||
| 828 | { | ||
| 829 | unsigned long val; | ||
| 830 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 831 | |||
| 832 | val = drvdata->ts_ctrl; | ||
| 833 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 834 | } | ||
| 835 | |||
| 836 | static ssize_t event_ts_store(struct device *dev, | ||
| 837 | struct device_attribute *attr, | ||
| 838 | const char *buf, size_t size) | ||
| 839 | { | ||
| 840 | unsigned long val; | ||
| 841 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 842 | |||
| 843 | if (kstrtoul(buf, 16, &val)) | ||
| 844 | return -EINVAL; | ||
| 845 | if (!drvdata->ts_size) | ||
| 846 | return -EINVAL; | ||
| 847 | |||
| 848 | drvdata->ts_ctrl = val & ETMv4_EVENT_MASK; | ||
| 849 | return size; | ||
| 850 | } | ||
| 851 | static DEVICE_ATTR_RW(event_ts); | ||
| 852 | |||
| 853 | static ssize_t syncfreq_show(struct device *dev, | ||
| 854 | struct device_attribute *attr, | ||
| 855 | char *buf) | ||
| 856 | { | ||
| 857 | unsigned long val; | ||
| 858 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 859 | |||
| 860 | val = drvdata->syncfreq; | ||
| 861 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 862 | } | ||
| 863 | |||
| 864 | static ssize_t syncfreq_store(struct device *dev, | ||
| 865 | struct device_attribute *attr, | ||
| 866 | const char *buf, size_t size) | ||
| 867 | { | ||
| 868 | unsigned long val; | ||
| 869 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 870 | |||
| 871 | if (kstrtoul(buf, 16, &val)) | ||
| 872 | return -EINVAL; | ||
| 873 | if (drvdata->syncpr == true) | ||
| 874 | return -EINVAL; | ||
| 875 | |||
| 876 | drvdata->syncfreq = val & ETMv4_SYNC_MASK; | ||
| 877 | return size; | ||
| 878 | } | ||
| 879 | static DEVICE_ATTR_RW(syncfreq); | ||
| 880 | |||
| 881 | static ssize_t cyc_threshold_show(struct device *dev, | ||
| 882 | struct device_attribute *attr, | ||
| 883 | char *buf) | ||
| 884 | { | ||
| 885 | unsigned long val; | ||
| 886 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 887 | |||
| 888 | val = drvdata->ccctlr; | ||
| 889 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 890 | } | ||
| 891 | |||
| 892 | static ssize_t cyc_threshold_store(struct device *dev, | ||
| 893 | struct device_attribute *attr, | ||
| 894 | const char *buf, size_t size) | ||
| 895 | { | ||
| 896 | unsigned long val; | ||
| 897 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 898 | |||
| 899 | if (kstrtoul(buf, 16, &val)) | ||
| 900 | return -EINVAL; | ||
| 901 | if (val < drvdata->ccitmin) | ||
| 902 | return -EINVAL; | ||
| 903 | |||
| 904 | drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK; | ||
| 905 | return size; | ||
| 906 | } | ||
| 907 | static DEVICE_ATTR_RW(cyc_threshold); | ||
| 908 | |||
| 909 | static ssize_t bb_ctrl_show(struct device *dev, | ||
| 910 | struct device_attribute *attr, | ||
| 911 | char *buf) | ||
| 912 | { | ||
| 913 | unsigned long val; | ||
| 914 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 915 | |||
| 916 | val = drvdata->bb_ctrl; | ||
| 917 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 918 | } | ||
| 919 | |||
| 920 | static ssize_t bb_ctrl_store(struct device *dev, | ||
| 921 | struct device_attribute *attr, | ||
| 922 | const char *buf, size_t size) | ||
| 923 | { | ||
| 924 | unsigned long val; | ||
| 925 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 926 | |||
| 927 | if (kstrtoul(buf, 16, &val)) | ||
| 928 | return -EINVAL; | ||
| 929 | if (drvdata->trcbb == false) | ||
| 930 | return -EINVAL; | ||
| 931 | if (!drvdata->nr_addr_cmp) | ||
| 932 | return -EINVAL; | ||
| 933 | /* | ||
| 934 | * Bit[7:0] selects which address range comparator is used for | ||
| 935 | * branch broadcast control. | ||
| 936 | */ | ||
| 937 | if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp) | ||
| 938 | return -EINVAL; | ||
| 939 | |||
| 940 | drvdata->bb_ctrl = val; | ||
| 941 | return size; | ||
| 942 | } | ||
| 943 | static DEVICE_ATTR_RW(bb_ctrl); | ||
| 944 | |||
| 945 | static ssize_t event_vinst_show(struct device *dev, | ||
| 946 | struct device_attribute *attr, | ||
| 947 | char *buf) | ||
| 948 | { | ||
| 949 | unsigned long val; | ||
| 950 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 951 | |||
| 952 | val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK; | ||
| 953 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 954 | } | ||
| 955 | |||
| 956 | static ssize_t event_vinst_store(struct device *dev, | ||
| 957 | struct device_attribute *attr, | ||
| 958 | const char *buf, size_t size) | ||
| 959 | { | ||
| 960 | unsigned long val; | ||
| 961 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 962 | |||
| 963 | if (kstrtoul(buf, 16, &val)) | ||
| 964 | return -EINVAL; | ||
| 965 | |||
| 966 | spin_lock(&drvdata->spinlock); | ||
| 967 | val &= ETMv4_EVENT_MASK; | ||
| 968 | drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK; | ||
| 969 | drvdata->vinst_ctrl |= val; | ||
| 970 | spin_unlock(&drvdata->spinlock); | ||
| 971 | return size; | ||
| 972 | } | ||
| 973 | static DEVICE_ATTR_RW(event_vinst); | ||
| 974 | |||
| 975 | static ssize_t s_exlevel_vinst_show(struct device *dev, | ||
| 976 | struct device_attribute *attr, | ||
| 977 | char *buf) | ||
| 978 | { | ||
| 979 | unsigned long val; | ||
| 980 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 981 | |||
| 982 | val = BMVAL(drvdata->vinst_ctrl, 16, 19); | ||
| 983 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 984 | } | ||
| 985 | |||
| 986 | static ssize_t s_exlevel_vinst_store(struct device *dev, | ||
| 987 | struct device_attribute *attr, | ||
| 988 | const char *buf, size_t size) | ||
| 989 | { | ||
| 990 | unsigned long val; | ||
| 991 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 992 | |||
| 993 | if (kstrtoul(buf, 16, &val)) | ||
| 994 | return -EINVAL; | ||
| 995 | |||
| 996 | spin_lock(&drvdata->spinlock); | ||
| 997 | /* clear all EXLEVEL_S bits (bit[18] is never implemented) */ | ||
| 998 | drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19)); | ||
| 999 | /* enable instruction tracing for corresponding exception level */ | ||
| 1000 | val &= drvdata->s_ex_level; | ||
| 1001 | drvdata->vinst_ctrl |= (val << 16); | ||
| 1002 | spin_unlock(&drvdata->spinlock); | ||
| 1003 | return size; | ||
| 1004 | } | ||
| 1005 | static DEVICE_ATTR_RW(s_exlevel_vinst); | ||
| 1006 | |||
| 1007 | static ssize_t ns_exlevel_vinst_show(struct device *dev, | ||
| 1008 | struct device_attribute *attr, | ||
| 1009 | char *buf) | ||
| 1010 | { | ||
| 1011 | unsigned long val; | ||
| 1012 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1013 | |||
| 1014 | /* EXLEVEL_NS, bits[23:20] */ | ||
| 1015 | val = BMVAL(drvdata->vinst_ctrl, 20, 23); | ||
| 1016 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | static ssize_t ns_exlevel_vinst_store(struct device *dev, | ||
| 1020 | struct device_attribute *attr, | ||
| 1021 | const char *buf, size_t size) | ||
| 1022 | { | ||
| 1023 | unsigned long val; | ||
| 1024 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1025 | |||
| 1026 | if (kstrtoul(buf, 16, &val)) | ||
| 1027 | return -EINVAL; | ||
| 1028 | |||
| 1029 | spin_lock(&drvdata->spinlock); | ||
| 1030 | /* clear EXLEVEL_NS bits (bit[23] is never implemented) */ | ||
| 1031 | drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22)); | ||
| 1032 | /* enable instruction tracing for corresponding exception level */ | ||
| 1033 | val &= drvdata->ns_ex_level; | ||
| 1034 | drvdata->vinst_ctrl |= (val << 20); | ||
| 1035 | spin_unlock(&drvdata->spinlock); | ||
| 1036 | return size; | ||
| 1037 | } | ||
| 1038 | static DEVICE_ATTR_RW(ns_exlevel_vinst); | ||
| 1039 | |||
| 1040 | static ssize_t addr_idx_show(struct device *dev, | ||
| 1041 | struct device_attribute *attr, | ||
| 1042 | char *buf) | ||
| 1043 | { | ||
| 1044 | unsigned long val; | ||
| 1045 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1046 | |||
| 1047 | val = drvdata->addr_idx; | ||
| 1048 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | static ssize_t addr_idx_store(struct device *dev, | ||
| 1052 | struct device_attribute *attr, | ||
| 1053 | const char *buf, size_t size) | ||
| 1054 | { | ||
| 1055 | unsigned long val; | ||
| 1056 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1057 | |||
| 1058 | if (kstrtoul(buf, 16, &val)) | ||
| 1059 | return -EINVAL; | ||
| 1060 | if (val >= drvdata->nr_addr_cmp * 2) | ||
| 1061 | return -EINVAL; | ||
| 1062 | |||
| 1063 | /* | ||
| 1064 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1065 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1066 | */ | ||
| 1067 | spin_lock(&drvdata->spinlock); | ||
| 1068 | drvdata->addr_idx = val; | ||
| 1069 | spin_unlock(&drvdata->spinlock); | ||
| 1070 | return size; | ||
| 1071 | } | ||
| 1072 | static DEVICE_ATTR_RW(addr_idx); | ||
| 1073 | |||
| 1074 | static ssize_t addr_instdatatype_show(struct device *dev, | ||
| 1075 | struct device_attribute *attr, | ||
| 1076 | char *buf) | ||
| 1077 | { | ||
| 1078 | ssize_t len; | ||
| 1079 | u8 val, idx; | ||
| 1080 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1081 | |||
| 1082 | spin_lock(&drvdata->spinlock); | ||
| 1083 | idx = drvdata->addr_idx; | ||
| 1084 | val = BMVAL(drvdata->addr_acc[idx], 0, 1); | ||
| 1085 | len = scnprintf(buf, PAGE_SIZE, "%s\n", | ||
| 1086 | val == ETM_INSTR_ADDR ? "instr" : | ||
| 1087 | (val == ETM_DATA_LOAD_ADDR ? "data_load" : | ||
| 1088 | (val == ETM_DATA_STORE_ADDR ? "data_store" : | ||
| 1089 | "data_load_store"))); | ||
| 1090 | spin_unlock(&drvdata->spinlock); | ||
| 1091 | return len; | ||
| 1092 | } | ||
| 1093 | |||
| 1094 | static ssize_t addr_instdatatype_store(struct device *dev, | ||
| 1095 | struct device_attribute *attr, | ||
| 1096 | const char *buf, size_t size) | ||
| 1097 | { | ||
| 1098 | u8 idx; | ||
| 1099 | char str[20] = ""; | ||
| 1100 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1101 | |||
| 1102 | if (strlen(buf) >= 20) | ||
| 1103 | return -EINVAL; | ||
| 1104 | if (sscanf(buf, "%s", str) != 1) | ||
| 1105 | return -EINVAL; | ||
| 1106 | |||
| 1107 | spin_lock(&drvdata->spinlock); | ||
| 1108 | idx = drvdata->addr_idx; | ||
| 1109 | if (!strcmp(str, "instr")) | ||
| 1110 | /* TYPE, bits[1:0] */ | ||
| 1111 | drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1)); | ||
| 1112 | |||
| 1113 | spin_unlock(&drvdata->spinlock); | ||
| 1114 | return size; | ||
| 1115 | } | ||
| 1116 | static DEVICE_ATTR_RW(addr_instdatatype); | ||
| 1117 | |||
| 1118 | static ssize_t addr_single_show(struct device *dev, | ||
| 1119 | struct device_attribute *attr, | ||
| 1120 | char *buf) | ||
| 1121 | { | ||
| 1122 | u8 idx; | ||
| 1123 | unsigned long val; | ||
| 1124 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1125 | |||
| 1126 | idx = drvdata->addr_idx; | ||
| 1127 | spin_lock(&drvdata->spinlock); | ||
| 1128 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1129 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
| 1130 | spin_unlock(&drvdata->spinlock); | ||
| 1131 | return -EPERM; | ||
| 1132 | } | ||
| 1133 | val = (unsigned long)drvdata->addr_val[idx]; | ||
| 1134 | spin_unlock(&drvdata->spinlock); | ||
| 1135 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | static ssize_t addr_single_store(struct device *dev, | ||
| 1139 | struct device_attribute *attr, | ||
| 1140 | const char *buf, size_t size) | ||
| 1141 | { | ||
| 1142 | u8 idx; | ||
| 1143 | unsigned long val; | ||
| 1144 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1145 | |||
| 1146 | if (kstrtoul(buf, 16, &val)) | ||
| 1147 | return -EINVAL; | ||
| 1148 | |||
| 1149 | spin_lock(&drvdata->spinlock); | ||
| 1150 | idx = drvdata->addr_idx; | ||
| 1151 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1152 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | ||
| 1153 | spin_unlock(&drvdata->spinlock); | ||
| 1154 | return -EPERM; | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | drvdata->addr_val[idx] = (u64)val; | ||
| 1158 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; | ||
| 1159 | spin_unlock(&drvdata->spinlock); | ||
| 1160 | return size; | ||
| 1161 | } | ||
| 1162 | static DEVICE_ATTR_RW(addr_single); | ||
| 1163 | |||
| 1164 | static ssize_t addr_range_show(struct device *dev, | ||
| 1165 | struct device_attribute *attr, | ||
| 1166 | char *buf) | ||
| 1167 | { | ||
| 1168 | u8 idx; | ||
| 1169 | unsigned long val1, val2; | ||
| 1170 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1171 | |||
| 1172 | spin_lock(&drvdata->spinlock); | ||
| 1173 | idx = drvdata->addr_idx; | ||
| 1174 | if (idx % 2 != 0) { | ||
| 1175 | spin_unlock(&drvdata->spinlock); | ||
| 1176 | return -EPERM; | ||
| 1177 | } | ||
| 1178 | if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
| 1179 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
| 1180 | (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
| 1181 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
| 1182 | spin_unlock(&drvdata->spinlock); | ||
| 1183 | return -EPERM; | ||
| 1184 | } | ||
| 1185 | |||
| 1186 | val1 = (unsigned long)drvdata->addr_val[idx]; | ||
| 1187 | val2 = (unsigned long)drvdata->addr_val[idx + 1]; | ||
| 1188 | spin_unlock(&drvdata->spinlock); | ||
| 1189 | return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | static ssize_t addr_range_store(struct device *dev, | ||
| 1193 | struct device_attribute *attr, | ||
| 1194 | const char *buf, size_t size) | ||
| 1195 | { | ||
| 1196 | u8 idx; | ||
| 1197 | unsigned long val1, val2; | ||
| 1198 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1199 | |||
| 1200 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
| 1201 | return -EINVAL; | ||
| 1202 | /* lower address comparator cannot have a higher address value */ | ||
| 1203 | if (val1 > val2) | ||
| 1204 | return -EINVAL; | ||
| 1205 | |||
| 1206 | spin_lock(&drvdata->spinlock); | ||
| 1207 | idx = drvdata->addr_idx; | ||
| 1208 | if (idx % 2 != 0) { | ||
| 1209 | spin_unlock(&drvdata->spinlock); | ||
| 1210 | return -EPERM; | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && | ||
| 1214 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | ||
| 1215 | (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | ||
| 1216 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | ||
| 1217 | spin_unlock(&drvdata->spinlock); | ||
| 1218 | return -EPERM; | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | drvdata->addr_val[idx] = (u64)val1; | ||
| 1222 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE; | ||
| 1223 | drvdata->addr_val[idx + 1] = (u64)val2; | ||
| 1224 | drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; | ||
| 1225 | /* | ||
| 1226 | * Program include or exclude control bits for vinst or vdata | ||
| 1227 | * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE | ||
| 1228 | */ | ||
| 1229 | if (drvdata->mode & ETM_MODE_EXCLUDE) | ||
| 1230 | etm4_set_mode_exclude(drvdata, true); | ||
| 1231 | else | ||
| 1232 | etm4_set_mode_exclude(drvdata, false); | ||
| 1233 | |||
| 1234 | spin_unlock(&drvdata->spinlock); | ||
| 1235 | return size; | ||
| 1236 | } | ||
| 1237 | static DEVICE_ATTR_RW(addr_range); | ||
| 1238 | |||
| 1239 | static ssize_t addr_start_show(struct device *dev, | ||
| 1240 | struct device_attribute *attr, | ||
| 1241 | char *buf) | ||
| 1242 | { | ||
| 1243 | u8 idx; | ||
| 1244 | unsigned long val; | ||
| 1245 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1246 | |||
| 1247 | spin_lock(&drvdata->spinlock); | ||
| 1248 | idx = drvdata->addr_idx; | ||
| 1249 | |||
| 1250 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1251 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
| 1252 | spin_unlock(&drvdata->spinlock); | ||
| 1253 | return -EPERM; | ||
| 1254 | } | ||
| 1255 | |||
| 1256 | val = (unsigned long)drvdata->addr_val[idx]; | ||
| 1257 | spin_unlock(&drvdata->spinlock); | ||
| 1258 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1259 | } | ||
| 1260 | |||
| 1261 | static ssize_t addr_start_store(struct device *dev, | ||
| 1262 | struct device_attribute *attr, | ||
| 1263 | const char *buf, size_t size) | ||
| 1264 | { | ||
| 1265 | u8 idx; | ||
| 1266 | unsigned long val; | ||
| 1267 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1268 | |||
| 1269 | if (kstrtoul(buf, 16, &val)) | ||
| 1270 | return -EINVAL; | ||
| 1271 | |||
| 1272 | spin_lock(&drvdata->spinlock); | ||
| 1273 | idx = drvdata->addr_idx; | ||
| 1274 | if (!drvdata->nr_addr_cmp) { | ||
| 1275 | spin_unlock(&drvdata->spinlock); | ||
| 1276 | return -EINVAL; | ||
| 1277 | } | ||
| 1278 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1279 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { | ||
| 1280 | spin_unlock(&drvdata->spinlock); | ||
| 1281 | return -EPERM; | ||
| 1282 | } | ||
| 1283 | |||
| 1284 | drvdata->addr_val[idx] = (u64)val; | ||
| 1285 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_START; | ||
| 1286 | drvdata->vissctlr |= BIT(idx); | ||
| 1287 | /* SSSTATUS, bit[9] - turn on start/stop logic */ | ||
| 1288 | drvdata->vinst_ctrl |= BIT(9); | ||
| 1289 | spin_unlock(&drvdata->spinlock); | ||
| 1290 | return size; | ||
| 1291 | } | ||
| 1292 | static DEVICE_ATTR_RW(addr_start); | ||
| 1293 | |||
| 1294 | static ssize_t addr_stop_show(struct device *dev, | ||
| 1295 | struct device_attribute *attr, | ||
| 1296 | char *buf) | ||
| 1297 | { | ||
| 1298 | u8 idx; | ||
| 1299 | unsigned long val; | ||
| 1300 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1301 | |||
| 1302 | spin_lock(&drvdata->spinlock); | ||
| 1303 | idx = drvdata->addr_idx; | ||
| 1304 | |||
| 1305 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1306 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
| 1307 | spin_unlock(&drvdata->spinlock); | ||
| 1308 | return -EPERM; | ||
| 1309 | } | ||
| 1310 | |||
| 1311 | val = (unsigned long)drvdata->addr_val[idx]; | ||
| 1312 | spin_unlock(&drvdata->spinlock); | ||
| 1313 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1314 | } | ||
| 1315 | |||
| 1316 | static ssize_t addr_stop_store(struct device *dev, | ||
| 1317 | struct device_attribute *attr, | ||
| 1318 | const char *buf, size_t size) | ||
| 1319 | { | ||
| 1320 | u8 idx; | ||
| 1321 | unsigned long val; | ||
| 1322 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1323 | |||
| 1324 | if (kstrtoul(buf, 16, &val)) | ||
| 1325 | return -EINVAL; | ||
| 1326 | |||
| 1327 | spin_lock(&drvdata->spinlock); | ||
| 1328 | idx = drvdata->addr_idx; | ||
| 1329 | if (!drvdata->nr_addr_cmp) { | ||
| 1330 | spin_unlock(&drvdata->spinlock); | ||
| 1331 | return -EINVAL; | ||
| 1332 | } | ||
| 1333 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | ||
| 1334 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | ||
| 1335 | spin_unlock(&drvdata->spinlock); | ||
| 1336 | return -EPERM; | ||
| 1337 | } | ||
| 1338 | |||
| 1339 | drvdata->addr_val[idx] = (u64)val; | ||
| 1340 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP; | ||
| 1341 | drvdata->vissctlr |= BIT(idx + 16); | ||
| 1342 | /* SSSTATUS, bit[9] - turn on start/stop logic */ | ||
| 1343 | drvdata->vinst_ctrl |= BIT(9); | ||
| 1344 | spin_unlock(&drvdata->spinlock); | ||
| 1345 | return size; | ||
| 1346 | } | ||
| 1347 | static DEVICE_ATTR_RW(addr_stop); | ||
| 1348 | |||
| 1349 | static ssize_t addr_ctxtype_show(struct device *dev, | ||
| 1350 | struct device_attribute *attr, | ||
| 1351 | char *buf) | ||
| 1352 | { | ||
| 1353 | ssize_t len; | ||
| 1354 | u8 idx, val; | ||
| 1355 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1356 | |||
| 1357 | spin_lock(&drvdata->spinlock); | ||
| 1358 | idx = drvdata->addr_idx; | ||
| 1359 | /* CONTEXTTYPE, bits[3:2] */ | ||
| 1360 | val = BMVAL(drvdata->addr_acc[idx], 2, 3); | ||
| 1361 | len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" : | ||
| 1362 | (val == ETM_CTX_CTXID ? "ctxid" : | ||
| 1363 | (val == ETM_CTX_VMID ? "vmid" : "all"))); | ||
| 1364 | spin_unlock(&drvdata->spinlock); | ||
| 1365 | return len; | ||
| 1366 | } | ||
| 1367 | |||
| 1368 | static ssize_t addr_ctxtype_store(struct device *dev, | ||
| 1369 | struct device_attribute *attr, | ||
| 1370 | const char *buf, size_t size) | ||
| 1371 | { | ||
| 1372 | u8 idx; | ||
| 1373 | char str[10] = ""; | ||
| 1374 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1375 | |||
| 1376 | if (strlen(buf) >= 10) | ||
| 1377 | return -EINVAL; | ||
| 1378 | if (sscanf(buf, "%s", str) != 1) | ||
| 1379 | return -EINVAL; | ||
| 1380 | |||
| 1381 | spin_lock(&drvdata->spinlock); | ||
| 1382 | idx = drvdata->addr_idx; | ||
| 1383 | if (!strcmp(str, "none")) | ||
| 1384 | /* start by clearing context type bits */ | ||
| 1385 | drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3)); | ||
| 1386 | else if (!strcmp(str, "ctxid")) { | ||
| 1387 | /* 0b01 The trace unit performs a Context ID comparison */ | ||
| 1388 | if (drvdata->numcidc) { | ||
| 1389 | drvdata->addr_acc[idx] |= BIT(2); | ||
| 1390 | drvdata->addr_acc[idx] &= ~BIT(3); | ||
| 1391 | } | ||
| 1392 | } else if (!strcmp(str, "vmid")) { | ||
| 1393 | /* 0b10 The trace unit performs a VMID comparison */ | ||
| 1394 | if (drvdata->numvmidc) { | ||
| 1395 | drvdata->addr_acc[idx] &= ~BIT(2); | ||
| 1396 | drvdata->addr_acc[idx] |= BIT(3); | ||
| 1397 | } | ||
| 1398 | } else if (!strcmp(str, "all")) { | ||
| 1399 | /* | ||
| 1400 | * 0b11 The trace unit performs a Context ID | ||
| 1401 | * comparison and a VMID comparison | ||
| 1402 | */ | ||
| 1403 | if (drvdata->numcidc) | ||
| 1404 | drvdata->addr_acc[idx] |= BIT(2); | ||
| 1405 | if (drvdata->numvmidc) | ||
| 1406 | drvdata->addr_acc[idx] |= BIT(3); | ||
| 1407 | } | 296 | } |
| 1408 | spin_unlock(&drvdata->spinlock); | ||
| 1409 | return size; | ||
| 1410 | } | ||
| 1411 | static DEVICE_ATTR_RW(addr_ctxtype); | ||
| 1412 | |||
| 1413 | static ssize_t addr_context_show(struct device *dev, | ||
| 1414 | struct device_attribute *attr, | ||
| 1415 | char *buf) | ||
| 1416 | { | ||
| 1417 | u8 idx; | ||
| 1418 | unsigned long val; | ||
| 1419 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1420 | |||
| 1421 | spin_lock(&drvdata->spinlock); | ||
| 1422 | idx = drvdata->addr_idx; | ||
| 1423 | /* context ID comparator bits[6:4] */ | ||
| 1424 | val = BMVAL(drvdata->addr_acc[idx], 4, 6); | ||
| 1425 | spin_unlock(&drvdata->spinlock); | ||
| 1426 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1427 | } | ||
| 1428 | |||
| 1429 | static ssize_t addr_context_store(struct device *dev, | ||
| 1430 | struct device_attribute *attr, | ||
| 1431 | const char *buf, size_t size) | ||
| 1432 | { | ||
| 1433 | u8 idx; | ||
| 1434 | unsigned long val; | ||
| 1435 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1436 | |||
| 1437 | if (kstrtoul(buf, 16, &val)) | ||
| 1438 | return -EINVAL; | ||
| 1439 | if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1)) | ||
| 1440 | return -EINVAL; | ||
| 1441 | if (val >= (drvdata->numcidc >= drvdata->numvmidc ? | ||
| 1442 | drvdata->numcidc : drvdata->numvmidc)) | ||
| 1443 | return -EINVAL; | ||
| 1444 | |||
| 1445 | spin_lock(&drvdata->spinlock); | ||
| 1446 | idx = drvdata->addr_idx; | ||
| 1447 | /* clear context ID comparator bits[6:4] */ | ||
| 1448 | drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6)); | ||
| 1449 | drvdata->addr_acc[idx] |= (val << 4); | ||
| 1450 | spin_unlock(&drvdata->spinlock); | ||
| 1451 | return size; | ||
| 1452 | } | ||
| 1453 | static DEVICE_ATTR_RW(addr_context); | ||
| 1454 | |||
| 1455 | static ssize_t seq_idx_show(struct device *dev, | ||
| 1456 | struct device_attribute *attr, | ||
| 1457 | char *buf) | ||
| 1458 | { | ||
| 1459 | unsigned long val; | ||
| 1460 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1461 | |||
| 1462 | val = drvdata->seq_idx; | ||
| 1463 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1464 | } | ||
| 1465 | |||
| 1466 | static ssize_t seq_idx_store(struct device *dev, | ||
| 1467 | struct device_attribute *attr, | ||
| 1468 | const char *buf, size_t size) | ||
| 1469 | { | ||
| 1470 | unsigned long val; | ||
| 1471 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1472 | |||
| 1473 | if (kstrtoul(buf, 16, &val)) | ||
| 1474 | return -EINVAL; | ||
| 1475 | if (val >= drvdata->nrseqstate - 1) | ||
| 1476 | return -EINVAL; | ||
| 1477 | |||
| 1478 | /* | ||
| 1479 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1480 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1481 | */ | ||
| 1482 | spin_lock(&drvdata->spinlock); | ||
| 1483 | drvdata->seq_idx = val; | ||
| 1484 | spin_unlock(&drvdata->spinlock); | ||
| 1485 | return size; | ||
| 1486 | } | ||
| 1487 | static DEVICE_ATTR_RW(seq_idx); | ||
| 1488 | |||
| 1489 | static ssize_t seq_state_show(struct device *dev, | ||
| 1490 | struct device_attribute *attr, | ||
| 1491 | char *buf) | ||
| 1492 | { | ||
| 1493 | unsigned long val; | ||
| 1494 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1495 | 297 | ||
| 1496 | val = drvdata->seq_state; | 298 | /* The tracer didn't start */ |
| 1497 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | 299 | if (ret) |
| 1498 | } | 300 | local_set(&drvdata->mode, CS_MODE_DISABLED); |
| 1499 | |||
| 1500 | static ssize_t seq_state_store(struct device *dev, | ||
| 1501 | struct device_attribute *attr, | ||
| 1502 | const char *buf, size_t size) | ||
| 1503 | { | ||
| 1504 | unsigned long val; | ||
| 1505 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1506 | |||
| 1507 | if (kstrtoul(buf, 16, &val)) | ||
| 1508 | return -EINVAL; | ||
| 1509 | if (val >= drvdata->nrseqstate) | ||
| 1510 | return -EINVAL; | ||
| 1511 | |||
| 1512 | drvdata->seq_state = val; | ||
| 1513 | return size; | ||
| 1514 | } | ||
| 1515 | static DEVICE_ATTR_RW(seq_state); | ||
| 1516 | |||
| 1517 | static ssize_t seq_event_show(struct device *dev, | ||
| 1518 | struct device_attribute *attr, | ||
| 1519 | char *buf) | ||
| 1520 | { | ||
| 1521 | u8 idx; | ||
| 1522 | unsigned long val; | ||
| 1523 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1524 | |||
| 1525 | spin_lock(&drvdata->spinlock); | ||
| 1526 | idx = drvdata->seq_idx; | ||
| 1527 | val = drvdata->seq_ctrl[idx]; | ||
| 1528 | spin_unlock(&drvdata->spinlock); | ||
| 1529 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1530 | } | ||
| 1531 | |||
| 1532 | static ssize_t seq_event_store(struct device *dev, | ||
| 1533 | struct device_attribute *attr, | ||
| 1534 | const char *buf, size_t size) | ||
| 1535 | { | ||
| 1536 | u8 idx; | ||
| 1537 | unsigned long val; | ||
| 1538 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1539 | |||
| 1540 | if (kstrtoul(buf, 16, &val)) | ||
| 1541 | return -EINVAL; | ||
| 1542 | |||
| 1543 | spin_lock(&drvdata->spinlock); | ||
| 1544 | idx = drvdata->seq_idx; | ||
| 1545 | /* RST, bits[7:0] */ | ||
| 1546 | drvdata->seq_ctrl[idx] = val & 0xFF; | ||
| 1547 | spin_unlock(&drvdata->spinlock); | ||
| 1548 | return size; | ||
| 1549 | } | ||
| 1550 | static DEVICE_ATTR_RW(seq_event); | ||
| 1551 | |||
| 1552 | static ssize_t seq_reset_event_show(struct device *dev, | ||
| 1553 | struct device_attribute *attr, | ||
| 1554 | char *buf) | ||
| 1555 | { | ||
| 1556 | unsigned long val; | ||
| 1557 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1558 | |||
| 1559 | val = drvdata->seq_rst; | ||
| 1560 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1561 | } | ||
| 1562 | |||
| 1563 | static ssize_t seq_reset_event_store(struct device *dev, | ||
| 1564 | struct device_attribute *attr, | ||
| 1565 | const char *buf, size_t size) | ||
| 1566 | { | ||
| 1567 | unsigned long val; | ||
| 1568 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1569 | |||
| 1570 | if (kstrtoul(buf, 16, &val)) | ||
| 1571 | return -EINVAL; | ||
| 1572 | if (!(drvdata->nrseqstate)) | ||
| 1573 | return -EINVAL; | ||
| 1574 | |||
| 1575 | drvdata->seq_rst = val & ETMv4_EVENT_MASK; | ||
| 1576 | return size; | ||
| 1577 | } | ||
| 1578 | static DEVICE_ATTR_RW(seq_reset_event); | ||
| 1579 | |||
| 1580 | static ssize_t cntr_idx_show(struct device *dev, | ||
| 1581 | struct device_attribute *attr, | ||
| 1582 | char *buf) | ||
| 1583 | { | ||
| 1584 | unsigned long val; | ||
| 1585 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1586 | |||
| 1587 | val = drvdata->cntr_idx; | ||
| 1588 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1589 | } | ||
| 1590 | |||
| 1591 | static ssize_t cntr_idx_store(struct device *dev, | ||
| 1592 | struct device_attribute *attr, | ||
| 1593 | const char *buf, size_t size) | ||
| 1594 | { | ||
| 1595 | unsigned long val; | ||
| 1596 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1597 | |||
| 1598 | if (kstrtoul(buf, 16, &val)) | ||
| 1599 | return -EINVAL; | ||
| 1600 | if (val >= drvdata->nr_cntr) | ||
| 1601 | return -EINVAL; | ||
| 1602 | |||
| 1603 | /* | ||
| 1604 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1605 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1606 | */ | ||
| 1607 | spin_lock(&drvdata->spinlock); | ||
| 1608 | drvdata->cntr_idx = val; | ||
| 1609 | spin_unlock(&drvdata->spinlock); | ||
| 1610 | return size; | ||
| 1611 | } | ||
| 1612 | static DEVICE_ATTR_RW(cntr_idx); | ||
| 1613 | |||
| 1614 | static ssize_t cntrldvr_show(struct device *dev, | ||
| 1615 | struct device_attribute *attr, | ||
| 1616 | char *buf) | ||
| 1617 | { | ||
| 1618 | u8 idx; | ||
| 1619 | unsigned long val; | ||
| 1620 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1621 | |||
| 1622 | spin_lock(&drvdata->spinlock); | ||
| 1623 | idx = drvdata->cntr_idx; | ||
| 1624 | val = drvdata->cntrldvr[idx]; | ||
| 1625 | spin_unlock(&drvdata->spinlock); | ||
| 1626 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1627 | } | ||
| 1628 | |||
| 1629 | static ssize_t cntrldvr_store(struct device *dev, | ||
| 1630 | struct device_attribute *attr, | ||
| 1631 | const char *buf, size_t size) | ||
| 1632 | { | ||
| 1633 | u8 idx; | ||
| 1634 | unsigned long val; | ||
| 1635 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1636 | |||
| 1637 | if (kstrtoul(buf, 16, &val)) | ||
| 1638 | return -EINVAL; | ||
| 1639 | if (val > ETM_CNTR_MAX_VAL) | ||
| 1640 | return -EINVAL; | ||
| 1641 | |||
| 1642 | spin_lock(&drvdata->spinlock); | ||
| 1643 | idx = drvdata->cntr_idx; | ||
| 1644 | drvdata->cntrldvr[idx] = val; | ||
| 1645 | spin_unlock(&drvdata->spinlock); | ||
| 1646 | return size; | ||
| 1647 | } | ||
| 1648 | static DEVICE_ATTR_RW(cntrldvr); | ||
| 1649 | |||
| 1650 | static ssize_t cntr_val_show(struct device *dev, | ||
| 1651 | struct device_attribute *attr, | ||
| 1652 | char *buf) | ||
| 1653 | { | ||
| 1654 | u8 idx; | ||
| 1655 | unsigned long val; | ||
| 1656 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1657 | |||
| 1658 | spin_lock(&drvdata->spinlock); | ||
| 1659 | idx = drvdata->cntr_idx; | ||
| 1660 | val = drvdata->cntr_val[idx]; | ||
| 1661 | spin_unlock(&drvdata->spinlock); | ||
| 1662 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | static ssize_t cntr_val_store(struct device *dev, | ||
| 1666 | struct device_attribute *attr, | ||
| 1667 | const char *buf, size_t size) | ||
| 1668 | { | ||
| 1669 | u8 idx; | ||
| 1670 | unsigned long val; | ||
| 1671 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1672 | |||
| 1673 | if (kstrtoul(buf, 16, &val)) | ||
| 1674 | return -EINVAL; | ||
| 1675 | if (val > ETM_CNTR_MAX_VAL) | ||
| 1676 | return -EINVAL; | ||
| 1677 | |||
| 1678 | spin_lock(&drvdata->spinlock); | ||
| 1679 | idx = drvdata->cntr_idx; | ||
| 1680 | drvdata->cntr_val[idx] = val; | ||
| 1681 | spin_unlock(&drvdata->spinlock); | ||
| 1682 | return size; | ||
| 1683 | } | ||
| 1684 | static DEVICE_ATTR_RW(cntr_val); | ||
| 1685 | |||
| 1686 | static ssize_t cntr_ctrl_show(struct device *dev, | ||
| 1687 | struct device_attribute *attr, | ||
| 1688 | char *buf) | ||
| 1689 | { | ||
| 1690 | u8 idx; | ||
| 1691 | unsigned long val; | ||
| 1692 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1693 | |||
| 1694 | spin_lock(&drvdata->spinlock); | ||
| 1695 | idx = drvdata->cntr_idx; | ||
| 1696 | val = drvdata->cntr_ctrl[idx]; | ||
| 1697 | spin_unlock(&drvdata->spinlock); | ||
| 1698 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1699 | } | ||
| 1700 | |||
| 1701 | static ssize_t cntr_ctrl_store(struct device *dev, | ||
| 1702 | struct device_attribute *attr, | ||
| 1703 | const char *buf, size_t size) | ||
| 1704 | { | ||
| 1705 | u8 idx; | ||
| 1706 | unsigned long val; | ||
| 1707 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1708 | |||
| 1709 | if (kstrtoul(buf, 16, &val)) | ||
| 1710 | return -EINVAL; | ||
| 1711 | |||
| 1712 | spin_lock(&drvdata->spinlock); | ||
| 1713 | idx = drvdata->cntr_idx; | ||
| 1714 | drvdata->cntr_ctrl[idx] = val; | ||
| 1715 | spin_unlock(&drvdata->spinlock); | ||
| 1716 | return size; | ||
| 1717 | } | ||
| 1718 | static DEVICE_ATTR_RW(cntr_ctrl); | ||
| 1719 | |||
| 1720 | static ssize_t res_idx_show(struct device *dev, | ||
| 1721 | struct device_attribute *attr, | ||
| 1722 | char *buf) | ||
| 1723 | { | ||
| 1724 | unsigned long val; | ||
| 1725 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1726 | |||
| 1727 | val = drvdata->res_idx; | ||
| 1728 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1729 | } | ||
| 1730 | |||
| 1731 | static ssize_t res_idx_store(struct device *dev, | ||
| 1732 | struct device_attribute *attr, | ||
| 1733 | const char *buf, size_t size) | ||
| 1734 | { | ||
| 1735 | unsigned long val; | ||
| 1736 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1737 | |||
| 1738 | if (kstrtoul(buf, 16, &val)) | ||
| 1739 | return -EINVAL; | ||
| 1740 | /* Resource selector pair 0 is always implemented and reserved */ | ||
| 1741 | if (val < 2 || val >= drvdata->nr_resource * 2) | ||
| 1742 | return -EINVAL; | ||
| 1743 | 301 | ||
| 1744 | /* | 302 | return ret; |
| 1745 | * Use spinlock to ensure index doesn't change while it gets | ||
| 1746 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1747 | */ | ||
| 1748 | spin_lock(&drvdata->spinlock); | ||
| 1749 | drvdata->res_idx = val; | ||
| 1750 | spin_unlock(&drvdata->spinlock); | ||
| 1751 | return size; | ||
| 1752 | } | 303 | } |
| 1753 | static DEVICE_ATTR_RW(res_idx); | ||
| 1754 | 304 | ||
| 1755 | static ssize_t res_ctrl_show(struct device *dev, | 305 | static void etm4_disable_hw(void *info) |
| 1756 | struct device_attribute *attr, | ||
| 1757 | char *buf) | ||
| 1758 | { | 306 | { |
| 1759 | u8 idx; | 307 | u32 control; |
| 1760 | unsigned long val; | 308 | struct etmv4_drvdata *drvdata = info; |
| 1761 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1762 | 309 | ||
| 1763 | spin_lock(&drvdata->spinlock); | 310 | CS_UNLOCK(drvdata->base); |
| 1764 | idx = drvdata->res_idx; | ||
| 1765 | val = drvdata->res_ctrl[idx]; | ||
| 1766 | spin_unlock(&drvdata->spinlock); | ||
| 1767 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1768 | } | ||
| 1769 | 311 | ||
| 1770 | static ssize_t res_ctrl_store(struct device *dev, | 312 | control = readl_relaxed(drvdata->base + TRCPRGCTLR); |
| 1771 | struct device_attribute *attr, | ||
| 1772 | const char *buf, size_t size) | ||
| 1773 | { | ||
| 1774 | u8 idx; | ||
| 1775 | unsigned long val; | ||
| 1776 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1777 | 313 | ||
| 1778 | if (kstrtoul(buf, 16, &val)) | 314 | /* EN, bit[0] Trace unit enable bit */ |
| 1779 | return -EINVAL; | 315 | control &= ~0x1; |
| 1780 | 316 | ||
| 1781 | spin_lock(&drvdata->spinlock); | 317 | /* make sure everything completes before disabling */ |
| 1782 | idx = drvdata->res_idx; | 318 | mb(); |
| 1783 | /* For odd idx the pair inversion bit is RES0 */ | 319 | isb(); |
| 1784 | if (idx % 2 != 0) | 320 | writel_relaxed(control, drvdata->base + TRCPRGCTLR); |
| 1785 | /* PAIRINV, bit[21] */ | ||
| 1786 | val &= ~BIT(21); | ||
| 1787 | drvdata->res_ctrl[idx] = val; | ||
| 1788 | spin_unlock(&drvdata->spinlock); | ||
| 1789 | return size; | ||
| 1790 | } | ||
| 1791 | static DEVICE_ATTR_RW(res_ctrl); | ||
| 1792 | 321 | ||
| 1793 | static ssize_t ctxid_idx_show(struct device *dev, | 322 | CS_LOCK(drvdata->base); |
| 1794 | struct device_attribute *attr, | ||
| 1795 | char *buf) | ||
| 1796 | { | ||
| 1797 | unsigned long val; | ||
| 1798 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1799 | 323 | ||
| 1800 | val = drvdata->ctxid_idx; | 324 | dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu); |
| 1801 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1802 | } | 325 | } |
| 1803 | 326 | ||
| 1804 | static ssize_t ctxid_idx_store(struct device *dev, | 327 | static int etm4_disable_perf(struct coresight_device *csdev) |
| 1805 | struct device_attribute *attr, | ||
| 1806 | const char *buf, size_t size) | ||
| 1807 | { | 328 | { |
| 1808 | unsigned long val; | 329 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 1809 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1810 | 330 | ||
| 1811 | if (kstrtoul(buf, 16, &val)) | 331 | if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) |
| 1812 | return -EINVAL; | ||
| 1813 | if (val >= drvdata->numcidc) | ||
| 1814 | return -EINVAL; | 332 | return -EINVAL; |
| 1815 | 333 | ||
| 1816 | /* | 334 | etm4_disable_hw(drvdata); |
| 1817 | * Use spinlock to ensure index doesn't change while it gets | 335 | return 0; |
| 1818 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 1819 | */ | ||
| 1820 | spin_lock(&drvdata->spinlock); | ||
| 1821 | drvdata->ctxid_idx = val; | ||
| 1822 | spin_unlock(&drvdata->spinlock); | ||
| 1823 | return size; | ||
| 1824 | } | ||
| 1825 | static DEVICE_ATTR_RW(ctxid_idx); | ||
| 1826 | |||
| 1827 | static ssize_t ctxid_pid_show(struct device *dev, | ||
| 1828 | struct device_attribute *attr, | ||
| 1829 | char *buf) | ||
| 1830 | { | ||
| 1831 | u8 idx; | ||
| 1832 | unsigned long val; | ||
| 1833 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1834 | |||
| 1835 | spin_lock(&drvdata->spinlock); | ||
| 1836 | idx = drvdata->ctxid_idx; | ||
| 1837 | val = (unsigned long)drvdata->ctxid_vpid[idx]; | ||
| 1838 | spin_unlock(&drvdata->spinlock); | ||
| 1839 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1840 | } | 336 | } |
| 1841 | 337 | ||
| 1842 | static ssize_t ctxid_pid_store(struct device *dev, | 338 | static void etm4_disable_sysfs(struct coresight_device *csdev) |
| 1843 | struct device_attribute *attr, | ||
| 1844 | const char *buf, size_t size) | ||
| 1845 | { | 339 | { |
| 1846 | u8 idx; | 340 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 1847 | unsigned long vpid, pid; | ||
| 1848 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1849 | 341 | ||
| 1850 | /* | 342 | /* |
| 1851 | * only implemented when ctxid tracing is enabled, i.e. at least one | 343 | * Taking hotplug lock here protects from clocks getting disabled |
| 1852 | * ctxid comparator is implemented and ctxid is greater than 0 bits | 344 | * with tracing being left on (crash scenario) if user disable occurs |
| 1853 | * in length | 345 | * after cpu online mask indicates the cpu is offline but before the |
| 346 | * DYING hotplug callback is serviced by the ETM driver. | ||
| 1854 | */ | 347 | */ |
| 1855 | if (!drvdata->ctxid_size || !drvdata->numcidc) | 348 | get_online_cpus(); |
| 1856 | return -EINVAL; | ||
| 1857 | if (kstrtoul(buf, 16, &vpid)) | ||
| 1858 | return -EINVAL; | ||
| 1859 | |||
| 1860 | pid = coresight_vpid_to_pid(vpid); | ||
| 1861 | |||
| 1862 | spin_lock(&drvdata->spinlock); | ||
| 1863 | idx = drvdata->ctxid_idx; | ||
| 1864 | drvdata->ctxid_pid[idx] = (u64)pid; | ||
| 1865 | drvdata->ctxid_vpid[idx] = (u64)vpid; | ||
| 1866 | spin_unlock(&drvdata->spinlock); | ||
| 1867 | return size; | ||
| 1868 | } | ||
| 1869 | static DEVICE_ATTR_RW(ctxid_pid); | ||
| 1870 | |||
| 1871 | static ssize_t ctxid_masks_show(struct device *dev, | ||
| 1872 | struct device_attribute *attr, | ||
| 1873 | char *buf) | ||
| 1874 | { | ||
| 1875 | unsigned long val1, val2; | ||
| 1876 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1877 | |||
| 1878 | spin_lock(&drvdata->spinlock); | 349 | spin_lock(&drvdata->spinlock); |
| 1879 | val1 = drvdata->ctxid_mask0; | ||
| 1880 | val2 = drvdata->ctxid_mask1; | ||
| 1881 | spin_unlock(&drvdata->spinlock); | ||
| 1882 | return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); | ||
| 1883 | } | ||
| 1884 | 350 | ||
| 1885 | static ssize_t ctxid_masks_store(struct device *dev, | ||
| 1886 | struct device_attribute *attr, | ||
| 1887 | const char *buf, size_t size) | ||
| 1888 | { | ||
| 1889 | u8 i, j, maskbyte; | ||
| 1890 | unsigned long val1, val2, mask; | ||
| 1891 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1892 | |||
| 1893 | /* | ||
| 1894 | * only implemented when ctxid tracing is enabled, i.e. at least one | ||
| 1895 | * ctxid comparator is implemented and ctxid is greater than 0 bits | ||
| 1896 | * in length | ||
| 1897 | */ | ||
| 1898 | if (!drvdata->ctxid_size || !drvdata->numcidc) | ||
| 1899 | return -EINVAL; | ||
| 1900 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
| 1901 | return -EINVAL; | ||
| 1902 | |||
| 1903 | spin_lock(&drvdata->spinlock); | ||
| 1904 | /* | ||
| 1905 | * each byte[0..3] controls mask value applied to ctxid | ||
| 1906 | * comparator[0..3] | ||
| 1907 | */ | ||
| 1908 | switch (drvdata->numcidc) { | ||
| 1909 | case 0x1: | ||
| 1910 | /* COMP0, bits[7:0] */ | ||
| 1911 | drvdata->ctxid_mask0 = val1 & 0xFF; | ||
| 1912 | break; | ||
| 1913 | case 0x2: | ||
| 1914 | /* COMP1, bits[15:8] */ | ||
| 1915 | drvdata->ctxid_mask0 = val1 & 0xFFFF; | ||
| 1916 | break; | ||
| 1917 | case 0x3: | ||
| 1918 | /* COMP2, bits[23:16] */ | ||
| 1919 | drvdata->ctxid_mask0 = val1 & 0xFFFFFF; | ||
| 1920 | break; | ||
| 1921 | case 0x4: | ||
| 1922 | /* COMP3, bits[31:24] */ | ||
| 1923 | drvdata->ctxid_mask0 = val1; | ||
| 1924 | break; | ||
| 1925 | case 0x5: | ||
| 1926 | /* COMP4, bits[7:0] */ | ||
| 1927 | drvdata->ctxid_mask0 = val1; | ||
| 1928 | drvdata->ctxid_mask1 = val2 & 0xFF; | ||
| 1929 | break; | ||
| 1930 | case 0x6: | ||
| 1931 | /* COMP5, bits[15:8] */ | ||
| 1932 | drvdata->ctxid_mask0 = val1; | ||
| 1933 | drvdata->ctxid_mask1 = val2 & 0xFFFF; | ||
| 1934 | break; | ||
| 1935 | case 0x7: | ||
| 1936 | /* COMP6, bits[23:16] */ | ||
| 1937 | drvdata->ctxid_mask0 = val1; | ||
| 1938 | drvdata->ctxid_mask1 = val2 & 0xFFFFFF; | ||
| 1939 | break; | ||
| 1940 | case 0x8: | ||
| 1941 | /* COMP7, bits[31:24] */ | ||
| 1942 | drvdata->ctxid_mask0 = val1; | ||
| 1943 | drvdata->ctxid_mask1 = val2; | ||
| 1944 | break; | ||
| 1945 | default: | ||
| 1946 | break; | ||
| 1947 | } | ||
| 1948 | /* | 351 | /* |
| 1949 | * If software sets a mask bit to 1, it must program the relevant byte | 352 | * Executing etm4_disable_hw on the cpu whose ETM is being disabled |
| 1950 | * of the ctxid comparator value to 0x0, otherwise behavior is unpredictable. | 353 | * ensures that register writes occur when cpu is powered. |
| 1951 | * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24] | ||
| 1952 | * of ctxid comparator0 value (corresponding to byte 0) register. | ||
| 1953 | */ | 354 | */ |
| 1954 | mask = drvdata->ctxid_mask0; | 355 | smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1); |
| 1955 | for (i = 0; i < drvdata->numcidc; i++) { | ||
| 1956 | /* mask value of corresponding ctxid comparator */ | ||
| 1957 | maskbyte = mask & ETMv4_EVENT_MASK; | ||
| 1958 | /* | ||
| 1959 | * each bit corresponds to a byte of respective ctxid comparator | ||
| 1960 | * value register | ||
| 1961 | */ | ||
| 1962 | for (j = 0; j < 8; j++) { | ||
| 1963 | if (maskbyte & 1) | ||
| 1964 | drvdata->ctxid_pid[i] &= ~(0xFFULL << (j * 8)); | ||
| 1965 | maskbyte >>= 1; | ||
| 1966 | } | ||
| 1967 | /* Select the next ctxid comparator mask value */ | ||
| 1968 | if (i == 3) | ||
| 1969 | /* ctxid comparators[4-7] */ | ||
| 1970 | mask = drvdata->ctxid_mask1; | ||
| 1971 | else | ||
| 1972 | mask >>= 0x8; | ||
| 1973 | } | ||
| 1974 | 356 | ||
| 1975 | spin_unlock(&drvdata->spinlock); | 357 | spin_unlock(&drvdata->spinlock); |
| 1976 | return size; | 358 | put_online_cpus(); |
| 1977 | } | ||
| 1978 | static DEVICE_ATTR_RW(ctxid_masks); | ||
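The byte-wise masking implemented in ctxid_masks_store() above is easier to follow with concrete numbers. A stand-alone sketch, with made-up values (the comparator value and mask byte are assumptions for illustration, not taken from the patch):

	u64 ctxid_pid = 0x1122334455667788ULL;	/* one 64-bit comparator value */
	u8 maskbyte = 0x81;			/* bits 0 and 7 set: mask bytes 0 and 7 */
	int j;

	for (j = 0; j < 8; j++) {
		if (maskbyte & 1)		/* each bit masks one byte of the value */
			ctxid_pid &= ~(0xFFULL << (j * 8));
		maskbyte >>= 1;
	}
	/* ctxid_pid ends up as 0x0022334455667700 */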
| 1979 | |||
| 1980 | static ssize_t vmid_idx_show(struct device *dev, | ||
| 1981 | struct device_attribute *attr, | ||
| 1982 | char *buf) | ||
| 1983 | { | ||
| 1984 | unsigned long val; | ||
| 1985 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1986 | |||
| 1987 | val = drvdata->vmid_idx; | ||
| 1988 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 1989 | } | ||
| 1990 | |||
| 1991 | static ssize_t vmid_idx_store(struct device *dev, | ||
| 1992 | struct device_attribute *attr, | ||
| 1993 | const char *buf, size_t size) | ||
| 1994 | { | ||
| 1995 | unsigned long val; | ||
| 1996 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 1997 | |||
| 1998 | if (kstrtoul(buf, 16, &val)) | ||
| 1999 | return -EINVAL; | ||
| 2000 | if (val >= drvdata->numvmidc) | ||
| 2001 | return -EINVAL; | ||
| 2002 | |||
| 2003 | /* | ||
| 2004 | * Use spinlock to ensure index doesn't change while it gets | ||
| 2005 | * dereferenced multiple times within a spinlock block elsewhere. | ||
| 2006 | */ | ||
| 2007 | spin_lock(&drvdata->spinlock); | ||
| 2008 | drvdata->vmid_idx = val; | ||
| 2009 | spin_unlock(&drvdata->spinlock); | ||
| 2010 | return size; | ||
| 2011 | } | ||
| 2012 | static DEVICE_ATTR_RW(vmid_idx); | ||
| 2013 | |||
| 2014 | static ssize_t vmid_val_show(struct device *dev, | ||
| 2015 | struct device_attribute *attr, | ||
| 2016 | char *buf) | ||
| 2017 | { | ||
| 2018 | unsigned long val; | ||
| 2019 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 2020 | |||
| 2021 | val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx]; | ||
| 2022 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 2023 | } | ||
| 2024 | |||
| 2025 | static ssize_t vmid_val_store(struct device *dev, | ||
| 2026 | struct device_attribute *attr, | ||
| 2027 | const char *buf, size_t size) | ||
| 2028 | { | ||
| 2029 | unsigned long val; | ||
| 2030 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 2031 | |||
| 2032 | /* | ||
| 2033 | * only implemented when vmid tracing is enabled, i.e. at least one | ||
| 2034 | * vmid comparator is implemented and at least 8 bit vmid size | ||
| 2035 | */ | ||
| 2036 | if (!drvdata->vmid_size || !drvdata->numvmidc) | ||
| 2037 | return -EINVAL; | ||
| 2038 | if (kstrtoul(buf, 16, &val)) | ||
| 2039 | return -EINVAL; | ||
| 2040 | 359 | ||
| 2041 | spin_lock(&drvdata->spinlock); | 360 | dev_info(drvdata->dev, "ETM tracing disabled\n"); |
| 2042 | drvdata->vmid_val[drvdata->vmid_idx] = (u64)val; | ||
| 2043 | spin_unlock(&drvdata->spinlock); | ||
| 2044 | return size; | ||
| 2045 | } | 361 | } |
| 2046 | static DEVICE_ATTR_RW(vmid_val); | ||
| 2047 | 362 | ||
| 2048 | static ssize_t vmid_masks_show(struct device *dev, | 363 | static void etm4_disable(struct coresight_device *csdev) |
| 2049 | struct device_attribute *attr, char *buf) | ||
| 2050 | { | 364 | { |
| 2051 | unsigned long val1, val2; | 365 | u32 mode; |
| 2052 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | 366 | struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); |
| 2053 | |||
| 2054 | spin_lock(&drvdata->spinlock); | ||
| 2055 | val1 = drvdata->vmid_mask0; | ||
| 2056 | val2 = drvdata->vmid_mask1; | ||
| 2057 | spin_unlock(&drvdata->spinlock); | ||
| 2058 | return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); | ||
| 2059 | } | ||
| 2060 | 367 | ||
| 2061 | static ssize_t vmid_masks_store(struct device *dev, | ||
| 2062 | struct device_attribute *attr, | ||
| 2063 | const char *buf, size_t size) | ||
| 2064 | { | ||
| 2065 | u8 i, j, maskbyte; | ||
| 2066 | unsigned long val1, val2, mask; | ||
| 2067 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 2068 | /* | 368 | /* |
| 2069 | * only implemented when vmid tracing is enabled, i.e. at least one | 369 | * For as long as the tracer isn't disabled another entity can't |
| 2070 | * vmid comparator is implemented and at least 8 bit vmid size | 370 | * change its status. As such we can read the status here without |
| 371 | * fearing it will change under us. | ||
| 2071 | */ | 372 | */ |
| 2072 | if (!drvdata->vmid_size || !drvdata->numvmidc) | 373 | mode = local_read(&drvdata->mode); |
| 2073 | return -EINVAL; | ||
| 2074 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | ||
| 2075 | return -EINVAL; | ||
| 2076 | |||
| 2077 | spin_lock(&drvdata->spinlock); | ||
| 2078 | 374 | ||
| 2079 | /* | 375 | switch (mode) { |
| 2080 | * each byte[0..3] controls mask value applied to vmid | 376 | case CS_MODE_DISABLED: |
| 2081 | * comparator[0..3] | ||
| 2082 | */ | ||
| 2083 | switch (drvdata->numvmidc) { | ||
| 2084 | case 0x1: | ||
| 2085 | /* COMP0, bits[7:0] */ | ||
| 2086 | drvdata->vmid_mask0 = val1 & 0xFF; | ||
| 2087 | break; | ||
| 2088 | case 0x2: | ||
| 2089 | /* COMP1, bits[15:8] */ | ||
| 2090 | drvdata->vmid_mask0 = val1 & 0xFFFF; | ||
| 2091 | break; | ||
| 2092 | case 0x3: | ||
| 2093 | /* COMP2, bits[23:16] */ | ||
| 2094 | drvdata->vmid_mask0 = val1 & 0xFFFFFF; | ||
| 2095 | break; | 377 | break; |
| 2096 | case 0x4: | 378 | case CS_MODE_SYSFS: |
| 2097 | /* COMP3, bits[31:24] */ | 379 | etm4_disable_sysfs(csdev); |
| 2098 | drvdata->vmid_mask0 = val1; | ||
| 2099 | break; | 380 | break; |
| 2100 | case 0x5: | 381 | case CS_MODE_PERF: |
| 2101 | /* COMP4, bits[7:0] */ | 382 | etm4_disable_perf(csdev); |
| 2102 | drvdata->vmid_mask0 = val1; | ||
| 2103 | drvdata->vmid_mask1 = val2 & 0xFF; | ||
| 2104 | break; | ||
| 2105 | case 0x6: | ||
| 2106 | /* COMP5, bits[15:8] */ | ||
| 2107 | drvdata->vmid_mask0 = val1; | ||
| 2108 | drvdata->vmid_mask1 = val2 & 0xFFFF; | ||
| 2109 | break; | ||
| 2110 | case 0x7: | ||
| 2111 | /* COMP6, bits[23:16] */ | ||
| 2112 | drvdata->vmid_mask0 = val1; | ||
| 2113 | drvdata->vmid_mask1 = val2 & 0xFFFFFF; | ||
| 2114 | break; | ||
| 2115 | case 0x8: | ||
| 2116 | /* COMP7, bits[31:24] */ | ||
| 2117 | drvdata->vmid_mask0 = val1; | ||
| 2118 | drvdata->vmid_mask1 = val2; | ||
| 2119 | break; | ||
| 2120 | default: | ||
| 2121 | break; | 383 | break; |
| 2122 | } | 384 | } |
| 2123 | 385 | ||
| 2124 | /* | 386 | if (mode) |
| 2125 | * If software sets a mask bit to 1, it must program the relevant byte | 387 | local_set(&drvdata->mode, CS_MODE_DISABLED); |
| 2126 | * of the vmid comparator value to 0x0, otherwise behavior is unpredictable. | ||
| 2127 | * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24] | ||
| 2128 | * of vmid comparator0 value (corresponding to byte 0) register. | ||
| 2129 | */ | ||
| 2130 | mask = drvdata->vmid_mask0; | ||
| 2131 | for (i = 0; i < drvdata->numvmidc; i++) { | ||
| 2132 | /* mask value of corresponding vmid comparator */ | ||
| 2133 | maskbyte = mask & ETMv4_EVENT_MASK; | ||
| 2134 | /* | ||
| 2135 | * each bit corresponds to a byte of respective vmid comparator | ||
| 2136 | * value register | ||
| 2137 | */ | ||
| 2138 | for (j = 0; j < 8; j++) { | ||
| 2139 | if (maskbyte & 1) | ||
| 2140 | drvdata->vmid_val[i] &= ~(0xFFULL << (j * 8)); | ||
| 2141 | maskbyte >>= 1; | ||
| 2142 | } | ||
| 2143 | /* Select the next vmid comparator mask value */ | ||
| 2144 | if (i == 3) | ||
| 2145 | /* vmid comparators[4-7] */ | ||
| 2146 | mask = drvdata->vmid_mask1; | ||
| 2147 | else | ||
| 2148 | mask >>= 0x8; | ||
| 2149 | } | ||
| 2150 | spin_unlock(&drvdata->spinlock); | ||
| 2151 | return size; | ||
| 2152 | } | ||
| 2153 | static DEVICE_ATTR_RW(vmid_masks); | ||
| 2154 | |||
| 2155 | static ssize_t cpu_show(struct device *dev, | ||
| 2156 | struct device_attribute *attr, char *buf) | ||
| 2157 | { | ||
| 2158 | int val; | ||
| 2159 | struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 2160 | |||
| 2161 | val = drvdata->cpu; | ||
| 2162 | return scnprintf(buf, PAGE_SIZE, "%d\n", val); | ||
| 2163 | |||
| 2164 | } | 388 | } |
| 2165 | static DEVICE_ATTR_RO(cpu); | ||
| 2166 | |||
| 2167 | static struct attribute *coresight_etmv4_attrs[] = { | ||
| 2168 | &dev_attr_nr_pe_cmp.attr, | ||
| 2169 | &dev_attr_nr_addr_cmp.attr, | ||
| 2170 | &dev_attr_nr_cntr.attr, | ||
| 2171 | &dev_attr_nr_ext_inp.attr, | ||
| 2172 | &dev_attr_numcidc.attr, | ||
| 2173 | &dev_attr_numvmidc.attr, | ||
| 2174 | &dev_attr_nrseqstate.attr, | ||
| 2175 | &dev_attr_nr_resource.attr, | ||
| 2176 | &dev_attr_nr_ss_cmp.attr, | ||
| 2177 | &dev_attr_reset.attr, | ||
| 2178 | &dev_attr_mode.attr, | ||
| 2179 | &dev_attr_pe.attr, | ||
| 2180 | &dev_attr_event.attr, | ||
| 2181 | &dev_attr_event_instren.attr, | ||
| 2182 | &dev_attr_event_ts.attr, | ||
| 2183 | &dev_attr_syncfreq.attr, | ||
| 2184 | &dev_attr_cyc_threshold.attr, | ||
| 2185 | &dev_attr_bb_ctrl.attr, | ||
| 2186 | &dev_attr_event_vinst.attr, | ||
| 2187 | &dev_attr_s_exlevel_vinst.attr, | ||
| 2188 | &dev_attr_ns_exlevel_vinst.attr, | ||
| 2189 | &dev_attr_addr_idx.attr, | ||
| 2190 | &dev_attr_addr_instdatatype.attr, | ||
| 2191 | &dev_attr_addr_single.attr, | ||
| 2192 | &dev_attr_addr_range.attr, | ||
| 2193 | &dev_attr_addr_start.attr, | ||
| 2194 | &dev_attr_addr_stop.attr, | ||
| 2195 | &dev_attr_addr_ctxtype.attr, | ||
| 2196 | &dev_attr_addr_context.attr, | ||
| 2197 | &dev_attr_seq_idx.attr, | ||
| 2198 | &dev_attr_seq_state.attr, | ||
| 2199 | &dev_attr_seq_event.attr, | ||
| 2200 | &dev_attr_seq_reset_event.attr, | ||
| 2201 | &dev_attr_cntr_idx.attr, | ||
| 2202 | &dev_attr_cntrldvr.attr, | ||
| 2203 | &dev_attr_cntr_val.attr, | ||
| 2204 | &dev_attr_cntr_ctrl.attr, | ||
| 2205 | &dev_attr_res_idx.attr, | ||
| 2206 | &dev_attr_res_ctrl.attr, | ||
| 2207 | &dev_attr_ctxid_idx.attr, | ||
| 2208 | &dev_attr_ctxid_pid.attr, | ||
| 2209 | &dev_attr_ctxid_masks.attr, | ||
| 2210 | &dev_attr_vmid_idx.attr, | ||
| 2211 | &dev_attr_vmid_val.attr, | ||
| 2212 | &dev_attr_vmid_masks.attr, | ||
| 2213 | &dev_attr_cpu.attr, | ||
| 2214 | NULL, | ||
| 2215 | }; | ||
| 2216 | |||
| 2217 | #define coresight_simple_func(name, offset) \ | ||
| 2218 | static ssize_t name##_show(struct device *_dev, \ | ||
| 2219 | struct device_attribute *attr, char *buf) \ | ||
| 2220 | { \ | ||
| 2221 | struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ | ||
| 2222 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ | ||
| 2223 | readl_relaxed(drvdata->base + offset)); \ | ||
| 2224 | } \ | ||
| 2225 | static DEVICE_ATTR_RO(name) | ||
| 2226 | |||
| 2227 | coresight_simple_func(trcoslsr, TRCOSLSR); | ||
| 2228 | coresight_simple_func(trcpdcr, TRCPDCR); | ||
| 2229 | coresight_simple_func(trcpdsr, TRCPDSR); | ||
| 2230 | coresight_simple_func(trclsr, TRCLSR); | ||
| 2231 | coresight_simple_func(trcauthstatus, TRCAUTHSTATUS); | ||
| 2232 | coresight_simple_func(trcdevid, TRCDEVID); | ||
| 2233 | coresight_simple_func(trcdevtype, TRCDEVTYPE); | ||
| 2234 | coresight_simple_func(trcpidr0, TRCPIDR0); | ||
| 2235 | coresight_simple_func(trcpidr1, TRCPIDR1); | ||
| 2236 | coresight_simple_func(trcpidr2, TRCPIDR2); | ||
| 2237 | coresight_simple_func(trcpidr3, TRCPIDR3); | ||
| 2238 | |||
| 2239 | static struct attribute *coresight_etmv4_mgmt_attrs[] = { | ||
| 2240 | &dev_attr_trcoslsr.attr, | ||
| 2241 | &dev_attr_trcpdcr.attr, | ||
| 2242 | &dev_attr_trcpdsr.attr, | ||
| 2243 | &dev_attr_trclsr.attr, | ||
| 2244 | &dev_attr_trcauthstatus.attr, | ||
| 2245 | &dev_attr_trcdevid.attr, | ||
| 2246 | &dev_attr_trcdevtype.attr, | ||
| 2247 | &dev_attr_trcpidr0.attr, | ||
| 2248 | &dev_attr_trcpidr1.attr, | ||
| 2249 | &dev_attr_trcpidr2.attr, | ||
| 2250 | &dev_attr_trcpidr3.attr, | ||
| 2251 | NULL, | ||
| 2252 | }; | ||
| 2253 | 389 | ||
| 2254 | coresight_simple_func(trcidr0, TRCIDR0); | 390 | static const struct coresight_ops_source etm4_source_ops = { |
| 2255 | coresight_simple_func(trcidr1, TRCIDR1); | 391 | .cpu_id = etm4_cpu_id, |
| 2256 | coresight_simple_func(trcidr2, TRCIDR2); | 392 | .trace_id = etm4_trace_id, |
| 2257 | coresight_simple_func(trcidr3, TRCIDR3); | 393 | .enable = etm4_enable, |
| 2258 | coresight_simple_func(trcidr4, TRCIDR4); | 394 | .disable = etm4_disable, |
| 2259 | coresight_simple_func(trcidr5, TRCIDR5); | ||
| 2260 | /* trcidr[6,7] are reserved */ | ||
| 2261 | coresight_simple_func(trcidr8, TRCIDR8); | ||
| 2262 | coresight_simple_func(trcidr9, TRCIDR9); | ||
| 2263 | coresight_simple_func(trcidr10, TRCIDR10); | ||
| 2264 | coresight_simple_func(trcidr11, TRCIDR11); | ||
| 2265 | coresight_simple_func(trcidr12, TRCIDR12); | ||
| 2266 | coresight_simple_func(trcidr13, TRCIDR13); | ||
| 2267 | |||
| 2268 | static struct attribute *coresight_etmv4_trcidr_attrs[] = { | ||
| 2269 | &dev_attr_trcidr0.attr, | ||
| 2270 | &dev_attr_trcidr1.attr, | ||
| 2271 | &dev_attr_trcidr2.attr, | ||
| 2272 | &dev_attr_trcidr3.attr, | ||
| 2273 | &dev_attr_trcidr4.attr, | ||
| 2274 | &dev_attr_trcidr5.attr, | ||
| 2275 | /* trcidr[6,7] are reserved */ | ||
| 2276 | &dev_attr_trcidr8.attr, | ||
| 2277 | &dev_attr_trcidr9.attr, | ||
| 2278 | &dev_attr_trcidr10.attr, | ||
| 2279 | &dev_attr_trcidr11.attr, | ||
| 2280 | &dev_attr_trcidr12.attr, | ||
| 2281 | &dev_attr_trcidr13.attr, | ||
| 2282 | NULL, | ||
| 2283 | }; | ||
| 2284 | |||
| 2285 | static const struct attribute_group coresight_etmv4_group = { | ||
| 2286 | .attrs = coresight_etmv4_attrs, | ||
| 2287 | }; | ||
| 2288 | |||
| 2289 | static const struct attribute_group coresight_etmv4_mgmt_group = { | ||
| 2290 | .attrs = coresight_etmv4_mgmt_attrs, | ||
| 2291 | .name = "mgmt", | ||
| 2292 | }; | ||
| 2293 | |||
| 2294 | static const struct attribute_group coresight_etmv4_trcidr_group = { | ||
| 2295 | .attrs = coresight_etmv4_trcidr_attrs, | ||
| 2296 | .name = "trcidr", | ||
| 2297 | }; | 395 | }; |
| 2298 | 396 | ||
| 2299 | static const struct attribute_group *coresight_etmv4_groups[] = { | 397 | static const struct coresight_ops etm4_cs_ops = { |
| 2300 | &coresight_etmv4_group, | 398 | .source_ops = &etm4_source_ops, |
| 2301 | &coresight_etmv4_mgmt_group, | ||
| 2302 | &coresight_etmv4_trcidr_group, | ||
| 2303 | NULL, | ||
| 2304 | }; | 399 | }; |
| 2305 | 400 | ||
| 2306 | static void etm4_init_arch_data(void *info) | 401 | static void etm4_init_arch_data(void *info) |
| @@ -2313,6 +408,9 @@ static void etm4_init_arch_data(void *info) | |||
| 2313 | u32 etmidr5; | 408 | u32 etmidr5; |
| 2314 | struct etmv4_drvdata *drvdata = info; | 409 | struct etmv4_drvdata *drvdata = info; |
| 2315 | 410 | ||
| 411 | /* Make sure all registers are accessible */ | ||
| 412 | etm4_os_unlock(drvdata); | ||
| 413 | |||
| 2316 | CS_UNLOCK(drvdata->base); | 414 | CS_UNLOCK(drvdata->base); |
| 2317 | 415 | ||
| 2318 | /* find all capabilities of the tracing unit */ | 416 | /* find all capabilities of the tracing unit */ |
| @@ -2464,93 +562,115 @@ static void etm4_init_arch_data(void *info) | |||
| 2464 | CS_LOCK(drvdata->base); | 562 | CS_LOCK(drvdata->base); |
| 2465 | } | 563 | } |
| 2466 | 564 | ||
| 2467 | static void etm4_init_default_data(struct etmv4_drvdata *drvdata) | 565 | static void etm4_set_default(struct etmv4_config *config) |
| 2468 | { | 566 | { |
| 2469 | int i; | 567 | if (WARN_ON_ONCE(!config)) |
| 568 | return; | ||
| 2470 | 569 | ||
| 2471 | drvdata->pe_sel = 0x0; | 570 | /* |
| 2472 | drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID | | 571 | * Make default initialisation trace everything |
| 2473 | ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK); | 572 | * |
| 573 | * Select the "always true" resource selector on the | ||
| 574 | * "Enablign Event" line and configure address range comparator | ||
| 575 | * '0' to trace all the possible address range. From there | ||
| 576 | * configure the "include/exclude" engine to include address | ||
| 577 | * range comparator '0'. | ||
| 578 | */ | ||
| 2474 | 579 | ||
| 2475 | /* disable all events tracing */ | 580 | /* disable all events tracing */ |
| 2476 | drvdata->eventctrl0 = 0x0; | 581 | config->eventctrl0 = 0x0; |
| 2477 | drvdata->eventctrl1 = 0x0; | 582 | config->eventctrl1 = 0x0; |
| 2478 | 583 | ||
| 2479 | /* disable stalling */ | 584 | /* disable stalling */ |
| 2480 | drvdata->stall_ctrl = 0x0; | 585 | config->stall_ctrl = 0x0; |
| 586 | |||
| 587 | /* enable trace synchronization every 4096 bytes, if available */ | ||
| 588 | config->syncfreq = 0xC; | ||
| 2481 | 589 | ||
| 2482 | /* disable timestamp event */ | 590 | /* disable timestamp event */ |
| 2483 | drvdata->ts_ctrl = 0x0; | 591 | config->ts_ctrl = 0x0; |
| 2484 | 592 | ||
| 2485 | /* enable trace synchronization every 4096 bytes for trace */ | 593 | /* TRCVICTLR::EVENT = 0x01, select the always on logic */ |
| 2486 | if (drvdata->syncpr == false) | 594 | config->vinst_ctrl |= BIT(0); |
| 2487 | drvdata->syncfreq = 0xC; | ||
| 2488 | 595 | ||
| 2489 | /* | 596 | /* |
| 2490 | * enable viewInst to trace everything with start-stop logic in | 597 | * TRCVICTLR::SSSTATUS == 1, the start-stop logic is |
| 2491 | * started state | 598 | * in the started state |
| 2492 | */ | 599 | */ |
| 2493 | drvdata->vinst_ctrl |= BIT(0); | 600 | config->vinst_ctrl |= BIT(9); |
| 2494 | /* set initial state of start-stop logic */ | ||
| 2495 | if (drvdata->nr_addr_cmp) | ||
| 2496 | drvdata->vinst_ctrl |= BIT(9); | ||
| 2497 | 601 | ||
| 2498 | /* no address range filtering for ViewInst */ | 602 | /* |
| 2499 | drvdata->viiectlr = 0x0; | 603 | * Configure address range comparator '0' to encompass all |
| 2500 | /* no start-stop filtering for ViewInst */ | 604 | * possible addresses. |
| 2501 | drvdata->vissctlr = 0x0; | 605 | */ |
| 2502 | 606 | ||
| 2503 | /* disable seq events */ | 607 | /* First half of default address comparator: start at address 0 */ |
| 2504 | for (i = 0; i < drvdata->nrseqstate-1; i++) | 608 | config->addr_val[ETM_DEFAULT_ADDR_COMP] = 0x0; |
| 2505 | drvdata->seq_ctrl[i] = 0x0; | 609 | /* trace instruction addresses */ |
| 2506 | drvdata->seq_rst = 0x0; | 610 | config->addr_acc[ETM_DEFAULT_ADDR_COMP] &= ~(BIT(0) | BIT(1)); |
| 2507 | drvdata->seq_state = 0x0; | 611 | /* EXLEVEL_NS, bits[12:15], only trace application and kernel space */ |
| 612 | config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= ETM_EXLEVEL_NS_HYP; | ||
| 613 | /* EXLEVEL_S, bits[11:8], don't trace anything in secure state */ | ||
| 614 | config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= (ETM_EXLEVEL_S_APP | | ||
| 615 | ETM_EXLEVEL_S_OS | | ||
| 616 | ETM_EXLEVEL_S_HYP); | ||
| 617 | config->addr_type[ETM_DEFAULT_ADDR_COMP] = ETM_ADDR_TYPE_RANGE; | ||
| 2508 | 618 | ||
| 2509 | /* disable external input events */ | 619 | /* |
| 2510 | drvdata->ext_inp = 0x0; | 620 | * Second half of default address comparator: go all |
| 621 | * the way to the top. | ||
| 622 | */ | ||
| 623 | config->addr_val[ETM_DEFAULT_ADDR_COMP + 1] = ~0x0; | ||
| 624 | /* trace instruction addresses */ | ||
| 625 | config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] &= ~(BIT(0) | BIT(1)); | ||
| 626 | /* Address comparator type must be equal for both halves */ | ||
| 627 | config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = | ||
| 628 | config->addr_acc[ETM_DEFAULT_ADDR_COMP]; | ||
| 629 | config->addr_type[ETM_DEFAULT_ADDR_COMP + 1] = ETM_ADDR_TYPE_RANGE; | ||
| 2511 | 630 | ||
| 2512 | for (i = 0; i < drvdata->nr_cntr; i++) { | 631 | /* |
| 2513 | drvdata->cntrldvr[i] = 0x0; | 632 | * Configure the ViewInst function to filter on address range |
| 2514 | drvdata->cntr_ctrl[i] = 0x0; | 633 | * comparator '0'. |
| 2515 | drvdata->cntr_val[i] = 0x0; | 634 | */ |
| 2516 | } | 635 | config->viiectlr = BIT(0); |
| 2517 | 636 | ||
| 2518 | /* Resource selector pair 0 is always implemented and reserved */ | 637 | /* no start-stop filtering for ViewInst */ |
| 2519 | drvdata->res_idx = 0x2; | 638 | config->vissctlr = 0x0; |
| 2520 | for (i = 2; i < drvdata->nr_resource * 2; i++) | 639 | } |
| 2521 | drvdata->res_ctrl[i] = 0x0; | ||
| 2522 | 640 | ||
| 2523 | for (i = 0; i < drvdata->nr_ss_cmp; i++) { | 641 | void etm4_config_trace_mode(struct etmv4_config *config) |
| 2524 | drvdata->ss_ctrl[i] = 0x0; | 642 | { |
| 2525 | drvdata->ss_pe_cmp[i] = 0x0; | 643 | u32 addr_acc, mode; |
| 2526 | } | ||
| 2527 | 644 | ||
| 2528 | if (drvdata->nr_addr_cmp >= 1) { | 645 | mode = config->mode; |
| 2529 | drvdata->addr_val[0] = (unsigned long)_stext; | 646 | mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER); |
| 2530 | drvdata->addr_val[1] = (unsigned long)_etext; | ||
| 2531 | drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE; | ||
| 2532 | drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE; | ||
| 2533 | } | ||
| 2534 | 647 | ||
| 2535 | for (i = 0; i < drvdata->numcidc; i++) { | 648 | /* excluding kernel AND user space doesn't make sense */ |
| 2536 | drvdata->ctxid_pid[i] = 0x0; | 649 | WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)); |
| 2537 | drvdata->ctxid_vpid[i] = 0x0; | ||
| 2538 | } | ||
| 2539 | 650 | ||
| 2540 | drvdata->ctxid_mask0 = 0x0; | 651 | /* nothing to do if neither flags are set */ |
| 2541 | drvdata->ctxid_mask1 = 0x0; | 652 | if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER)) |
| 653 | return; | ||
| 2542 | 654 | ||
| 2543 | for (i = 0; i < drvdata->numvmidc; i++) | 655 | addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP]; |
| 2544 | drvdata->vmid_val[i] = 0x0; | 656 | /* clear default config */ |
| 2545 | drvdata->vmid_mask0 = 0x0; | 657 | addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS); |
| 2546 | drvdata->vmid_mask1 = 0x0; | ||
| 2547 | 658 | ||
| 2548 | /* | 659 | /* |
| 2549 | * A trace ID value of 0 is invalid, so let's start at some | 660 | * EXLEVEL_NS, bits[15:12] |
| 2550 | * random value that fits in 7 bits. ETMv3.x has 0x10 so let's | 661 | * The Exception levels are: |
| 2551 | * start at 0x20. | 662 | * Bit[12] Exception level 0 - Application |
| 663 | * Bit[13] Exception level 1 - OS | ||
| 664 | * Bit[14] Exception level 2 - Hypervisor | ||
| 665 | * Bit[15] Never implemented | ||
| 2552 | */ | 666 | */ |
| 2553 | drvdata->trcid = 0x20 + drvdata->cpu; | 667 | if (mode & ETM_MODE_EXCL_KERN) |
| 668 | addr_acc |= ETM_EXLEVEL_NS_OS; | ||
| 669 | else | ||
| 670 | addr_acc |= ETM_EXLEVEL_NS_APP; | ||
| 671 | |||
| 672 | config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc; | ||
| 673 | config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; | ||
| 2554 | } | 674 | } |
| 2555 | 675 | ||
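A minimal caller sketch showing how the two helpers above are meant to combine; the function below is hypothetical and only illustrates the intended flow, it is not part of the patch:

	static void example_trace_user_only(struct etmv4_drvdata *drvdata)
	{
		struct etmv4_config *config = &drvdata->config;

		etm4_set_default(config);		/* catch-all address range, always-on event */
		config->mode |= ETM_MODE_EXCL_KERN;	/* request user-space-only tracing */
		etm4_config_trace_mode(config);		/* folds the request into the EXLEVEL_NS bits
							 * of both halves of address comparator 0 */
	}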
| 2556 | static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, | 676 | static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, |
| @@ -2569,7 +689,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, | |||
| 2569 | etmdrvdata[cpu]->os_unlock = true; | 689 | etmdrvdata[cpu]->os_unlock = true; |
| 2570 | } | 690 | } |
| 2571 | 691 | ||
| 2572 | if (etmdrvdata[cpu]->enable) | 692 | if (local_read(&etmdrvdata[cpu]->mode)) |
| 2573 | etm4_enable_hw(etmdrvdata[cpu]); | 693 | etm4_enable_hw(etmdrvdata[cpu]); |
| 2574 | spin_unlock(&etmdrvdata[cpu]->spinlock); | 694 | spin_unlock(&etmdrvdata[cpu]->spinlock); |
| 2575 | break; | 695 | break; |
| @@ -2582,7 +702,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, | |||
| 2582 | 702 | ||
| 2583 | case CPU_DYING: | 703 | case CPU_DYING: |
| 2584 | spin_lock(&etmdrvdata[cpu]->spinlock); | 704 | spin_lock(&etmdrvdata[cpu]->spinlock); |
| 2585 | if (etmdrvdata[cpu]->enable) | 705 | if (local_read(&etmdrvdata[cpu]->mode)) |
| 2586 | etm4_disable_hw(etmdrvdata[cpu]); | 706 | etm4_disable_hw(etmdrvdata[cpu]); |
| 2587 | spin_unlock(&etmdrvdata[cpu]->spinlock); | 707 | spin_unlock(&etmdrvdata[cpu]->spinlock); |
| 2588 | break; | 708 | break; |
| @@ -2595,6 +715,11 @@ static struct notifier_block etm4_cpu_notifier = { | |||
| 2595 | .notifier_call = etm4_cpu_callback, | 715 | .notifier_call = etm4_cpu_callback, |
| 2596 | }; | 716 | }; |
| 2597 | 717 | ||
| 718 | static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) | ||
| 719 | { | ||
| 720 | drvdata->trcid = coresight_get_trace_id(drvdata->cpu); | ||
| 721 | } | ||
| 722 | |||
| 2598 | static int etm4_probe(struct amba_device *adev, const struct amba_id *id) | 723 | static int etm4_probe(struct amba_device *adev, const struct amba_id *id) |
| 2599 | { | 724 | { |
| 2600 | int ret; | 725 | int ret; |
| @@ -2638,9 +763,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2638 | get_online_cpus(); | 763 | get_online_cpus(); |
| 2639 | etmdrvdata[drvdata->cpu] = drvdata; | 764 | etmdrvdata[drvdata->cpu] = drvdata; |
| 2640 | 765 | ||
| 2641 | if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1)) | ||
| 2642 | drvdata->os_unlock = true; | ||
| 2643 | |||
| 2644 | if (smp_call_function_single(drvdata->cpu, | 766 | if (smp_call_function_single(drvdata->cpu, |
| 2645 | etm4_init_arch_data, drvdata, 1)) | 767 | etm4_init_arch_data, drvdata, 1)) |
| 2646 | dev_err(dev, "ETM arch init failed\n"); | 768 | dev_err(dev, "ETM arch init failed\n"); |
| @@ -2654,9 +776,9 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2654 | ret = -EINVAL; | 776 | ret = -EINVAL; |
| 2655 | goto err_arch_supported; | 777 | goto err_arch_supported; |
| 2656 | } | 778 | } |
| 2657 | etm4_init_default_data(drvdata); | ||
| 2658 | 779 | ||
| 2659 | pm_runtime_put(&adev->dev); | 780 | etm4_init_trace_id(drvdata); |
| 781 | etm4_set_default(&drvdata->config); | ||
| 2660 | 782 | ||
| 2661 | desc->type = CORESIGHT_DEV_TYPE_SOURCE; | 783 | desc->type = CORESIGHT_DEV_TYPE_SOURCE; |
| 2662 | desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; | 784 | desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; |
| @@ -2667,9 +789,16 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2667 | drvdata->csdev = coresight_register(desc); | 789 | drvdata->csdev = coresight_register(desc); |
| 2668 | if (IS_ERR(drvdata->csdev)) { | 790 | if (IS_ERR(drvdata->csdev)) { |
| 2669 | ret = PTR_ERR(drvdata->csdev); | 791 | ret = PTR_ERR(drvdata->csdev); |
| 2670 | goto err_coresight_register; | 792 | goto err_arch_supported; |
| 2671 | } | 793 | } |
| 2672 | 794 | ||
| 795 | ret = etm_perf_symlink(drvdata->csdev, true); | ||
| 796 | if (ret) { | ||
| 797 | coresight_unregister(drvdata->csdev); | ||
| 798 | goto err_arch_supported; | ||
| 799 | } | ||
| 800 | |||
| 801 | pm_runtime_put(&adev->dev); | ||
| 2673 | dev_info(dev, "%s initialized\n", (char *)id->data); | 802 | dev_info(dev, "%s initialized\n", (char *)id->data); |
| 2674 | 803 | ||
| 2675 | if (boot_enable) { | 804 | if (boot_enable) { |
| @@ -2680,8 +809,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 2680 | return 0; | 809 | return 0; |
| 2681 | 810 | ||
| 2682 | err_arch_supported: | 811 | err_arch_supported: |
| 2683 | pm_runtime_put(&adev->dev); | ||
| 2684 | err_coresight_register: | ||
| 2685 | if (--etm4_count == 0) | 812 | if (--etm4_count == 0) |
| 2686 | unregister_hotcpu_notifier(&etm4_cpu_notifier); | 813 | unregister_hotcpu_notifier(&etm4_cpu_notifier); |
| 2687 | return ret; | 814 | return ret; |
| @@ -2698,6 +825,11 @@ static struct amba_id etm4_ids[] = { | |||
| 2698 | .mask = 0x000fffff, | 825 | .mask = 0x000fffff, |
| 2699 | .data = "ETM 4.0", | 826 | .data = "ETM 4.0", |
| 2700 | }, | 827 | }, |
| 828 | { /* ETM 4.0 - A72, Maia, HiSilicon */ | ||
| 829 | .id = 0x000bb95a, | ||
| 830 | .mask = 0x000fffff, | ||
| 831 | .data = "ETM 4.0", | ||
| 832 | }, | ||
| 2701 | { 0, 0}, | 833 | { 0, 0}, |
| 2702 | }; | 834 | }; |
| 2703 | 835 | ||
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index c34100205ca9..5359c5197c1d 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #ifndef _CORESIGHT_CORESIGHT_ETM_H | 13 | #ifndef _CORESIGHT_CORESIGHT_ETM_H |
| 14 | #define _CORESIGHT_CORESIGHT_ETM_H | 14 | #define _CORESIGHT_CORESIGHT_ETM_H |
| 15 | 15 | ||
| 16 | #include <asm/local.h> | ||
| 16 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
| 17 | #include "coresight-priv.h" | 18 | #include "coresight-priv.h" |
| 18 | 19 | ||
| @@ -175,71 +176,38 @@ | |||
| 175 | #define ETM_MODE_TRACE_RESET BIT(25) | 176 | #define ETM_MODE_TRACE_RESET BIT(25) |
| 176 | #define ETM_MODE_TRACE_ERR BIT(26) | 177 | #define ETM_MODE_TRACE_ERR BIT(26) |
| 177 | #define ETM_MODE_VIEWINST_STARTSTOP BIT(27) | 178 | #define ETM_MODE_VIEWINST_STARTSTOP BIT(27) |
| 178 | #define ETMv4_MODE_ALL 0xFFFFFFF | 179 | #define ETMv4_MODE_ALL (GENMASK(27, 0) | \ |
| 180 | ETM_MODE_EXCL_KERN | \ | ||
| 181 | ETM_MODE_EXCL_USER) | ||
| 179 | 182 | ||
| 180 | #define TRCSTATR_IDLE_BIT 0 | 183 | #define TRCSTATR_IDLE_BIT 0 |
| 184 | #define ETM_DEFAULT_ADDR_COMP 0 | ||
| 185 | |||
| 186 | /* secure state access levels */ | ||
| 187 | #define ETM_EXLEVEL_S_APP BIT(8) | ||
| 188 | #define ETM_EXLEVEL_S_OS BIT(9) | ||
| 189 | #define ETM_EXLEVEL_S_NA BIT(10) | ||
| 190 | #define ETM_EXLEVEL_S_HYP BIT(11) | ||
| 191 | /* non-secure state access levels */ | ||
| 192 | #define ETM_EXLEVEL_NS_APP BIT(12) | ||
| 193 | #define ETM_EXLEVEL_NS_OS BIT(13) | ||
| 194 | #define ETM_EXLEVEL_NS_HYP BIT(14) | ||
| 195 | #define ETM_EXLEVEL_NS_NA BIT(15) | ||
| 181 | 196 | ||
| 182 | /** | 197 | /** |
| 183 | * struct etm4_drvdata - specifics associated to an ETM component | 198 | * struct etmv4_config - configuration information related to an ETMv4 |
| 184 | * @base: Memory mapped base address for this component. | ||
| 185 | * @dev: The device entity associated to this component. | ||
| 186 | * @csdev: Component vitals needed by the framework. | ||
| 187 | * @spinlock: Only one at a time pls. | ||
| 188 | * @cpu: The cpu this component is affined to. | ||
| 189 | * @arch: ETM version number. | ||
| 190 | * @enable: Is this ETM currently tracing. | ||
| 191 | * @sticky_enable: true if ETM base configuration has been done. | ||
| 192 | * @boot_enable:True if we should start tracing at boot time. | ||
| 193 | * @os_unlock: True if access to management registers is allowed. | ||
| 194 | * @nr_pe: The number of processing entity available for tracing. | ||
| 195 | * @nr_pe_cmp: The number of processing entity comparator inputs that are | ||
| 196 | * available for tracing. | ||
| 197 | * @nr_addr_cmp:Number of pairs of address comparators available | ||
| 198 | * as found in ETMIDR4 0-3. | ||
| 199 | * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30. | ||
| 200 | * @nr_ext_inp: Number of external input. | ||
| 201 | * @numcidc: Number of contextID comparators. | ||
| 202 | * @numvmidc: Number of VMID comparators. | ||
| 203 | * @nrseqstate: The number of sequencer states that are implemented. | ||
| 204 | * @nr_event: Indicates how many events the trace unit supports. | ||
| 205 | * @nr_resource:The number of resource selection pairs available for tracing. | ||
| 206 | * @nr_ss_cmp: Number of single-shot comparator controls that are available. | ||
| 207 | * @mode: Controls various modes supported by this ETM. | 199 | * @mode: Controls various modes supported by this ETM. |
| 208 | * @trcid: value of the current ID for this component. | ||
| 209 | * @trcid_size: Indicates the trace ID width. | ||
| 210 | * @instrp0: Tracing of load and store instructions | ||
| 211 | * as P0 elements is supported. | ||
| 212 | * @trccond: If the trace unit supports conditional | ||
| 213 | * instruction tracing. | ||
| 214 | * @retstack: Indicates if the implementation supports a return stack. | ||
| 215 | * @trc_error: Whether a trace unit can trace a system | ||
| 216 | * error exception. | ||
| 217 | * @atbtrig: If the implementation can support ATB triggers | ||
| 218 | * @lpoverride: If the implementation can support low-power state override. | ||
| 219 | * @pe_sel: Controls which PE to trace. | 200 | * @pe_sel: Controls which PE to trace. |
| 220 | * @cfg: Controls the tracing options. | 201 | * @cfg: Controls the tracing options. |
| 221 | * @eventctrl0: Controls the tracing of arbitrary events. | 202 | * @eventctrl0: Controls the tracing of arbitrary events. |
| 222 | * @eventctrl1: Controls the behavior of the events that @event_ctrl0 selects. | 203 | * @eventctrl1: Controls the behavior of the events that @event_ctrl0 selects. |
| 223 | * @stallctl: If functionality that prevents trace unit buffer overflows | 204 | * @stallctl: If functionality that prevents trace unit buffer overflows |
| 224 | * is available. | 205 | * is available. |
| 225 | * @sysstall: Does the system support stall control of the PE? | ||
| 226 | * @nooverflow: Indicate if overflow prevention is supported. | ||
| 227 | * @stall_ctrl: Enables trace unit functionality that prevents trace | ||
| 228 | * unit buffer overflows. | ||
| 229 | * @ts_size: Global timestamp size field. | ||
| 230 | * @ts_ctrl: Controls the insertion of global timestamps in the | 206 | * @ts_ctrl: Controls the insertion of global timestamps in the |
| 231 | * trace streams. | 207 | * trace streams. |
| 232 | * @syncpr: Indicates if an implementation has a fixed | ||
| 233 | * synchronization period. | ||
| 234 | * @syncfreq: Controls how often trace synchronization requests occur. | 208 | * @syncfreq: Controls how often trace synchronization requests occur. |
| 235 | * @trccci: Indicates if the trace unit supports cycle counting | ||
| 236 | * for instruction. | ||
| 237 | * @ccsize: Indicates the size of the cycle counter in bits. | ||
| 238 | * @ccitmin: minimum value that can be programmed in | ||
| 239 | * the TRCCCCTLR register. | 209 | * the TRCCCCTLR register. |
| 240 | * @ccctlr: Sets the threshold value for cycle counting. | 210 | * @ccctlr: Sets the threshold value for cycle counting. |
| 241 | * @trcbb: Indicates if the trace unit supports branch broadcast tracing. | ||
| 242 | * @q_support: Q element support characteristics. | ||
| 243 | * @vinst_ctrl: Controls instruction trace filtering. | 211 | * @vinst_ctrl: Controls instruction trace filtering. |
| 244 | * @viiectlr: Set or read, the address range comparators. | 212 | * @viiectlr: Set or read, the address range comparators. |
| 245 | * @vissctlr: Set, or read, the single address comparators that control the | 213 | * @vissctlr: Set, or read, the single address comparators that control the |
| @@ -264,73 +232,28 @@ | |||
| 264 | * @addr_acc: Address comparator access type. | 232 | * @addr_acc: Address comparator access type. |
| 265 | * @addr_type: Current status of the comparator register. | 233 | * @addr_type: Current status of the comparator register. |
| 266 | * @ctxid_idx: Context ID index selector. | 234 | * @ctxid_idx: Context ID index selector. |
| 267 | * @ctxid_size: Size of the context ID field to consider. | ||
| 268 | * @ctxid_pid: Value of the context ID comparator. | 235 | * @ctxid_pid: Value of the context ID comparator. |
| 269 | * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise | 236 | * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise |
| 270 | * the same value of ctxid_pid. | 237 | * the same value of ctxid_pid. |
| 271 | * @ctxid_mask0:Context ID comparator mask for comparator 0-3. | 238 | * @ctxid_mask0:Context ID comparator mask for comparator 0-3. |
| 272 | * @ctxid_mask1:Context ID comparator mask for comparator 4-7. | 239 | * @ctxid_mask1:Context ID comparator mask for comparator 4-7. |
| 273 | * @vmid_idx: VM ID index selector. | 240 | * @vmid_idx: VM ID index selector. |
| 274 | * @vmid_size: Size of the VM ID comparator to consider. | ||
| 275 | * @vmid_val: Value of the VM ID comparator. | 241 | * @vmid_val: Value of the VM ID comparator. |
| 276 | * @vmid_mask0: VM ID comparator mask for comparator 0-3. | 242 | * @vmid_mask0: VM ID comparator mask for comparator 0-3. |
| 277 | * @vmid_mask1: VM ID comparator mask for comparator 4-7. | 243 | * @vmid_mask1: VM ID comparator mask for comparator 4-7. |
| 278 | * @s_ex_level: In secure state, indicates whether instruction tracing is | ||
| 279 | * supported for the corresponding Exception level. | ||
| 280 | * @ns_ex_level:In non-secure state, indicates whether instruction tracing is | ||
| 281 | * supported for the corresponding Exception level. | ||
| 282 | * @ext_inp: External input selection. | 244 | * @ext_inp: External input selection. |
| 283 | */ | 245 | */ |
| 284 | struct etmv4_drvdata { | 246 | struct etmv4_config { |
| 285 | void __iomem *base; | ||
| 286 | struct device *dev; | ||
| 287 | struct coresight_device *csdev; | ||
| 288 | spinlock_t spinlock; | ||
| 289 | int cpu; | ||
| 290 | u8 arch; | ||
| 291 | bool enable; | ||
| 292 | bool sticky_enable; | ||
| 293 | bool boot_enable; | ||
| 294 | bool os_unlock; | ||
| 295 | u8 nr_pe; | ||
| 296 | u8 nr_pe_cmp; | ||
| 297 | u8 nr_addr_cmp; | ||
| 298 | u8 nr_cntr; | ||
| 299 | u8 nr_ext_inp; | ||
| 300 | u8 numcidc; | ||
| 301 | u8 numvmidc; | ||
| 302 | u8 nrseqstate; | ||
| 303 | u8 nr_event; | ||
| 304 | u8 nr_resource; | ||
| 305 | u8 nr_ss_cmp; | ||
| 306 | u32 mode; | 247 | u32 mode; |
| 307 | u8 trcid; | ||
| 308 | u8 trcid_size; | ||
| 309 | bool instrp0; | ||
| 310 | bool trccond; | ||
| 311 | bool retstack; | ||
| 312 | bool trc_error; | ||
| 313 | bool atbtrig; | ||
| 314 | bool lpoverride; | ||
| 315 | u32 pe_sel; | 248 | u32 pe_sel; |
| 316 | u32 cfg; | 249 | u32 cfg; |
| 317 | u32 eventctrl0; | 250 | u32 eventctrl0; |
| 318 | u32 eventctrl1; | 251 | u32 eventctrl1; |
| 319 | bool stallctl; | ||
| 320 | bool sysstall; | ||
| 321 | bool nooverflow; | ||
| 322 | u32 stall_ctrl; | 252 | u32 stall_ctrl; |
| 323 | u8 ts_size; | ||
| 324 | u32 ts_ctrl; | 253 | u32 ts_ctrl; |
| 325 | bool syncpr; | ||
| 326 | u32 syncfreq; | 254 | u32 syncfreq; |
| 327 | bool trccci; | ||
| 328 | u8 ccsize; | ||
| 329 | u8 ccitmin; | ||
| 330 | u32 ccctlr; | 255 | u32 ccctlr; |
| 331 | bool trcbb; | ||
| 332 | u32 bb_ctrl; | 256 | u32 bb_ctrl; |
| 333 | bool q_support; | ||
| 334 | u32 vinst_ctrl; | 257 | u32 vinst_ctrl; |
| 335 | u32 viiectlr; | 258 | u32 viiectlr; |
| 336 | u32 vissctlr; | 259 | u32 vissctlr; |
| @@ -353,19 +276,119 @@ struct etmv4_drvdata { | |||
| 353 | u64 addr_acc[ETM_MAX_SINGLE_ADDR_CMP]; | 276 | u64 addr_acc[ETM_MAX_SINGLE_ADDR_CMP]; |
| 354 | u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP]; | 277 | u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP]; |
| 355 | u8 ctxid_idx; | 278 | u8 ctxid_idx; |
| 356 | u8 ctxid_size; | ||
| 357 | u64 ctxid_pid[ETMv4_MAX_CTXID_CMP]; | 279 | u64 ctxid_pid[ETMv4_MAX_CTXID_CMP]; |
| 358 | u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP]; | 280 | u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP]; |
| 359 | u32 ctxid_mask0; | 281 | u32 ctxid_mask0; |
| 360 | u32 ctxid_mask1; | 282 | u32 ctxid_mask1; |
| 361 | u8 vmid_idx; | 283 | u8 vmid_idx; |
| 362 | u8 vmid_size; | ||
| 363 | u64 vmid_val[ETM_MAX_VMID_CMP]; | 284 | u64 vmid_val[ETM_MAX_VMID_CMP]; |
| 364 | u32 vmid_mask0; | 285 | u32 vmid_mask0; |
| 365 | u32 vmid_mask1; | 286 | u32 vmid_mask1; |
| 287 | u32 ext_inp; | ||
| 288 | }; | ||
| 289 | |||
| 290 | /** | ||
| 291 | * struct etm4_drvdata - specifics associated to an ETM component | ||
| 292 | * @base: Memory mapped base address for this component. | ||
| 293 | * @dev: The device entity associated to this component. | ||
| 294 | * @csdev: Component vitals needed by the framework. | ||
| 295 | * @spinlock: Only one at a time pls. | ||
| 296 | * @mode: This tracer's mode, i.e sysFS, Perf or disabled. | ||
| 297 | * @cpu: The cpu this component is affined to. | ||
| 298 | * @arch: ETM version number. | ||
| 299 | * @nr_pe: The number of processing entity available for tracing. | ||
| 300 | * @nr_pe_cmp: The number of processing entity comparator inputs that are | ||
| 301 | * available for tracing. | ||
| 302 | * @nr_addr_cmp:Number of pairs of address comparators available | ||
| 303 | * as found in ETMIDR4 0-3. | ||
| 304 | * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30. | ||
| 305 | * @nr_ext_inp: Number of external input. | ||
| 306 | * @numcidc: Number of contextID comparators. | ||
| 307 | * @numvmidc: Number of VMID comparators. | ||
| 308 | * @nrseqstate: The number of sequencer states that are implemented. | ||
| 309 | * @nr_event: Indicates how many events the trace unit supports. | ||
| 310 | * @nr_resource:The number of resource selection pairs available for tracing. | ||
| 311 | * @nr_ss_cmp: Number of single-shot comparator controls that are available. | ||
| 312 | * @trcid: value of the current ID for this component. | ||
| 313 | * @trcid_size: Indicates the trace ID width. | ||
| 314 | * @ts_size: Global timestamp size field. | ||
| 315 | * @ctxid_size: Size of the context ID field to consider. | ||
| 316 | * @vmid_size: Size of the VM ID comparator to consider. | ||
| 317 | * @ccsize: Indicates the size of the cycle counter in bits. | ||
| 318 | * @ccitmin: minimum value that can be programmed in the TRCCCCTLR register. | ||
| 319 | * @s_ex_level: In secure state, indicates whether instruction tracing is | ||
| 320 | * supported for the corresponding Exception level. | ||
| 321 | * @ns_ex_level:In non-secure state, indicates whether instruction tracing is | ||
| 322 | * supported for the corresponding Exception level. | ||
| 323 | * @sticky_enable: true if ETM base configuration has been done. | ||
| 324 | * @boot_enable:True if we should start tracing at boot time. | ||
| 325 | * @os_unlock: True if access to management registers is allowed. | ||
| 326 | * @instrp0: Tracing of load and store instructions | ||
| 327 | * as P0 elements is supported. | ||
| 328 | * @trcbb: Indicates if the trace unit supports branch broadcast tracing. | ||
| 329 | * @trccond: If the trace unit supports conditional | ||
| 330 | * instruction tracing. | ||
| 331 | * @retstack: Indicates if the implementation supports a return stack. | ||
| 332 | * @trccci: Indicates if the trace unit supports cycle counting | ||
| 333 | * for instruction. | ||
| 334 | * @q_support: Q element support characteristics. | ||
| 335 | * @trc_error: Whether a trace unit can trace a system | ||
| 336 | * error exception. | ||
| 337 | * @syncpr: Indicates if an implementation has a fixed | ||
| 338 | * synchronization period. | ||
| 339 | * @stall_ctrl: Enables trace unit functionality that prevents trace | ||
| 340 | * unit buffer overflows. | ||
| 341 | * @sysstall: Does the system support stall control of the PE? | ||
| 342 | * @nooverflow: Indicate if overflow prevention is supported. | ||
| 343 | * @atbtrig: If the implementation can support ATB triggers | ||
| 344 | * @lpoverride: If the implementation can support low-power state override. | ||
| 345 | * @config: structure holding configuration parameters. | ||
| 346 | */ | ||
| 347 | struct etmv4_drvdata { | ||
| 348 | void __iomem *base; | ||
| 349 | struct device *dev; | ||
| 350 | struct coresight_device *csdev; | ||
| 351 | spinlock_t spinlock; | ||
| 352 | local_t mode; | ||
| 353 | int cpu; | ||
| 354 | u8 arch; | ||
| 355 | u8 nr_pe; | ||
| 356 | u8 nr_pe_cmp; | ||
| 357 | u8 nr_addr_cmp; | ||
| 358 | u8 nr_cntr; | ||
| 359 | u8 nr_ext_inp; | ||
| 360 | u8 numcidc; | ||
| 361 | u8 numvmidc; | ||
| 362 | u8 nrseqstate; | ||
| 363 | u8 nr_event; | ||
| 364 | u8 nr_resource; | ||
| 365 | u8 nr_ss_cmp; | ||
| 366 | u8 trcid; | ||
| 367 | u8 trcid_size; | ||
| 368 | u8 ts_size; | ||
| 369 | u8 ctxid_size; | ||
| 370 | u8 vmid_size; | ||
| 371 | u8 ccsize; | ||
| 372 | u8 ccitmin; | ||
| 366 | u8 s_ex_level; | 373 | u8 s_ex_level; |
| 367 | u8 ns_ex_level; | 374 | u8 ns_ex_level; |
| 368 | u32 ext_inp; | 375 | u8 q_support; |
| 376 | bool sticky_enable; | ||
| 377 | bool boot_enable; | ||
| 378 | bool os_unlock; | ||
| 379 | bool instrp0; | ||
| 380 | bool trcbb; | ||
| 381 | bool trccond; | ||
| 382 | bool retstack; | ||
| 383 | bool trccci; | ||
| 384 | bool trc_error; | ||
| 385 | bool syncpr; | ||
| 386 | bool stallctl; | ||
| 387 | bool sysstall; | ||
| 388 | bool nooverflow; | ||
| 389 | bool atbtrig; | ||
| 390 | bool lpoverride; | ||
| 391 | struct etmv4_config config; | ||
| 369 | }; | 392 | }; |
| 370 | 393 | ||
| 371 | /* Address comparator access types */ | 394 | /* Address comparator access types */ |
| @@ -391,4 +414,7 @@ enum etm_addr_type { | |||
| 391 | ETM_ADDR_TYPE_START, | 414 | ETM_ADDR_TYPE_START, |
| 392 | ETM_ADDR_TYPE_STOP, | 415 | ETM_ADDR_TYPE_STOP, |
| 393 | }; | 416 | }; |
| 417 | |||
| 418 | extern const struct attribute_group *coresight_etmv4_groups[]; | ||
| 419 | void etm4_config_trace_mode(struct etmv4_config *config); | ||
| 394 | #endif | 420 | #endif |
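
The two declarations added at the end of this header, coresight_etmv4_groups and etm4_config_trace_mode(), let the sysfs attribute handling live outside the main ETMv4 driver file. As a rough illustration of how such an exported attribute group array is consumed, here is a hypothetical probe excerpt (not part of this patch; example_probe() and its arguments are invented, while coresight_desc, coresight_register() and coresight_etmv4_groups are the real symbols): the driver points desc->groups at the array before registering with the coresight core, exactly as the STM driver further down does with coresight_stm_groups.

	/* Hypothetical sketch for illustration only. */
	static int example_probe(struct amba_device *adev,
				 struct coresight_desc *desc)
	{
		desc->type = CORESIGHT_DEV_TYPE_SOURCE;
		desc->dev = &adev->dev;
		/* sysfs files are created by the core from this group array */
		desc->groups = coresight_etmv4_groups;

		return PTR_ERR_OR_ZERO(coresight_register(desc));
	}
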
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 0600ca30649d..05df789056cc 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c | |||
| @@ -221,7 +221,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 221 | if (IS_ERR(drvdata->csdev)) | 221 | if (IS_ERR(drvdata->csdev)) |
| 222 | return PTR_ERR(drvdata->csdev); | 222 | return PTR_ERR(drvdata->csdev); |
| 223 | 223 | ||
| 224 | dev_info(dev, "FUNNEL initialized\n"); | ||
| 225 | return 0; | 224 | return 0; |
| 226 | } | 225 | } |
| 227 | 226 | ||
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 333eddaed339..ad975c58080d 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h | |||
| @@ -37,12 +37,42 @@ | |||
| 37 | #define ETM_MODE_EXCL_KERN BIT(30) | 37 | #define ETM_MODE_EXCL_KERN BIT(30) |
| 38 | #define ETM_MODE_EXCL_USER BIT(31) | 38 | #define ETM_MODE_EXCL_USER BIT(31) |
| 39 | 39 | ||
| 40 | #define coresight_simple_func(type, name, offset) \ | ||
| 41 | static ssize_t name##_show(struct device *_dev, \ | ||
| 42 | struct device_attribute *attr, char *buf) \ | ||
| 43 | { \ | ||
| 44 | type *drvdata = dev_get_drvdata(_dev->parent); \ | ||
| 45 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ | ||
| 46 | readl_relaxed(drvdata->base + offset)); \ | ||
| 47 | } \ | ||
| 48 | static DEVICE_ATTR_RO(name) | ||
| 49 | |||
| 40 | enum cs_mode { | 50 | enum cs_mode { |
| 41 | CS_MODE_DISABLED, | 51 | CS_MODE_DISABLED, |
| 42 | CS_MODE_SYSFS, | 52 | CS_MODE_SYSFS, |
| 43 | CS_MODE_PERF, | 53 | CS_MODE_PERF, |
| 44 | }; | 54 | }; |
| 45 | 55 | ||
| 56 | /** | ||
| 57 | * struct cs_buffers - keep track of a recording session's specifics | ||
| 58 | * @cur: index of the current buffer | ||
| 59 | * @nr_pages: max number of pages granted to us | ||
| 60 | * @offset: offset within the current buffer | ||
| 61 | * @data_size: how much we collected in this run | ||
| 62 | * @lost: non-zero if we had a HW buffer wrap around | ||
| 63 | * @snapshot: is this run in snapshot mode | ||
| 64 | * @data_pages: a handle to the ring buffer pages | ||
| 65 | */ | ||
| 66 | struct cs_buffers { | ||
| 67 | unsigned int cur; | ||
| 68 | unsigned int nr_pages; | ||
| 69 | unsigned long offset; | ||
| 70 | local_t data_size; | ||
| 71 | local_t lost; | ||
| 72 | bool snapshot; | ||
| 73 | void **data_pages; | ||
| 74 | }; | ||
| 75 | |||
| 46 | static inline void CS_LOCK(void __iomem *addr) | 76 | static inline void CS_LOCK(void __iomem *addr) |
| 47 | { | 77 | { |
| 48 | do { | 78 | do { |
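
The new coresight_simple_func() macro removes the boilerplate of read-only sysfs attributes that simply dump a hardware register. A hand-expanded sketch of a single invocation may help; "struct foo_drvdata" and the 0x0fc offset are made-up placeholders, the STM driver below uses the macro with its real drvdata type and register offsets.

	/* The illustrative invocation... */
	coresight_simple_func(struct foo_drvdata, devid, 0x0fc);

	/* ...expands, roughly, to the hand-written equivalent: */
	static ssize_t devid_show(struct device *_dev,
				  struct device_attribute *attr, char *buf)
	{
		struct foo_drvdata *drvdata = dev_get_drvdata(_dev->parent);

		return scnprintf(buf, PAGE_SIZE, "0x%x\n",
				 readl_relaxed(drvdata->base + 0x0fc));
	}
	static DEVICE_ATTR_RO(devid);
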
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 4299c0569340..c6982e312e15 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c | |||
| @@ -114,7 +114,6 @@ static int replicator_probe(struct platform_device *pdev) | |||
| 114 | 114 | ||
| 115 | pm_runtime_put(&pdev->dev); | 115 | pm_runtime_put(&pdev->dev); |
| 116 | 116 | ||
| 117 | dev_info(dev, "REPLICATOR initialized\n"); | ||
| 118 | return 0; | 117 | return 0; |
| 119 | 118 | ||
| 120 | out_disable_pm: | 119 | out_disable_pm: |
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c new file mode 100644 index 000000000000..73be58a11e4f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-stm.c | |||
| @@ -0,0 +1,920 @@ | |||
| 1 | /* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. | ||
| 2 | * | ||
| 3 | * Description: CoreSight System Trace Macrocell driver | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License version 2 and | ||
| 7 | * only version 2 as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * Initial implementation by Pratik Patel | ||
| 15 | * (C) 2014-2015 Pratik Patel <pratikp@codeaurora.org> | ||
| 16 | * | ||
| 17 | * Serious refactoring, code cleanup and upgrading to the Coresight upstream | ||
| 18 | * framework by Mathieu Poirier | ||
| 19 | * (C) 2015-2016 Mathieu Poirier <mathieu.poirier@linaro.org> | ||
| 20 | * | ||
| 21 | * Guaranteed timing and support for various packet type coming from the | ||
| 22 | * generic STM API by Chunyan Zhang | ||
| 23 | * (C) 2015-2016 Chunyan Zhang <zhang.chunyan@linaro.org> | ||
| 24 | */ | ||
| 25 | #include <asm/local.h> | ||
| 26 | #include <linux/amba/bus.h> | ||
| 27 | #include <linux/bitmap.h> | ||
| 28 | #include <linux/clk.h> | ||
| 29 | #include <linux/coresight.h> | ||
| 30 | #include <linux/coresight-stm.h> | ||
| 31 | #include <linux/err.h> | ||
| 32 | #include <linux/kernel.h> | ||
| 33 | #include <linux/moduleparam.h> | ||
| 34 | #include <linux/of_address.h> | ||
| 35 | #include <linux/perf_event.h> | ||
| 36 | #include <linux/pm_runtime.h> | ||
| 37 | #include <linux/stm.h> | ||
| 38 | |||
| 39 | #include "coresight-priv.h" | ||
| 40 | |||
| 41 | #define STMDMASTARTR 0xc04 | ||
| 42 | #define STMDMASTOPR 0xc08 | ||
| 43 | #define STMDMASTATR 0xc0c | ||
| 44 | #define STMDMACTLR 0xc10 | ||
| 45 | #define STMDMAIDR 0xcfc | ||
| 46 | #define STMHEER 0xd00 | ||
| 47 | #define STMHETER 0xd20 | ||
| 48 | #define STMHEBSR 0xd60 | ||
| 49 | #define STMHEMCR 0xd64 | ||
| 50 | #define STMHEMASTR 0xdf4 | ||
| 51 | #define STMHEFEAT1R 0xdf8 | ||
| 52 | #define STMHEIDR 0xdfc | ||
| 53 | #define STMSPER 0xe00 | ||
| 54 | #define STMSPTER 0xe20 | ||
| 55 | #define STMPRIVMASKR 0xe40 | ||
| 56 | #define STMSPSCR 0xe60 | ||
| 57 | #define STMSPMSCR 0xe64 | ||
| 58 | #define STMSPOVERRIDER 0xe68 | ||
| 59 | #define STMSPMOVERRIDER 0xe6c | ||
| 60 | #define STMSPTRIGCSR 0xe70 | ||
| 61 | #define STMTCSR 0xe80 | ||
| 62 | #define STMTSSTIMR 0xe84 | ||
| 63 | #define STMTSFREQR 0xe8c | ||
| 64 | #define STMSYNCR 0xe90 | ||
| 65 | #define STMAUXCR 0xe94 | ||
| 66 | #define STMSPFEAT1R 0xea0 | ||
| 67 | #define STMSPFEAT2R 0xea4 | ||
| 68 | #define STMSPFEAT3R 0xea8 | ||
| 69 | #define STMITTRIGGER 0xee8 | ||
| 70 | #define STMITATBDATA0 0xeec | ||
| 71 | #define STMITATBCTR2 0xef0 | ||
| 72 | #define STMITATBID 0xef4 | ||
| 73 | #define STMITATBCTR0 0xef8 | ||
| 74 | |||
| 75 | #define STM_32_CHANNEL 32 | ||
| 76 | #define BYTES_PER_CHANNEL 256 | ||
| 77 | #define STM_TRACE_BUF_SIZE 4096 | ||
| 78 | #define STM_SW_MASTER_END 127 | ||
| 79 | |||
| 80 | /* Register bit definition */ | ||
| 81 | #define STMTCSR_BUSY_BIT 23 | ||
| 82 | /* Reserve the first 10 channels for kernel usage */ | ||
| 83 | #define STM_CHANNEL_OFFSET 0 | ||
| 84 | |||
| 85 | enum stm_pkt_type { | ||
| 86 | STM_PKT_TYPE_DATA = 0x98, | ||
| 87 | STM_PKT_TYPE_FLAG = 0xE8, | ||
| 88 | STM_PKT_TYPE_TRIG = 0xF8, | ||
| 89 | }; | ||
| 90 | |||
| 91 | #define stm_channel_addr(drvdata, ch) (drvdata->chs.base + \ | ||
| 92 | (ch * BYTES_PER_CHANNEL)) | ||
| 93 | #define stm_channel_off(type, opts) (type & ~opts) | ||
| 94 | |||
| 95 | static int boot_nr_channel; | ||
| 96 | |||
| 97 | /* | ||
| 98 | * Not really modular but using module_param is the easiest way to | ||
| 99 | * remain consistent with existing use cases for now. | ||
| 100 | */ | ||
| 101 | module_param_named( | ||
| 102 | boot_nr_channel, boot_nr_channel, int, S_IRUGO | ||
| 103 | ); | ||
| 104 | |||
| 105 | /** | ||
| 106 | * struct channel_space - central management entity for extended ports | ||
| 107 | * @base: memory mapped base address where channels start. | ||
| 108 | * @guaranteed: is the channel delivery guaranteed. | ||
| 109 | */ | ||
| 110 | struct channel_space { | ||
| 111 | void __iomem *base; | ||
| 112 | unsigned long *guaranteed; | ||
| 113 | }; | ||
| 114 | |||
| 115 | /** | ||
| 116 | * struct stm_drvdata - specifics associated to an STM component | ||
| 117 | * @base: memory mapped base address for this component. | ||
| 118 | * @dev: the device entity associated to this component. | ||
| 119 | * @atclk: optional clock for the core parts of the STM. | ||
| 120 | * @csdev: component vitals needed by the framework. | ||
| 121 | * @spinlock: only one at a time pls. | ||
| 122 | * @chs: the channels associated with this STM. | ||
| 123 | * @stm: structure associated to the generic STM interface. | ||
| 124 | * @mode: this tracer's mode, i.e. sysFS or disabled. | ||
| 125 | * @traceid: value of the current ID for this component. | ||
| 126 | * @write_bytes: Maximum bytes this STM can write at a time. | ||
| 127 | * @stmsper: settings for register STMSPER. | ||
| 128 | * @stmspscr: settings for register STMSPSCR. | ||
| 129 | * @numsp: the total number of stimulus ports supported by this STM. | ||
| 130 | * @stmheer: settings for register STMHEER. | ||
| 131 | * @stmheter: settings for register STMHETER. | ||
| 132 | * @stmhebsr: settings for register STMHEBSR. | ||
| 133 | */ | ||
| 134 | struct stm_drvdata { | ||
| 135 | void __iomem *base; | ||
| 136 | struct device *dev; | ||
| 137 | struct clk *atclk; | ||
| 138 | struct coresight_device *csdev; | ||
| 139 | spinlock_t spinlock; | ||
| 140 | struct channel_space chs; | ||
| 141 | struct stm_data stm; | ||
| 142 | local_t mode; | ||
| 143 | u8 traceid; | ||
| 144 | u32 write_bytes; | ||
| 145 | u32 stmsper; | ||
| 146 | u32 stmspscr; | ||
| 147 | u32 numsp; | ||
| 148 | u32 stmheer; | ||
| 149 | u32 stmheter; | ||
| 150 | u32 stmhebsr; | ||
| 151 | }; | ||
| 152 | |||
| 153 | static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata) | ||
| 154 | { | ||
| 155 | CS_UNLOCK(drvdata->base); | ||
| 156 | |||
| 157 | writel_relaxed(drvdata->stmhebsr, drvdata->base + STMHEBSR); | ||
| 158 | writel_relaxed(drvdata->stmheter, drvdata->base + STMHETER); | ||
| 159 | writel_relaxed(drvdata->stmheer, drvdata->base + STMHEER); | ||
| 160 | writel_relaxed(0x01 | /* Enable HW event tracing */ | ||
| 161 | 0x04, /* Error detection on event tracing */ | ||
| 162 | drvdata->base + STMHEMCR); | ||
| 163 | |||
| 164 | CS_LOCK(drvdata->base); | ||
| 165 | } | ||
| 166 | |||
| 167 | static void stm_port_enable_hw(struct stm_drvdata *drvdata) | ||
| 168 | { | ||
| 169 | CS_UNLOCK(drvdata->base); | ||
| 170 | /* ATB trigger enable on direct writes to TRIG locations */ | ||
| 171 | writel_relaxed(0x10, | ||
| 172 | drvdata->base + STMSPTRIGCSR); | ||
| 173 | writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR); | ||
| 174 | writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER); | ||
| 175 | |||
| 176 | CS_LOCK(drvdata->base); | ||
| 177 | } | ||
| 178 | |||
| 179 | static void stm_enable_hw(struct stm_drvdata *drvdata) | ||
| 180 | { | ||
| 181 | if (drvdata->stmheer) | ||
| 182 | stm_hwevent_enable_hw(drvdata); | ||
| 183 | |||
| 184 | stm_port_enable_hw(drvdata); | ||
| 185 | |||
| 186 | CS_UNLOCK(drvdata->base); | ||
| 187 | |||
| 188 | /* 4096 byte between synchronisation packets */ | ||
| 189 | writel_relaxed(0xFFF, drvdata->base + STMSYNCR); | ||
| 190 | writel_relaxed((drvdata->traceid << 16 | /* trace id */ | ||
| 191 | 0x02 | /* timestamp enable */ | ||
| 192 | 0x01), /* global STM enable */ | ||
| 193 | drvdata->base + STMTCSR); | ||
| 194 | |||
| 195 | CS_LOCK(drvdata->base); | ||
| 196 | } | ||
| 197 | |||
| 198 | static int stm_enable(struct coresight_device *csdev, | ||
| 199 | struct perf_event_attr *attr, u32 mode) | ||
| 200 | { | ||
| 201 | u32 val; | ||
| 202 | struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 203 | |||
| 204 | if (mode != CS_MODE_SYSFS) | ||
| 205 | return -EINVAL; | ||
| 206 | |||
| 207 | val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode); | ||
| 208 | |||
| 209 | /* Someone is already using the tracer */ | ||
| 210 | if (val) | ||
| 211 | return -EBUSY; | ||
| 212 | |||
| 213 | pm_runtime_get_sync(drvdata->dev); | ||
| 214 | |||
| 215 | spin_lock(&drvdata->spinlock); | ||
| 216 | stm_enable_hw(drvdata); | ||
| 217 | spin_unlock(&drvdata->spinlock); | ||
| 218 | |||
| 219 | dev_info(drvdata->dev, "STM tracing enabled\n"); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | |||
| 223 | static void stm_hwevent_disable_hw(struct stm_drvdata *drvdata) | ||
| 224 | { | ||
| 225 | CS_UNLOCK(drvdata->base); | ||
| 226 | |||
| 227 | writel_relaxed(0x0, drvdata->base + STMHEMCR); | ||
| 228 | writel_relaxed(0x0, drvdata->base + STMHEER); | ||
| 229 | writel_relaxed(0x0, drvdata->base + STMHETER); | ||
| 230 | |||
| 231 | CS_LOCK(drvdata->base); | ||
| 232 | } | ||
| 233 | |||
| 234 | static void stm_port_disable_hw(struct stm_drvdata *drvdata) | ||
| 235 | { | ||
| 236 | CS_UNLOCK(drvdata->base); | ||
| 237 | |||
| 238 | writel_relaxed(0x0, drvdata->base + STMSPER); | ||
| 239 | writel_relaxed(0x0, drvdata->base + STMSPTRIGCSR); | ||
| 240 | |||
| 241 | CS_LOCK(drvdata->base); | ||
| 242 | } | ||
| 243 | |||
| 244 | static void stm_disable_hw(struct stm_drvdata *drvdata) | ||
| 245 | { | ||
| 246 | u32 val; | ||
| 247 | |||
| 248 | CS_UNLOCK(drvdata->base); | ||
| 249 | |||
| 250 | val = readl_relaxed(drvdata->base + STMTCSR); | ||
| 251 | val &= ~0x1; /* clear global STM enable [0] */ | ||
| 252 | writel_relaxed(val, drvdata->base + STMTCSR); | ||
| 253 | |||
| 254 | CS_LOCK(drvdata->base); | ||
| 255 | |||
| 256 | stm_port_disable_hw(drvdata); | ||
| 257 | if (drvdata->stmheer) | ||
| 258 | stm_hwevent_disable_hw(drvdata); | ||
| 259 | } | ||
| 260 | |||
| 261 | static void stm_disable(struct coresight_device *csdev) | ||
| 262 | { | ||
| 263 | struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 264 | |||
| 265 | /* | ||
| 266 | * For as long as the tracer isn't disabled another entity can't | ||
| 267 | * change its status. As such we can read the status here without | ||
| 268 | * fearing it will change under us. | ||
| 269 | */ | ||
| 270 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { | ||
| 271 | spin_lock(&drvdata->spinlock); | ||
| 272 | stm_disable_hw(drvdata); | ||
| 273 | spin_unlock(&drvdata->spinlock); | ||
| 274 | |||
| 275 | /* Wait until the engine has completely stopped */ | ||
| 276 | coresight_timeout(drvdata, STMTCSR, STMTCSR_BUSY_BIT, 0); | ||
| 277 | |||
| 278 | pm_runtime_put(drvdata->dev); | ||
| 279 | |||
| 280 | local_set(&drvdata->mode, CS_MODE_DISABLED); | ||
| 281 | dev_info(drvdata->dev, "STM tracing disabled\n"); | ||
| 282 | } | ||
| 283 | } | ||
| 284 | |||
| 285 | static int stm_trace_id(struct coresight_device *csdev) | ||
| 286 | { | ||
| 287 | struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 288 | |||
| 289 | return drvdata->traceid; | ||
| 290 | } | ||
| 291 | |||
| 292 | static const struct coresight_ops_source stm_source_ops = { | ||
| 293 | .trace_id = stm_trace_id, | ||
| 294 | .enable = stm_enable, | ||
| 295 | .disable = stm_disable, | ||
| 296 | }; | ||
| 297 | |||
| 298 | static const struct coresight_ops stm_cs_ops = { | ||
| 299 | .source_ops = &stm_source_ops, | ||
| 300 | }; | ||
| 301 | |||
| 302 | static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes) | ||
| 303 | { | ||
| 304 | return ((unsigned long)addr & (write_bytes - 1)); | ||
| 305 | } | ||
| 306 | |||
| 307 | static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes) | ||
| 308 | { | ||
| 309 | u8 payload[8]; | ||
| 310 | |||
| 311 | if (stm_addr_unaligned(data, write_bytes)) { | ||
| 312 | memcpy(payload, data, size); | ||
| 313 | data = payload; | ||
| 314 | } | ||
| 315 | |||
| 316 | /* now we are 64bit/32bit aligned */ | ||
| 317 | switch (size) { | ||
| 318 | #ifdef CONFIG_64BIT | ||
| 319 | case 8: | ||
| 320 | writeq_relaxed(*(u64 *)data, addr); | ||
| 321 | break; | ||
| 322 | #endif | ||
| 323 | case 4: | ||
| 324 | writel_relaxed(*(u32 *)data, addr); | ||
| 325 | break; | ||
| 326 | case 2: | ||
| 327 | writew_relaxed(*(u16 *)data, addr); | ||
| 328 | break; | ||
| 329 | case 1: | ||
| 330 | writeb_relaxed(*(u8 *)data, addr); | ||
| 331 | break; | ||
| 332 | default: | ||
| 333 | break; | ||
| 334 | } | ||
| 335 | } | ||
| 336 | |||
| 337 | static int stm_generic_link(struct stm_data *stm_data, | ||
| 338 | unsigned int master, unsigned int channel) | ||
| 339 | { | ||
| 340 | struct stm_drvdata *drvdata = container_of(stm_data, | ||
| 341 | struct stm_drvdata, stm); | ||
| 342 | if (!drvdata || !drvdata->csdev) | ||
| 343 | return -EINVAL; | ||
| 344 | |||
| 345 | return coresight_enable(drvdata->csdev); | ||
| 346 | } | ||
| 347 | |||
| 348 | static void stm_generic_unlink(struct stm_data *stm_data, | ||
| 349 | unsigned int master, unsigned int channel) | ||
| 350 | { | ||
| 351 | struct stm_drvdata *drvdata = container_of(stm_data, | ||
| 352 | struct stm_drvdata, stm); | ||
| 353 | if (!drvdata || !drvdata->csdev) | ||
| 354 | return; | ||
| 355 | |||
| 356 | stm_disable(drvdata->csdev); | ||
| 357 | } | ||
| 358 | |||
| 359 | static long stm_generic_set_options(struct stm_data *stm_data, | ||
| 360 | unsigned int master, | ||
| 361 | unsigned int channel, | ||
| 362 | unsigned int nr_chans, | ||
| 363 | unsigned long options) | ||
| 364 | { | ||
| 365 | struct stm_drvdata *drvdata = container_of(stm_data, | ||
| 366 | struct stm_drvdata, stm); | ||
| 367 | if (!(drvdata && local_read(&drvdata->mode))) | ||
| 368 | return -EINVAL; | ||
| 369 | |||
| 370 | if (channel >= drvdata->numsp) | ||
| 371 | return -EINVAL; | ||
| 372 | |||
| 373 | switch (options) { | ||
| 374 | case STM_OPTION_GUARANTEED: | ||
| 375 | set_bit(channel, drvdata->chs.guaranteed); | ||
| 376 | break; | ||
| 377 | |||
| 378 | case STM_OPTION_INVARIANT: | ||
| 379 | clear_bit(channel, drvdata->chs.guaranteed); | ||
| 380 | break; | ||
| 381 | |||
| 382 | default: | ||
| 383 | return -EINVAL; | ||
| 384 | } | ||
| 385 | |||
| 386 | return 0; | ||
| 387 | } | ||
| 388 | |||
| 389 | static ssize_t stm_generic_packet(struct stm_data *stm_data, | ||
| 390 | unsigned int master, | ||
| 391 | unsigned int channel, | ||
| 392 | unsigned int packet, | ||
| 393 | unsigned int flags, | ||
| 394 | unsigned int size, | ||
| 395 | const unsigned char *payload) | ||
| 396 | { | ||
| 397 | unsigned long ch_addr; | ||
| 398 | struct stm_drvdata *drvdata = container_of(stm_data, | ||
| 399 | struct stm_drvdata, stm); | ||
| 400 | |||
| 401 | if (!(drvdata && local_read(&drvdata->mode))) | ||
| 402 | return 0; | ||
| 403 | |||
| 404 | if (channel >= drvdata->numsp) | ||
| 405 | return 0; | ||
| 406 | |||
| 407 | ch_addr = (unsigned long)stm_channel_addr(drvdata, channel); | ||
| 408 | |||
| 409 | flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0; | ||
| 410 | flags |= test_bit(channel, drvdata->chs.guaranteed) ? | ||
| 411 | STM_FLAG_GUARANTEED : 0; | ||
| 412 | |||
| 413 | if (size > drvdata->write_bytes) | ||
| 414 | size = drvdata->write_bytes; | ||
| 415 | else | ||
| 416 | size = rounddown_pow_of_two(size); | ||
| 417 | |||
| 418 | switch (packet) { | ||
| 419 | case STP_PACKET_FLAG: | ||
| 420 | ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags); | ||
| 421 | |||
| 422 | /* | ||
| 423 | * The generic STM core sets a size of '0' on flag packets. | ||
| 424 | * As such send a flag packet of size '1' and tell the | ||
| 425 | * core we did so. | ||
| 426 | */ | ||
| 427 | stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes); | ||
| 428 | size = 1; | ||
| 429 | break; | ||
| 430 | |||
| 431 | case STP_PACKET_DATA: | ||
| 432 | ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags); | ||
| 433 | stm_send((void *)ch_addr, payload, size, | ||
| 434 | drvdata->write_bytes); | ||
| 435 | break; | ||
| 436 | |||
| 437 | default: | ||
| 438 | return -ENOTSUPP; | ||
| 439 | } | ||
| 440 | |||
| 441 | return size; | ||
| 442 | } | ||
| 443 | |||
| 444 | static ssize_t hwevent_enable_show(struct device *dev, | ||
| 445 | struct device_attribute *attr, char *buf) | ||
| 446 | { | ||
| 447 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 448 | unsigned long val = drvdata->stmheer; | ||
| 449 | |||
| 450 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 451 | } | ||
| 452 | |||
| 453 | static ssize_t hwevent_enable_store(struct device *dev, | ||
| 454 | struct device_attribute *attr, | ||
| 455 | const char *buf, size_t size) | ||
| 456 | { | ||
| 457 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 458 | unsigned long val; | ||
| 459 | int ret = 0; | ||
| 460 | |||
| 461 | ret = kstrtoul(buf, 16, &val); | ||
| 462 | if (ret) | ||
| 463 | return -EINVAL; | ||
| 464 | |||
| 465 | drvdata->stmheer = val; | ||
| 466 | /* HW event enable and trigger go hand in hand */ | ||
| 467 | drvdata->stmheter = val; | ||
| 468 | |||
| 469 | return size; | ||
| 470 | } | ||
| 471 | static DEVICE_ATTR_RW(hwevent_enable); | ||
| 472 | |||
| 473 | static ssize_t hwevent_select_show(struct device *dev, | ||
| 474 | struct device_attribute *attr, char *buf) | ||
| 475 | { | ||
| 476 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 477 | unsigned long val = drvdata->stmhebsr; | ||
| 478 | |||
| 479 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 480 | } | ||
| 481 | |||
| 482 | static ssize_t hwevent_select_store(struct device *dev, | ||
| 483 | struct device_attribute *attr, | ||
| 484 | const char *buf, size_t size) | ||
| 485 | { | ||
| 486 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 487 | unsigned long val; | ||
| 488 | int ret = 0; | ||
| 489 | |||
| 490 | ret = kstrtoul(buf, 16, &val); | ||
| 491 | if (ret) | ||
| 492 | return -EINVAL; | ||
| 493 | |||
| 494 | drvdata->stmhebsr = val; | ||
| 495 | |||
| 496 | return size; | ||
| 497 | } | ||
| 498 | static DEVICE_ATTR_RW(hwevent_select); | ||
| 499 | |||
| 500 | static ssize_t port_select_show(struct device *dev, | ||
| 501 | struct device_attribute *attr, char *buf) | ||
| 502 | { | ||
| 503 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 504 | unsigned long val; | ||
| 505 | |||
| 506 | if (!local_read(&drvdata->mode)) { | ||
| 507 | val = drvdata->stmspscr; | ||
| 508 | } else { | ||
| 509 | spin_lock(&drvdata->spinlock); | ||
| 510 | val = readl_relaxed(drvdata->base + STMSPSCR); | ||
| 511 | spin_unlock(&drvdata->spinlock); | ||
| 512 | } | ||
| 513 | |||
| 514 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 515 | } | ||
| 516 | |||
| 517 | static ssize_t port_select_store(struct device *dev, | ||
| 518 | struct device_attribute *attr, | ||
| 519 | const char *buf, size_t size) | ||
| 520 | { | ||
| 521 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 522 | unsigned long val, stmsper; | ||
| 523 | int ret = 0; | ||
| 524 | |||
| 525 | ret = kstrtoul(buf, 16, &val); | ||
| 526 | if (ret) | ||
| 527 | return ret; | ||
| 528 | |||
| 529 | spin_lock(&drvdata->spinlock); | ||
| 530 | drvdata->stmspscr = val; | ||
| 531 | |||
| 532 | if (local_read(&drvdata->mode)) { | ||
| 533 | CS_UNLOCK(drvdata->base); | ||
| 534 | /* Process as per ARM's TRM recommendation */ | ||
| 535 | stmsper = readl_relaxed(drvdata->base + STMSPER); | ||
| 536 | writel_relaxed(0x0, drvdata->base + STMSPER); | ||
| 537 | writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR); | ||
| 538 | writel_relaxed(stmsper, drvdata->base + STMSPER); | ||
| 539 | CS_LOCK(drvdata->base); | ||
| 540 | } | ||
| 541 | spin_unlock(&drvdata->spinlock); | ||
| 542 | |||
| 543 | return size; | ||
| 544 | } | ||
| 545 | static DEVICE_ATTR_RW(port_select); | ||
| 546 | |||
| 547 | static ssize_t port_enable_show(struct device *dev, | ||
| 548 | struct device_attribute *attr, char *buf) | ||
| 549 | { | ||
| 550 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 551 | unsigned long val; | ||
| 552 | |||
| 553 | if (!local_read(&drvdata->mode)) { | ||
| 554 | val = drvdata->stmsper; | ||
| 555 | } else { | ||
| 556 | spin_lock(&drvdata->spinlock); | ||
| 557 | val = readl_relaxed(drvdata->base + STMSPER); | ||
| 558 | spin_unlock(&drvdata->spinlock); | ||
| 559 | } | ||
| 560 | |||
| 561 | return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); | ||
| 562 | } | ||
| 563 | |||
| 564 | static ssize_t port_enable_store(struct device *dev, | ||
| 565 | struct device_attribute *attr, | ||
| 566 | const char *buf, size_t size) | ||
| 567 | { | ||
| 568 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 569 | unsigned long val; | ||
| 570 | int ret = 0; | ||
| 571 | |||
| 572 | ret = kstrtoul(buf, 16, &val); | ||
| 573 | if (ret) | ||
| 574 | return ret; | ||
| 575 | |||
| 576 | spin_lock(&drvdata->spinlock); | ||
| 577 | drvdata->stmsper = val; | ||
| 578 | |||
| 579 | if (local_read(&drvdata->mode)) { | ||
| 580 | CS_UNLOCK(drvdata->base); | ||
| 581 | writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER); | ||
| 582 | CS_LOCK(drvdata->base); | ||
| 583 | } | ||
| 584 | spin_unlock(&drvdata->spinlock); | ||
| 585 | |||
| 586 | return size; | ||
| 587 | } | ||
| 588 | static DEVICE_ATTR_RW(port_enable); | ||
| 589 | |||
| 590 | static ssize_t traceid_show(struct device *dev, | ||
| 591 | struct device_attribute *attr, char *buf) | ||
| 592 | { | ||
| 593 | unsigned long val; | ||
| 594 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 595 | |||
| 596 | val = drvdata->traceid; | ||
| 597 | return sprintf(buf, "%#lx\n", val); | ||
| 598 | } | ||
| 599 | |||
| 600 | static ssize_t traceid_store(struct device *dev, | ||
| 601 | struct device_attribute *attr, | ||
| 602 | const char *buf, size_t size) | ||
| 603 | { | ||
| 604 | int ret; | ||
| 605 | unsigned long val; | ||
| 606 | struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 607 | |||
| 608 | ret = kstrtoul(buf, 16, &val); | ||
| 609 | if (ret) | ||
| 610 | return ret; | ||
| 611 | |||
| 612 | /* traceid field is 7bit wide on STM32 */ | ||
| 613 | drvdata->traceid = val & 0x7f; | ||
| 614 | return size; | ||
| 615 | } | ||
| 616 | static DEVICE_ATTR_RW(traceid); | ||
| 617 | |||
| 618 | #define coresight_stm_simple_func(name, offset) \ | ||
| 619 | coresight_simple_func(struct stm_drvdata, name, offset) | ||
| 620 | |||
| 621 | coresight_stm_simple_func(tcsr, STMTCSR); | ||
| 622 | coresight_stm_simple_func(tsfreqr, STMTSFREQR); | ||
| 623 | coresight_stm_simple_func(syncr, STMSYNCR); | ||
| 624 | coresight_stm_simple_func(sper, STMSPER); | ||
| 625 | coresight_stm_simple_func(spter, STMSPTER); | ||
| 626 | coresight_stm_simple_func(privmaskr, STMPRIVMASKR); | ||
| 627 | coresight_stm_simple_func(spscr, STMSPSCR); | ||
| 628 | coresight_stm_simple_func(spmscr, STMSPMSCR); | ||
| 629 | coresight_stm_simple_func(spfeat1r, STMSPFEAT1R); | ||
| 630 | coresight_stm_simple_func(spfeat2r, STMSPFEAT2R); | ||
| 631 | coresight_stm_simple_func(spfeat3r, STMSPFEAT3R); | ||
| 632 | coresight_stm_simple_func(devid, CORESIGHT_DEVID); | ||
| 633 | |||
| 634 | static struct attribute *coresight_stm_attrs[] = { | ||
| 635 | &dev_attr_hwevent_enable.attr, | ||
| 636 | &dev_attr_hwevent_select.attr, | ||
| 637 | &dev_attr_port_enable.attr, | ||
| 638 | &dev_attr_port_select.attr, | ||
| 639 | &dev_attr_traceid.attr, | ||
| 640 | NULL, | ||
| 641 | }; | ||
| 642 | |||
| 643 | static struct attribute *coresight_stm_mgmt_attrs[] = { | ||
| 644 | &dev_attr_tcsr.attr, | ||
| 645 | &dev_attr_tsfreqr.attr, | ||
| 646 | &dev_attr_syncr.attr, | ||
| 647 | &dev_attr_sper.attr, | ||
| 648 | &dev_attr_spter.attr, | ||
| 649 | &dev_attr_privmaskr.attr, | ||
| 650 | &dev_attr_spscr.attr, | ||
| 651 | &dev_attr_spmscr.attr, | ||
| 652 | &dev_attr_spfeat1r.attr, | ||
| 653 | &dev_attr_spfeat2r.attr, | ||
| 654 | &dev_attr_spfeat3r.attr, | ||
| 655 | &dev_attr_devid.attr, | ||
| 656 | NULL, | ||
| 657 | }; | ||
| 658 | |||
| 659 | static const struct attribute_group coresight_stm_group = { | ||
| 660 | .attrs = coresight_stm_attrs, | ||
| 661 | }; | ||
| 662 | |||
| 663 | static const struct attribute_group coresight_stm_mgmt_group = { | ||
| 664 | .attrs = coresight_stm_mgmt_attrs, | ||
| 665 | .name = "mgmt", | ||
| 666 | }; | ||
| 667 | |||
| 668 | static const struct attribute_group *coresight_stm_groups[] = { | ||
| 669 | &coresight_stm_group, | ||
| 670 | &coresight_stm_mgmt_group, | ||
| 671 | NULL, | ||
| 672 | }; | ||
| 673 | |||
| 674 | static int stm_get_resource_byname(struct device_node *np, | ||
| 675 | char *ch_base, struct resource *res) | ||
| 676 | { | ||
| 677 | const char *name = NULL; | ||
| 678 | int index = 0, found = 0; | ||
| 679 | |||
| 680 | while (!of_property_read_string_index(np, "reg-names", index, &name)) { | ||
| 681 | if (strcmp(ch_base, name)) { | ||
| 682 | index++; | ||
| 683 | continue; | ||
| 684 | } | ||
| 685 | |||
| 686 | /* We have a match and @index is where it's at */ | ||
| 687 | found = 1; | ||
| 688 | break; | ||
| 689 | } | ||
| 690 | |||
| 691 | if (!found) | ||
| 692 | return -EINVAL; | ||
| 693 | |||
| 694 | return of_address_to_resource(np, index, res); | ||
| 695 | } | ||
| 696 | |||
| 697 | static u32 stm_fundamental_data_size(struct stm_drvdata *drvdata) | ||
| 698 | { | ||
| 699 | u32 stmspfeat2r; | ||
| 700 | |||
| 701 | if (!IS_ENABLED(CONFIG_64BIT)) | ||
| 702 | return 4; | ||
| 703 | |||
| 704 | stmspfeat2r = readl_relaxed(drvdata->base + STMSPFEAT2R); | ||
| 705 | |||
| 706 | /* | ||
| 707 | * bit[15:12] represents the fundamental data size | ||
| 708 | * 0 - 32-bit data | ||
| 709 | * 1 - 64-bit data | ||
| 710 | */ | ||
| 711 | return BMVAL(stmspfeat2r, 12, 15) ? 8 : 4; | ||
| 712 | } | ||
| 713 | |||
| 714 | static u32 stm_num_stimulus_port(struct stm_drvdata *drvdata) | ||
| 715 | { | ||
| 716 | u32 numsp; | ||
| 717 | |||
| 718 | numsp = readl_relaxed(drvdata->base + CORESIGHT_DEVID); | ||
| 719 | /* | ||
| 720 | * NUMSP in STMDEVID is 17 bits long and if equal to 0x0, | ||
| 721 | * 32 stimulus ports are supported. | ||
| 722 | */ | ||
| 723 | numsp &= 0x1ffff; | ||
| 724 | if (!numsp) | ||
| 725 | numsp = STM_32_CHANNEL; | ||
| 726 | return numsp; | ||
| 727 | } | ||
| 728 | |||
| 729 | static void stm_init_default_data(struct stm_drvdata *drvdata) | ||
| 730 | { | ||
| 731 | /* Don't use port selection */ | ||
| 732 | drvdata->stmspscr = 0x0; | ||
| 733 | /* | ||
| 734 | * Enable all channels regardless of their number. When port | ||
| 735 | * selection isn't used (see above) STMSPER applies to all | ||
| 736 | * 32 channel groups available, hence setting all 32 bits to 1. | ||
| 737 | */ | ||
| 738 | drvdata->stmsper = ~0x0; | ||
| 739 | |||
| 740 | /* | ||
| 741 | * The trace ID value for *ETM* tracers start at CPU_ID * 2 + 0x10 and | ||
| 742 | * anything equal to or higher than 0x70 is reserved. Since 0x00 is | ||
| 743 | * also reserved the STM trace ID needs to be higher than 0x00 and | ||
| 744 | * lower than 0x10. | ||
| 745 | */ | ||
| 746 | drvdata->traceid = 0x1; | ||
| 747 | |||
| 748 | /* Set invariant transaction timing on all channels */ | ||
| 749 | bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp); | ||
| 750 | } | ||
| 751 | |||
| 752 | static void stm_init_generic_data(struct stm_drvdata *drvdata) | ||
| 753 | { | ||
| 754 | drvdata->stm.name = dev_name(drvdata->dev); | ||
| 755 | |||
| 756 | /* | ||
| 757 | * MasterIDs are assigned at HW design phase. As such the core is | ||
| 758 | * using a single master for interaction with this device. | ||
| 759 | */ | ||
| 760 | drvdata->stm.sw_start = 1; | ||
| 761 | drvdata->stm.sw_end = 1; | ||
| 762 | drvdata->stm.hw_override = true; | ||
| 763 | drvdata->stm.sw_nchannels = drvdata->numsp; | ||
| 764 | drvdata->stm.packet = stm_generic_packet; | ||
| 765 | drvdata->stm.link = stm_generic_link; | ||
| 766 | drvdata->stm.unlink = stm_generic_unlink; | ||
| 767 | drvdata->stm.set_options = stm_generic_set_options; | ||
| 768 | } | ||
| 769 | |||
| 770 | static int stm_probe(struct amba_device *adev, const struct amba_id *id) | ||
| 771 | { | ||
| 772 | int ret; | ||
| 773 | void __iomem *base; | ||
| 774 | unsigned long *guaranteed; | ||
| 775 | struct device *dev = &adev->dev; | ||
| 776 | struct coresight_platform_data *pdata = NULL; | ||
| 777 | struct stm_drvdata *drvdata; | ||
| 778 | struct resource *res = &adev->res; | ||
| 779 | struct resource ch_res; | ||
| 780 | size_t res_size, bitmap_size; | ||
| 781 | struct coresight_desc *desc; | ||
| 782 | struct device_node *np = adev->dev.of_node; | ||
| 783 | |||
| 784 | if (np) { | ||
| 785 | pdata = of_get_coresight_platform_data(dev, np); | ||
| 786 | if (IS_ERR(pdata)) | ||
| 787 | return PTR_ERR(pdata); | ||
| 788 | adev->dev.platform_data = pdata; | ||
| 789 | } | ||
| 790 | drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); | ||
| 791 | if (!drvdata) | ||
| 792 | return -ENOMEM; | ||
| 793 | |||
| 794 | drvdata->dev = &adev->dev; | ||
| 795 | drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ | ||
| 796 | if (!IS_ERR(drvdata->atclk)) { | ||
| 797 | ret = clk_prepare_enable(drvdata->atclk); | ||
| 798 | if (ret) | ||
| 799 | return ret; | ||
| 800 | } | ||
| 801 | dev_set_drvdata(dev, drvdata); | ||
| 802 | |||
| 803 | base = devm_ioremap_resource(dev, res); | ||
| 804 | if (IS_ERR(base)) | ||
| 805 | return PTR_ERR(base); | ||
| 806 | drvdata->base = base; | ||
| 807 | |||
| 808 | ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res); | ||
| 809 | if (ret) | ||
| 810 | return ret; | ||
| 811 | |||
| 812 | base = devm_ioremap_resource(dev, &ch_res); | ||
| 813 | if (IS_ERR(base)) | ||
| 814 | return PTR_ERR(base); | ||
| 815 | drvdata->chs.base = base; | ||
| 816 | |||
| 817 | drvdata->write_bytes = stm_fundamental_data_size(drvdata); | ||
| 818 | |||
| 819 | if (boot_nr_channel) { | ||
| 820 | drvdata->numsp = boot_nr_channel; | ||
| 821 | res_size = min((resource_size_t)(boot_nr_channel * | ||
| 822 | BYTES_PER_CHANNEL), resource_size(res)); | ||
| 823 | } else { | ||
| 824 | drvdata->numsp = stm_num_stimulus_port(drvdata); | ||
| 825 | res_size = min((resource_size_t)(drvdata->numsp * | ||
| 826 | BYTES_PER_CHANNEL), resource_size(res)); | ||
| 827 | } | ||
| 828 | bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long); | ||
| 829 | |||
| 830 | guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); | ||
| 831 | if (!guaranteed) | ||
| 832 | return -ENOMEM; | ||
| 833 | drvdata->chs.guaranteed = guaranteed; | ||
| 834 | |||
| 835 | spin_lock_init(&drvdata->spinlock); | ||
| 836 | |||
| 837 | stm_init_default_data(drvdata); | ||
| 838 | stm_init_generic_data(drvdata); | ||
| 839 | |||
| 840 | if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) { | ||
| 841 | dev_info(dev, | ||
| 842 | "stm_register_device failed, probing deffered\n"); | ||
| 843 | return -EPROBE_DEFER; | ||
| 844 | } | ||
| 845 | |||
| 846 | desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); | ||
| 847 | if (!desc) { | ||
| 848 | ret = -ENOMEM; | ||
| 849 | goto stm_unregister; | ||
| 850 | } | ||
| 851 | |||
| 852 | desc->type = CORESIGHT_DEV_TYPE_SOURCE; | ||
| 853 | desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE; | ||
| 854 | desc->ops = &stm_cs_ops; | ||
| 855 | desc->pdata = pdata; | ||
| 856 | desc->dev = dev; | ||
| 857 | desc->groups = coresight_stm_groups; | ||
| 858 | drvdata->csdev = coresight_register(desc); | ||
| 859 | if (IS_ERR(drvdata->csdev)) { | ||
| 860 | ret = PTR_ERR(drvdata->csdev); | ||
| 861 | goto stm_unregister; | ||
| 862 | } | ||
| 863 | |||
| 864 | pm_runtime_put(&adev->dev); | ||
| 865 | |||
| 866 | dev_info(dev, "%s initialized\n", (char *)id->data); | ||
| 867 | return 0; | ||
| 868 | |||
| 869 | stm_unregister: | ||
| 870 | stm_unregister_device(&drvdata->stm); | ||
| 871 | return ret; | ||
| 872 | } | ||
| 873 | |||
| 874 | #ifdef CONFIG_PM | ||
| 875 | static int stm_runtime_suspend(struct device *dev) | ||
| 876 | { | ||
| 877 | struct stm_drvdata *drvdata = dev_get_drvdata(dev); | ||
| 878 | |||
| 879 | if (drvdata && !IS_ERR(drvdata->atclk)) | ||
| 880 | clk_disable_unprepare(drvdata->atclk); | ||
| 881 | |||
| 882 | return 0; | ||
| 883 | } | ||
| 884 | |||
| 885 | static int stm_runtime_resume(struct device *dev) | ||
| 886 | { | ||
| 887 | struct stm_drvdata *drvdata = dev_get_drvdata(dev); | ||
| 888 | |||
| 889 | if (drvdata && !IS_ERR(drvdata->atclk)) | ||
| 890 | clk_prepare_enable(drvdata->atclk); | ||
| 891 | |||
| 892 | return 0; | ||
| 893 | } | ||
| 894 | #endif | ||
| 895 | |||
| 896 | static const struct dev_pm_ops stm_dev_pm_ops = { | ||
| 897 | SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL) | ||
| 898 | }; | ||
| 899 | |||
| 900 | static struct amba_id stm_ids[] = { | ||
| 901 | { | ||
| 902 | .id = 0x0003b962, | ||
| 903 | .mask = 0x0003ffff, | ||
| 904 | .data = "STM32", | ||
| 905 | }, | ||
| 906 | { 0, 0}, | ||
| 907 | }; | ||
| 908 | |||
| 909 | static struct amba_driver stm_driver = { | ||
| 910 | .drv = { | ||
| 911 | .name = "coresight-stm", | ||
| 912 | .owner = THIS_MODULE, | ||
| 913 | .pm = &stm_dev_pm_ops, | ||
| 914 | .suppress_bind_attrs = true, | ||
| 915 | }, | ||
| 916 | .probe = stm_probe, | ||
| 917 | .id_table = stm_ids, | ||
| 918 | }; | ||
| 919 | |||
| 920 | builtin_amba_driver(stm_driver); | ||
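
The write address used by stm_generic_packet() encodes both the stimulus channel and the packet type: stm_channel_addr() selects the 256-byte window belonging to the channel and stm_channel_off() clears the option bits from the packet type before the two are OR'ed together. The stand-alone sketch below reproduces only that arithmetic; the base address and the flag value are assumptions made for illustration (the driver gets the former from the "stm-stimulus-base" resource and the latter from <linux/coresight-stm.h>).

	#include <stdint.h>
	#include <stdio.h>

	#define BYTES_PER_CHANNEL	256
	#define STM_PKT_TYPE_DATA	0x98
	#define EXAMPLE_TS_FLAG		0x08	/* assumed flag bit, illustration only */

	static uintptr_t stm_channel_addr(uintptr_t chs_base, unsigned int ch)
	{
		return chs_base + ch * BYTES_PER_CHANNEL;
	}

	static unsigned int stm_channel_off(unsigned int type, unsigned int opts)
	{
		return type & ~opts;
	}

	int main(void)
	{
		uintptr_t base = 0x21000000;	/* made-up stimulus base */
		unsigned int channel = 3;

		/* data packet address with the (assumed) timestamp bit cleared from the type */
		uintptr_t addr = stm_channel_addr(base, channel) |
				 stm_channel_off(STM_PKT_TYPE_DATA, EXAMPLE_TS_FLAG);

		printf("write to %#lx\n", (unsigned long)addr);
		return 0;
	}

Note also that before the MMIO write the payload size is clamped to drvdata->write_bytes and rounded down to a power of two, so a single packet never exceeds the fundamental data size reported by STMSPFEAT2R.
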
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c new file mode 100644 index 000000000000..466af86fd76f --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c | |||
| @@ -0,0 +1,604 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(C) 2016 Linaro Limited. All rights reserved. | ||
| 3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/circ_buf.h> | ||
| 19 | #include <linux/coresight.h> | ||
| 20 | #include <linux/perf_event.h> | ||
| 21 | #include <linux/slab.h> | ||
| 22 | #include "coresight-priv.h" | ||
| 23 | #include "coresight-tmc.h" | ||
| 24 | |||
| 25 | void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) | ||
| 26 | { | ||
| 27 | CS_UNLOCK(drvdata->base); | ||
| 28 | |||
| 29 | /* Wait for TMCSReady bit to be set */ | ||
| 30 | tmc_wait_for_tmcready(drvdata); | ||
| 31 | |||
| 32 | writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); | ||
| 33 | writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | | ||
| 34 | TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | | ||
| 35 | TMC_FFCR_TRIGON_TRIGIN, | ||
| 36 | drvdata->base + TMC_FFCR); | ||
| 37 | |||
| 38 | writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); | ||
| 39 | tmc_enable_hw(drvdata); | ||
| 40 | |||
| 41 | CS_LOCK(drvdata->base); | ||
| 42 | } | ||
| 43 | |||
| 44 | static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) | ||
| 45 | { | ||
| 46 | char *bufp; | ||
| 47 | u32 read_data; | ||
| 48 | int i; | ||
| 49 | |||
| 50 | bufp = drvdata->buf; | ||
| 51 | while (1) { | ||
| 52 | for (i = 0; i < drvdata->memwidth; i++) { | ||
| 53 | read_data = readl_relaxed(drvdata->base + TMC_RRD); | ||
| 54 | if (read_data == 0xFFFFFFFF) | ||
| 55 | return; | ||
| 56 | memcpy(bufp, &read_data, 4); | ||
| 57 | bufp += 4; | ||
| 58 | } | ||
| 59 | } | ||
| 60 | } | ||
| 61 | |||
| 62 | static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) | ||
| 63 | { | ||
| 64 | CS_UNLOCK(drvdata->base); | ||
| 65 | |||
| 66 | tmc_flush_and_stop(drvdata); | ||
| 67 | /* | ||
| 68 | * When operating in sysFS mode the content of the buffer needs to be | ||
| 69 | * read before the TMC is disabled. | ||
| 70 | */ | ||
| 71 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) | ||
| 72 | tmc_etb_dump_hw(drvdata); | ||
| 73 | tmc_disable_hw(drvdata); | ||
| 74 | |||
| 75 | CS_LOCK(drvdata->base); | ||
| 76 | } | ||
| 77 | |||
| 78 | static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) | ||
| 79 | { | ||
| 80 | CS_UNLOCK(drvdata->base); | ||
| 81 | |||
| 82 | /* Wait for TMCSReady bit to be set */ | ||
| 83 | tmc_wait_for_tmcready(drvdata); | ||
| 84 | |||
| 85 | writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); | ||
| 86 | writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI, | ||
| 87 | drvdata->base + TMC_FFCR); | ||
| 88 | writel_relaxed(0x0, drvdata->base + TMC_BUFWM); | ||
| 89 | tmc_enable_hw(drvdata); | ||
| 90 | |||
| 91 | CS_LOCK(drvdata->base); | ||
| 92 | } | ||
| 93 | |||
| 94 | static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) | ||
| 95 | { | ||
| 96 | CS_UNLOCK(drvdata->base); | ||
| 97 | |||
| 98 | tmc_flush_and_stop(drvdata); | ||
| 99 | tmc_disable_hw(drvdata); | ||
| 100 | |||
| 101 | CS_LOCK(drvdata->base); | ||
| 102 | } | ||
| 103 | |||
| 104 | static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode) | ||
| 105 | { | ||
| 106 | int ret = 0; | ||
| 107 | bool used = false; | ||
| 108 | char *buf = NULL; | ||
| 109 | long val; | ||
| 110 | unsigned long flags; | ||
| 111 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 112 | |||
| 113 | /* This shouldn't be happening */ | ||
| 114 | if (WARN_ON(mode != CS_MODE_SYSFS)) | ||
| 115 | return -EINVAL; | ||
| 116 | |||
| 117 | /* | ||
| 118 | * If we don't have a buffer release the lock and allocate memory. | ||
| 119 | * Otherwise keep the lock and move along. | ||
| 120 | */ | ||
| 121 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 122 | if (!drvdata->buf) { | ||
| 123 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 124 | |||
| 125 | /* Allocating the memory here while outside of the spinlock */ | ||
| 126 | buf = kzalloc(drvdata->size, GFP_KERNEL); | ||
| 127 | if (!buf) | ||
| 128 | return -ENOMEM; | ||
| 129 | |||
| 130 | /* Let's try again */ | ||
| 131 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 132 | } | ||
| 133 | |||
| 134 | if (drvdata->reading) { | ||
| 135 | ret = -EBUSY; | ||
| 136 | goto out; | ||
| 137 | } | ||
| 138 | |||
| 139 | val = local_xchg(&drvdata->mode, mode); | ||
| 140 | /* | ||
| 141 | * In sysFS mode we can have multiple writers per sink. Since this | ||
| 142 | * sink is already enabled no memory is needed and the HW need not be | ||
| 143 | * touched. | ||
| 144 | */ | ||
| 145 | if (val == CS_MODE_SYSFS) | ||
| 146 | goto out; | ||
| 147 | |||
| 148 | /* | ||
| 149 | * If drvdata::buf isn't NULL, memory was allocated for a previous | ||
| 150 | * trace run but wasn't read. If so simply zero-out the memory. | ||
| 151 | * Otherwise use the memory allocated above. | ||
| 152 | * | ||
| 153 | * The memory is freed when users read the buffer using the | ||
| 154 | * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for | ||
| 155 | * details. | ||
| 156 | */ | ||
| 157 | if (drvdata->buf) { | ||
| 158 | memset(drvdata->buf, 0, drvdata->size); | ||
| 159 | } else { | ||
| 160 | used = true; | ||
| 161 | drvdata->buf = buf; | ||
| 162 | } | ||
| 163 | |||
| 164 | tmc_etb_enable_hw(drvdata); | ||
| 165 | out: | ||
| 166 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 167 | |||
| 168 | /* Free memory outside the spinlock if need be */ | ||
| 169 | if (!used && buf) | ||
| 170 | kfree(buf); | ||
| 171 | |||
| 172 | if (!ret) | ||
| 173 | dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n"); | ||
| 174 | |||
| 175 | return ret; | ||
| 176 | } | ||
| 177 | |||
| 178 | static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode) | ||
| 179 | { | ||
| 180 | int ret = 0; | ||
| 181 | long val; | ||
| 182 | unsigned long flags; | ||
| 183 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 184 | |||
| 185 | /* This shouldn't be happening */ | ||
| 186 | if (WARN_ON(mode != CS_MODE_PERF)) | ||
| 187 | return -EINVAL; | ||
| 188 | |||
| 189 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 190 | if (drvdata->reading) { | ||
| 191 | ret = -EINVAL; | ||
| 192 | goto out; | ||
| 193 | } | ||
| 194 | |||
| 195 | val = local_xchg(&drvdata->mode, mode); | ||
| 196 | /* | ||
| 197 | * In Perf mode there can be only one writer per sink. There | ||
| 198 | * is also no need to continue if the ETB/ETR is already operated | ||
| 199 | * from sysFS. | ||
| 200 | */ | ||
| 201 | if (val != CS_MODE_DISABLED) { | ||
| 202 | ret = -EINVAL; | ||
| 203 | goto out; | ||
| 204 | } | ||
| 205 | |||
| 206 | tmc_etb_enable_hw(drvdata); | ||
| 207 | out: | ||
| 208 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 209 | |||
| 210 | return ret; | ||
| 211 | } | ||
| 212 | |||
| 213 | static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode) | ||
| 214 | { | ||
| 215 | switch (mode) { | ||
| 216 | case CS_MODE_SYSFS: | ||
| 217 | return tmc_enable_etf_sink_sysfs(csdev, mode); | ||
| 218 | case CS_MODE_PERF: | ||
| 219 | return tmc_enable_etf_sink_perf(csdev, mode); | ||
| 220 | } | ||
| 221 | |||
| 222 | /* We shouldn't be here */ | ||
| 223 | return -EINVAL; | ||
| 224 | } | ||
| 225 | |||
| 226 | static void tmc_disable_etf_sink(struct coresight_device *csdev) | ||
| 227 | { | ||
| 228 | long val; | ||
| 229 | unsigned long flags; | ||
| 230 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 231 | |||
| 232 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 233 | if (drvdata->reading) { | ||
| 234 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 235 | return; | ||
| 236 | } | ||
| 237 | |||
| 238 | val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); | ||
| 239 | /* Disable the TMC only if it needs to */ | ||
| 240 | if (val != CS_MODE_DISABLED) | ||
| 241 | tmc_etb_disable_hw(drvdata); | ||
| 242 | |||
| 243 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 244 | |||
| 245 | dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n"); | ||
| 246 | } | ||
| 247 | |||
| 248 | static int tmc_enable_etf_link(struct coresight_device *csdev, | ||
| 249 | int inport, int outport) | ||
| 250 | { | ||
| 251 | unsigned long flags; | ||
| 252 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 253 | |||
| 254 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 255 | if (drvdata->reading) { | ||
| 256 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 257 | return -EBUSY; | ||
| 258 | } | ||
| 259 | |||
| 260 | tmc_etf_enable_hw(drvdata); | ||
| 261 | local_set(&drvdata->mode, CS_MODE_SYSFS); | ||
| 262 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 263 | |||
| 264 | dev_info(drvdata->dev, "TMC-ETF enabled\n"); | ||
| 265 | return 0; | ||
| 266 | } | ||
| 267 | |||
| 268 | static void tmc_disable_etf_link(struct coresight_device *csdev, | ||
| 269 | int inport, int outport) | ||
| 270 | { | ||
| 271 | unsigned long flags; | ||
| 272 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 273 | |||
| 274 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 275 | if (drvdata->reading) { | ||
| 276 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 277 | return; | ||
| 278 | } | ||
| 279 | |||
| 280 | tmc_etf_disable_hw(drvdata); | ||
| 281 | local_set(&drvdata->mode, CS_MODE_DISABLED); | ||
| 282 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 283 | |||
| 284 | dev_info(drvdata->dev, "TMC disabled\n"); | ||
| 285 | } | ||
| 286 | |||
| 287 | static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu, | ||
| 288 | void **pages, int nr_pages, bool overwrite) | ||
| 289 | { | ||
| 290 | int node; | ||
| 291 | struct cs_buffers *buf; | ||
| 292 | |||
| 293 | if (cpu == -1) | ||
| 294 | cpu = smp_processor_id(); | ||
| 295 | node = cpu_to_node(cpu); | ||
| 296 | |||
| 297 | /* Allocate memory structure for interaction with Perf */ | ||
| 298 | buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node); | ||
| 299 | if (!buf) | ||
| 300 | return NULL; | ||
| 301 | |||
| 302 | buf->snapshot = overwrite; | ||
| 303 | buf->nr_pages = nr_pages; | ||
| 304 | buf->data_pages = pages; | ||
| 305 | |||
| 306 | return buf; | ||
| 307 | } | ||
| 308 | |||
| 309 | static void tmc_free_etf_buffer(void *config) | ||
| 310 | { | ||
| 311 | struct cs_buffers *buf = config; | ||
| 312 | |||
| 313 | kfree(buf); | ||
| 314 | } | ||
| 315 | |||
| 316 | static int tmc_set_etf_buffer(struct coresight_device *csdev, | ||
| 317 | struct perf_output_handle *handle, | ||
| 318 | void *sink_config) | ||
| 319 | { | ||
| 320 | int ret = 0; | ||
| 321 | unsigned long head; | ||
| 322 | struct cs_buffers *buf = sink_config; | ||
| 323 | |||
| 324 | /* wrap head around to the amount of space we have */ | ||
| 325 | head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); | ||
| 326 | |||
| 327 | /* find the page to write to */ | ||
| 328 | buf->cur = head / PAGE_SIZE; | ||
| 329 | |||
| 330 | /* and offset within that page */ | ||
| 331 | buf->offset = head % PAGE_SIZE; | ||
| 332 | |||
| 333 | local_set(&buf->data_size, 0); | ||
| 334 | |||
| 335 | return ret; | ||
| 336 | } | ||
| 337 | |||
| 338 | static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev, | ||
| 339 | struct perf_output_handle *handle, | ||
| 340 | void *sink_config, bool *lost) | ||
| 341 | { | ||
| 342 | long size = 0; | ||
| 343 | struct cs_buffers *buf = sink_config; | ||
| 344 | |||
| 345 | if (buf) { | ||
| 346 | /* | ||
| 347 | * In snapshot mode ->data_size holds the new address of the | ||
| 348 | * ring buffer's head. The size itself is the whole address | ||
| 349 | * range since we want the latest information. | ||
| 350 | */ | ||
| 351 | if (buf->snapshot) | ||
| 352 | handle->head = local_xchg(&buf->data_size, | ||
| 353 | buf->nr_pages << PAGE_SHIFT); | ||
| 354 | /* | ||
| 355 | * Tell the tracer PMU how much we got in this run and if | ||
| 356 | * something went wrong along the way. Nobody else can use | ||
| 357 | * this cs_buffers instance until we are done. As such | ||
| 358 | * resetting parameters here and squaring off with the ring | ||
| 359 | * buffer API in the tracer PMU is fine. | ||
| 360 | */ | ||
| 361 | *lost = !!local_xchg(&buf->lost, 0); | ||
| 362 | size = local_xchg(&buf->data_size, 0); | ||
| 363 | } | ||
| 364 | |||
| 365 | return size; | ||
| 366 | } | ||
| 367 | |||
| 368 | static void tmc_update_etf_buffer(struct coresight_device *csdev, | ||
| 369 | struct perf_output_handle *handle, | ||
| 370 | void *sink_config) | ||
| 371 | { | ||
| 372 | int i, cur; | ||
| 373 | u32 *buf_ptr; | ||
| 374 | u32 read_ptr, write_ptr; | ||
| 375 | u32 status, to_read; | ||
| 376 | unsigned long offset; | ||
| 377 | struct cs_buffers *buf = sink_config; | ||
| 378 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 379 | |||
| 380 | if (!buf) | ||
| 381 | return; | ||
| 382 | |||
| 383 | /* This shouldn't happen */ | ||
| 384 | if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF)) | ||
| 385 | return; | ||
| 386 | |||
| 387 | CS_UNLOCK(drvdata->base); | ||
| 388 | |||
| 389 | tmc_flush_and_stop(drvdata); | ||
| 390 | |||
| 391 | read_ptr = readl_relaxed(drvdata->base + TMC_RRP); | ||
| 392 | write_ptr = readl_relaxed(drvdata->base + TMC_RWP); | ||
| 393 | |||
| 394 | /* | ||
| 395 | * Get a hold of the status register and see if a wrap around | ||
| 396 | * has occurred. If so adjust things accordingly. | ||
| 397 | */ | ||
| 398 | status = readl_relaxed(drvdata->base + TMC_STS); | ||
| 399 | if (status & TMC_STS_FULL) { | ||
| 400 | local_inc(&buf->lost); | ||
| 401 | to_read = drvdata->size; | ||
| 402 | } else { | ||
| 403 | to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size); | ||
| 404 | } | ||
| 405 | |||
| 406 | /* | ||
| 407 | * The TMC RAM buffer may be bigger than the space available in the | ||
| 408 | * perf ring buffer (handle->size). If so advance the RRP so that we | ||
| 409 | * get the latest trace data. | ||
| 410 | */ | ||
| 411 | if (to_read > handle->size) { | ||
| 412 | u32 mask = 0; | ||
| 413 | |||
| 414 | /* | ||
| 415 | * The value written to RRP must be byte-address aligned to | ||
| 416 | * the width of the trace memory databus _and_ to a frame | ||
| 417 | * boundary (16 byte), whichever is the biggest. For example, | ||
| 418 | * for 32-bit, 64-bit and 128-bit wide trace memory, the four | ||
| 419 | * LSBs must be 0s. For 256-bit wide trace memory, the five | ||
| 420 | * LSBs must be 0s. | ||
| 421 | */ | ||
| 422 | switch (drvdata->memwidth) { | ||
| 423 | case TMC_MEM_INTF_WIDTH_32BITS: | ||
| 424 | case TMC_MEM_INTF_WIDTH_64BITS: | ||
| 425 | case TMC_MEM_INTF_WIDTH_128BITS: | ||
| 426 | mask = GENMASK(31, 5); | ||
| 427 | break; | ||
| 428 | case TMC_MEM_INTF_WIDTH_256BITS: | ||
| 429 | mask = GENMASK(31, 6); | ||
| 430 | break; | ||
| 431 | } | ||
| 432 | |||
| 433 | /* | ||
| 434 | * Make sure the new size is aligned in accordance with the | ||
| 435 | * requirement explained above. | ||
| 436 | */ | ||
| 437 | to_read = handle->size & mask; | ||
| 438 | /* Move the RAM read pointer up */ | ||
| 439 | read_ptr = (write_ptr + drvdata->size) - to_read; | ||
| 440 | /* Make sure we are still within our limits */ | ||
| 441 | if (read_ptr > (drvdata->size - 1)) | ||
| 442 | read_ptr -= drvdata->size; | ||
| 443 | /* Tell the HW */ | ||
| 444 | writel_relaxed(read_ptr, drvdata->base + TMC_RRP); | ||
| 445 | local_inc(&buf->lost); | ||
| 446 | } | ||
| 447 | |||
| 448 | cur = buf->cur; | ||
| 449 | offset = buf->offset; | ||
| 450 | |||
| 451 | /* read the trace data, one 32-bit word at a time */ | ||
| 452 | for (i = 0; i < to_read; i += 4) { | ||
| 453 | buf_ptr = buf->data_pages[cur] + offset; | ||
| 454 | *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD); | ||
| 455 | |||
| 456 | offset += 4; | ||
| 457 | if (offset >= PAGE_SIZE) { | ||
| 458 | offset = 0; | ||
| 459 | cur++; | ||
| 460 | /* wrap around at the end of the buffer */ | ||
| 461 | cur &= buf->nr_pages - 1; | ||
| 462 | } | ||
| 463 | } | ||
| 464 | |||
| 465 | /* | ||
| 466 | * In snapshot mode all we have to do is communicate to | ||
| 467 | * perf_aux_output_end() the address of the current head. In full | ||
| 468 | * trace mode the same function expects a size to move rb->aux_head | ||
| 469 | * forward. | ||
| 470 | */ | ||
| 471 | if (buf->snapshot) | ||
| 472 | local_set(&buf->data_size, (cur * PAGE_SIZE) + offset); | ||
| 473 | else | ||
| 474 | local_add(to_read, &buf->data_size); | ||
| 475 | |||
| 476 | CS_LOCK(drvdata->base); | ||
| 477 | } | ||
| 478 | |||
| 479 | static const struct coresight_ops_sink tmc_etf_sink_ops = { | ||
| 480 | .enable = tmc_enable_etf_sink, | ||
| 481 | .disable = tmc_disable_etf_sink, | ||
| 482 | .alloc_buffer = tmc_alloc_etf_buffer, | ||
| 483 | .free_buffer = tmc_free_etf_buffer, | ||
| 484 | .set_buffer = tmc_set_etf_buffer, | ||
| 485 | .reset_buffer = tmc_reset_etf_buffer, | ||
| 486 | .update_buffer = tmc_update_etf_buffer, | ||
| 487 | }; | ||
| 488 | |||
| 489 | static const struct coresight_ops_link tmc_etf_link_ops = { | ||
| 490 | .enable = tmc_enable_etf_link, | ||
| 491 | .disable = tmc_disable_etf_link, | ||
| 492 | }; | ||
| 493 | |||
| 494 | const struct coresight_ops tmc_etb_cs_ops = { | ||
| 495 | .sink_ops = &tmc_etf_sink_ops, | ||
| 496 | }; | ||
| 497 | |||
| 498 | const struct coresight_ops tmc_etf_cs_ops = { | ||
| 499 | .sink_ops = &tmc_etf_sink_ops, | ||
| 500 | .link_ops = &tmc_etf_link_ops, | ||
| 501 | }; | ||
| 502 | |||
| 503 | int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) | ||
| 504 | { | ||
| 505 | long val; | ||
| 506 | enum tmc_mode mode; | ||
| 507 | int ret = 0; | ||
| 508 | unsigned long flags; | ||
| 509 | |||
| 510 | /* config types are set at boot time and never change */ | ||
| 511 | if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && | ||
| 512 | drvdata->config_type != TMC_CONFIG_TYPE_ETF)) | ||
| 513 | return -EINVAL; | ||
| 514 | |||
| 515 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 516 | |||
| 517 | if (drvdata->reading) { | ||
| 518 | ret = -EBUSY; | ||
| 519 | goto out; | ||
| 520 | } | ||
| 521 | |||
| 522 | /* There is no point in reading a TMC in HW FIFO mode */ | ||
| 523 | mode = readl_relaxed(drvdata->base + TMC_MODE); | ||
| 524 | if (mode != TMC_MODE_CIRCULAR_BUFFER) { | ||
| 525 | ret = -EINVAL; | ||
| 526 | goto out; | ||
| 527 | } | ||
| 528 | |||
| 529 | val = local_read(&drvdata->mode); | ||
| 530 | /* Don't interfere if operated from Perf */ | ||
| 531 | if (val == CS_MODE_PERF) { | ||
| 532 | ret = -EINVAL; | ||
| 533 | goto out; | ||
| 534 | } | ||
| 535 | |||
| 536 | /* If drvdata::buf is NULL the trace data has been read already */ | ||
| 537 | if (drvdata->buf == NULL) { | ||
| 538 | ret = -EINVAL; | ||
| 539 | goto out; | ||
| 540 | } | ||
| 541 | |||
| 542 | /* Disable the TMC if need be */ | ||
| 543 | if (val == CS_MODE_SYSFS) | ||
| 544 | tmc_etb_disable_hw(drvdata); | ||
| 545 | |||
| 546 | drvdata->reading = true; | ||
| 547 | out: | ||
| 548 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 549 | |||
| 550 | return ret; | ||
| 551 | } | ||
| 552 | |||
| 553 | int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) | ||
| 554 | { | ||
| 555 | char *buf = NULL; | ||
| 556 | enum tmc_mode mode; | ||
| 557 | unsigned long flags; | ||
| 558 | |||
| 559 | /* config types are set at boot time and never change */ | ||
| 560 | if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB && | ||
| 561 | drvdata->config_type != TMC_CONFIG_TYPE_ETF)) | ||
| 562 | return -EINVAL; | ||
| 563 | |||
| 564 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 565 | |||
| 566 | /* There is no point in reading a TMC in HW FIFO mode */ | ||
| 567 | mode = readl_relaxed(drvdata->base + TMC_MODE); | ||
| 568 | if (mode != TMC_MODE_CIRCULAR_BUFFER) { | ||
| 569 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 570 | return -EINVAL; | ||
| 571 | } | ||
| 572 | |||
| 573 | /* Re-enable the TMC if need be */ | ||
| 574 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { | ||
| 575 | /* | ||
| 576 | * The trace run will continue with the same allocated trace | ||
| 577 | * buffer. As such zero-out the buffer so that we don't end | ||
| 578 | * up with stale data. | ||
| 579 | * | ||
| 580 | * Since the tracer is still enabled drvdata::buf | ||
| 581 | * can't be NULL. | ||
| 582 | */ | ||
| 583 | memset(drvdata->buf, 0, drvdata->size); | ||
| 584 | tmc_etb_enable_hw(drvdata); | ||
| 585 | } else { | ||
| 586 | /* | ||
| 587 | * The ETB/ETF is not tracing and the buffer was just read. | ||
| 588 | * As such prepare to free the trace buffer. | ||
| 589 | */ | ||
| 590 | buf = drvdata->buf; | ||
| 591 | drvdata->buf = NULL; | ||
| 592 | } | ||
| 593 | |||
| 594 | drvdata->reading = false; | ||
| 595 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 596 | |||
| 597 | /* | ||
| 598 | * Free allocated memory outside of the spinlock. There is no need | ||
| 599 | * to assert the validity of 'buf' since calling kfree(NULL) is safe. | ||
| 600 | */ | ||
| 601 | kfree(buf); | ||
| 602 | |||
| 603 | return 0; | ||
| 604 | } | ||
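
Taken together, tmc_read_prepare_etb() and tmc_read_unprepare_etb() bracket one read session from the character device. The sketch below shows the intended calling sequence; the body of the read step is an assumption kept to the essentials, since the real tmc_read() lives in coresight-tmc.c and is not part of this hunk.

static int example_open(struct tmc_drvdata *drvdata)
{
        return tmc_read_prepare_etb(drvdata);   /* stop the TMC, keep ->buf around */
}

/* Hand the snapshot held in drvdata->buf back to user space (assumed body) */
static ssize_t example_read(struct tmc_drvdata *drvdata, char __user *data,
                            size_t len, loff_t *ppos)
{
        if (*ppos >= drvdata->size)
                return 0;
        if (*ppos + len > drvdata->size)
                len = drvdata->size - *ppos;
        if (copy_to_user(data, drvdata->buf + *ppos, len))
                return -EFAULT;
        *ppos += len;
        return len;
}

static int example_release(struct tmc_drvdata *drvdata)
{
        return tmc_read_unprepare_etb(drvdata); /* restart tracing or free ->buf */
}
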
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c new file mode 100644 index 000000000000..847d1b5f2c13 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c | |||
| @@ -0,0 +1,329 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(C) 2016 Linaro Limited. All rights reserved. | ||
| 3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/coresight.h> | ||
| 19 | #include <linux/dma-mapping.h> | ||
| 20 | #include "coresight-priv.h" | ||
| 21 | #include "coresight-tmc.h" | ||
| 22 | |||
| 23 | void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) | ||
| 24 | { | ||
| 25 | u32 axictl; | ||
| 26 | |||
| 27 | /* Zero out the memory to help with debug */ | ||
| 28 | memset(drvdata->vaddr, 0, drvdata->size); | ||
| 29 | |||
| 30 | CS_UNLOCK(drvdata->base); | ||
| 31 | |||
| 32 | /* Wait for TMCSReady bit to be set */ | ||
| 33 | tmc_wait_for_tmcready(drvdata); | ||
| 34 | |||
| 35 | writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ); | ||
| 36 | writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); | ||
| 37 | |||
| 38 | axictl = readl_relaxed(drvdata->base + TMC_AXICTL); | ||
| 39 | axictl |= TMC_AXICTL_WR_BURST_16; | ||
| 40 | writel_relaxed(axictl, drvdata->base + TMC_AXICTL); | ||
| 41 | axictl &= ~TMC_AXICTL_SCT_GAT_MODE; | ||
| 42 | writel_relaxed(axictl, drvdata->base + TMC_AXICTL); | ||
| 43 | axictl = (axictl & | ||
| 44 | ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) | | ||
| 45 | TMC_AXICTL_PROT_CTL_B1; | ||
| 46 | writel_relaxed(axictl, drvdata->base + TMC_AXICTL); | ||
| 47 | |||
| 48 | writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO); | ||
| 49 | writel_relaxed(0x0, drvdata->base + TMC_DBAHI); | ||
| 50 | writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | | ||
| 51 | TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | | ||
| 52 | TMC_FFCR_TRIGON_TRIGIN, | ||
| 53 | drvdata->base + TMC_FFCR); | ||
| 54 | writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); | ||
| 55 | tmc_enable_hw(drvdata); | ||
| 56 | |||
| 57 | CS_LOCK(drvdata->base); | ||
| 58 | } | ||
| 59 | |||
| 60 | static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata) | ||
| 61 | { | ||
| 62 | u32 rwp, val; | ||
| 63 | |||
| 64 | rwp = readl_relaxed(drvdata->base + TMC_RWP); | ||
| 65 | val = readl_relaxed(drvdata->base + TMC_STS); | ||
| 66 | |||
| 67 | /* How much memory do we still have */ | ||
| 68 | if (val & BIT(0)) | ||
| 69 | drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr; | ||
| 70 | else | ||
| 71 | drvdata->buf = drvdata->vaddr; | ||
| 72 | } | ||
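
The pointer arithmetic in tmc_etr_dump_hw() converts the physical write pointer read back from TMC_RWP into the matching position inside the kernel mapping of the DMA buffer. A stand-alone illustration with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t paddr = 0x80000000;            /* drvdata->paddr: DMA address of the buffer */
        uint64_t vaddr = 0xffff000010000000;    /* drvdata->vaddr: kernel mapping of the same memory */
        uint64_t rwp   = 0x80000400;            /* TMC_RWP: write pointer, 1 KB into the buffer */

        /* Mirror of: drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr */
        uint64_t buf = vaddr + rwp - paddr;

        printf("offset into buffer: %#llx\n",
               (unsigned long long)(buf - vaddr));      /* prints 0x400 */
        return 0;
}
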
| 73 | |||
| 74 | static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) | ||
| 75 | { | ||
| 76 | CS_UNLOCK(drvdata->base); | ||
| 77 | |||
| 78 | tmc_flush_and_stop(drvdata); | ||
| 79 | /* | ||
| 80 | * When operating in sysFS mode the content of the buffer needs to be | ||
| 81 | * read before the TMC is disabled. | ||
| 82 | */ | ||
| 83 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) | ||
| 84 | tmc_etr_dump_hw(drvdata); | ||
| 85 | tmc_disable_hw(drvdata); | ||
| 86 | |||
| 87 | CS_LOCK(drvdata->base); | ||
| 88 | } | ||
| 89 | |||
| 90 | static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode) | ||
| 91 | { | ||
| 92 | int ret = 0; | ||
| 93 | bool used = false; | ||
| 94 | long val; | ||
| 95 | unsigned long flags; | ||
| 96 | void __iomem *vaddr = NULL; | ||
| 97 | dma_addr_t paddr; | ||
| 98 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 99 | |||
| 100 | /* This shouldn't be happening */ | ||
| 101 | if (WARN_ON(mode != CS_MODE_SYSFS)) | ||
| 102 | return -EINVAL; | ||
| 103 | |||
| 104 | /* | ||
| 105 | * If we don't have a buffer release the lock and allocate memory. | ||
| 106 | * Otherwise keep the lock and move along. | ||
| 107 | */ | ||
| 108 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 109 | if (!drvdata->vaddr) { | ||
| 110 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Contiguous memory can't be allocated while a spinlock is | ||
| 114 | * held. As such allocate memory here and free it if a buffer | ||
| 115 | * has already been allocated (from a previous session). | ||
| 116 | */ | ||
| 117 | vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size, | ||
| 118 | &paddr, GFP_KERNEL); | ||
| 119 | if (!vaddr) | ||
| 120 | return -ENOMEM; | ||
| 121 | |||
| 122 | /* Let's try again */ | ||
| 123 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 124 | } | ||
| 125 | |||
| 126 | if (drvdata->reading) { | ||
| 127 | ret = -EBUSY; | ||
| 128 | goto out; | ||
| 129 | } | ||
| 130 | |||
| 131 | val = local_xchg(&drvdata->mode, mode); | ||
| 132 | /* | ||
| 133 | * In sysFS mode we can have multiple writers per sink. Since this | ||
| 134 | * sink is already enabled no memory is needed and the HW need not be | ||
| 135 | * touched. | ||
| 136 | */ | ||
| 137 | if (val == CS_MODE_SYSFS) | ||
| 138 | goto out; | ||
| 139 | |||
| 140 | /* | ||
| 141 | * If drvdata::buf == NULL, use the memory allocated above. | ||
| 142 | * Otherwise a buffer still exists from a previous session, so | ||
| 143 | * simply use that. | ||
| 144 | */ | ||
| 145 | if (drvdata->buf == NULL) { | ||
| 146 | used = true; | ||
| 147 | drvdata->vaddr = vaddr; | ||
| 148 | drvdata->paddr = paddr; | ||
| 149 | drvdata->buf = drvdata->vaddr; | ||
| 150 | } | ||
| 151 | |||
| 152 | memset(drvdata->vaddr, 0, drvdata->size); | ||
| 153 | |||
| 154 | tmc_etr_enable_hw(drvdata); | ||
| 155 | out: | ||
| 156 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 157 | |||
| 158 | /* Free memory outside the spinlock if need be */ | ||
| 159 | if (!used && vaddr) | ||
| 160 | dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr); | ||
| 161 | |||
| 162 | if (!ret) | ||
| 163 | dev_info(drvdata->dev, "TMC-ETR enabled\n"); | ||
| 164 | |||
| 165 | return ret; | ||
| 166 | } | ||
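
The locking in tmc_enable_etr_sink_sysfs() is the interesting part: dma_alloc_coherent() may sleep, so the spinlock is dropped around the allocation, retaken, and the new buffer is only committed if nobody installed one in the meantime. Condensed to the bare pattern, with the mode handling and error paths of the real function trimmed:

static int example_lazy_alloc(struct tmc_drvdata *drvdata)
{
        unsigned long flags;
        dma_addr_t paddr;
        void __iomem *vaddr = NULL;
        bool used = false;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->vaddr) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
                                           &paddr, GFP_KERNEL);
                if (!vaddr)
                        return -ENOMEM;
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (!drvdata->buf) {            /* commit the freshly allocated buffer */
                used = true;
                drvdata->vaddr = vaddr;
                drvdata->paddr = paddr;
                drvdata->buf = drvdata->vaddr;
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        if (!used && vaddr)             /* lost the race: free outside the lock */
                dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

        return 0;
}
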
| 167 | |||
| 168 | static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode) | ||
| 169 | { | ||
| 170 | int ret = 0; | ||
| 171 | long val; | ||
| 172 | unsigned long flags; | ||
| 173 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 174 | |||
| 175 | /* This shouldn't be happening */ | ||
| 176 | if (WARN_ON(mode != CS_MODE_PERF)) | ||
| 177 | return -EINVAL; | ||
| 178 | |||
| 179 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 180 | if (drvdata->reading) { | ||
| 181 | ret = -EINVAL; | ||
| 182 | goto out; | ||
| 183 | } | ||
| 184 | |||
| 185 | val = local_xchg(&drvdata->mode, mode); | ||
| 186 | /* | ||
| 187 | * In Perf mode there can be only one writer per sink. There | ||
| 188 | * is also no need to continue if the ETR is already operated | ||
| 189 | * from sysFS. | ||
| 190 | */ | ||
| 191 | if (val != CS_MODE_DISABLED) { | ||
| 192 | ret = -EINVAL; | ||
| 193 | goto out; | ||
| 194 | } | ||
| 195 | |||
| 196 | tmc_etr_enable_hw(drvdata); | ||
| 197 | out: | ||
| 198 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 199 | |||
| 200 | return ret; | ||
| 201 | } | ||
| 202 | |||
| 203 | static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode) | ||
| 204 | { | ||
| 205 | switch (mode) { | ||
| 206 | case CS_MODE_SYSFS: | ||
| 207 | return tmc_enable_etr_sink_sysfs(csdev, mode); | ||
| 208 | case CS_MODE_PERF: | ||
| 209 | return tmc_enable_etr_sink_perf(csdev, mode); | ||
| 210 | } | ||
| 211 | |||
| 212 | /* We shouldn't be here */ | ||
| 213 | return -EINVAL; | ||
| 214 | } | ||
| 215 | |||
| 216 | static void tmc_disable_etr_sink(struct coresight_device *csdev) | ||
| 217 | { | ||
| 218 | long val; | ||
| 219 | unsigned long flags; | ||
| 220 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 221 | |||
| 222 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 223 | if (drvdata->reading) { | ||
| 224 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 225 | return; | ||
| 226 | } | ||
| 227 | |||
| 228 | val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); | ||
| 229 | /* Disable the TMC only if it needs to */ | ||
| 230 | if (val != CS_MODE_DISABLED) | ||
| 231 | tmc_etr_disable_hw(drvdata); | ||
| 232 | |||
| 233 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 234 | |||
| 235 | dev_info(drvdata->dev, "TMC-ETR disabled\n"); | ||
| 236 | } | ||
| 237 | |||
| 238 | static const struct coresight_ops_sink tmc_etr_sink_ops = { | ||
| 239 | .enable = tmc_enable_etr_sink, | ||
| 240 | .disable = tmc_disable_etr_sink, | ||
| 241 | }; | ||
| 242 | |||
| 243 | const struct coresight_ops tmc_etr_cs_ops = { | ||
| 244 | .sink_ops = &tmc_etr_sink_ops, | ||
| 245 | }; | ||
| 246 | |||
| 247 | int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) | ||
| 248 | { | ||
| 249 | int ret = 0; | ||
| 250 | long val; | ||
| 251 | unsigned long flags; | ||
| 252 | |||
| 253 | /* config types are set at boot time and never change */ | ||
| 254 | if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) | ||
| 255 | return -EINVAL; | ||
| 256 | |||
| 257 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 258 | if (drvdata->reading) { | ||
| 259 | ret = -EBUSY; | ||
| 260 | goto out; | ||
| 261 | } | ||
| 262 | |||
| 263 | val = local_read(&drvdata->mode); | ||
| 264 | /* Don't interfere if operated from Perf */ | ||
| 265 | if (val == CS_MODE_PERF) { | ||
| 266 | ret = -EINVAL; | ||
| 267 | goto out; | ||
| 268 | } | ||
| 269 | |||
| 270 | /* If drvdata::buf is NULL the trace data has been read already */ | ||
| 271 | if (drvdata->buf == NULL) { | ||
| 272 | ret = -EINVAL; | ||
| 273 | goto out; | ||
| 274 | } | ||
| 275 | |||
| 276 | /* Disable the TMC if need be */ | ||
| 277 | if (val == CS_MODE_SYSFS) | ||
| 278 | tmc_etr_disable_hw(drvdata); | ||
| 279 | |||
| 280 | drvdata->reading = true; | ||
| 281 | out: | ||
| 282 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 283 | |||
| 284 | return ret; | ||
| 285 | } | ||
| 286 | |||
| 287 | int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) | ||
| 288 | { | ||
| 289 | unsigned long flags; | ||
| 290 | dma_addr_t paddr; | ||
| 291 | void __iomem *vaddr = NULL; | ||
| 292 | |||
| 293 | /* config types are set at boot time and never change */ | ||
| 294 | if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) | ||
| 295 | return -EINVAL; | ||
| 296 | |||
| 297 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 298 | |||
| 299 | /* Re-enable the TMC if need be */ | ||
| 300 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { | ||
| 301 | /* | ||
| 302 | * The trace run will continue with the same allocated trace | ||
| 303 | * buffer. As such zero-out the buffer so that we don't end | ||
| 304 | * up with stale data. | ||
| 305 | * | ||
| 306 | * Since the tracer is still enabled drvdata::buf | ||
| 307 | * can't be NULL. | ||
| 308 | */ | ||
| 309 | memset(drvdata->buf, 0, drvdata->size); | ||
| 310 | tmc_etr_enable_hw(drvdata); | ||
| 311 | } else { | ||
| 312 | /* | ||
| 313 | * The ETR is not tracing and the buffer was just read. | ||
| 314 | * As such prepare to free the trace buffer. | ||
| 315 | */ | ||
| 316 | vaddr = drvdata->vaddr; | ||
| 317 | paddr = drvdata->paddr; | ||
| 318 | drvdata->buf = NULL; | ||
| 319 | } | ||
| 320 | |||
| 321 | drvdata->reading = false; | ||
| 322 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 323 | |||
| 324 | /* Free allocated memory outside of the spinlock */ | ||
| 325 | if (vaddr) | ||
| 326 | dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr); | ||
| 327 | |||
| 328 | return 0; | ||
| 329 | } | ||
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 1be191f5d39c..9e02ac963cd0 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c | |||
| @@ -30,127 +30,27 @@ | |||
| 30 | #include <linux/amba/bus.h> | 30 | #include <linux/amba/bus.h> |
| 31 | 31 | ||
| 32 | #include "coresight-priv.h" | 32 | #include "coresight-priv.h" |
| 33 | #include "coresight-tmc.h" | ||
| 33 | 34 | ||
| 34 | #define TMC_RSZ 0x004 | 35 | void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata) |
| 35 | #define TMC_STS 0x00c | ||
| 36 | #define TMC_RRD 0x010 | ||
| 37 | #define TMC_RRP 0x014 | ||
| 38 | #define TMC_RWP 0x018 | ||
| 39 | #define TMC_TRG 0x01c | ||
| 40 | #define TMC_CTL 0x020 | ||
| 41 | #define TMC_RWD 0x024 | ||
| 42 | #define TMC_MODE 0x028 | ||
| 43 | #define TMC_LBUFLEVEL 0x02c | ||
| 44 | #define TMC_CBUFLEVEL 0x030 | ||
| 45 | #define TMC_BUFWM 0x034 | ||
| 46 | #define TMC_RRPHI 0x038 | ||
| 47 | #define TMC_RWPHI 0x03c | ||
| 48 | #define TMC_AXICTL 0x110 | ||
| 49 | #define TMC_DBALO 0x118 | ||
| 50 | #define TMC_DBAHI 0x11c | ||
| 51 | #define TMC_FFSR 0x300 | ||
| 52 | #define TMC_FFCR 0x304 | ||
| 53 | #define TMC_PSCR 0x308 | ||
| 54 | #define TMC_ITMISCOP0 0xee0 | ||
| 55 | #define TMC_ITTRFLIN 0xee8 | ||
| 56 | #define TMC_ITATBDATA0 0xeec | ||
| 57 | #define TMC_ITATBCTR2 0xef0 | ||
| 58 | #define TMC_ITATBCTR1 0xef4 | ||
| 59 | #define TMC_ITATBCTR0 0xef8 | ||
| 60 | |||
| 61 | /* register description */ | ||
| 62 | /* TMC_CTL - 0x020 */ | ||
| 63 | #define TMC_CTL_CAPT_EN BIT(0) | ||
| 64 | /* TMC_STS - 0x00C */ | ||
| 65 | #define TMC_STS_TRIGGERED BIT(1) | ||
| 66 | /* TMC_AXICTL - 0x110 */ | ||
| 67 | #define TMC_AXICTL_PROT_CTL_B0 BIT(0) | ||
| 68 | #define TMC_AXICTL_PROT_CTL_B1 BIT(1) | ||
| 69 | #define TMC_AXICTL_SCT_GAT_MODE BIT(7) | ||
| 70 | #define TMC_AXICTL_WR_BURST_LEN 0xF00 | ||
| 71 | /* TMC_FFCR - 0x304 */ | ||
| 72 | #define TMC_FFCR_EN_FMT BIT(0) | ||
| 73 | #define TMC_FFCR_EN_TI BIT(1) | ||
| 74 | #define TMC_FFCR_FON_FLIN BIT(4) | ||
| 75 | #define TMC_FFCR_FON_TRIG_EVT BIT(5) | ||
| 76 | #define TMC_FFCR_FLUSHMAN BIT(6) | ||
| 77 | #define TMC_FFCR_TRIGON_TRIGIN BIT(8) | ||
| 78 | #define TMC_FFCR_STOP_ON_FLUSH BIT(12) | ||
| 79 | |||
| 80 | #define TMC_STS_TRIGGERED_BIT 2 | ||
| 81 | #define TMC_FFCR_FLUSHMAN_BIT 6 | ||
| 82 | |||
| 83 | enum tmc_config_type { | ||
| 84 | TMC_CONFIG_TYPE_ETB, | ||
| 85 | TMC_CONFIG_TYPE_ETR, | ||
| 86 | TMC_CONFIG_TYPE_ETF, | ||
| 87 | }; | ||
| 88 | |||
| 89 | enum tmc_mode { | ||
| 90 | TMC_MODE_CIRCULAR_BUFFER, | ||
| 91 | TMC_MODE_SOFTWARE_FIFO, | ||
| 92 | TMC_MODE_HARDWARE_FIFO, | ||
| 93 | }; | ||
| 94 | |||
| 95 | enum tmc_mem_intf_width { | ||
| 96 | TMC_MEM_INTF_WIDTH_32BITS = 0x2, | ||
| 97 | TMC_MEM_INTF_WIDTH_64BITS = 0x3, | ||
| 98 | TMC_MEM_INTF_WIDTH_128BITS = 0x4, | ||
| 99 | TMC_MEM_INTF_WIDTH_256BITS = 0x5, | ||
| 100 | }; | ||
| 101 | |||
| 102 | /** | ||
| 103 | * struct tmc_drvdata - specifics associated to an TMC component | ||
| 104 | * @base: memory mapped base address for this component. | ||
| 105 | * @dev: the device entity associated to this component. | ||
| 106 | * @csdev: component vitals needed by the framework. | ||
| 107 | * @miscdev: specifics to handle "/dev/xyz.tmc" entry. | ||
| 108 | * @spinlock: only one at a time pls. | ||
| 109 | * @read_count: manages preparation of buffer for reading. | ||
| 110 | * @buf: area of memory where trace data get sent. | ||
| 111 | * @paddr: DMA start location in RAM. | ||
| 112 | * @vaddr: virtual representation of @paddr. | ||
| 113 | * @size: @buf size. | ||
| 114 | * @enable: this TMC is being used. | ||
| 115 | * @config_type: TMC variant, must be of type @tmc_config_type. | ||
| 116 | * @trigger_cntr: amount of words to store after a trigger. | ||
| 117 | */ | ||
| 118 | struct tmc_drvdata { | ||
| 119 | void __iomem *base; | ||
| 120 | struct device *dev; | ||
| 121 | struct coresight_device *csdev; | ||
| 122 | struct miscdevice miscdev; | ||
| 123 | spinlock_t spinlock; | ||
| 124 | int read_count; | ||
| 125 | bool reading; | ||
| 126 | char *buf; | ||
| 127 | dma_addr_t paddr; | ||
| 128 | void *vaddr; | ||
| 129 | u32 size; | ||
| 130 | bool enable; | ||
| 131 | enum tmc_config_type config_type; | ||
| 132 | u32 trigger_cntr; | ||
| 133 | }; | ||
| 134 | |||
| 135 | static void tmc_wait_for_ready(struct tmc_drvdata *drvdata) | ||
| 136 | { | 36 | { |
| 137 | /* Ensure formatter, unformatter and hardware fifo are empty */ | 37 | /* Ensure formatter, unformatter and hardware fifo are empty */ |
| 138 | if (coresight_timeout(drvdata->base, | 38 | if (coresight_timeout(drvdata->base, |
| 139 | TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) { | 39 | TMC_STS, TMC_STS_TMCREADY_BIT, 1)) { |
| 140 | dev_err(drvdata->dev, | 40 | dev_err(drvdata->dev, |
| 141 | "timeout observed when probing at offset %#x\n", | 41 | "timeout observed when probing at offset %#x\n", |
| 142 | TMC_STS); | 42 | TMC_STS); |
| 143 | } | 43 | } |
| 144 | } | 44 | } |
| 145 | 45 | ||
| 146 | static void tmc_flush_and_stop(struct tmc_drvdata *drvdata) | 46 | void tmc_flush_and_stop(struct tmc_drvdata *drvdata) |
| 147 | { | 47 | { |
| 148 | u32 ffcr; | 48 | u32 ffcr; |
| 149 | 49 | ||
| 150 | ffcr = readl_relaxed(drvdata->base + TMC_FFCR); | 50 | ffcr = readl_relaxed(drvdata->base + TMC_FFCR); |
| 151 | ffcr |= TMC_FFCR_STOP_ON_FLUSH; | 51 | ffcr |= TMC_FFCR_STOP_ON_FLUSH; |
| 152 | writel_relaxed(ffcr, drvdata->base + TMC_FFCR); | 52 | writel_relaxed(ffcr, drvdata->base + TMC_FFCR); |
| 153 | ffcr |= TMC_FFCR_FLUSHMAN; | 53 | ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT); |
| 154 | writel_relaxed(ffcr, drvdata->base + TMC_FFCR); | 54 | writel_relaxed(ffcr, drvdata->base + TMC_FFCR); |
| 155 | /* Ensure flush completes */ | 55 | /* Ensure flush completes */ |
| 156 | if (coresight_timeout(drvdata->base, | 56 | if (coresight_timeout(drvdata->base, |
| @@ -160,338 +60,73 @@ static void tmc_flush_and_stop(struct tmc_drvdata *drvdata) | |||
| 160 | TMC_FFCR); | 60 | TMC_FFCR); |
| 161 | } | 61 | } |
| 162 | 62 | ||
| 163 | tmc_wait_for_ready(drvdata); | 63 | tmc_wait_for_tmcready(drvdata); |
| 164 | } | 64 | } |
| 165 | 65 | ||
| 166 | static void tmc_enable_hw(struct tmc_drvdata *drvdata) | 66 | void tmc_enable_hw(struct tmc_drvdata *drvdata) |
| 167 | { | 67 | { |
| 168 | writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL); | 68 | writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL); |
| 169 | } | 69 | } |
| 170 | 70 | ||
| 171 | static void tmc_disable_hw(struct tmc_drvdata *drvdata) | 71 | void tmc_disable_hw(struct tmc_drvdata *drvdata) |
| 172 | { | 72 | { |
| 173 | writel_relaxed(0x0, drvdata->base + TMC_CTL); | 73 | writel_relaxed(0x0, drvdata->base + TMC_CTL); |
| 174 | } | 74 | } |
| 175 | 75 | ||
| 176 | static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) | 76 | static int tmc_read_prepare(struct tmc_drvdata *drvdata) |
| 177 | { | ||
| 178 | /* Zero out the memory to help with debug */ | ||
| 179 | memset(drvdata->buf, 0, drvdata->size); | ||
| 180 | |||
| 181 | CS_UNLOCK(drvdata->base); | ||
| 182 | |||
| 183 | writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); | ||
| 184 | writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | | ||
| 185 | TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | | ||
| 186 | TMC_FFCR_TRIGON_TRIGIN, | ||
| 187 | drvdata->base + TMC_FFCR); | ||
| 188 | |||
| 189 | writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); | ||
| 190 | tmc_enable_hw(drvdata); | ||
| 191 | |||
| 192 | CS_LOCK(drvdata->base); | ||
| 193 | } | ||
| 194 | |||
| 195 | static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata) | ||
| 196 | { | ||
| 197 | u32 axictl; | ||
| 198 | |||
| 199 | /* Zero out the memory to help with debug */ | ||
| 200 | memset(drvdata->vaddr, 0, drvdata->size); | ||
| 201 | |||
| 202 | CS_UNLOCK(drvdata->base); | ||
| 203 | |||
| 204 | writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ); | ||
| 205 | writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); | ||
| 206 | |||
| 207 | axictl = readl_relaxed(drvdata->base + TMC_AXICTL); | ||
| 208 | axictl |= TMC_AXICTL_WR_BURST_LEN; | ||
| 209 | writel_relaxed(axictl, drvdata->base + TMC_AXICTL); | ||
| 210 | axictl &= ~TMC_AXICTL_SCT_GAT_MODE; | ||
| 211 | writel_relaxed(axictl, drvdata->base + TMC_AXICTL); | ||
| 212 | axictl = (axictl & | ||
| 213 | ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) | | ||
| 214 | TMC_AXICTL_PROT_CTL_B1; | ||
| 215 | writel_relaxed(axictl, drvdata->base + TMC_AXICTL); | ||
| 216 | |||
| 217 | writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO); | ||
| 218 | writel_relaxed(0x0, drvdata->base + TMC_DBAHI); | ||
| 219 | writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | | ||
| 220 | TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | | ||
| 221 | TMC_FFCR_TRIGON_TRIGIN, | ||
| 222 | drvdata->base + TMC_FFCR); | ||
| 223 | writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); | ||
| 224 | tmc_enable_hw(drvdata); | ||
| 225 | |||
| 226 | CS_LOCK(drvdata->base); | ||
| 227 | } | ||
| 228 | |||
| 229 | static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata) | ||
| 230 | { | ||
| 231 | CS_UNLOCK(drvdata->base); | ||
| 232 | |||
| 233 | writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE); | ||
| 234 | writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI, | ||
| 235 | drvdata->base + TMC_FFCR); | ||
| 236 | writel_relaxed(0x0, drvdata->base + TMC_BUFWM); | ||
| 237 | tmc_enable_hw(drvdata); | ||
| 238 | |||
| 239 | CS_LOCK(drvdata->base); | ||
| 240 | } | ||
| 241 | |||
| 242 | static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) | ||
| 243 | { | ||
| 244 | unsigned long flags; | ||
| 245 | |||
| 246 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 247 | if (drvdata->reading) { | ||
| 248 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 249 | return -EBUSY; | ||
| 250 | } | ||
| 251 | |||
| 252 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { | ||
| 253 | tmc_etb_enable_hw(drvdata); | ||
| 254 | } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { | ||
| 255 | tmc_etr_enable_hw(drvdata); | ||
| 256 | } else { | ||
| 257 | if (mode == TMC_MODE_CIRCULAR_BUFFER) | ||
| 258 | tmc_etb_enable_hw(drvdata); | ||
| 259 | else | ||
| 260 | tmc_etf_enable_hw(drvdata); | ||
| 261 | } | ||
| 262 | drvdata->enable = true; | ||
| 263 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 264 | |||
| 265 | dev_info(drvdata->dev, "TMC enabled\n"); | ||
| 266 | return 0; | ||
| 267 | } | ||
| 268 | |||
| 269 | static int tmc_enable_sink(struct coresight_device *csdev, u32 mode) | ||
| 270 | { | ||
| 271 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 272 | |||
| 273 | return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER); | ||
| 274 | } | ||
| 275 | |||
| 276 | static int tmc_enable_link(struct coresight_device *csdev, int inport, | ||
| 277 | int outport) | ||
| 278 | { | 77 | { |
| 279 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | 78 | int ret = 0; |
| 280 | |||
| 281 | return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO); | ||
| 282 | } | ||
| 283 | 79 | ||
| 284 | static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) | 80 | switch (drvdata->config_type) { |
| 285 | { | 81 | case TMC_CONFIG_TYPE_ETB: |
| 286 | enum tmc_mem_intf_width memwidth; | 82 | case TMC_CONFIG_TYPE_ETF: |
| 287 | u8 memwords; | 83 | ret = tmc_read_prepare_etb(drvdata); |
| 288 | char *bufp; | 84 | break; |
| 289 | u32 read_data; | 85 | case TMC_CONFIG_TYPE_ETR: |
| 290 | int i; | 86 | ret = tmc_read_prepare_etr(drvdata); |
| 291 | 87 | break; | |
| 292 | memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10); | 88 | default: |
| 293 | if (memwidth == TMC_MEM_INTF_WIDTH_32BITS) | 89 | ret = -EINVAL; |
| 294 | memwords = 1; | ||
| 295 | else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS) | ||
| 296 | memwords = 2; | ||
| 297 | else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS) | ||
| 298 | memwords = 4; | ||
| 299 | else | ||
| 300 | memwords = 8; | ||
| 301 | |||
| 302 | bufp = drvdata->buf; | ||
| 303 | while (1) { | ||
| 304 | for (i = 0; i < memwords; i++) { | ||
| 305 | read_data = readl_relaxed(drvdata->base + TMC_RRD); | ||
| 306 | if (read_data == 0xFFFFFFFF) | ||
| 307 | return; | ||
| 308 | memcpy(bufp, &read_data, 4); | ||
| 309 | bufp += 4; | ||
| 310 | } | ||
| 311 | } | 90 | } |
| 312 | } | ||
| 313 | |||
| 314 | static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata) | ||
| 315 | { | ||
| 316 | CS_UNLOCK(drvdata->base); | ||
| 317 | |||
| 318 | tmc_flush_and_stop(drvdata); | ||
| 319 | tmc_etb_dump_hw(drvdata); | ||
| 320 | tmc_disable_hw(drvdata); | ||
| 321 | |||
| 322 | CS_LOCK(drvdata->base); | ||
| 323 | } | ||
| 324 | |||
| 325 | static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata) | ||
| 326 | { | ||
| 327 | u32 rwp, val; | ||
| 328 | 91 | ||
| 329 | rwp = readl_relaxed(drvdata->base + TMC_RWP); | 92 | if (!ret) |
| 330 | val = readl_relaxed(drvdata->base + TMC_STS); | 93 | dev_info(drvdata->dev, "TMC read start\n"); |
| 331 | |||
| 332 | /* How much memory do we still have */ | ||
| 333 | if (val & BIT(0)) | ||
| 334 | drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr; | ||
| 335 | else | ||
| 336 | drvdata->buf = drvdata->vaddr; | ||
| 337 | } | ||
| 338 | 94 | ||
| 339 | static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) | 95 | return ret; |
| 340 | { | ||
| 341 | CS_UNLOCK(drvdata->base); | ||
| 342 | |||
| 343 | tmc_flush_and_stop(drvdata); | ||
| 344 | tmc_etr_dump_hw(drvdata); | ||
| 345 | tmc_disable_hw(drvdata); | ||
| 346 | |||
| 347 | CS_LOCK(drvdata->base); | ||
| 348 | } | ||
| 349 | |||
| 350 | static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata) | ||
| 351 | { | ||
| 352 | CS_UNLOCK(drvdata->base); | ||
| 353 | |||
| 354 | tmc_flush_and_stop(drvdata); | ||
| 355 | tmc_disable_hw(drvdata); | ||
| 356 | |||
| 357 | CS_LOCK(drvdata->base); | ||
| 358 | } | 96 | } |
| 359 | 97 | ||
| 360 | static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode) | 98 | static int tmc_read_unprepare(struct tmc_drvdata *drvdata) |
| 361 | { | 99 | { |
| 362 | unsigned long flags; | 100 | int ret = 0; |
| 363 | |||
| 364 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 365 | if (drvdata->reading) | ||
| 366 | goto out; | ||
| 367 | 101 | ||
| 368 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { | 102 | switch (drvdata->config_type) { |
| 369 | tmc_etb_disable_hw(drvdata); | 103 | case TMC_CONFIG_TYPE_ETB: |
| 370 | } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { | 104 | case TMC_CONFIG_TYPE_ETF: |
| 371 | tmc_etr_disable_hw(drvdata); | 105 | ret = tmc_read_unprepare_etb(drvdata); |
| 372 | } else { | 106 | break; |
| 373 | if (mode == TMC_MODE_CIRCULAR_BUFFER) | 107 | case TMC_CONFIG_TYPE_ETR: |
| 374 | tmc_etb_disable_hw(drvdata); | 108 | ret = tmc_read_unprepare_etr(drvdata); |
| 375 | else | 109 | break; |
| 376 | tmc_etf_disable_hw(drvdata); | 110 | default: |
| 111 | ret = -EINVAL; | ||
| 377 | } | 112 | } |
| 378 | out: | ||
| 379 | drvdata->enable = false; | ||
| 380 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 381 | |||
| 382 | dev_info(drvdata->dev, "TMC disabled\n"); | ||
| 383 | } | ||
| 384 | |||
| 385 | static void tmc_disable_sink(struct coresight_device *csdev) | ||
| 386 | { | ||
| 387 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 388 | |||
| 389 | tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER); | ||
| 390 | } | ||
| 391 | |||
| 392 | static void tmc_disable_link(struct coresight_device *csdev, int inport, | ||
| 393 | int outport) | ||
| 394 | { | ||
| 395 | struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); | ||
| 396 | |||
| 397 | tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO); | ||
| 398 | } | ||
| 399 | |||
| 400 | static const struct coresight_ops_sink tmc_sink_ops = { | ||
| 401 | .enable = tmc_enable_sink, | ||
| 402 | .disable = tmc_disable_sink, | ||
| 403 | }; | ||
| 404 | |||
| 405 | static const struct coresight_ops_link tmc_link_ops = { | ||
| 406 | .enable = tmc_enable_link, | ||
| 407 | .disable = tmc_disable_link, | ||
| 408 | }; | ||
| 409 | |||
| 410 | static const struct coresight_ops tmc_etb_cs_ops = { | ||
| 411 | .sink_ops = &tmc_sink_ops, | ||
| 412 | }; | ||
| 413 | |||
| 414 | static const struct coresight_ops tmc_etr_cs_ops = { | ||
| 415 | .sink_ops = &tmc_sink_ops, | ||
| 416 | }; | ||
| 417 | |||
| 418 | static const struct coresight_ops tmc_etf_cs_ops = { | ||
| 419 | .sink_ops = &tmc_sink_ops, | ||
| 420 | .link_ops = &tmc_link_ops, | ||
| 421 | }; | ||
| 422 | |||
| 423 | static int tmc_read_prepare(struct tmc_drvdata *drvdata) | ||
| 424 | { | ||
| 425 | int ret; | ||
| 426 | unsigned long flags; | ||
| 427 | enum tmc_mode mode; | ||
| 428 | 113 | ||
| 429 | spin_lock_irqsave(&drvdata->spinlock, flags); | 114 | if (!ret) |
| 430 | if (!drvdata->enable) | 115 | dev_info(drvdata->dev, "TMC read end\n"); |
| 431 | goto out; | ||
| 432 | 116 | ||
| 433 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { | ||
| 434 | tmc_etb_disable_hw(drvdata); | ||
| 435 | } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { | ||
| 436 | tmc_etr_disable_hw(drvdata); | ||
| 437 | } else { | ||
| 438 | mode = readl_relaxed(drvdata->base + TMC_MODE); | ||
| 439 | if (mode == TMC_MODE_CIRCULAR_BUFFER) { | ||
| 440 | tmc_etb_disable_hw(drvdata); | ||
| 441 | } else { | ||
| 442 | ret = -ENODEV; | ||
| 443 | goto err; | ||
| 444 | } | ||
| 445 | } | ||
| 446 | out: | ||
| 447 | drvdata->reading = true; | ||
| 448 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 449 | |||
| 450 | dev_info(drvdata->dev, "TMC read start\n"); | ||
| 451 | return 0; | ||
| 452 | err: | ||
| 453 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 454 | return ret; | 117 | return ret; |
| 455 | } | 118 | } |
| 456 | 119 | ||
| 457 | static void tmc_read_unprepare(struct tmc_drvdata *drvdata) | ||
| 458 | { | ||
| 459 | unsigned long flags; | ||
| 460 | enum tmc_mode mode; | ||
| 461 | |||
| 462 | spin_lock_irqsave(&drvdata->spinlock, flags); | ||
| 463 | if (!drvdata->enable) | ||
| 464 | goto out; | ||
| 465 | |||
| 466 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { | ||
| 467 | tmc_etb_enable_hw(drvdata); | ||
| 468 | } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { | ||
| 469 | tmc_etr_enable_hw(drvdata); | ||
| 470 | } else { | ||
| 471 | mode = readl_relaxed(drvdata->base + TMC_MODE); | ||
| 472 | if (mode == TMC_MODE_CIRCULAR_BUFFER) | ||
| 473 | tmc_etb_enable_hw(drvdata); | ||
| 474 | } | ||
| 475 | out: | ||
| 476 | drvdata->reading = false; | ||
| 477 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 478 | |||
| 479 | dev_info(drvdata->dev, "TMC read end\n"); | ||
| 480 | } | ||
| 481 | |||
| 482 | static int tmc_open(struct inode *inode, struct file *file) | 120 | static int tmc_open(struct inode *inode, struct file *file) |
| 483 | { | 121 | { |
| 122 | int ret; | ||
| 484 | struct tmc_drvdata *drvdata = container_of(file->private_data, | 123 | struct tmc_drvdata *drvdata = container_of(file->private_data, |
| 485 | struct tmc_drvdata, miscdev); | 124 | struct tmc_drvdata, miscdev); |
| 486 | int ret = 0; | ||
| 487 | |||
| 488 | if (drvdata->read_count++) | ||
| 489 | goto out; | ||
| 490 | 125 | ||
| 491 | ret = tmc_read_prepare(drvdata); | 126 | ret = tmc_read_prepare(drvdata); |
| 492 | if (ret) | 127 | if (ret) |
| 493 | return ret; | 128 | return ret; |
| 494 | out: | 129 | |
| 495 | nonseekable_open(inode, file); | 130 | nonseekable_open(inode, file); |
| 496 | 131 | ||
| 497 | dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); | 132 | dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); |
| @@ -531,19 +166,14 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, | |||
| 531 | 166 | ||
| 532 | static int tmc_release(struct inode *inode, struct file *file) | 167 | static int tmc_release(struct inode *inode, struct file *file) |
| 533 | { | 168 | { |
| 169 | int ret; | ||
| 534 | struct tmc_drvdata *drvdata = container_of(file->private_data, | 170 | struct tmc_drvdata *drvdata = container_of(file->private_data, |
| 535 | struct tmc_drvdata, miscdev); | 171 | struct tmc_drvdata, miscdev); |
| 536 | 172 | ||
| 537 | if (--drvdata->read_count) { | 173 | ret = tmc_read_unprepare(drvdata); |
| 538 | if (drvdata->read_count < 0) { | 174 | if (ret) |
| 539 | dev_err(drvdata->dev, "mismatched close\n"); | 175 | return ret; |
| 540 | drvdata->read_count = 0; | ||
| 541 | } | ||
| 542 | goto out; | ||
| 543 | } | ||
| 544 | 176 | ||
| 545 | tmc_read_unprepare(drvdata); | ||
| 546 | out: | ||
| 547 | dev_dbg(drvdata->dev, "%s: released\n", __func__); | 177 | dev_dbg(drvdata->dev, "%s: released\n", __func__); |
| 548 | return 0; | 178 | return 0; |
| 549 | } | 179 | } |
| @@ -556,56 +186,71 @@ static const struct file_operations tmc_fops = { | |||
| 556 | .llseek = no_llseek, | 186 | .llseek = no_llseek, |
| 557 | }; | 187 | }; |
| 558 | 188 | ||
| 559 | static ssize_t status_show(struct device *dev, | 189 | static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid) |
| 560 | struct device_attribute *attr, char *buf) | ||
| 561 | { | 190 | { |
| 562 | unsigned long flags; | 191 | enum tmc_mem_intf_width memwidth; |
| 563 | u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg; | ||
| 564 | u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr; | ||
| 565 | u32 devid; | ||
| 566 | struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); | ||
| 567 | 192 | ||
| 568 | pm_runtime_get_sync(drvdata->dev); | 193 | /* |
| 569 | spin_lock_irqsave(&drvdata->spinlock, flags); | 194 | * Excerpt from the TRM: |
| 570 | CS_UNLOCK(drvdata->base); | 195 | * |
| 571 | 196 | * DEVID::MEMWIDTH[10:8] | |
| 572 | tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ); | 197 | * 0x2 Memory interface databus is 32 bits wide. |
| 573 | tmc_sts = readl_relaxed(drvdata->base + TMC_STS); | 198 | * 0x3 Memory interface databus is 64 bits wide. |
| 574 | tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP); | 199 | * 0x4 Memory interface databus is 128 bits wide. |
| 575 | tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP); | 200 | * 0x5 Memory interface databus is 256 bits wide. |
| 576 | tmc_trg = readl_relaxed(drvdata->base + TMC_TRG); | 201 | */ |
| 577 | tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL); | 202 | switch (BMVAL(devid, 8, 10)) { |
| 578 | tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR); | 203 | case 0x2: |
| 579 | tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR); | 204 | memwidth = TMC_MEM_INTF_WIDTH_32BITS; |
| 580 | tmc_mode = readl_relaxed(drvdata->base + TMC_MODE); | 205 | break; |
| 581 | tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR); | 206 | case 0x3: |
| 582 | devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); | 207 | memwidth = TMC_MEM_INTF_WIDTH_64BITS; |
| 208 | break; | ||
| 209 | case 0x4: | ||
| 210 | memwidth = TMC_MEM_INTF_WIDTH_128BITS; | ||
| 211 | break; | ||
| 212 | case 0x5: | ||
| 213 | memwidth = TMC_MEM_INTF_WIDTH_256BITS; | ||
| 214 | break; | ||
| 215 | default: | ||
| 216 | memwidth = 0; | ||
| 217 | } | ||
| 583 | 218 | ||
| 584 | CS_LOCK(drvdata->base); | 219 | return memwidth; |
| 585 | spin_unlock_irqrestore(&drvdata->spinlock, flags); | ||
| 586 | pm_runtime_put(drvdata->dev); | ||
| 587 | |||
| 588 | return sprintf(buf, | ||
| 589 | "Depth:\t\t0x%x\n" | ||
| 590 | "Status:\t\t0x%x\n" | ||
| 591 | "RAM read ptr:\t0x%x\n" | ||
| 592 | "RAM wrt ptr:\t0x%x\n" | ||
| 593 | "Trigger cnt:\t0x%x\n" | ||
| 594 | "Control:\t0x%x\n" | ||
| 595 | "Flush status:\t0x%x\n" | ||
| 596 | "Flush ctrl:\t0x%x\n" | ||
| 597 | "Mode:\t\t0x%x\n" | ||
| 598 | "PSRC:\t\t0x%x\n" | ||
| 599 | "DEVID:\t\t0x%x\n", | ||
| 600 | tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg, | ||
| 601 | tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid); | ||
| 602 | |||
| 603 | return -EINVAL; | ||
| 604 | } | 220 | } |
| 605 | static DEVICE_ATTR_RO(status); | ||
| 606 | 221 | ||
| 607 | static ssize_t trigger_cntr_show(struct device *dev, | 222 | #define coresight_tmc_simple_func(name, offset) \ |
| 608 | struct device_attribute *attr, char *buf) | 223 | coresight_simple_func(struct tmc_drvdata, name, offset) |
| 224 | |||
| 225 | coresight_tmc_simple_func(rsz, TMC_RSZ); | ||
| 226 | coresight_tmc_simple_func(sts, TMC_STS); | ||
| 227 | coresight_tmc_simple_func(rrp, TMC_RRP); | ||
| 228 | coresight_tmc_simple_func(rwp, TMC_RWP); | ||
| 229 | coresight_tmc_simple_func(trg, TMC_TRG); | ||
| 230 | coresight_tmc_simple_func(ctl, TMC_CTL); | ||
| 231 | coresight_tmc_simple_func(ffsr, TMC_FFSR); | ||
| 232 | coresight_tmc_simple_func(ffcr, TMC_FFCR); | ||
| 233 | coresight_tmc_simple_func(mode, TMC_MODE); | ||
| 234 | coresight_tmc_simple_func(pscr, TMC_PSCR); | ||
| 235 | coresight_tmc_simple_func(devid, CORESIGHT_DEVID); | ||
| 236 | |||
| 237 | static struct attribute *coresight_tmc_mgmt_attrs[] = { | ||
| 238 | &dev_attr_rsz.attr, | ||
| 239 | &dev_attr_sts.attr, | ||
| 240 | &dev_attr_rrp.attr, | ||
| 241 | &dev_attr_rwp.attr, | ||
| 242 | &dev_attr_trg.attr, | ||
| 243 | &dev_attr_ctl.attr, | ||
| 244 | &dev_attr_ffsr.attr, | ||
| 245 | &dev_attr_ffcr.attr, | ||
| 246 | &dev_attr_mode.attr, | ||
| 247 | &dev_attr_pscr.attr, | ||
| 248 | &dev_attr_devid.attr, | ||
| 249 | NULL, | ||
| 250 | }; | ||
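
coresight_simple_func() comes from coresight-priv.h and its body is not part of this hunk. As a rough idea of what each invocation above generates -- an assumption about the macro, not its actual definition -- the "rsz" line is expected to expand to a read-only sysfs attribute along these lines:

static ssize_t rsz_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        /* Dump the raw register value, here TMC_RSZ at offset 0x004 */
        return scnprintf(buf, PAGE_SIZE, "0x%x\n",
                         readl_relaxed(drvdata->base + TMC_RSZ));
}
static DEVICE_ATTR_RO(rsz);
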
| 251 | |||
| 252 | ssize_t trigger_cntr_show(struct device *dev, | ||
| 253 | struct device_attribute *attr, char *buf) | ||
| 609 | { | 254 | { |
| 610 | struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); | 255 | struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); |
| 611 | unsigned long val = drvdata->trigger_cntr; | 256 | unsigned long val = drvdata->trigger_cntr; |
| @@ -630,26 +275,25 @@ static ssize_t trigger_cntr_store(struct device *dev, | |||
| 630 | } | 275 | } |
| 631 | static DEVICE_ATTR_RW(trigger_cntr); | 276 | static DEVICE_ATTR_RW(trigger_cntr); |
| 632 | 277 | ||
| 633 | static struct attribute *coresight_etb_attrs[] = { | 278 | static struct attribute *coresight_tmc_attrs[] = { |
| 634 | &dev_attr_trigger_cntr.attr, | 279 | &dev_attr_trigger_cntr.attr, |
| 635 | &dev_attr_status.attr, | ||
| 636 | NULL, | 280 | NULL, |
| 637 | }; | 281 | }; |
| 638 | ATTRIBUTE_GROUPS(coresight_etb); | ||
| 639 | 282 | ||
| 640 | static struct attribute *coresight_etr_attrs[] = { | 283 | static const struct attribute_group coresight_tmc_group = { |
| 641 | &dev_attr_trigger_cntr.attr, | 284 | .attrs = coresight_tmc_attrs, |
| 642 | &dev_attr_status.attr, | ||
| 643 | NULL, | ||
| 644 | }; | 285 | }; |
| 645 | ATTRIBUTE_GROUPS(coresight_etr); | ||
| 646 | 286 | ||
| 647 | static struct attribute *coresight_etf_attrs[] = { | 287 | static const struct attribute_group coresight_tmc_mgmt_group = { |
| 648 | &dev_attr_trigger_cntr.attr, | 288 | .attrs = coresight_tmc_mgmt_attrs, |
| 649 | &dev_attr_status.attr, | 289 | .name = "mgmt", |
| 290 | }; | ||
| 291 | |||
| 292 | const struct attribute_group *coresight_tmc_groups[] = { | ||
| 293 | &coresight_tmc_group, | ||
| 294 | &coresight_tmc_mgmt_group, | ||
| 650 | NULL, | 295 | NULL, |
| 651 | }; | 296 | }; |
| 652 | ATTRIBUTE_GROUPS(coresight_etf); | ||
| 653 | 297 | ||
| 654 | static int tmc_probe(struct amba_device *adev, const struct amba_id *id) | 298 | static int tmc_probe(struct amba_device *adev, const struct amba_id *id) |
| 655 | { | 299 | { |
| @@ -688,6 +332,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 688 | 332 | ||
| 689 | devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); | 333 | devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); |
| 690 | drvdata->config_type = BMVAL(devid, 6, 7); | 334 | drvdata->config_type = BMVAL(devid, 6, 7); |
| 335 | drvdata->memwidth = tmc_get_memwidth(devid); | ||
| 691 | 336 | ||
| 692 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { | 337 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { |
| 693 | if (np) | 338 | if (np) |
| @@ -702,20 +347,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 702 | 347 | ||
| 703 | pm_runtime_put(&adev->dev); | 348 | pm_runtime_put(&adev->dev); |
| 704 | 349 | ||
| 705 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { | ||
| 706 | drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size, | ||
| 707 | &drvdata->paddr, GFP_KERNEL); | ||
| 708 | if (!drvdata->vaddr) | ||
| 709 | return -ENOMEM; | ||
| 710 | |||
| 711 | memset(drvdata->vaddr, 0, drvdata->size); | ||
| 712 | drvdata->buf = drvdata->vaddr; | ||
| 713 | } else { | ||
| 714 | drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL); | ||
| 715 | if (!drvdata->buf) | ||
| 716 | return -ENOMEM; | ||
| 717 | } | ||
| 718 | |||
| 719 | desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); | 350 | desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); |
| 720 | if (!desc) { | 351 | if (!desc) { |
| 721 | ret = -ENOMEM; | 352 | ret = -ENOMEM; |
| @@ -725,20 +356,18 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 725 | desc->pdata = pdata; | 356 | desc->pdata = pdata; |
| 726 | desc->dev = dev; | 357 | desc->dev = dev; |
| 727 | desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; | 358 | desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; |
| 359 | desc->groups = coresight_tmc_groups; | ||
| 728 | 360 | ||
| 729 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { | 361 | if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { |
| 730 | desc->type = CORESIGHT_DEV_TYPE_SINK; | 362 | desc->type = CORESIGHT_DEV_TYPE_SINK; |
| 731 | desc->ops = &tmc_etb_cs_ops; | 363 | desc->ops = &tmc_etb_cs_ops; |
| 732 | desc->groups = coresight_etb_groups; | ||
| 733 | } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { | 364 | } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { |
| 734 | desc->type = CORESIGHT_DEV_TYPE_SINK; | 365 | desc->type = CORESIGHT_DEV_TYPE_SINK; |
| 735 | desc->ops = &tmc_etr_cs_ops; | 366 | desc->ops = &tmc_etr_cs_ops; |
| 736 | desc->groups = coresight_etr_groups; | ||
| 737 | } else { | 367 | } else { |
| 738 | desc->type = CORESIGHT_DEV_TYPE_LINKSINK; | 368 | desc->type = CORESIGHT_DEV_TYPE_LINKSINK; |
| 739 | desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO; | 369 | desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO; |
| 740 | desc->ops = &tmc_etf_cs_ops; | 370 | desc->ops = &tmc_etf_cs_ops; |
| 741 | desc->groups = coresight_etf_groups; | ||
| 742 | } | 371 | } |
| 743 | 372 | ||
| 744 | drvdata->csdev = coresight_register(desc); | 373 | drvdata->csdev = coresight_register(desc); |
| @@ -754,7 +383,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 754 | if (ret) | 383 | if (ret) |
| 755 | goto err_misc_register; | 384 | goto err_misc_register; |
| 756 | 385 | ||
| 757 | dev_info(dev, "TMC initialized\n"); | ||
| 758 | return 0; | 386 | return 0; |
| 759 | 387 | ||
| 760 | err_misc_register: | 388 | err_misc_register: |
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h new file mode 100644 index 000000000000..5c5fe2ad2ca7 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-tmc.h | |||
| @@ -0,0 +1,140 @@ | |||
| 1 | /* | ||
| 2 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | ||
| 3 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published by | ||
| 7 | * the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef _CORESIGHT_TMC_H | ||
| 19 | #define _CORESIGHT_TMC_H | ||
| 20 | |||
| 21 | #include <linux/miscdevice.h> | ||
| 22 | |||
| 23 | #define TMC_RSZ 0x004 | ||
| 24 | #define TMC_STS 0x00c | ||
| 25 | #define TMC_RRD 0x010 | ||
| 26 | #define TMC_RRP 0x014 | ||
| 27 | #define TMC_RWP 0x018 | ||
| 28 | #define TMC_TRG 0x01c | ||
| 29 | #define TMC_CTL 0x020 | ||
| 30 | #define TMC_RWD 0x024 | ||
| 31 | #define TMC_MODE 0x028 | ||
| 32 | #define TMC_LBUFLEVEL 0x02c | ||
| 33 | #define TMC_CBUFLEVEL 0x030 | ||
| 34 | #define TMC_BUFWM 0x034 | ||
| 35 | #define TMC_RRPHI 0x038 | ||
| 36 | #define TMC_RWPHI 0x03c | ||
| 37 | #define TMC_AXICTL 0x110 | ||
| 38 | #define TMC_DBALO 0x118 | ||
| 39 | #define TMC_DBAHI 0x11c | ||
| 40 | #define TMC_FFSR 0x300 | ||
| 41 | #define TMC_FFCR 0x304 | ||
| 42 | #define TMC_PSCR 0x308 | ||
| 43 | #define TMC_ITMISCOP0 0xee0 | ||
| 44 | #define TMC_ITTRFLIN 0xee8 | ||
| 45 | #define TMC_ITATBDATA0 0xeec | ||
| 46 | #define TMC_ITATBCTR2 0xef0 | ||
| 47 | #define TMC_ITATBCTR1 0xef4 | ||
| 48 | #define TMC_ITATBCTR0 0xef8 | ||
| 49 | |||
| 50 | /* register description */ | ||
| 51 | /* TMC_CTL - 0x020 */ | ||
| 52 | #define TMC_CTL_CAPT_EN BIT(0) | ||
| 53 | /* TMC_STS - 0x00C */ | ||
| 54 | #define TMC_STS_TMCREADY_BIT 2 | ||
| 55 | #define TMC_STS_FULL BIT(0) | ||
| 56 | #define TMC_STS_TRIGGERED BIT(1) | ||
| 57 | /* TMC_AXICTL - 0x110 */ | ||
| 58 | #define TMC_AXICTL_PROT_CTL_B0 BIT(0) | ||
| 59 | #define TMC_AXICTL_PROT_CTL_B1 BIT(1) | ||
| 60 | #define TMC_AXICTL_SCT_GAT_MODE BIT(7) | ||
| 61 | #define TMC_AXICTL_WR_BURST_16 0xF00 | ||
| 62 | /* TMC_FFCR - 0x304 */ | ||
| 63 | #define TMC_FFCR_FLUSHMAN_BIT 6 | ||
| 64 | #define TMC_FFCR_EN_FMT BIT(0) | ||
| 65 | #define TMC_FFCR_EN_TI BIT(1) | ||
| 66 | #define TMC_FFCR_FON_FLIN BIT(4) | ||
| 67 | #define TMC_FFCR_FON_TRIG_EVT BIT(5) | ||
| 68 | #define TMC_FFCR_TRIGON_TRIGIN BIT(8) | ||
| 69 | #define TMC_FFCR_STOP_ON_FLUSH BIT(12) | ||
| 70 | |||
| 71 | |||
| 72 | enum tmc_config_type { | ||
| 73 | TMC_CONFIG_TYPE_ETB, | ||
| 74 | TMC_CONFIG_TYPE_ETR, | ||
| 75 | TMC_CONFIG_TYPE_ETF, | ||
| 76 | }; | ||
| 77 | |||
| 78 | enum tmc_mode { | ||
| 79 | TMC_MODE_CIRCULAR_BUFFER, | ||
| 80 | TMC_MODE_SOFTWARE_FIFO, | ||
| 81 | TMC_MODE_HARDWARE_FIFO, | ||
| 82 | }; | ||
| 83 | |||
| 84 | enum tmc_mem_intf_width { | ||
| 85 | TMC_MEM_INTF_WIDTH_32BITS = 1, | ||
| 86 | TMC_MEM_INTF_WIDTH_64BITS = 2, | ||
| 87 | TMC_MEM_INTF_WIDTH_128BITS = 4, | ||
| 88 | TMC_MEM_INTF_WIDTH_256BITS = 8, | ||
| 89 | }; | ||
| 90 | |||
| 91 | /** | ||
| 92 | * struct tmc_drvdata - specifics associated to an TMC component | ||
| 93 | * @base: memory mapped base address for this component. | ||
| 94 | * @dev: the device entity associated to this component. | ||
| 95 | * @csdev: component vitals needed by the framework. | ||
| 96 | * @miscdev: specifics to handle "/dev/xyz.tmc" entry. | ||
| 97 | * @spinlock: only one at a time pls. | ||
| 98 | * @buf: area of memory where trace data get sent. | ||
| 99 | * @paddr: DMA start location in RAM. | ||
| 100 | * @vaddr: virtual representation of @paddr. | ||
| 101 | * @size: @buf size. | ||
| 102 | * @mode: how this TMC is being used. | ||
| 103 | * @config_type: TMC variant, must be of type @tmc_config_type. | ||
| 104 | * @memwidth: width of the memory interface databus, in bytes. | ||
| 105 | * @trigger_cntr: amount of words to store after a trigger. | ||
| 106 | */ | ||
| 107 | struct tmc_drvdata { | ||
| 108 | void __iomem *base; | ||
| 109 | struct device *dev; | ||
| 110 | struct coresight_device *csdev; | ||
| 111 | struct miscdevice miscdev; | ||
| 112 | spinlock_t spinlock; | ||
| 113 | bool reading; | ||
| 114 | char *buf; | ||
| 115 | dma_addr_t paddr; | ||
| 116 | void __iomem *vaddr; | ||
| 117 | u32 size; | ||
| 118 | local_t mode; | ||
| 119 | enum tmc_config_type config_type; | ||
| 120 | enum tmc_mem_intf_width memwidth; | ||
| 121 | u32 trigger_cntr; | ||
| 122 | }; | ||
| 123 | |||
| 124 | /* Generic functions */ | ||
| 125 | void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata); | ||
| 126 | void tmc_flush_and_stop(struct tmc_drvdata *drvdata); | ||
| 127 | void tmc_enable_hw(struct tmc_drvdata *drvdata); | ||
| 128 | void tmc_disable_hw(struct tmc_drvdata *drvdata); | ||
| 129 | |||
| 130 | /* ETB/ETF functions */ | ||
| 131 | int tmc_read_prepare_etb(struct tmc_drvdata *drvdata); | ||
| 132 | int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata); | ||
| 133 | extern const struct coresight_ops tmc_etb_cs_ops; | ||
| 134 | extern const struct coresight_ops tmc_etf_cs_ops; | ||
| 135 | |||
| 136 | /* ETR functions */ | ||
| 137 | int tmc_read_prepare_etr(struct tmc_drvdata *drvdata); | ||
| 138 | int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata); | ||
| 139 | extern const struct coresight_ops tmc_etr_cs_ops; | ||
| 140 | #endif | ||
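
With the register and structure definitions shared through this header, the per-variant enable routines follow a common shape built from the generic helpers declared above; tmc_etr_enable_hw() earlier in this patch is one instance, and the ETB/ETF variants are expected to look the same. Outline only -- the middle comment stands for the variant-specific register programming:

static void example_variant_enable(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for the TMCReady bit before touching the configuration */
        tmc_wait_for_tmcready(drvdata);

        /* ... variant-specific TMC_MODE, TMC_FFCR, TMC_TRG (and TMC_AXICTL for ETR) setup ... */

        tmc_enable_hw(drvdata);         /* set TMC_CTL.CAPT_EN */

        CS_LOCK(drvdata->base);
}
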
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 8fb09d9237ab..4e471e2e9d89 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c | |||
| @@ -167,7 +167,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) | |||
| 167 | if (IS_ERR(drvdata->csdev)) | 167 | if (IS_ERR(drvdata->csdev)) |
| 168 | return PTR_ERR(drvdata->csdev); | 168 | return PTR_ERR(drvdata->csdev); |
| 169 | 169 | ||
| 170 | dev_info(dev, "TPIU initialized\n"); | ||
| 171 | return 0; | 170 | return 0; |
| 172 | } | 171 | } |
| 173 | 172 | ||
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 2ea5961092c1..5443d03a1eec 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c | |||
| @@ -43,7 +43,15 @@ struct coresight_node { | |||
| 43 | * When operating Coresight drivers from the sysFS interface, only a single | 43 | * When operating Coresight drivers from the sysFS interface, only a single |
| 44 | * path can exist from a tracer (associated to a CPU) to a sink. | 44 | * path can exist from a tracer (associated to a CPU) to a sink. |
| 45 | */ | 45 | */ |
| 46 | static DEFINE_PER_CPU(struct list_head *, sysfs_path); | 46 | static DEFINE_PER_CPU(struct list_head *, tracer_path); |
| 47 | |||
| 48 | /* | ||
| 49 | * As of this writing only a single STM can be found in CS topologies. Since | ||
| 50 | * there is no way to know if we'll ever see more and what kind of | ||
| 51 | * configuration they will enact, for the time being only define a single path | ||
| 52 | * for STM. | ||
| 53 | */ | ||
| 54 | static struct list_head *stm_path; | ||
| 47 | 55 | ||
| 48 | static int coresight_id_match(struct device *dev, void *data) | 56 | static int coresight_id_match(struct device *dev, void *data) |
| 49 | { | 57 | { |
| @@ -257,15 +265,27 @@ static void coresight_disable_source(struct coresight_device *csdev) | |||
| 257 | 265 | ||
| 258 | void coresight_disable_path(struct list_head *path) | 266 | void coresight_disable_path(struct list_head *path) |
| 259 | { | 267 | { |
| 268 | u32 type; | ||
| 260 | struct coresight_node *nd; | 269 | struct coresight_node *nd; |
| 261 | struct coresight_device *csdev, *parent, *child; | 270 | struct coresight_device *csdev, *parent, *child; |
| 262 | 271 | ||
| 263 | list_for_each_entry(nd, path, link) { | 272 | list_for_each_entry(nd, path, link) { |
| 264 | csdev = nd->csdev; | 273 | csdev = nd->csdev; |
| 274 | type = csdev->type; | ||
| 275 | |||
| 276 | /* | ||
| 277 | * ETF devices are tricky... They can be a link or a sink, | ||
| 278 | * depending on how they are configured. If an ETF has been | ||
| 279 | * "activated" it will be configured as a sink, otherwise | ||
| 280 | * go ahead with the link configuration. | ||
| 281 | */ | ||
| 282 | if (type == CORESIGHT_DEV_TYPE_LINKSINK) | ||
| 283 | type = (csdev == coresight_get_sink(path)) ? | ||
| 284 | CORESIGHT_DEV_TYPE_SINK : | ||
| 285 | CORESIGHT_DEV_TYPE_LINK; | ||
| 265 | 286 | ||
| 266 | switch (csdev->type) { | 287 | switch (type) { |
| 267 | case CORESIGHT_DEV_TYPE_SINK: | 288 | case CORESIGHT_DEV_TYPE_SINK: |
| 268 | case CORESIGHT_DEV_TYPE_LINKSINK: | ||
| 269 | coresight_disable_sink(csdev); | 289 | coresight_disable_sink(csdev); |
| 270 | break; | 290 | break; |
| 271 | case CORESIGHT_DEV_TYPE_SOURCE: | 291 | case CORESIGHT_DEV_TYPE_SOURCE: |
| @@ -286,15 +306,27 @@ int coresight_enable_path(struct list_head *path, u32 mode) | |||
| 286 | { | 306 | { |
| 287 | 307 | ||
| 288 | int ret = 0; | 308 | int ret = 0; |
| 309 | u32 type; | ||
| 289 | struct coresight_node *nd; | 310 | struct coresight_node *nd; |
| 290 | struct coresight_device *csdev, *parent, *child; | 311 | struct coresight_device *csdev, *parent, *child; |
| 291 | 312 | ||
| 292 | list_for_each_entry_reverse(nd, path, link) { | 313 | list_for_each_entry_reverse(nd, path, link) { |
| 293 | csdev = nd->csdev; | 314 | csdev = nd->csdev; |
| 315 | type = csdev->type; | ||
| 294 | 316 | ||
| 295 | switch (csdev->type) { | 317 | /* |
| 318 | * ETF devices are tricky... They can be a link or a sink, | ||
| 319 | * depending on how they are configured. If an ETF has been | ||
| 320 | * "activated" it will be configured as a sink, otherwise | ||
| 321 | * go ahead with the link configuration. | ||
| 322 | */ | ||
| 323 | if (type == CORESIGHT_DEV_TYPE_LINKSINK) | ||
| 324 | type = (csdev == coresight_get_sink(path)) ? | ||
| 325 | CORESIGHT_DEV_TYPE_SINK : | ||
| 326 | CORESIGHT_DEV_TYPE_LINK; | ||
| 327 | |||
| 328 | switch (type) { | ||
| 296 | case CORESIGHT_DEV_TYPE_SINK: | 329 | case CORESIGHT_DEV_TYPE_SINK: |
| 297 | case CORESIGHT_DEV_TYPE_LINKSINK: | ||
| 298 | ret = coresight_enable_sink(csdev, mode); | 330 | ret = coresight_enable_sink(csdev, mode); |
| 299 | if (ret) | 331 | if (ret) |
| 300 | goto err; | 332 | goto err; |
| @@ -432,18 +464,45 @@ void coresight_release_path(struct list_head *path) | |||
| 432 | path = NULL; | 464 | path = NULL; |
| 433 | } | 465 | } |
| 434 | 466 | ||
| 467 | /** coresight_validate_source - make sure a source has the right credentials | ||
| 468 | * @csdev: the device structure for a source. | ||
| 469 | * @function: the function this was called from. | ||
| 470 | * | ||
| 471 | * Assumes the coresight_mutex is held. | ||
| 472 | */ | ||
| 473 | static int coresight_validate_source(struct coresight_device *csdev, | ||
| 474 | const char *function) | ||
| 475 | { | ||
| 476 | u32 type, subtype; | ||
| 477 | |||
| 478 | type = csdev->type; | ||
| 479 | subtype = csdev->subtype.source_subtype; | ||
| 480 | |||
| 481 | if (type != CORESIGHT_DEV_TYPE_SOURCE) { | ||
| 482 | dev_err(&csdev->dev, "wrong device type in %s\n", function); | ||
| 483 | return -EINVAL; | ||
| 484 | } | ||
| 485 | |||
| 486 | if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC && | ||
| 487 | subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE) { | ||
| 488 | dev_err(&csdev->dev, "wrong device subtype in %s\n", function); | ||
| 489 | return -EINVAL; | ||
| 490 | } | ||
| 491 | |||
| 492 | return 0; | ||
| 493 | } | ||
| 494 | |||
| 435 | int coresight_enable(struct coresight_device *csdev) | 495 | int coresight_enable(struct coresight_device *csdev) |
| 436 | { | 496 | { |
| 437 | int ret = 0; | 497 | int cpu, ret = 0; |
| 438 | int cpu; | ||
| 439 | struct list_head *path; | 498 | struct list_head *path; |
| 440 | 499 | ||
| 441 | mutex_lock(&coresight_mutex); | 500 | mutex_lock(&coresight_mutex); |
| 442 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { | 501 | |
| 443 | ret = -EINVAL; | 502 | ret = coresight_validate_source(csdev, __func__); |
| 444 | dev_err(&csdev->dev, "wrong device type in %s\n", __func__); | 503 | if (ret) |
| 445 | goto out; | 504 | goto out; |
| 446 | } | 505 | |
| 447 | if (csdev->enable) | 506 | if (csdev->enable) |
| 448 | goto out; | 507 | goto out; |
| 449 | 508 | ||
| @@ -461,15 +520,25 @@ int coresight_enable(struct coresight_device *csdev) | |||
| 461 | if (ret) | 520 | if (ret) |
| 462 | goto err_source; | 521 | goto err_source; |
| 463 | 522 | ||
| 464 | /* | 523 | switch (csdev->subtype.source_subtype) { |
| 465 | * When working from sysFS it is important to keep track | 524 | case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC: |
| 466 | * of the paths that were created so that they can be | 525 | /* |
| 467 | * undone in 'coresight_disable()'. Since there can only | 526 | * When working from sysFS it is important to keep track |
| 468 | * be a single session per tracer (when working from sysFS) | 527 | * of the paths that were created so that they can be |
| 469 | * a per-cpu variable will do just fine. | 528 | * undone in 'coresight_disable()'. Since there can only |
| 470 | */ | 529 | * be a single session per tracer (when working from sysFS) |
| 471 | cpu = source_ops(csdev)->cpu_id(csdev); | 530 | * a per-cpu variable will do just fine. |
| 472 | per_cpu(sysfs_path, cpu) = path; | 531 | */ |
| 532 | cpu = source_ops(csdev)->cpu_id(csdev); | ||
| 533 | per_cpu(tracer_path, cpu) = path; | ||
| 534 | break; | ||
| 535 | case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE: | ||
| 536 | stm_path = path; | ||
| 537 | break; | ||
| 538 | default: | ||
| 539 | /* We can't be here */ | ||
| 540 | break; | ||
| 541 | } | ||
| 473 | 542 | ||
| 474 | out: | 543 | out: |
| 475 | mutex_unlock(&coresight_mutex); | 544 | mutex_unlock(&coresight_mutex); |
| @@ -486,23 +555,36 @@ EXPORT_SYMBOL_GPL(coresight_enable); | |||
| 486 | 555 | ||
| 487 | void coresight_disable(struct coresight_device *csdev) | 556 | void coresight_disable(struct coresight_device *csdev) |
| 488 | { | 557 | { |
| 489 | int cpu; | 558 | int cpu, ret; |
| 490 | struct list_head *path; | 559 | struct list_head *path = NULL; |
| 491 | 560 | ||
| 492 | mutex_lock(&coresight_mutex); | 561 | mutex_lock(&coresight_mutex); |
| 493 | if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { | 562 | |
| 494 | dev_err(&csdev->dev, "wrong device type in %s\n", __func__); | 563 | ret = coresight_validate_source(csdev, __func__); |
| 564 | if (ret) | ||
| 495 | goto out; | 565 | goto out; |
| 496 | } | 566 | |
| 497 | if (!csdev->enable) | 567 | if (!csdev->enable) |
| 498 | goto out; | 568 | goto out; |
| 499 | 569 | ||
| 500 | cpu = source_ops(csdev)->cpu_id(csdev); | 570 | switch (csdev->subtype.source_subtype) { |
| 501 | path = per_cpu(sysfs_path, cpu); | 571 | case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC: |
| 572 | cpu = source_ops(csdev)->cpu_id(csdev); | ||
| 573 | path = per_cpu(tracer_path, cpu); | ||
| 574 | per_cpu(tracer_path, cpu) = NULL; | ||
| 575 | break; | ||
| 576 | case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE: | ||
| 577 | path = stm_path; | ||
| 578 | stm_path = NULL; | ||
| 579 | break; | ||
| 580 | default: | ||
| 581 | /* We can't be here */ | ||
| 582 | break; | ||
| 583 | } | ||
| 584 | |||
| 502 | coresight_disable_source(csdev); | 585 | coresight_disable_source(csdev); |
| 503 | coresight_disable_path(path); | 586 | coresight_disable_path(path); |
| 504 | coresight_release_path(path); | 587 | coresight_release_path(path); |
| 505 | per_cpu(sysfs_path, cpu) = NULL; | ||
| 506 | 588 | ||
| 507 | out: | 589 | out: |
| 508 | mutex_unlock(&coresight_mutex); | 590 | mutex_unlock(&coresight_mutex); |
| @@ -514,7 +596,7 @@ static ssize_t enable_sink_show(struct device *dev, | |||
| 514 | { | 596 | { |
| 515 | struct coresight_device *csdev = to_coresight_device(dev); | 597 | struct coresight_device *csdev = to_coresight_device(dev); |
| 516 | 598 | ||
| 517 | return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated); | 599 | return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->activated); |
| 518 | } | 600 | } |
| 519 | 601 | ||
| 520 | static ssize_t enable_sink_store(struct device *dev, | 602 | static ssize_t enable_sink_store(struct device *dev, |
| @@ -544,7 +626,7 @@ static ssize_t enable_source_show(struct device *dev, | |||
| 544 | { | 626 | { |
| 545 | struct coresight_device *csdev = to_coresight_device(dev); | 627 | struct coresight_device *csdev = to_coresight_device(dev); |
| 546 | 628 | ||
| 547 | return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable); | 629 | return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->enable); |
| 548 | } | 630 | } |
| 549 | 631 | ||
| 550 | static ssize_t enable_source_store(struct device *dev, | 632 | static ssize_t enable_source_store(struct device *dev, |
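
The coresight.c hunks above do two things: a LINKSINK device (such as an ETF) is resolved to either sink or link depending on whether it is the device coresight_get_sink() returns for the path being walked, and the per-source path bookkeeping is split between a per-CPU tracer_path pointer for processor tracers and a single stm_path pointer for the software source. Below is a minimal, stand-alone user-space model of the resolution rule only; the enum and function names are invented for illustration and do not come from the patch.

#include <stdio.h>

enum cs_type { CS_SINK, CS_LINK, CS_LINKSINK, CS_SOURCE };

/*
 * A LINKSINK device is treated as a sink only when it is the device
 * selected as the sink of the path being enabled or disabled;
 * anywhere else in the path it behaves as a link.
 */
static enum cs_type resolve_type(enum cs_type type, int is_path_sink)
{
	if (type != CS_LINKSINK)
		return type;
	return is_path_sink ? CS_SINK : CS_LINK;
}

int main(void)
{
	printf("ETF selected as path sink -> %s\n",
	       resolve_type(CS_LINKSINK, 1) == CS_SINK ? "sink" : "link");
	printf("ETF in the middle of path -> %s\n",
	       resolve_type(CS_LINKSINK, 0) == CS_SINK ? "sink" : "link");
	return 0;
}
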
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index 4272f2ce5f6e..1be543e8e42f 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c | |||
| @@ -71,6 +71,15 @@ static int intel_th_probe(struct device *dev) | |||
| 71 | if (ret) | 71 | if (ret) |
| 72 | return ret; | 72 | return ret; |
| 73 | 73 | ||
| 74 | if (thdrv->attr_group) { | ||
| 75 | ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group); | ||
| 76 | if (ret) { | ||
| 77 | thdrv->remove(thdev); | ||
| 78 | |||
| 79 | return ret; | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 74 | if (thdev->type == INTEL_TH_OUTPUT && | 83 | if (thdev->type == INTEL_TH_OUTPUT && |
| 75 | !intel_th_output_assigned(thdev)) | 84 | !intel_th_output_assigned(thdev)) |
| 76 | ret = hubdrv->assign(hub, thdev); | 85 | ret = hubdrv->assign(hub, thdev); |
| @@ -91,6 +100,9 @@ static int intel_th_remove(struct device *dev) | |||
| 91 | return err; | 100 | return err; |
| 92 | } | 101 | } |
| 93 | 102 | ||
| 103 | if (thdrv->attr_group) | ||
| 104 | sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group); | ||
| 105 | |||
| 94 | thdrv->remove(thdev); | 106 | thdrv->remove(thdev); |
| 95 | 107 | ||
| 96 | if (intel_th_output_assigned(thdev)) { | 108 | if (intel_th_output_assigned(thdev)) { |
| @@ -171,7 +183,14 @@ static DEVICE_ATTR_RO(port); | |||
| 171 | 183 | ||
| 172 | static int intel_th_output_activate(struct intel_th_device *thdev) | 184 | static int intel_th_output_activate(struct intel_th_device *thdev) |
| 173 | { | 185 | { |
| 174 | struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver); | 186 | struct intel_th_driver *thdrv = |
| 187 | to_intel_th_driver_or_null(thdev->dev.driver); | ||
| 188 | |||
| 189 | if (!thdrv) | ||
| 190 | return -ENODEV; | ||
| 191 | |||
| 192 | if (!try_module_get(thdrv->driver.owner)) | ||
| 193 | return -ENODEV; | ||
| 175 | 194 | ||
| 176 | if (thdrv->activate) | 195 | if (thdrv->activate) |
| 177 | return thdrv->activate(thdev); | 196 | return thdrv->activate(thdev); |
| @@ -183,12 +202,18 @@ static int intel_th_output_activate(struct intel_th_device *thdev) | |||
| 183 | 202 | ||
| 184 | static void intel_th_output_deactivate(struct intel_th_device *thdev) | 203 | static void intel_th_output_deactivate(struct intel_th_device *thdev) |
| 185 | { | 204 | { |
| 186 | struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver); | 205 | struct intel_th_driver *thdrv = |
| 206 | to_intel_th_driver_or_null(thdev->dev.driver); | ||
| 207 | |||
| 208 | if (!thdrv) | ||
| 209 | return; | ||
| 187 | 210 | ||
| 188 | if (thdrv->deactivate) | 211 | if (thdrv->deactivate) |
| 189 | thdrv->deactivate(thdev); | 212 | thdrv->deactivate(thdev); |
| 190 | else | 213 | else |
| 191 | intel_th_trace_disable(thdev); | 214 | intel_th_trace_disable(thdev); |
| 215 | |||
| 216 | module_put(thdrv->driver.owner); | ||
| 192 | } | 217 | } |
| 193 | 218 | ||
| 194 | static ssize_t active_show(struct device *dev, struct device_attribute *attr, | 219 | static ssize_t active_show(struct device *dev, struct device_attribute *attr, |
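
With the intel_th/core.c change, the bus core owns creation and removal of a driver-supplied attribute group in intel_th_probe()/intel_th_remove(), and intel_th_output_activate() pins the output driver's module with try_module_get() before calling into it. A subdriver is left with only declaring its group and pointing .attr_group at it. The following is a condensed kernel-style sketch of that shape, not buildable outside a kernel tree; the attribute and driver names are illustrative, not part of the patch.

static ssize_t wrap_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	/* illustrative attribute body */
	return scnprintf(buf, PAGE_SIZE, "%d\n", 1);
}
static DEVICE_ATTR_RO(wrap);

static struct attribute *example_output_attrs[] = {
	&dev_attr_wrap.attr,
	NULL,
};

static struct attribute_group example_output_group = {
	.attrs = example_output_attrs,
};

static struct intel_th_driver example_output_driver = {
	/* .probe/.remove/.activate/.deactivate as before */
	.attr_group	= &example_output_group,
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};
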
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index eedd09332db6..0df22e30673d 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h | |||
| @@ -115,6 +115,7 @@ intel_th_output_assigned(struct intel_th_device *thdev) | |||
| 115 | * @enable: enable tracing for a given output device | 115 | * @enable: enable tracing for a given output device |
| 116 | * @disable: disable tracing for a given output device | 116 | * @disable: disable tracing for a given output device |
| 117 | * @fops: file operations for device nodes | 117 | * @fops: file operations for device nodes |
| 118 | * @attr_group: attributes provided by the driver | ||
| 118 | * | 119 | * |
| 119 | * Callbacks @probe and @remove are required for all device types. | 120 | * Callbacks @probe and @remove are required for all device types. |
| 120 | * Switch device driver needs to fill in @assign, @enable and @disable | 121 | * Switch device driver needs to fill in @assign, @enable and @disable |
| @@ -139,6 +140,8 @@ struct intel_th_driver { | |||
| 139 | void (*deactivate)(struct intel_th_device *thdev); | 140 | void (*deactivate)(struct intel_th_device *thdev); |
| 140 | /* file_operations for those who want a device node */ | 141 | /* file_operations for those who want a device node */ |
| 141 | const struct file_operations *fops; | 142 | const struct file_operations *fops; |
| 143 | /* optional attributes */ | ||
| 144 | struct attribute_group *attr_group; | ||
| 142 | 145 | ||
| 143 | /* source ops */ | 146 | /* source ops */ |
| 144 | int (*set_output)(struct intel_th_device *thdev, | 147 | int (*set_output)(struct intel_th_device *thdev, |
| @@ -148,6 +151,9 @@ struct intel_th_driver { | |||
| 148 | #define to_intel_th_driver(_d) \ | 151 | #define to_intel_th_driver(_d) \ |
| 149 | container_of((_d), struct intel_th_driver, driver) | 152 | container_of((_d), struct intel_th_driver, driver) |
| 150 | 153 | ||
| 154 | #define to_intel_th_driver_or_null(_d) \ | ||
| 155 | ((_d) ? to_intel_th_driver(_d) : NULL) | ||
| 156 | |||
| 151 | static inline struct intel_th_device * | 157 | static inline struct intel_th_device * |
| 152 | to_intel_th_hub(struct intel_th_device *thdev) | 158 | to_intel_th_hub(struct intel_th_device *thdev) |
| 153 | { | 159 | { |
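
to_intel_th_driver_or_null() is a NULL-tolerant wrapper around container_of(), so the activate/deactivate paths above, which may race with the driver being unbound, can test a single pointer instead of dereferencing dev->driver first. The same pattern in a self-contained user-space form; container_of() and all type names here are reimplemented or invented for the demo.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct driver { const char *name; };
struct my_driver { int flags; struct driver drv; };

#define to_my_driver(d)		container_of((d), struct my_driver, drv)
#define to_my_driver_or_null(d)	((d) ? to_my_driver(d) : NULL)

int main(void)
{
	struct my_driver md = { .flags = 1, .drv = { .name = "demo" } };
	struct driver *bound = &md.drv, *unbound = NULL;

	/* bound prints the containing structure, unbound prints (nil) */
	printf("bound:   %p\n", (void *)to_my_driver_or_null(bound));
	printf("unbound: %p\n", (void *)to_my_driver_or_null(unbound));
	return 0;
}
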
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index d2209147dc89..e8d55a153a65 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c | |||
| @@ -122,7 +122,6 @@ struct msc { | |||
| 122 | atomic_t mmap_count; | 122 | atomic_t mmap_count; |
| 123 | struct mutex buf_mutex; | 123 | struct mutex buf_mutex; |
| 124 | 124 | ||
| 125 | struct mutex iter_mutex; | ||
| 126 | struct list_head iter_list; | 125 | struct list_head iter_list; |
| 127 | 126 | ||
| 128 | /* config */ | 127 | /* config */ |
| @@ -257,23 +256,37 @@ static struct msc_iter *msc_iter_install(struct msc *msc) | |||
| 257 | 256 | ||
| 258 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 257 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
| 259 | if (!iter) | 258 | if (!iter) |
| 260 | return NULL; | 259 | return ERR_PTR(-ENOMEM); |
| 260 | |||
| 261 | mutex_lock(&msc->buf_mutex); | ||
| 262 | |||
| 263 | /* | ||
| 264 | * Reading and tracing are mutually exclusive; if msc is | ||
| 265 | * enabled, open() will fail; otherwise existing readers | ||
| 266 | * will prevent enabling the msc and the rest of fops don't | ||
| 267 | * need to worry about it. | ||
| 268 | */ | ||
| 269 | if (msc->enabled) { | ||
| 270 | kfree(iter); | ||
| 271 | iter = ERR_PTR(-EBUSY); | ||
| 272 | goto unlock; | ||
| 273 | } | ||
| 261 | 274 | ||
| 262 | msc_iter_init(iter); | 275 | msc_iter_init(iter); |
| 263 | iter->msc = msc; | 276 | iter->msc = msc; |
| 264 | 277 | ||
| 265 | mutex_lock(&msc->iter_mutex); | ||
| 266 | list_add_tail(&iter->entry, &msc->iter_list); | 278 | list_add_tail(&iter->entry, &msc->iter_list); |
| 267 | mutex_unlock(&msc->iter_mutex); | 279 | unlock: |
| 280 | mutex_unlock(&msc->buf_mutex); | ||
| 268 | 281 | ||
| 269 | return iter; | 282 | return iter; |
| 270 | } | 283 | } |
| 271 | 284 | ||
| 272 | static void msc_iter_remove(struct msc_iter *iter, struct msc *msc) | 285 | static void msc_iter_remove(struct msc_iter *iter, struct msc *msc) |
| 273 | { | 286 | { |
| 274 | mutex_lock(&msc->iter_mutex); | 287 | mutex_lock(&msc->buf_mutex); |
| 275 | list_del(&iter->entry); | 288 | list_del(&iter->entry); |
| 276 | mutex_unlock(&msc->iter_mutex); | 289 | mutex_unlock(&msc->buf_mutex); |
| 277 | 290 | ||
| 278 | kfree(iter); | 291 | kfree(iter); |
| 279 | } | 292 | } |
| @@ -454,7 +467,6 @@ static void msc_buffer_clear_hw_header(struct msc *msc) | |||
| 454 | { | 467 | { |
| 455 | struct msc_window *win; | 468 | struct msc_window *win; |
| 456 | 469 | ||
| 457 | mutex_lock(&msc->buf_mutex); | ||
| 458 | list_for_each_entry(win, &msc->win_list, entry) { | 470 | list_for_each_entry(win, &msc->win_list, entry) { |
| 459 | unsigned int blk; | 471 | unsigned int blk; |
| 460 | size_t hw_sz = sizeof(struct msc_block_desc) - | 472 | size_t hw_sz = sizeof(struct msc_block_desc) - |
| @@ -466,7 +478,6 @@ static void msc_buffer_clear_hw_header(struct msc *msc) | |||
| 466 | memset(&bdesc->hw_tag, 0, hw_sz); | 478 | memset(&bdesc->hw_tag, 0, hw_sz); |
| 467 | } | 479 | } |
| 468 | } | 480 | } |
| 469 | mutex_unlock(&msc->buf_mutex); | ||
| 470 | } | 481 | } |
| 471 | 482 | ||
| 472 | /** | 483 | /** |
| @@ -474,12 +485,15 @@ static void msc_buffer_clear_hw_header(struct msc *msc) | |||
| 474 | * @msc: the MSC device to configure | 485 | * @msc: the MSC device to configure |
| 475 | * | 486 | * |
| 476 | * Program storage mode, wrapping, burst length and trace buffer address | 487 | * Program storage mode, wrapping, burst length and trace buffer address |
| 477 | * into a given MSC. If msc::enabled is set, enable the trace, too. | 488 | * into a given MSC. Then, enable tracing and set msc::enabled. |
| 489 | * The latter is serialized on msc::buf_mutex, so make sure to hold it. | ||
| 478 | */ | 490 | */ |
| 479 | static int msc_configure(struct msc *msc) | 491 | static int msc_configure(struct msc *msc) |
| 480 | { | 492 | { |
| 481 | u32 reg; | 493 | u32 reg; |
| 482 | 494 | ||
| 495 | lockdep_assert_held(&msc->buf_mutex); | ||
| 496 | |||
| 483 | if (msc->mode > MSC_MODE_MULTI) | 497 | if (msc->mode > MSC_MODE_MULTI) |
| 484 | return -ENOTSUPP; | 498 | return -ENOTSUPP; |
| 485 | 499 | ||
| @@ -497,21 +511,19 @@ static int msc_configure(struct msc *msc) | |||
| 497 | reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); | 511 | reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); |
| 498 | reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD); | 512 | reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD); |
| 499 | 513 | ||
| 514 | reg |= MSC_EN; | ||
| 500 | reg |= msc->mode << __ffs(MSC_MODE); | 515 | reg |= msc->mode << __ffs(MSC_MODE); |
| 501 | reg |= msc->burst_len << __ffs(MSC_LEN); | 516 | reg |= msc->burst_len << __ffs(MSC_LEN); |
| 502 | /*if (msc->mode == MSC_MODE_MULTI) | 517 | |
| 503 | reg |= MSC_RD_HDR_OVRD; */ | ||
| 504 | if (msc->wrap) | 518 | if (msc->wrap) |
| 505 | reg |= MSC_WRAPEN; | 519 | reg |= MSC_WRAPEN; |
| 506 | if (msc->enabled) | ||
| 507 | reg |= MSC_EN; | ||
| 508 | 520 | ||
| 509 | iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); | 521 | iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); |
| 510 | 522 | ||
| 511 | if (msc->enabled) { | 523 | msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; |
| 512 | msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; | 524 | intel_th_trace_enable(msc->thdev); |
| 513 | intel_th_trace_enable(msc->thdev); | 525 | msc->enabled = 1; |
| 514 | } | 526 | |
| 515 | 527 | ||
| 516 | return 0; | 528 | return 0; |
| 517 | } | 529 | } |
| @@ -521,15 +533,14 @@ static int msc_configure(struct msc *msc) | |||
| 521 | * @msc: MSC device to disable | 533 | * @msc: MSC device to disable |
| 522 | * | 534 | * |
| 523 | * If @msc is enabled, disable tracing on the switch and then disable MSC | 535 | * If @msc is enabled, disable tracing on the switch and then disable MSC |
| 524 | * storage. | 536 | * storage. Caller must hold msc::buf_mutex. |
| 525 | */ | 537 | */ |
| 526 | static void msc_disable(struct msc *msc) | 538 | static void msc_disable(struct msc *msc) |
| 527 | { | 539 | { |
| 528 | unsigned long count; | 540 | unsigned long count; |
| 529 | u32 reg; | 541 | u32 reg; |
| 530 | 542 | ||
| 531 | if (!msc->enabled) | 543 | lockdep_assert_held(&msc->buf_mutex); |
| 532 | return; | ||
| 533 | 544 | ||
| 534 | intel_th_trace_disable(msc->thdev); | 545 | intel_th_trace_disable(msc->thdev); |
| 535 | 546 | ||
| @@ -569,33 +580,35 @@ static void msc_disable(struct msc *msc) | |||
| 569 | static int intel_th_msc_activate(struct intel_th_device *thdev) | 580 | static int intel_th_msc_activate(struct intel_th_device *thdev) |
| 570 | { | 581 | { |
| 571 | struct msc *msc = dev_get_drvdata(&thdev->dev); | 582 | struct msc *msc = dev_get_drvdata(&thdev->dev); |
| 572 | int ret = 0; | 583 | int ret = -EBUSY; |
| 573 | 584 | ||
| 574 | if (!atomic_inc_unless_negative(&msc->user_count)) | 585 | if (!atomic_inc_unless_negative(&msc->user_count)) |
| 575 | return -ENODEV; | 586 | return -ENODEV; |
| 576 | 587 | ||
| 577 | mutex_lock(&msc->iter_mutex); | 588 | mutex_lock(&msc->buf_mutex); |
| 578 | if (!list_empty(&msc->iter_list)) | ||
| 579 | ret = -EBUSY; | ||
| 580 | mutex_unlock(&msc->iter_mutex); | ||
| 581 | 589 | ||
| 582 | if (ret) { | 590 | /* if there are readers, refuse */ |
| 583 | atomic_dec(&msc->user_count); | 591 | if (list_empty(&msc->iter_list)) |
| 584 | return ret; | 592 | ret = msc_configure(msc); |
| 585 | } | ||
| 586 | 593 | ||
| 587 | msc->enabled = 1; | 594 | mutex_unlock(&msc->buf_mutex); |
| 595 | |||
| 596 | if (ret) | ||
| 597 | atomic_dec(&msc->user_count); | ||
| 588 | 598 | ||
| 589 | return msc_configure(msc); | 599 | return ret; |
| 590 | } | 600 | } |
| 591 | 601 | ||
| 592 | static void intel_th_msc_deactivate(struct intel_th_device *thdev) | 602 | static void intel_th_msc_deactivate(struct intel_th_device *thdev) |
| 593 | { | 603 | { |
| 594 | struct msc *msc = dev_get_drvdata(&thdev->dev); | 604 | struct msc *msc = dev_get_drvdata(&thdev->dev); |
| 595 | 605 | ||
| 596 | msc_disable(msc); | 606 | mutex_lock(&msc->buf_mutex); |
| 597 | 607 | if (msc->enabled) { | |
| 598 | atomic_dec(&msc->user_count); | 608 | msc_disable(msc); |
| 609 | atomic_dec(&msc->user_count); | ||
| 610 | } | ||
| 611 | mutex_unlock(&msc->buf_mutex); | ||
| 599 | } | 612 | } |
| 600 | 613 | ||
| 601 | /** | 614 | /** |
| @@ -1035,8 +1048,8 @@ static int intel_th_msc_open(struct inode *inode, struct file *file) | |||
| 1035 | return -EPERM; | 1048 | return -EPERM; |
| 1036 | 1049 | ||
| 1037 | iter = msc_iter_install(msc); | 1050 | iter = msc_iter_install(msc); |
| 1038 | if (!iter) | 1051 | if (IS_ERR(iter)) |
| 1039 | return -ENOMEM; | 1052 | return PTR_ERR(iter); |
| 1040 | 1053 | ||
| 1041 | file->private_data = iter; | 1054 | file->private_data = iter; |
| 1042 | 1055 | ||
| @@ -1101,11 +1114,6 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf, | |||
| 1101 | if (!atomic_inc_unless_negative(&msc->user_count)) | 1114 | if (!atomic_inc_unless_negative(&msc->user_count)) |
| 1102 | return 0; | 1115 | return 0; |
| 1103 | 1116 | ||
| 1104 | if (msc->enabled) { | ||
| 1105 | ret = -EBUSY; | ||
| 1106 | goto put_count; | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap) | 1117 | if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap) |
| 1110 | size = msc->single_sz; | 1118 | size = msc->single_sz; |
| 1111 | else | 1119 | else |
| @@ -1245,6 +1253,7 @@ static const struct file_operations intel_th_msc_fops = { | |||
| 1245 | .read = intel_th_msc_read, | 1253 | .read = intel_th_msc_read, |
| 1246 | .mmap = intel_th_msc_mmap, | 1254 | .mmap = intel_th_msc_mmap, |
| 1247 | .llseek = no_llseek, | 1255 | .llseek = no_llseek, |
| 1256 | .owner = THIS_MODULE, | ||
| 1248 | }; | 1257 | }; |
| 1249 | 1258 | ||
| 1250 | static int intel_th_msc_init(struct msc *msc) | 1259 | static int intel_th_msc_init(struct msc *msc) |
| @@ -1254,8 +1263,6 @@ static int intel_th_msc_init(struct msc *msc) | |||
| 1254 | msc->mode = MSC_MODE_MULTI; | 1263 | msc->mode = MSC_MODE_MULTI; |
| 1255 | mutex_init(&msc->buf_mutex); | 1264 | mutex_init(&msc->buf_mutex); |
| 1256 | INIT_LIST_HEAD(&msc->win_list); | 1265 | INIT_LIST_HEAD(&msc->win_list); |
| 1257 | |||
| 1258 | mutex_init(&msc->iter_mutex); | ||
| 1259 | INIT_LIST_HEAD(&msc->iter_list); | 1266 | INIT_LIST_HEAD(&msc->iter_list); |
| 1260 | 1267 | ||
| 1261 | msc->burst_len = | 1268 | msc->burst_len = |
| @@ -1393,6 +1400,11 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, | |||
| 1393 | do { | 1400 | do { |
| 1394 | end = memchr(p, ',', len); | 1401 | end = memchr(p, ',', len); |
| 1395 | s = kstrndup(p, end ? end - p : len, GFP_KERNEL); | 1402 | s = kstrndup(p, end ? end - p : len, GFP_KERNEL); |
| 1403 | if (!s) { | ||
| 1404 | ret = -ENOMEM; | ||
| 1405 | goto free_win; | ||
| 1406 | } | ||
| 1407 | |||
| 1396 | ret = kstrtoul(s, 10, &val); | 1408 | ret = kstrtoul(s, 10, &val); |
| 1397 | kfree(s); | 1409 | kfree(s); |
| 1398 | 1410 | ||
| @@ -1473,10 +1485,6 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) | |||
| 1473 | if (err) | 1485 | if (err) |
| 1474 | return err; | 1486 | return err; |
| 1475 | 1487 | ||
| 1476 | err = sysfs_create_group(&dev->kobj, &msc_output_group); | ||
| 1477 | if (err) | ||
| 1478 | return err; | ||
| 1479 | |||
| 1480 | dev_set_drvdata(dev, msc); | 1488 | dev_set_drvdata(dev, msc); |
| 1481 | 1489 | ||
| 1482 | return 0; | 1490 | return 0; |
| @@ -1484,7 +1492,18 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) | |||
| 1484 | 1492 | ||
| 1485 | static void intel_th_msc_remove(struct intel_th_device *thdev) | 1493 | static void intel_th_msc_remove(struct intel_th_device *thdev) |
| 1486 | { | 1494 | { |
| 1487 | sysfs_remove_group(&thdev->dev.kobj, &msc_output_group); | 1495 | struct msc *msc = dev_get_drvdata(&thdev->dev); |
| 1496 | int ret; | ||
| 1497 | |||
| 1498 | intel_th_msc_deactivate(thdev); | ||
| 1499 | |||
| 1500 | /* | ||
| 1501 | * Buffers should not be used at this point except if the | ||
| 1502 | * output character device is still open and the parent | ||
| 1503 | * device gets detached from its bus, which is a FIXME. | ||
| 1504 | */ | ||
| 1505 | ret = msc_buffer_free_unless_used(msc); | ||
| 1506 | WARN_ON_ONCE(ret); | ||
| 1488 | } | 1507 | } |
| 1489 | 1508 | ||
| 1490 | static struct intel_th_driver intel_th_msc_driver = { | 1509 | static struct intel_th_driver intel_th_msc_driver = { |
| @@ -1493,6 +1512,7 @@ static struct intel_th_driver intel_th_msc_driver = { | |||
| 1493 | .activate = intel_th_msc_activate, | 1512 | .activate = intel_th_msc_activate, |
| 1494 | .deactivate = intel_th_msc_deactivate, | 1513 | .deactivate = intel_th_msc_deactivate, |
| 1495 | .fops = &intel_th_msc_fops, | 1514 | .fops = &intel_th_msc_fops, |
| 1515 | .attr_group = &msc_output_group, | ||
| 1496 | .driver = { | 1516 | .driver = { |
| 1497 | .name = "msc", | 1517 | .name = "msc", |
| 1498 | .owner = THIS_MODULE, | 1518 | .owner = THIS_MODULE, |
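
The msu.c changes fold iter_mutex into buf_mutex so that "readers exist" and "msc is enabled" are decided under one lock, and msc_iter_install() now reports failure through the ERR_PTR convention instead of returning NULL for two different reasons (allocation failure vs. tracing already enabled). A stand-alone illustration of that convention follows; ERR_PTR/IS_ERR/PTR_ERR are reimplemented in user space for the demo and the install() logic is a simplified stand-in, not the driver code.

#include <stdio.h>
#include <errno.h>

/* user-space stand-ins for the kernel helpers of the same name */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static void *install(int enabled, int oom)
{
	if (oom)
		return ERR_PTR(-ENOMEM);	/* allocation failed */
	if (enabled)
		return ERR_PTR(-EBUSY);		/* tracing is running */
	return (void *)0x1;			/* the "iterator" */
}

int main(void)
{
	void *it = install(1, 0);

	if (IS_ERR(it))
		printf("open() would fail with %ld\n", PTR_ERR(it));
	return 0;
}
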
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index bca7a2ac00d6..5e25c7eb31d3 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c | |||
| @@ -75,6 +75,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
| 75 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80), | 75 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80), |
| 76 | .driver_data = (kernel_ulong_t)0, | 76 | .driver_data = (kernel_ulong_t)0, |
| 77 | }, | 77 | }, |
| 78 | { | ||
| 79 | /* Broxton B-step */ | ||
| 80 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e), | ||
| 81 | .driver_data = (kernel_ulong_t)0, | ||
| 82 | }, | ||
| 78 | { 0 }, | 83 | { 0 }, |
| 79 | }; | 84 | }; |
| 80 | 85 | ||
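
The pci.c hunk only appends a Broxton B-step PCI ID to the match table; the driver logic is unchanged. Supporting a further trace hub variant means adding one more entry of the same shape. The sketch below shows that shape with a placeholder device ID; 0x1234 is not a real assignment.

static const struct pci_device_id example_pci_id_table[] = {
	{
		/* hypothetical future SoC, device ID is a placeholder */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234),
		.driver_data = (kernel_ulong_t)0,
	},
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, example_pci_id_table);
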
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c index 57cbfdcc7ef0..35738b5bfccd 100644 --- a/drivers/hwtracing/intel_th/pti.c +++ b/drivers/hwtracing/intel_th/pti.c | |||
| @@ -200,7 +200,6 @@ static int intel_th_pti_probe(struct intel_th_device *thdev) | |||
| 200 | struct resource *res; | 200 | struct resource *res; |
| 201 | struct pti_device *pti; | 201 | struct pti_device *pti; |
| 202 | void __iomem *base; | 202 | void __iomem *base; |
| 203 | int ret; | ||
| 204 | 203 | ||
| 205 | res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0); | 204 | res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0); |
| 206 | if (!res) | 205 | if (!res) |
| @@ -219,10 +218,6 @@ static int intel_th_pti_probe(struct intel_th_device *thdev) | |||
| 219 | 218 | ||
| 220 | read_hw_config(pti); | 219 | read_hw_config(pti); |
| 221 | 220 | ||
| 222 | ret = sysfs_create_group(&dev->kobj, &pti_output_group); | ||
| 223 | if (ret) | ||
| 224 | return ret; | ||
| 225 | |||
| 226 | dev_set_drvdata(dev, pti); | 221 | dev_set_drvdata(dev, pti); |
| 227 | 222 | ||
| 228 | return 0; | 223 | return 0; |
| @@ -237,6 +232,7 @@ static struct intel_th_driver intel_th_pti_driver = { | |||
| 237 | .remove = intel_th_pti_remove, | 232 | .remove = intel_th_pti_remove, |
| 238 | .activate = intel_th_pti_activate, | 233 | .activate = intel_th_pti_activate, |
| 239 | .deactivate = intel_th_pti_deactivate, | 234 | .deactivate = intel_th_pti_deactivate, |
| 235 | .attr_group = &pti_output_group, | ||
| 240 | .driver = { | 236 | .driver = { |
| 241 | .name = "pti", | 237 | .name = "pti", |
| 242 | .owner = THIS_MODULE, | 238 | .owner = THIS_MODULE, |
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index de80d45d8df9..ff31108b066f 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c | |||
| @@ -67,9 +67,24 @@ static ssize_t channels_show(struct device *dev, | |||
| 67 | 67 | ||
| 68 | static DEVICE_ATTR_RO(channels); | 68 | static DEVICE_ATTR_RO(channels); |
| 69 | 69 | ||
| 70 | static ssize_t hw_override_show(struct device *dev, | ||
| 71 | struct device_attribute *attr, | ||
| 72 | char *buf) | ||
| 73 | { | ||
| 74 | struct stm_device *stm = to_stm_device(dev); | ||
| 75 | int ret; | ||
| 76 | |||
| 77 | ret = sprintf(buf, "%u\n", stm->data->hw_override); | ||
| 78 | |||
| 79 | return ret; | ||
| 80 | } | ||
| 81 | |||
| 82 | static DEVICE_ATTR_RO(hw_override); | ||
| 83 | |||
| 70 | static struct attribute *stm_attrs[] = { | 84 | static struct attribute *stm_attrs[] = { |
| 71 | &dev_attr_masters.attr, | 85 | &dev_attr_masters.attr, |
| 72 | &dev_attr_channels.attr, | 86 | &dev_attr_channels.attr, |
| 87 | &dev_attr_hw_override.attr, | ||
| 73 | NULL, | 88 | NULL, |
| 74 | }; | 89 | }; |
| 75 | 90 | ||
| @@ -546,8 +561,6 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg) | |||
| 546 | if (ret) | 561 | if (ret) |
| 547 | goto err_free; | 562 | goto err_free; |
| 548 | 563 | ||
| 549 | ret = 0; | ||
| 550 | |||
| 551 | if (stm->data->link) | 564 | if (stm->data->link) |
| 552 | ret = stm->data->link(stm->data, stmf->output.master, | 565 | ret = stm->data->link(stm->data, stmf->output.master, |
| 553 | stmf->output.channel); | 566 | stmf->output.channel); |
| @@ -668,18 +681,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
| 668 | stm->dev.parent = parent; | 681 | stm->dev.parent = parent; |
| 669 | stm->dev.release = stm_device_release; | 682 | stm->dev.release = stm_device_release; |
| 670 | 683 | ||
| 671 | err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name); | ||
| 672 | if (err) | ||
| 673 | goto err_device; | ||
| 674 | |||
| 675 | err = device_add(&stm->dev); | ||
| 676 | if (err) | ||
| 677 | goto err_device; | ||
| 678 | |||
| 679 | mutex_init(&stm->link_mutex); | 684 | mutex_init(&stm->link_mutex); |
| 680 | spin_lock_init(&stm->link_lock); | 685 | spin_lock_init(&stm->link_lock); |
| 681 | INIT_LIST_HEAD(&stm->link_list); | 686 | INIT_LIST_HEAD(&stm->link_list); |
| 682 | 687 | ||
| 688 | /* initialize the object before it is accessible via sysfs */ | ||
| 683 | spin_lock_init(&stm->mc_lock); | 689 | spin_lock_init(&stm->mc_lock); |
| 684 | mutex_init(&stm->policy_mutex); | 690 | mutex_init(&stm->policy_mutex); |
| 685 | stm->sw_nmasters = nmasters; | 691 | stm->sw_nmasters = nmasters; |
| @@ -687,9 +693,19 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, | |||
| 687 | stm->data = stm_data; | 693 | stm->data = stm_data; |
| 688 | stm_data->stm = stm; | 694 | stm_data->stm = stm; |
| 689 | 695 | ||
| 696 | err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name); | ||
| 697 | if (err) | ||
| 698 | goto err_device; | ||
| 699 | |||
| 700 | err = device_add(&stm->dev); | ||
| 701 | if (err) | ||
| 702 | goto err_device; | ||
| 703 | |||
| 690 | return 0; | 704 | return 0; |
| 691 | 705 | ||
| 692 | err_device: | 706 | err_device: |
| 707 | unregister_chrdev(stm->major, stm_data->name); | ||
| 708 | |||
| 693 | /* matches device_initialize() above */ | 709 | /* matches device_initialize() above */ |
| 694 | put_device(&stm->dev); | 710 | put_device(&stm->dev); |
| 695 | err_free: | 711 | err_free: |
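
The stm_register_device() reordering moves kobject_set_name()/device_add() to the end, so every field that a sysfs read (masters, channels, the new hw_override attribute) might touch is initialized before the device becomes visible, and the err_device path now also drops the character device registration taken earlier. The "initialize first, publish last" ordering in miniature, as a kernel-style sketch: the structure and field names are illustrative and this is not buildable outside a kernel tree.

struct example_device {
	struct device	dev;
	struct mutex	lock;
	spinlock_t	list_lock;
	struct list_head links;
	bool		ready;
};

static int example_register(struct example_device *ex, const char *name)
{
	int err;

	device_initialize(&ex->dev);		/* refcount only, not yet visible */

	mutex_init(&ex->lock);			/* everything sysfs may read ... */
	spin_lock_init(&ex->list_lock);
	INIT_LIST_HEAD(&ex->links);
	ex->ready = true;			/* ... is set up before publishing */

	err = kobject_set_name(&ex->dev.kobj, "%s", name);
	if (err)
		goto err_put;

	err = device_add(&ex->dev);		/* attributes visible from here on */
	if (err)
		goto err_put;

	return 0;

err_put:
	put_device(&ex->dev);			/* matches device_initialize() */
	return err;
}
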
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c index 310adf57e7a1..a86612d989f9 100644 --- a/drivers/hwtracing/stm/dummy_stm.c +++ b/drivers/hwtracing/stm/dummy_stm.c | |||
| @@ -46,9 +46,7 @@ static struct stm_data dummy_stm[DUMMY_STM_MAX]; | |||
| 46 | 46 | ||
| 47 | static int nr_dummies = 4; | 47 | static int nr_dummies = 4; |
| 48 | 48 | ||
| 49 | module_param(nr_dummies, int, 0600); | 49 | module_param(nr_dummies, int, 0400); |
| 50 | |||
| 51 | static unsigned int dummy_stm_nr; | ||
| 52 | 50 | ||
| 53 | static unsigned int fail_mode; | 51 | static unsigned int fail_mode; |
| 54 | 52 | ||
| @@ -65,12 +63,12 @@ static int dummy_stm_link(struct stm_data *data, unsigned int master, | |||
| 65 | 63 | ||
| 66 | static int dummy_stm_init(void) | 64 | static int dummy_stm_init(void) |
| 67 | { | 65 | { |
| 68 | int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies); | 66 | int i, ret = -ENOMEM; |
| 69 | 67 | ||
| 70 | if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX) | 68 | if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX) |
| 71 | return -EINVAL; | 69 | return -EINVAL; |
| 72 | 70 | ||
| 73 | for (i = 0; i < __nr_dummies; i++) { | 71 | for (i = 0; i < nr_dummies; i++) { |
| 74 | dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i); | 72 | dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i); |
| 75 | if (!dummy_stm[i].name) | 73 | if (!dummy_stm[i].name) |
| 76 | goto fail_unregister; | 74 | goto fail_unregister; |
| @@ -86,8 +84,6 @@ static int dummy_stm_init(void) | |||
| 86 | goto fail_free; | 84 | goto fail_free; |
| 87 | } | 85 | } |
| 88 | 86 | ||
| 89 | dummy_stm_nr = __nr_dummies; | ||
| 90 | |||
| 91 | return 0; | 87 | return 0; |
| 92 | 88 | ||
| 93 | fail_unregister: | 89 | fail_unregister: |
| @@ -105,7 +101,7 @@ static void dummy_stm_exit(void) | |||
| 105 | { | 101 | { |
| 106 | int i; | 102 | int i; |
| 107 | 103 | ||
| 108 | for (i = 0; i < dummy_stm_nr; i++) { | 104 | for (i = 0; i < nr_dummies; i++) { |
| 109 | stm_unregister_device(&dummy_stm[i]); | 105 | stm_unregister_device(&dummy_stm[i]); |
| 110 | kfree(dummy_stm[i].name); | 106 | kfree(dummy_stm[i].name); |
| 111 | } | 107 | } |
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c index 0133571b506f..3da7b673aab2 100644 --- a/drivers/hwtracing/stm/heartbeat.c +++ b/drivers/hwtracing/stm/heartbeat.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | static int nr_devs = 4; | 26 | static int nr_devs = 4; |
| 27 | static int interval_ms = 10; | 27 | static int interval_ms = 10; |
| 28 | 28 | ||
| 29 | module_param(nr_devs, int, 0600); | 29 | module_param(nr_devs, int, 0400); |
| 30 | module_param(interval_ms, int, 0600); | 30 | module_param(interval_ms, int, 0600); |
| 31 | 31 | ||
| 32 | static struct stm_heartbeat { | 32 | static struct stm_heartbeat { |
| @@ -35,8 +35,6 @@ static struct stm_heartbeat { | |||
| 35 | unsigned int active; | 35 | unsigned int active; |
| 36 | } stm_heartbeat[STM_HEARTBEAT_MAX]; | 36 | } stm_heartbeat[STM_HEARTBEAT_MAX]; |
| 37 | 37 | ||
| 38 | static unsigned int nr_instances; | ||
| 39 | |||
| 40 | static const char str[] = "heartbeat stm source driver is here to serve you"; | 38 | static const char str[] = "heartbeat stm source driver is here to serve you"; |
| 41 | 39 | ||
| 42 | static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr) | 40 | static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr) |
| @@ -74,12 +72,12 @@ static void stm_heartbeat_unlink(struct stm_source_data *data) | |||
| 74 | 72 | ||
| 75 | static int stm_heartbeat_init(void) | 73 | static int stm_heartbeat_init(void) |
| 76 | { | 74 | { |
| 77 | int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs); | 75 | int i, ret = -ENOMEM; |
| 78 | 76 | ||
| 79 | if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX) | 77 | if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX) |
| 80 | return -EINVAL; | 78 | return -EINVAL; |
| 81 | 79 | ||
| 82 | for (i = 0; i < __nr_instances; i++) { | 80 | for (i = 0; i < nr_devs; i++) { |
| 83 | stm_heartbeat[i].data.name = | 81 | stm_heartbeat[i].data.name = |
| 84 | kasprintf(GFP_KERNEL, "heartbeat.%d", i); | 82 | kasprintf(GFP_KERNEL, "heartbeat.%d", i); |
| 85 | if (!stm_heartbeat[i].data.name) | 83 | if (!stm_heartbeat[i].data.name) |
| @@ -98,8 +96,6 @@ static int stm_heartbeat_init(void) | |||
| 98 | goto fail_free; | 96 | goto fail_free; |
| 99 | } | 97 | } |
| 100 | 98 | ||
| 101 | nr_instances = __nr_instances; | ||
| 102 | |||
| 103 | return 0; | 99 | return 0; |
| 104 | 100 | ||
| 105 | fail_unregister: | 101 | fail_unregister: |
| @@ -116,7 +112,7 @@ static void stm_heartbeat_exit(void) | |||
| 116 | { | 112 | { |
| 117 | int i; | 113 | int i; |
| 118 | 114 | ||
| 119 | for (i = 0; i < nr_instances; i++) { | 115 | for (i = 0; i < nr_devs; i++) { |
| 120 | stm_source_unregister_device(&stm_heartbeat[i].data); | 116 | stm_source_unregister_device(&stm_heartbeat[i].data); |
| 121 | kfree(stm_heartbeat[i].data.name); | 117 | kfree(stm_heartbeat[i].data.name); |
| 122 | } | 118 | } |
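
Both dummy_stm and stm_heartbeat drop their private copy of the instance count (dummy_stm_nr / nr_instances) and instead change the module parameter permissions from 0600 to 0400, so nr_dummies/nr_devs can no longer be rewritten through /sys/module/.../parameters once the module is loaded, and init and exit are guaranteed to iterate over the same count. The relevant pattern in isolation, as a minimal module sketch with illustrative names:

#include <linux/module.h>

static int nr_devs = 4;
/* 0400: readable in sysfs, not writable at runtime */
module_param(nr_devs, int, 0400);

static int __init example_init(void)
{
	if (nr_devs < 0 || nr_devs > 4)
		return -EINVAL;
	/* register nr_devs instances here */
	return 0;
}

static void __exit example_exit(void)
{
	/* nr_devs is still the value example_init() saw */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
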
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c index 1db189657b2b..6c0ae2996326 100644 --- a/drivers/hwtracing/stm/policy.c +++ b/drivers/hwtracing/stm/policy.c | |||
| @@ -107,8 +107,7 @@ stp_policy_node_masters_store(struct config_item *item, const char *page, | |||
| 107 | goto unlock; | 107 | goto unlock; |
| 108 | 108 | ||
| 109 | /* must be within [sw_start..sw_end], which is an inclusive range */ | 109 | /* must be within [sw_start..sw_end], which is an inclusive range */ |
| 110 | if (first > INT_MAX || last > INT_MAX || first > last || | 110 | if (first > last || first < stm->data->sw_start || |
| 111 | first < stm->data->sw_start || | ||
| 112 | last > stm->data->sw_end) { | 111 | last > stm->data->sw_end) { |
| 113 | ret = -ERANGE; | 112 | ret = -ERANGE; |
| 114 | goto unlock; | 113 | goto unlock; |
| @@ -342,7 +341,7 @@ stp_policies_make(struct config_group *group, const char *name) | |||
| 342 | return ERR_PTR(-EINVAL); | 341 | return ERR_PTR(-EINVAL); |
| 343 | } | 342 | } |
| 344 | 343 | ||
| 345 | *p++ = '\0'; | 344 | *p = '\0'; |
| 346 | 345 | ||
| 347 | stm = stm_find_device(devname); | 346 | stm = stm_find_device(devname); |
| 348 | kfree(devname); | 347 | kfree(devname); |
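
The masters check in stp_policy_node_masters_store() is reduced to the conditions that matter for the inclusive window the STM driver advertises: the pair must be ordered and must lie within [sw_start..sw_end]. The same predicate as a stand-alone user-space model; the function name and the bounds in main() are invented for illustration.

#include <stdio.h>

/* inclusive window published by the device, e.g. sw_start=16, sw_end=127 */
static int masters_in_range(unsigned long first, unsigned long last,
			    unsigned long sw_start, unsigned long sw_end)
{
	return !(first > last || first < sw_start || last > sw_end);
}

int main(void)
{
	printf("%d\n", masters_in_range(16, 64, 16, 127));   /* 1: ok */
	printf("%d\n", masters_in_range(64, 16, 16, 127));   /* 0: unordered */
	printf("%d\n", masters_in_range(100, 200, 16, 127)); /* 0: past sw_end */
	return 0;
}
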
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index a4be451074e5..b73c6e7d28e4 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c | |||
| @@ -83,13 +83,67 @@ static int mcb_remove(struct device *dev) | |||
| 83 | 83 | ||
| 84 | static void mcb_shutdown(struct device *dev) | 84 | static void mcb_shutdown(struct device *dev) |
| 85 | { | 85 | { |
| 86 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); | ||
| 86 | struct mcb_device *mdev = to_mcb_device(dev); | 87 | struct mcb_device *mdev = to_mcb_device(dev); |
| 87 | struct mcb_driver *mdrv = mdev->driver; | ||
| 88 | 88 | ||
| 89 | if (mdrv && mdrv->shutdown) | 89 | if (mdrv && mdrv->shutdown) |
| 90 | mdrv->shutdown(mdev); | 90 | mdrv->shutdown(mdev); |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | static ssize_t revision_show(struct device *dev, struct device_attribute *attr, | ||
| 94 | char *buf) | ||
| 95 | { | ||
| 96 | struct mcb_bus *bus = to_mcb_bus(dev); | ||
| 97 | |||
| 98 | return scnprintf(buf, PAGE_SIZE, "%d\n", bus->revision); | ||
| 99 | } | ||
| 100 | static DEVICE_ATTR_RO(revision); | ||
| 101 | |||
| 102 | static ssize_t model_show(struct device *dev, struct device_attribute *attr, | ||
| 103 | char *buf) | ||
| 104 | { | ||
| 105 | struct mcb_bus *bus = to_mcb_bus(dev); | ||
| 106 | |||
| 107 | return scnprintf(buf, PAGE_SIZE, "%c\n", bus->model); | ||
| 108 | } | ||
| 109 | static DEVICE_ATTR_RO(model); | ||
| 110 | |||
| 111 | static ssize_t minor_show(struct device *dev, struct device_attribute *attr, | ||
| 112 | char *buf) | ||
| 113 | { | ||
| 114 | struct mcb_bus *bus = to_mcb_bus(dev); | ||
| 115 | |||
| 116 | return scnprintf(buf, PAGE_SIZE, "%d\n", bus->minor); | ||
| 117 | } | ||
| 118 | static DEVICE_ATTR_RO(minor); | ||
| 119 | |||
| 120 | static ssize_t name_show(struct device *dev, struct device_attribute *attr, | ||
| 121 | char *buf) | ||
| 122 | { | ||
| 123 | struct mcb_bus *bus = to_mcb_bus(dev); | ||
| 124 | |||
| 125 | return scnprintf(buf, PAGE_SIZE, "%s\n", bus->name); | ||
| 126 | } | ||
| 127 | static DEVICE_ATTR_RO(name); | ||
| 128 | |||
| 129 | static struct attribute *mcb_bus_attrs[] = { | ||
| 130 | &dev_attr_revision.attr, | ||
| 131 | &dev_attr_model.attr, | ||
| 132 | &dev_attr_minor.attr, | ||
| 133 | &dev_attr_name.attr, | ||
| 134 | NULL, | ||
| 135 | }; | ||
| 136 | |||
| 137 | static const struct attribute_group mcb_carrier_group = { | ||
| 138 | .attrs = mcb_bus_attrs, | ||
| 139 | }; | ||
| 140 | |||
| 141 | static const struct attribute_group *mcb_carrier_groups[] = { | ||
| 142 | &mcb_carrier_group, | ||
| 143 | NULL, | ||
| 144 | }; | ||
| 145 | |||
| 146 | |||
| 93 | static struct bus_type mcb_bus_type = { | 147 | static struct bus_type mcb_bus_type = { |
| 94 | .name = "mcb", | 148 | .name = "mcb", |
| 95 | .match = mcb_match, | 149 | .match = mcb_match, |
| @@ -99,6 +153,11 @@ static struct bus_type mcb_bus_type = { | |||
| 99 | .shutdown = mcb_shutdown, | 153 | .shutdown = mcb_shutdown, |
| 100 | }; | 154 | }; |
| 101 | 155 | ||
| 156 | static struct device_type mcb_carrier_device_type = { | ||
| 157 | .name = "mcb-carrier", | ||
| 158 | .groups = mcb_carrier_groups, | ||
| 159 | }; | ||
| 160 | |||
| 102 | /** | 161 | /** |
| 103 | * __mcb_register_driver() - Register a @mcb_driver at the system | 162 | * __mcb_register_driver() - Register a @mcb_driver at the system |
| 104 | * @drv: The @mcb_driver | 163 | * @drv: The @mcb_driver |
| @@ -155,6 +214,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev) | |||
| 155 | int device_id; | 214 | int device_id; |
| 156 | 215 | ||
| 157 | device_initialize(&dev->dev); | 216 | device_initialize(&dev->dev); |
| 217 | mcb_bus_get(bus); | ||
| 158 | dev->dev.bus = &mcb_bus_type; | 218 | dev->dev.bus = &mcb_bus_type; |
| 159 | dev->dev.parent = bus->dev.parent; | 219 | dev->dev.parent = bus->dev.parent; |
| 160 | dev->dev.release = mcb_release_dev; | 220 | dev->dev.release = mcb_release_dev; |
| @@ -178,6 +238,15 @@ out: | |||
| 178 | } | 238 | } |
| 179 | EXPORT_SYMBOL_GPL(mcb_device_register); | 239 | EXPORT_SYMBOL_GPL(mcb_device_register); |
| 180 | 240 | ||
| 241 | static void mcb_free_bus(struct device *dev) | ||
| 242 | { | ||
| 243 | struct mcb_bus *bus = to_mcb_bus(dev); | ||
| 244 | |||
| 245 | put_device(bus->carrier); | ||
| 246 | ida_simple_remove(&mcb_ida, bus->bus_nr); | ||
| 247 | kfree(bus); | ||
| 248 | } | ||
| 249 | |||
| 181 | /** | 250 | /** |
| 182 | * mcb_alloc_bus() - Allocate a new @mcb_bus | 251 | * mcb_alloc_bus() - Allocate a new @mcb_bus |
| 183 | * | 252 | * |
| @@ -187,6 +256,7 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier) | |||
| 187 | { | 256 | { |
| 188 | struct mcb_bus *bus; | 257 | struct mcb_bus *bus; |
| 189 | int bus_nr; | 258 | int bus_nr; |
| 259 | int rc; | ||
| 190 | 260 | ||
| 191 | bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL); | 261 | bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL); |
| 192 | if (!bus) | 262 | if (!bus) |
| @@ -194,14 +264,29 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier) | |||
| 194 | 264 | ||
| 195 | bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL); | 265 | bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL); |
| 196 | if (bus_nr < 0) { | 266 | if (bus_nr < 0) { |
| 197 | kfree(bus); | 267 | rc = bus_nr; |
| 198 | return ERR_PTR(bus_nr); | 268 | goto err_free; |
| 199 | } | 269 | } |
| 200 | 270 | ||
| 201 | INIT_LIST_HEAD(&bus->children); | ||
| 202 | bus->bus_nr = bus_nr; | 271 | bus->bus_nr = bus_nr; |
| 203 | bus->carrier = carrier; | 272 | bus->carrier = get_device(carrier); |
| 273 | |||
| 274 | device_initialize(&bus->dev); | ||
| 275 | bus->dev.parent = carrier; | ||
| 276 | bus->dev.bus = &mcb_bus_type; | ||
| 277 | bus->dev.type = &mcb_carrier_device_type; | ||
| 278 | bus->dev.release = &mcb_free_bus; | ||
| 279 | |||
| 280 | dev_set_name(&bus->dev, "mcb:%d", bus_nr); | ||
| 281 | rc = device_add(&bus->dev); | ||
| 282 | if (rc) | ||
| 283 | goto err_free; | ||
| 284 | |||
| 204 | return bus; | 285 | return bus; |
| 286 | err_free: | ||
| 287 | put_device(carrier); | ||
| 288 | kfree(bus); | ||
| 289 | return ERR_PTR(rc); | ||
| 205 | } | 290 | } |
| 206 | EXPORT_SYMBOL_GPL(mcb_alloc_bus); | 291 | EXPORT_SYMBOL_GPL(mcb_alloc_bus); |
| 207 | 292 | ||
| @@ -224,10 +309,6 @@ static void mcb_devices_unregister(struct mcb_bus *bus) | |||
| 224 | void mcb_release_bus(struct mcb_bus *bus) | 309 | void mcb_release_bus(struct mcb_bus *bus) |
| 225 | { | 310 | { |
| 226 | mcb_devices_unregister(bus); | 311 | mcb_devices_unregister(bus); |
| 227 | |||
| 228 | ida_simple_remove(&mcb_ida, bus->bus_nr); | ||
| 229 | |||
| 230 | kfree(bus); | ||
| 231 | } | 312 | } |
| 232 | EXPORT_SYMBOL_GPL(mcb_release_bus); | 313 | EXPORT_SYMBOL_GPL(mcb_release_bus); |
| 233 | 314 | ||
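
mcb_alloc_bus() now makes the carrier bus a real device: it takes a reference on the carrier with get_device(), registers the bus via device_initialize()/device_add() with a device_type whose attribute group exposes revision/model/minor/name, and defers all freeing (put_device() on the carrier, releasing the IDA number, kfree) to the mcb_free_bus() release callback, which only runs when the last reference is dropped. A stand-alone model of release-on-last-put; the refcount, release hook and structure names are reimplemented or invented for the demo.

#include <stdio.h>
#include <stdlib.h>

struct ref {
	int count;
	void (*release)(struct ref *);
};

static void ref_get(struct ref *r) { r->count++; }

static void ref_put(struct ref *r)
{
	if (--r->count == 0)
		r->release(r);	/* mirrors dev->release, e.g. mcb_free_bus() */
}

struct bus {
	struct ref ref;		/* first member, so &bus->ref == bus */
	int bus_nr;
};

static void bus_release(struct ref *r)
{
	struct bus *b = (struct bus *)r;

	printf("releasing bus %d\n", b->bus_nr);
	free(b);
}

int main(void)
{
	struct bus *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	b->ref = (struct ref){ .count = 1, .release = bus_release };
	b->bus_nr = 0;

	ref_get(&b->ref);	/* e.g. a child device holding the bus */
	ref_put(&b->ref);	/* child goes away */
	ref_put(&b->ref);	/* last put: bus_release() runs here */
	return 0;
}
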
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h index fb7493dcfb79..5254e0285725 100644 --- a/drivers/mcb/mcb-internal.h +++ b/drivers/mcb/mcb-internal.h | |||
| @@ -5,7 +5,6 @@ | |||
| 5 | 5 | ||
| 6 | #define PCI_VENDOR_ID_MEN 0x1a88 | 6 | #define PCI_VENDOR_ID_MEN 0x1a88 |
| 7 | #define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45 | 7 | #define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45 |
| 8 | #define CHAMELEON_FILENAME_LEN 12 | ||
| 9 | #define CHAMELEONV2_MAGIC 0xabce | 8 | #define CHAMELEONV2_MAGIC 0xabce |
| 10 | #define CHAM_HEADER_SIZE 0x200 | 9 | #define CHAM_HEADER_SIZE 0x200 |
| 11 | 10 | ||
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 004926955263..dbecbed0d258 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c | |||
| @@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, | |||
| 57 | mdev->id = GDD_DEV(reg1); | 57 | mdev->id = GDD_DEV(reg1); |
| 58 | mdev->rev = GDD_REV(reg1); | 58 | mdev->rev = GDD_REV(reg1); |
| 59 | mdev->var = GDD_VAR(reg1); | 59 | mdev->var = GDD_VAR(reg1); |
| 60 | mdev->bar = GDD_BAR(reg1); | 60 | mdev->bar = GDD_BAR(reg2); |
| 61 | mdev->group = GDD_GRP(reg2); | 61 | mdev->group = GDD_GRP(reg2); |
| 62 | mdev->inst = GDD_INS(reg2); | 62 | mdev->inst = GDD_INS(reg2); |
| 63 | 63 | ||
| @@ -113,16 +113,11 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase, | |||
| 113 | } | 113 | } |
| 114 | p += hsize; | 114 | p += hsize; |
| 115 | 115 | ||
| 116 | pr_debug("header->revision = %d\n", header->revision); | 116 | bus->revision = header->revision; |
| 117 | pr_debug("header->model = 0x%x ('%c')\n", header->model, | 117 | bus->model = header->model; |
| 118 | header->model); | 118 | bus->minor = header->minor; |
| 119 | pr_debug("header->minor = %d\n", header->minor); | 119 | snprintf(bus->name, CHAMELEON_FILENAME_LEN + 1, "%s", |
| 120 | pr_debug("header->bus_type = 0x%x\n", header->bus_type); | 120 | header->filename); |
| 121 | |||
| 122 | |||
| 123 | pr_debug("header->magic = 0x%x\n", header->magic); | ||
| 124 | pr_debug("header->filename = \"%.*s\"\n", CHAMELEON_FILENAME_LEN, | ||
| 125 | header->filename); | ||
| 126 | 121 | ||
| 127 | for_each_chameleon_cell(dtype, p) { | 122 | for_each_chameleon_cell(dtype, p) { |
| 128 | switch (dtype) { | 123 | switch (dtype) { |
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index 67d5e7d08df6..b15a0349cd97 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c | |||
| @@ -35,7 +35,6 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 35 | struct resource *res; | 35 | struct resource *res; |
| 36 | struct priv *priv; | 36 | struct priv *priv; |
| 37 | int ret; | 37 | int ret; |
| 38 | int num_cells; | ||
| 39 | unsigned long flags; | 38 | unsigned long flags; |
| 40 | 39 | ||
| 41 | priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL); | 40 | priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL); |
| @@ -55,19 +54,20 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 55 | goto out_disable; | 54 | goto out_disable; |
| 56 | } | 55 | } |
| 57 | 56 | ||
| 58 | res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE, | 57 | res = devm_request_mem_region(&pdev->dev, priv->mapbase, |
| 59 | KBUILD_MODNAME); | 58 | CHAM_HEADER_SIZE, |
| 59 | KBUILD_MODNAME); | ||
| 60 | if (!res) { | 60 | if (!res) { |
| 61 | dev_err(&pdev->dev, "Failed to request PCI memory\n"); | 61 | dev_err(&pdev->dev, "Failed to request PCI memory\n"); |
| 62 | ret = -EBUSY; | 62 | ret = -EBUSY; |
| 63 | goto out_disable; | 63 | goto out_disable; |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE); | 66 | priv->base = devm_ioremap(&pdev->dev, priv->mapbase, CHAM_HEADER_SIZE); |
| 67 | if (!priv->base) { | 67 | if (!priv->base) { |
| 68 | dev_err(&pdev->dev, "Cannot ioremap\n"); | 68 | dev_err(&pdev->dev, "Cannot ioremap\n"); |
| 69 | ret = -ENOMEM; | 69 | ret = -ENOMEM; |
| 70 | goto out_release; | 70 | goto out_disable; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | flags = pci_resource_flags(pdev, 0); | 73 | flags = pci_resource_flags(pdev, 0); |
| @@ -75,7 +75,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 75 | ret = -ENOTSUPP; | 75 | ret = -ENOTSUPP; |
| 76 | dev_err(&pdev->dev, | 76 | dev_err(&pdev->dev, |
| 77 | "IO mapped PCI devices are not supported\n"); | 77 | "IO mapped PCI devices are not supported\n"); |
| 78 | goto out_iounmap; | 78 | goto out_disable; |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | pci_set_drvdata(pdev, priv); | 81 | pci_set_drvdata(pdev, priv); |
| @@ -83,7 +83,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 83 | priv->bus = mcb_alloc_bus(&pdev->dev); | 83 | priv->bus = mcb_alloc_bus(&pdev->dev); |
| 84 | if (IS_ERR(priv->bus)) { | 84 | if (IS_ERR(priv->bus)) { |
| 85 | ret = PTR_ERR(priv->bus); | 85 | ret = PTR_ERR(priv->bus); |
| 86 | goto out_iounmap; | 86 | goto out_disable; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | priv->bus->get_irq = mcb_pci_get_irq; | 89 | priv->bus->get_irq = mcb_pci_get_irq; |
| @@ -91,9 +91,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 91 | ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); | 91 | ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); |
| 92 | if (ret < 0) | 92 | if (ret < 0) |
| 93 | goto out_mcb_bus; | 93 | goto out_mcb_bus; |
| 94 | num_cells = ret; | ||
| 95 | 94 | ||
| 96 | dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); | 95 | dev_dbg(&pdev->dev, "Found %d cells\n", ret); |
| 97 | 96 | ||
| 98 | mcb_bus_add_devices(priv->bus); | 97 | mcb_bus_add_devices(priv->bus); |
| 99 | 98 | ||
| @@ -101,10 +100,6 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 101 | 100 | ||
| 102 | out_mcb_bus: | 101 | out_mcb_bus: |
| 103 | mcb_release_bus(priv->bus); | 102 | mcb_release_bus(priv->bus); |
| 104 | out_iounmap: | ||
| 105 | iounmap(priv->base); | ||
| 106 | out_release: | ||
| 107 | pci_release_region(pdev, 0); | ||
| 108 | out_disable: | 103 | out_disable: |
| 109 | pci_disable_device(pdev); | 104 | pci_disable_device(pdev); |
| 110 | return ret; | 105 | return ret; |
| @@ -116,8 +111,6 @@ static void mcb_pci_remove(struct pci_dev *pdev) | |||
| 116 | 111 | ||
| 117 | mcb_release_bus(priv->bus); | 112 | mcb_release_bus(priv->bus); |
| 118 | 113 | ||
| 119 | iounmap(priv->base); | ||
| 120 | release_region(priv->mapbase, CHAM_HEADER_SIZE); | ||
| 121 | pci_disable_device(pdev); | 114 | pci_disable_device(pdev); |
| 122 | } | 115 | } |
| 123 | 116 | ||
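
Switching mcb-pci to devm_request_mem_region()/devm_ioremap() ties the MMIO mapping's lifetime to the PCI device, so the out_iounmap/out_release error labels and the matching calls in mcb_pci_remove() disappear; every failure after the region is claimed only needs to disable the PCI device. A condensed probe() of the same shape, as a kernel-style sketch with illustrative names and a hard-coded 0x200 header size; it assumes a kernel tree and is not the driver code itself.

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	void __iomem *base;
	struct resource *res;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* managed: released automatically when the PCI device goes away */
	res = devm_request_mem_region(&pdev->dev, pci_resource_start(pdev, 0),
				      0x200, KBUILD_MODNAME);
	if (!res) {
		ret = -EBUSY;
		goto out_disable;
	}

	base = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0), 0x200);
	if (!base) {
		ret = -ENOMEM;
		goto out_disable;
	}

	/* parse and register children; managed resources need no undo */
	return 0;

out_disable:
	pci_disable_device(pdev);
	return ret;
}
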
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c index 60074351f17e..9daf94bb8f27 100644 --- a/drivers/memory/of_memory.c +++ b/drivers/memory/of_memory.c | |||
| @@ -109,7 +109,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr, | |||
| 109 | struct lpddr2_timings *timings = NULL; | 109 | struct lpddr2_timings *timings = NULL; |
| 110 | u32 arr_sz = 0, i = 0; | 110 | u32 arr_sz = 0, i = 0; |
| 111 | struct device_node *np_tim; | 111 | struct device_node *np_tim; |
| 112 | char *tim_compat; | 112 | char *tim_compat = NULL; |
| 113 | 113 | ||
| 114 | switch (device_type) { | 114 | switch (device_type) { |
| 115 | case DDR_TYPE_LPDDR2_S2: | 115 | case DDR_TYPE_LPDDR2_S2: |
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index cfc493c2e30a..c4e41c26649e 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig | |||
| @@ -3,7 +3,6 @@ menu "EEPROM support" | |||
| 3 | config EEPROM_AT24 | 3 | config EEPROM_AT24 |
| 4 | tristate "I2C EEPROMs / RAMs / ROMs from most vendors" | 4 | tristate "I2C EEPROMs / RAMs / ROMs from most vendors" |
| 5 | depends on I2C && SYSFS | 5 | depends on I2C && SYSFS |
| 6 | select REGMAP | ||
| 7 | select NVMEM | 6 | select NVMEM |
| 8 | help | 7 | help |
| 9 | Enable this driver to get read/write support to most I2C EEPROMs | 8 | Enable this driver to get read/write support to most I2C EEPROMs |
| @@ -32,7 +31,6 @@ config EEPROM_AT24 | |||
| 32 | config EEPROM_AT25 | 31 | config EEPROM_AT25 |
| 33 | tristate "SPI EEPROMs from most vendors" | 32 | tristate "SPI EEPROMs from most vendors" |
| 34 | depends on SPI && SYSFS | 33 | depends on SPI && SYSFS |
| 35 | select REGMAP | ||
| 36 | select NVMEM | 34 | select NVMEM |
| 37 | help | 35 | help |
| 38 | Enable this driver to get read/write support to most SPI EEPROMs, | 36 | Enable this driver to get read/write support to most SPI EEPROMs, |
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 6cc17b7779a5..9ceb63b62be5 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <linux/acpi.h> | 23 | #include <linux/acpi.h> |
| 24 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
| 25 | #include <linux/nvmem-provider.h> | 25 | #include <linux/nvmem-provider.h> |
| 26 | #include <linux/regmap.h> | ||
| 27 | #include <linux/platform_data/at24.h> | 26 | #include <linux/platform_data/at24.h> |
| 28 | 27 | ||
| 29 | /* | 28 | /* |
| @@ -69,7 +68,6 @@ struct at24_data { | |||
| 69 | unsigned write_max; | 68 | unsigned write_max; |
| 70 | unsigned num_addresses; | 69 | unsigned num_addresses; |
| 71 | 70 | ||
| 72 | struct regmap_config regmap_config; | ||
| 73 | struct nvmem_config nvmem_config; | 71 | struct nvmem_config nvmem_config; |
| 74 | struct nvmem_device *nvmem; | 72 | struct nvmem_device *nvmem; |
| 75 | 73 | ||
| @@ -251,10 +249,10 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf, | |||
| 251 | return -ETIMEDOUT; | 249 | return -ETIMEDOUT; |
| 252 | } | 250 | } |
| 253 | 251 | ||
| 254 | static ssize_t at24_read(struct at24_data *at24, | 252 | static int at24_read(void *priv, unsigned int off, void *val, size_t count) |
| 255 | char *buf, loff_t off, size_t count) | ||
| 256 | { | 253 | { |
| 257 | ssize_t retval = 0; | 254 | struct at24_data *at24 = priv; |
| 255 | char *buf = val; | ||
| 258 | 256 | ||
| 259 | if (unlikely(!count)) | 257 | if (unlikely(!count)) |
| 260 | return count; | 258 | return count; |
| @@ -266,23 +264,21 @@ static ssize_t at24_read(struct at24_data *at24, | |||
| 266 | mutex_lock(&at24->lock); | 264 | mutex_lock(&at24->lock); |
| 267 | 265 | ||
| 268 | while (count) { | 266 | while (count) { |
| 269 | ssize_t status; | 267 | int status; |
| 270 | 268 | ||
| 271 | status = at24_eeprom_read(at24, buf, off, count); | 269 | status = at24_eeprom_read(at24, buf, off, count); |
| 272 | if (status <= 0) { | 270 | if (status < 0) { |
| 273 | if (retval == 0) | 271 | mutex_unlock(&at24->lock); |
| 274 | retval = status; | 272 | return status; |
| 275 | break; | ||
| 276 | } | 273 | } |
| 277 | buf += status; | 274 | buf += status; |
| 278 | off += status; | 275 | off += status; |
| 279 | count -= status; | 276 | count -= status; |
| 280 | retval += status; | ||
| 281 | } | 277 | } |
| 282 | 278 | ||
| 283 | mutex_unlock(&at24->lock); | 279 | mutex_unlock(&at24->lock); |
| 284 | 280 | ||
| 285 | return retval; | 281 | return 0; |
| 286 | } | 282 | } |
| 287 | 283 | ||
| 288 | /* | 284 | /* |
| @@ -370,13 +366,13 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf, | |||
| 370 | return -ETIMEDOUT; | 366 | return -ETIMEDOUT; |
| 371 | } | 367 | } |
| 372 | 368 | ||
| 373 | static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off, | 369 | static int at24_write(void *priv, unsigned int off, void *val, size_t count) |
| 374 | size_t count) | ||
| 375 | { | 370 | { |
| 376 | ssize_t retval = 0; | 371 | struct at24_data *at24 = priv; |
| 372 | char *buf = val; | ||
| 377 | 373 | ||
| 378 | if (unlikely(!count)) | 374 | if (unlikely(!count)) |
| 379 | return count; | 375 | return -EINVAL; |
| 380 | 376 | ||
| 381 | /* | 377 | /* |
| 382 | * Write data to chip, protecting against concurrent updates | 378 | * Write data to chip, protecting against concurrent updates |
| @@ -385,70 +381,23 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off, | |||
| 385 | mutex_lock(&at24->lock); | 381 | mutex_lock(&at24->lock); |
| 386 | 382 | ||
| 387 | while (count) { | 383 | while (count) { |
| 388 | ssize_t status; | 384 | int status; |
| 389 | 385 | ||
| 390 | status = at24_eeprom_write(at24, buf, off, count); | 386 | status = at24_eeprom_write(at24, buf, off, count); |
| 391 | if (status <= 0) { | 387 | if (status < 0) { |
| 392 | if (retval == 0) | 388 | mutex_unlock(&at24->lock); |
| 393 | retval = status; | 389 | return status; |
| 394 | break; | ||
| 395 | } | 390 | } |
| 396 | buf += status; | 391 | buf += status; |
| 397 | off += status; | 392 | off += status; |
| 398 | count -= status; | 393 | count -= status; |
| 399 | retval += status; | ||
| 400 | } | 394 | } |
| 401 | 395 | ||
| 402 | mutex_unlock(&at24->lock); | 396 | mutex_unlock(&at24->lock); |
| 403 | 397 | ||
| 404 | return retval; | ||
| 405 | } | ||
| 406 | |||
| 407 | /*-------------------------------------------------------------------------*/ | ||
| 408 | |||
| 409 | /* | ||
| 410 | * Provide a regmap interface, which is registered with the NVMEM | ||
| 411 | * framework | ||
| 412 | */ | ||
| 413 | static int at24_regmap_read(void *context, const void *reg, size_t reg_size, | ||
| 414 | void *val, size_t val_size) | ||
| 415 | { | ||
| 416 | struct at24_data *at24 = context; | ||
| 417 | off_t offset = *(u32 *)reg; | ||
| 418 | int err; | ||
| 419 | |||
| 420 | err = at24_read(at24, val, offset, val_size); | ||
| 421 | if (err) | ||
| 422 | return err; | ||
| 423 | return 0; | ||
| 424 | } | ||
| 425 | |||
| 426 | static int at24_regmap_write(void *context, const void *data, size_t count) | ||
| 427 | { | ||
| 428 | struct at24_data *at24 = context; | ||
| 429 | const char *buf; | ||
| 430 | u32 offset; | ||
| 431 | size_t len; | ||
| 432 | int err; | ||
| 433 | |||
| 434 | memcpy(&offset, data, sizeof(offset)); | ||
| 435 | buf = (const char *)data + sizeof(offset); | ||
| 436 | len = count - sizeof(offset); | ||
| 437 | |||
| 438 | err = at24_write(at24, buf, offset, len); | ||
| 439 | if (err) | ||
| 440 | return err; | ||
| 441 | return 0; | 398 | return 0; |
| 442 | } | 399 | } |
| 443 | 400 | ||
| 444 | static const struct regmap_bus at24_regmap_bus = { | ||
| 445 | .read = at24_regmap_read, | ||
| 446 | .write = at24_regmap_write, | ||
| 447 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 448 | }; | ||
| 449 | |||
| 450 | /*-------------------------------------------------------------------------*/ | ||
| 451 | |||
| 452 | #ifdef CONFIG_OF | 401 | #ifdef CONFIG_OF |
| 453 | static void at24_get_ofdata(struct i2c_client *client, | 402 | static void at24_get_ofdata(struct i2c_client *client, |
| 454 | struct at24_platform_data *chip) | 403 | struct at24_platform_data *chip) |
| @@ -480,7 +429,6 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 480 | struct at24_data *at24; | 429 | struct at24_data *at24; |
| 481 | int err; | 430 | int err; |
| 482 | unsigned i, num_addresses; | 431 | unsigned i, num_addresses; |
| 483 | struct regmap *regmap; | ||
| 484 | 432 | ||
| 485 | if (client->dev.platform_data) { | 433 | if (client->dev.platform_data) { |
| 486 | chip = *(struct at24_platform_data *)client->dev.platform_data; | 434 | chip = *(struct at24_platform_data *)client->dev.platform_data; |
| @@ -607,19 +555,6 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 607 | } | 555 | } |
| 608 | } | 556 | } |
| 609 | 557 | ||
| 610 | at24->regmap_config.reg_bits = 32; | ||
| 611 | at24->regmap_config.val_bits = 8; | ||
| 612 | at24->regmap_config.reg_stride = 1; | ||
| 613 | at24->regmap_config.max_register = chip.byte_len - 1; | ||
| 614 | |||
| 615 | regmap = devm_regmap_init(&client->dev, &at24_regmap_bus, at24, | ||
| 616 | &at24->regmap_config); | ||
| 617 | if (IS_ERR(regmap)) { | ||
| 618 | dev_err(&client->dev, "regmap init failed\n"); | ||
| 619 | err = PTR_ERR(regmap); | ||
| 620 | goto err_clients; | ||
| 621 | } | ||
| 622 | |||
| 623 | at24->nvmem_config.name = dev_name(&client->dev); | 558 | at24->nvmem_config.name = dev_name(&client->dev); |
| 624 | at24->nvmem_config.dev = &client->dev; | 559 | at24->nvmem_config.dev = &client->dev; |
| 625 | at24->nvmem_config.read_only = !writable; | 560 | at24->nvmem_config.read_only = !writable; |
| @@ -627,6 +562,12 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 627 | at24->nvmem_config.owner = THIS_MODULE; | 562 | at24->nvmem_config.owner = THIS_MODULE; |
| 628 | at24->nvmem_config.compat = true; | 563 | at24->nvmem_config.compat = true; |
| 629 | at24->nvmem_config.base_dev = &client->dev; | 564 | at24->nvmem_config.base_dev = &client->dev; |
| 565 | at24->nvmem_config.reg_read = at24_read; | ||
| 566 | at24->nvmem_config.reg_write = at24_write; | ||
| 567 | at24->nvmem_config.priv = at24; | ||
| 568 | at24->nvmem_config.stride = 4; | ||
| 569 | at24->nvmem_config.word_size = 1; | ||
| 570 | at24->nvmem_config.size = chip.byte_len; | ||
| 630 | 571 | ||
| 631 | at24->nvmem = nvmem_register(&at24->nvmem_config); | 572 | at24->nvmem = nvmem_register(&at24->nvmem_config); |
| 632 | 573 | ||
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index fa36a6e37084..2c6c7c8e3ead 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
| 18 | 18 | ||
| 19 | #include <linux/nvmem-provider.h> | 19 | #include <linux/nvmem-provider.h> |
| 20 | #include <linux/regmap.h> | ||
| 21 | #include <linux/spi/spi.h> | 20 | #include <linux/spi/spi.h> |
| 22 | #include <linux/spi/eeprom.h> | 21 | #include <linux/spi/eeprom.h> |
| 23 | #include <linux/property.h> | 22 | #include <linux/property.h> |
| @@ -34,7 +33,6 @@ struct at25_data { | |||
| 34 | struct mutex lock; | 33 | struct mutex lock; |
| 35 | struct spi_eeprom chip; | 34 | struct spi_eeprom chip; |
| 36 | unsigned addrlen; | 35 | unsigned addrlen; |
| 37 | struct regmap_config regmap_config; | ||
| 38 | struct nvmem_config nvmem_config; | 36 | struct nvmem_config nvmem_config; |
| 39 | struct nvmem_device *nvmem; | 37 | struct nvmem_device *nvmem; |
| 40 | }; | 38 | }; |
| @@ -65,14 +63,11 @@ struct at25_data { | |||
| 65 | 63 | ||
| 66 | #define io_limit PAGE_SIZE /* bytes */ | 64 | #define io_limit PAGE_SIZE /* bytes */ |
| 67 | 65 | ||
| 68 | static ssize_t | 66 | static int at25_ee_read(void *priv, unsigned int offset, |
| 69 | at25_ee_read( | 67 | void *val, size_t count) |
| 70 | struct at25_data *at25, | ||
| 71 | char *buf, | ||
| 72 | unsigned offset, | ||
| 73 | size_t count | ||
| 74 | ) | ||
| 75 | { | 68 | { |
| 69 | struct at25_data *at25 = priv; | ||
| 70 | char *buf = val; | ||
| 76 | u8 command[EE_MAXADDRLEN + 1]; | 71 | u8 command[EE_MAXADDRLEN + 1]; |
| 77 | u8 *cp; | 72 | u8 *cp; |
| 78 | ssize_t status; | 73 | ssize_t status; |
| @@ -81,11 +76,11 @@ at25_ee_read( | |||
| 81 | u8 instr; | 76 | u8 instr; |
| 82 | 77 | ||
| 83 | if (unlikely(offset >= at25->chip.byte_len)) | 78 | if (unlikely(offset >= at25->chip.byte_len)) |
| 84 | return 0; | 79 | return -EINVAL; |
| 85 | if ((offset + count) > at25->chip.byte_len) | 80 | if ((offset + count) > at25->chip.byte_len) |
| 86 | count = at25->chip.byte_len - offset; | 81 | count = at25->chip.byte_len - offset; |
| 87 | if (unlikely(!count)) | 82 | if (unlikely(!count)) |
| 88 | return count; | 83 | return -EINVAL; |
| 89 | 84 | ||
| 90 | cp = command; | 85 | cp = command; |
| 91 | 86 | ||
| @@ -131,28 +126,14 @@ at25_ee_read( | |||
| 131 | count, offset, (int) status); | 126 | count, offset, (int) status); |
| 132 | 127 | ||
| 133 | mutex_unlock(&at25->lock); | 128 | mutex_unlock(&at25->lock); |
| 134 | return status ? status : count; | 129 | return status; |
| 135 | } | 130 | } |
| 136 | 131 | ||
| 137 | static int at25_regmap_read(void *context, const void *reg, size_t reg_size, | 132 | static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) |
| 138 | void *val, size_t val_size) | ||
| 139 | { | 133 | { |
| 140 | struct at25_data *at25 = context; | 134 | struct at25_data *at25 = priv; |
| 141 | off_t offset = *(u32 *)reg; | 135 | const char *buf = val; |
| 142 | int err; | 136 | int status = 0; |
| 143 | |||
| 144 | err = at25_ee_read(at25, val, offset, val_size); | ||
| 145 | if (err) | ||
| 146 | return err; | ||
| 147 | return 0; | ||
| 148 | } | ||
| 149 | |||
| 150 | static ssize_t | ||
| 151 | at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | ||
| 152 | size_t count) | ||
| 153 | { | ||
| 154 | ssize_t status = 0; | ||
| 155 | unsigned written = 0; | ||
| 156 | unsigned buf_size; | 137 | unsigned buf_size; |
| 157 | u8 *bounce; | 138 | u8 *bounce; |
| 158 | 139 | ||
| @@ -161,7 +142,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | |||
| 161 | if ((off + count) > at25->chip.byte_len) | 142 | if ((off + count) > at25->chip.byte_len) |
| 162 | count = at25->chip.byte_len - off; | 143 | count = at25->chip.byte_len - off; |
| 163 | if (unlikely(!count)) | 144 | if (unlikely(!count)) |
| 164 | return count; | 145 | return -EINVAL; |
| 165 | 146 | ||
| 166 | /* Temp buffer starts with command and address */ | 147 | /* Temp buffer starts with command and address */ |
| 167 | buf_size = at25->chip.page_size; | 148 | buf_size = at25->chip.page_size; |
| @@ -256,40 +237,15 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, | |||
| 256 | off += segment; | 237 | off += segment; |
| 257 | buf += segment; | 238 | buf += segment; |
| 258 | count -= segment; | 239 | count -= segment; |
| 259 | written += segment; | ||
| 260 | 240 | ||
| 261 | } while (count > 0); | 241 | } while (count > 0); |
| 262 | 242 | ||
| 263 | mutex_unlock(&at25->lock); | 243 | mutex_unlock(&at25->lock); |
| 264 | 244 | ||
| 265 | kfree(bounce); | 245 | kfree(bounce); |
| 266 | return written ? written : status; | 246 | return status; |
| 267 | } | 247 | } |
| 268 | 248 | ||
| 269 | static int at25_regmap_write(void *context, const void *data, size_t count) | ||
| 270 | { | ||
| 271 | struct at25_data *at25 = context; | ||
| 272 | const char *buf; | ||
| 273 | u32 offset; | ||
| 274 | size_t len; | ||
| 275 | int err; | ||
| 276 | |||
| 277 | memcpy(&offset, data, sizeof(offset)); | ||
| 278 | buf = (const char *)data + sizeof(offset); | ||
| 279 | len = count - sizeof(offset); | ||
| 280 | |||
| 281 | err = at25_ee_write(at25, buf, offset, len); | ||
| 282 | if (err) | ||
| 283 | return err; | ||
| 284 | return 0; | ||
| 285 | } | ||
| 286 | |||
| 287 | static const struct regmap_bus at25_regmap_bus = { | ||
| 288 | .read = at25_regmap_read, | ||
| 289 | .write = at25_regmap_write, | ||
| 290 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 291 | }; | ||
| 292 | |||
| 293 | /*-------------------------------------------------------------------------*/ | 249 | /*-------------------------------------------------------------------------*/ |
| 294 | 250 | ||
| 295 | static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip) | 251 | static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip) |
| @@ -349,7 +305,6 @@ static int at25_probe(struct spi_device *spi) | |||
| 349 | { | 305 | { |
| 350 | struct at25_data *at25 = NULL; | 306 | struct at25_data *at25 = NULL; |
| 351 | struct spi_eeprom chip; | 307 | struct spi_eeprom chip; |
| 352 | struct regmap *regmap; | ||
| 353 | int err; | 308 | int err; |
| 354 | int sr; | 309 | int sr; |
| 355 | int addrlen; | 310 | int addrlen; |
| @@ -390,22 +345,10 @@ static int at25_probe(struct spi_device *spi) | |||
| 390 | 345 | ||
| 391 | mutex_init(&at25->lock); | 346 | mutex_init(&at25->lock); |
| 392 | at25->chip = chip; | 347 | at25->chip = chip; |
| 393 | at25->spi = spi_dev_get(spi); | 348 | at25->spi = spi; |
| 394 | spi_set_drvdata(spi, at25); | 349 | spi_set_drvdata(spi, at25); |
| 395 | at25->addrlen = addrlen; | 350 | at25->addrlen = addrlen; |
| 396 | 351 | ||
| 397 | at25->regmap_config.reg_bits = 32; | ||
| 398 | at25->regmap_config.val_bits = 8; | ||
| 399 | at25->regmap_config.reg_stride = 1; | ||
| 400 | at25->regmap_config.max_register = chip.byte_len - 1; | ||
| 401 | |||
| 402 | regmap = devm_regmap_init(&spi->dev, &at25_regmap_bus, at25, | ||
| 403 | &at25->regmap_config); | ||
| 404 | if (IS_ERR(regmap)) { | ||
| 405 | dev_err(&spi->dev, "regmap init failed\n"); | ||
| 406 | return PTR_ERR(regmap); | ||
| 407 | } | ||
| 408 | |||
| 409 | at25->nvmem_config.name = dev_name(&spi->dev); | 352 | at25->nvmem_config.name = dev_name(&spi->dev); |
| 410 | at25->nvmem_config.dev = &spi->dev; | 353 | at25->nvmem_config.dev = &spi->dev; |
| 411 | at25->nvmem_config.read_only = chip.flags & EE_READONLY; | 354 | at25->nvmem_config.read_only = chip.flags & EE_READONLY; |
| @@ -413,6 +356,12 @@ static int at25_probe(struct spi_device *spi) | |||
| 413 | at25->nvmem_config.owner = THIS_MODULE; | 356 | at25->nvmem_config.owner = THIS_MODULE; |
| 414 | at25->nvmem_config.compat = true; | 357 | at25->nvmem_config.compat = true; |
| 415 | at25->nvmem_config.base_dev = &spi->dev; | 358 | at25->nvmem_config.base_dev = &spi->dev; |
| 359 | at25->nvmem_config.reg_read = at25_ee_read; | ||
| 360 | at25->nvmem_config.reg_write = at25_ee_write; | ||
| 361 | at25->nvmem_config.priv = at25; | ||
| 362 | at25->nvmem_config.stride = 4; | ||
| 363 | at25->nvmem_config.word_size = 1; | ||
| 364 | at25->nvmem_config.size = chip.byte_len; | ||
| 416 | 365 | ||
| 417 | at25->nvmem = nvmem_register(&at25->nvmem_config); | 366 | at25->nvmem = nvmem_register(&at25->nvmem_config); |
| 418 | if (IS_ERR(at25->nvmem)) | 367 | if (IS_ERR(at25->nvmem)) |
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c index 426fe2fd5238..94cc035aa841 100644 --- a/drivers/misc/eeprom/eeprom_93xx46.c +++ b/drivers/misc/eeprom/eeprom_93xx46.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
| 22 | #include <linux/nvmem-provider.h> | 22 | #include <linux/nvmem-provider.h> |
| 23 | #include <linux/regmap.h> | ||
| 24 | #include <linux/eeprom_93xx46.h> | 23 | #include <linux/eeprom_93xx46.h> |
| 25 | 24 | ||
| 26 | #define OP_START 0x4 | 25 | #define OP_START 0x4 |
| @@ -43,7 +42,6 @@ struct eeprom_93xx46_dev { | |||
| 43 | struct spi_device *spi; | 42 | struct spi_device *spi; |
| 44 | struct eeprom_93xx46_platform_data *pdata; | 43 | struct eeprom_93xx46_platform_data *pdata; |
| 45 | struct mutex lock; | 44 | struct mutex lock; |
| 46 | struct regmap_config regmap_config; | ||
| 47 | struct nvmem_config nvmem_config; | 45 | struct nvmem_config nvmem_config; |
| 48 | struct nvmem_device *nvmem; | 46 | struct nvmem_device *nvmem; |
| 49 | int addrlen; | 47 | int addrlen; |
| @@ -60,11 +58,12 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev) | |||
| 60 | return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH; | 58 | return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH; |
| 61 | } | 59 | } |
| 62 | 60 | ||
| 63 | static ssize_t | 61 | static int eeprom_93xx46_read(void *priv, unsigned int off, |
| 64 | eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf, | 62 | void *val, size_t count) |
| 65 | unsigned off, size_t count) | ||
| 66 | { | 63 | { |
| 67 | ssize_t ret = 0; | 64 | struct eeprom_93xx46_dev *edev = priv; |
| 65 | char *buf = val; | ||
| 66 | int err = 0; | ||
| 68 | 67 | ||
| 69 | if (unlikely(off >= edev->size)) | 68 | if (unlikely(off >= edev->size)) |
| 70 | return 0; | 69 | return 0; |
| @@ -84,7 +83,6 @@ eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf, | |||
| 84 | u16 cmd_addr = OP_READ << edev->addrlen; | 83 | u16 cmd_addr = OP_READ << edev->addrlen; |
| 85 | size_t nbytes = count; | 84 | size_t nbytes = count; |
| 86 | int bits; | 85 | int bits; |
| 87 | int err; | ||
| 88 | 86 | ||
| 89 | if (edev->addrlen == 7) { | 87 | if (edev->addrlen == 7) { |
| 90 | cmd_addr |= off & 0x7f; | 88 | cmd_addr |= off & 0x7f; |
| @@ -120,21 +118,20 @@ eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf, | |||
| 120 | if (err) { | 118 | if (err) { |
| 121 | dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n", | 119 | dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n", |
| 122 | nbytes, (int)off, err); | 120 | nbytes, (int)off, err); |
| 123 | ret = err; | ||
| 124 | break; | 121 | break; |
| 125 | } | 122 | } |
| 126 | 123 | ||
| 127 | buf += nbytes; | 124 | buf += nbytes; |
| 128 | off += nbytes; | 125 | off += nbytes; |
| 129 | count -= nbytes; | 126 | count -= nbytes; |
| 130 | ret += nbytes; | ||
| 131 | } | 127 | } |
| 132 | 128 | ||
| 133 | if (edev->pdata->finish) | 129 | if (edev->pdata->finish) |
| 134 | edev->pdata->finish(edev); | 130 | edev->pdata->finish(edev); |
| 135 | 131 | ||
| 136 | mutex_unlock(&edev->lock); | 132 | mutex_unlock(&edev->lock); |
| 137 | return ret; | 133 | |
| 134 | return err; | ||
| 138 | } | 135 | } |
| 139 | 136 | ||
| 140 | static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) | 137 | static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on) |
| @@ -230,10 +227,11 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev, | |||
| 230 | return ret; | 227 | return ret; |
| 231 | } | 228 | } |
| 232 | 229 | ||
| 233 | static ssize_t | 230 | static int eeprom_93xx46_write(void *priv, unsigned int off, |
| 234 | eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf, | 231 | void *val, size_t count) |
| 235 | loff_t off, size_t count) | ||
| 236 | { | 232 | { |
| 233 | struct eeprom_93xx46_dev *edev = priv; | ||
| 234 | char *buf = val; | ||
| 237 | int i, ret, step = 1; | 235 | int i, ret, step = 1; |
| 238 | 236 | ||
| 239 | if (unlikely(off >= edev->size)) | 237 | if (unlikely(off >= edev->size)) |
| @@ -275,52 +273,9 @@ eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf, | |||
| 275 | 273 | ||
| 276 | /* erase/write disable */ | 274 | /* erase/write disable */ |
| 277 | eeprom_93xx46_ew(edev, 0); | 275 | eeprom_93xx46_ew(edev, 0); |
| 278 | return ret ? : count; | 276 | return ret; |
| 279 | } | ||
| 280 | |||
| 281 | /* | ||
| 282 | * Provide a regmap interface, which is registered with the NVMEM | ||
| 283 | * framework | ||
| 284 | */ | ||
| 285 | static int eeprom_93xx46_regmap_read(void *context, const void *reg, | ||
| 286 | size_t reg_size, void *val, | ||
| 287 | size_t val_size) | ||
| 288 | { | ||
| 289 | struct eeprom_93xx46_dev *eeprom_93xx46 = context; | ||
| 290 | off_t offset = *(u32 *)reg; | ||
| 291 | int err; | ||
| 292 | |||
| 293 | err = eeprom_93xx46_read(eeprom_93xx46, val, offset, val_size); | ||
| 294 | if (err) | ||
| 295 | return err; | ||
| 296 | return 0; | ||
| 297 | } | ||
| 298 | |||
| 299 | static int eeprom_93xx46_regmap_write(void *context, const void *data, | ||
| 300 | size_t count) | ||
| 301 | { | ||
| 302 | struct eeprom_93xx46_dev *eeprom_93xx46 = context; | ||
| 303 | const char *buf; | ||
| 304 | u32 offset; | ||
| 305 | size_t len; | ||
| 306 | int err; | ||
| 307 | |||
| 308 | memcpy(&offset, data, sizeof(offset)); | ||
| 309 | buf = (const char *)data + sizeof(offset); | ||
| 310 | len = count - sizeof(offset); | ||
| 311 | |||
| 312 | err = eeprom_93xx46_write(eeprom_93xx46, buf, offset, len); | ||
| 313 | if (err) | ||
| 314 | return err; | ||
| 315 | return 0; | ||
| 316 | } | 277 | } |
| 317 | 278 | ||
| 318 | static const struct regmap_bus eeprom_93xx46_regmap_bus = { | ||
| 319 | .read = eeprom_93xx46_regmap_read, | ||
| 320 | .write = eeprom_93xx46_regmap_write, | ||
| 321 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 322 | }; | ||
| 323 | |||
| 324 | static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) | 279 | static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev) |
| 325 | { | 280 | { |
| 326 | struct eeprom_93xx46_platform_data *pd = edev->pdata; | 281 | struct eeprom_93xx46_platform_data *pd = edev->pdata; |
| @@ -480,7 +435,6 @@ static int eeprom_93xx46_probe(struct spi_device *spi) | |||
| 480 | { | 435 | { |
| 481 | struct eeprom_93xx46_platform_data *pd; | 436 | struct eeprom_93xx46_platform_data *pd; |
| 482 | struct eeprom_93xx46_dev *edev; | 437 | struct eeprom_93xx46_dev *edev; |
| 483 | struct regmap *regmap; | ||
| 484 | int err; | 438 | int err; |
| 485 | 439 | ||
| 486 | if (spi->dev.of_node) { | 440 | if (spi->dev.of_node) { |
| @@ -511,24 +465,10 @@ static int eeprom_93xx46_probe(struct spi_device *spi) | |||
| 511 | 465 | ||
| 512 | mutex_init(&edev->lock); | 466 | mutex_init(&edev->lock); |
| 513 | 467 | ||
| 514 | edev->spi = spi_dev_get(spi); | 468 | edev->spi = spi; |
| 515 | edev->pdata = pd; | 469 | edev->pdata = pd; |
| 516 | 470 | ||
| 517 | edev->size = 128; | 471 | edev->size = 128; |
| 518 | |||
| 519 | edev->regmap_config.reg_bits = 32; | ||
| 520 | edev->regmap_config.val_bits = 8; | ||
| 521 | edev->regmap_config.reg_stride = 1; | ||
| 522 | edev->regmap_config.max_register = edev->size - 1; | ||
| 523 | |||
| 524 | regmap = devm_regmap_init(&spi->dev, &eeprom_93xx46_regmap_bus, edev, | ||
| 525 | &edev->regmap_config); | ||
| 526 | if (IS_ERR(regmap)) { | ||
| 527 | dev_err(&spi->dev, "regmap init failed\n"); | ||
| 528 | err = PTR_ERR(regmap); | ||
| 529 | goto fail; | ||
| 530 | } | ||
| 531 | |||
| 532 | edev->nvmem_config.name = dev_name(&spi->dev); | 472 | edev->nvmem_config.name = dev_name(&spi->dev); |
| 533 | edev->nvmem_config.dev = &spi->dev; | 473 | edev->nvmem_config.dev = &spi->dev; |
| 534 | edev->nvmem_config.read_only = pd->flags & EE_READONLY; | 474 | edev->nvmem_config.read_only = pd->flags & EE_READONLY; |
| @@ -536,6 +476,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi) | |||
| 536 | edev->nvmem_config.owner = THIS_MODULE; | 476 | edev->nvmem_config.owner = THIS_MODULE; |
| 537 | edev->nvmem_config.compat = true; | 477 | edev->nvmem_config.compat = true; |
| 538 | edev->nvmem_config.base_dev = &spi->dev; | 478 | edev->nvmem_config.base_dev = &spi->dev; |
| 479 | edev->nvmem_config.reg_read = eeprom_93xx46_read; | ||
| 480 | edev->nvmem_config.reg_write = eeprom_93xx46_write; | ||
| 481 | edev->nvmem_config.priv = edev; | ||
| 482 | edev->nvmem_config.stride = 4; | ||
| 483 | edev->nvmem_config.word_size = 1; | ||
| 484 | edev->nvmem_config.size = edev->size; | ||
| 539 | 485 | ||
| 540 | edev->nvmem = nvmem_register(&edev->nvmem_config); | 486 | edev->nvmem = nvmem_register(&edev->nvmem_config); |
| 541 | if (IS_ERR(edev->nvmem)) { | 487 | if (IS_ERR(edev->nvmem)) { |
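
Note the change in return convention that the eeprom_93xx46, at24 and at25 helpers all adopt: the old ssize_t helpers could return a partial byte count, whereas a reg_read/reg_write callback returns 0 on success or a negative errno and is expected to complete the whole transfer. A hedged sketch of that loop shape follows, with do_chunk_io() as a purely hypothetical stand-in for the real bus transfer.

/*
 * Hedged sketch of the new error convention adopted by the converted
 * helpers: loop over the transfer and bail out with a negative errno
 * instead of returning a partial byte count.  do_chunk_io() is a
 * hypothetical stand-in for the real bus transfer.
 */
#include <linux/kernel.h>
#include <linux/string.h>

static int do_chunk_io(void *priv, unsigned int off, char *buf, size_t count)
{
	size_t n = min_t(size_t, count, 16);	/* pretend the bus moves 16 bytes */

	memset(buf, 0, n);			/* stand-in for the real I/O */
	return n;
}

static int example_reg_read(void *priv, unsigned int off, void *val, size_t count)
{
	char *buf = val;

	while (count) {
		int n = do_chunk_io(priv, off, buf, count);

		if (n < 0)
			return n;	/* propagate the errno, no partial result */
		buf += n;
		off += n;
		count -= n;
	}

	return 0;			/* success means "everything transferred" */
}
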
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c index 194360a5f782..a039a5df6f21 100644 --- a/drivers/misc/mei/amthif.c +++ b/drivers/misc/mei/amthif.c | |||
| @@ -380,8 +380,10 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl, | |||
| 380 | 380 | ||
| 381 | dev = cl->dev; | 381 | dev = cl->dev; |
| 382 | 382 | ||
| 383 | if (dev->iamthif_state != MEI_IAMTHIF_READING) | 383 | if (dev->iamthif_state != MEI_IAMTHIF_READING) { |
| 384 | mei_irq_discard_msg(dev, mei_hdr); | ||
| 384 | return 0; | 385 | return 0; |
| 386 | } | ||
| 385 | 387 | ||
| 386 | ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list); | 388 | ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list); |
| 387 | if (ret) | 389 | if (ret) |
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 5d5996e39a67..1f33fea9299f 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
| @@ -220,17 +220,23 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv); | |||
| 220 | static void mei_cl_bus_event_work(struct work_struct *work) | 220 | static void mei_cl_bus_event_work(struct work_struct *work) |
| 221 | { | 221 | { |
| 222 | struct mei_cl_device *cldev; | 222 | struct mei_cl_device *cldev; |
| 223 | struct mei_device *bus; | ||
| 223 | 224 | ||
| 224 | cldev = container_of(work, struct mei_cl_device, event_work); | 225 | cldev = container_of(work, struct mei_cl_device, event_work); |
| 225 | 226 | ||
| 227 | bus = cldev->bus; | ||
| 228 | |||
| 226 | if (cldev->event_cb) | 229 | if (cldev->event_cb) |
| 227 | cldev->event_cb(cldev, cldev->events, cldev->event_context); | 230 | cldev->event_cb(cldev, cldev->events, cldev->event_context); |
| 228 | 231 | ||
| 229 | cldev->events = 0; | 232 | cldev->events = 0; |
| 230 | 233 | ||
| 231 | /* Prepare for the next read */ | 234 | /* Prepare for the next read */ |
| 232 | if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) | 235 | if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { |
| 236 | mutex_lock(&bus->device_lock); | ||
| 233 | mei_cl_read_start(cldev->cl, 0, NULL); | 237 | mei_cl_read_start(cldev->cl, 0, NULL); |
| 238 | mutex_unlock(&bus->device_lock); | ||
| 239 | } | ||
| 234 | } | 240 | } |
| 235 | 241 | ||
| 236 | /** | 242 | /** |
| @@ -304,6 +310,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev, | |||
| 304 | unsigned long events_mask, | 310 | unsigned long events_mask, |
| 305 | mei_cldev_event_cb_t event_cb, void *context) | 311 | mei_cldev_event_cb_t event_cb, void *context) |
| 306 | { | 312 | { |
| 313 | struct mei_device *bus = cldev->bus; | ||
| 307 | int ret; | 314 | int ret; |
| 308 | 315 | ||
| 309 | if (cldev->event_cb) | 316 | if (cldev->event_cb) |
| @@ -316,15 +323,17 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev, | |||
| 316 | INIT_WORK(&cldev->event_work, mei_cl_bus_event_work); | 323 | INIT_WORK(&cldev->event_work, mei_cl_bus_event_work); |
| 317 | 324 | ||
| 318 | if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { | 325 | if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { |
| 326 | mutex_lock(&bus->device_lock); | ||
| 319 | ret = mei_cl_read_start(cldev->cl, 0, NULL); | 327 | ret = mei_cl_read_start(cldev->cl, 0, NULL); |
| 328 | mutex_unlock(&bus->device_lock); | ||
| 320 | if (ret && ret != -EBUSY) | 329 | if (ret && ret != -EBUSY) |
| 321 | return ret; | 330 | return ret; |
| 322 | } | 331 | } |
| 323 | 332 | ||
| 324 | if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) { | 333 | if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) { |
| 325 | mutex_lock(&cldev->cl->dev->device_lock); | 334 | mutex_lock(&bus->device_lock); |
| 326 | ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0); | 335 | ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0); |
| 327 | mutex_unlock(&cldev->cl->dev->device_lock); | 336 | mutex_unlock(&bus->device_lock); |
| 328 | if (ret) | 337 | if (ret) |
| 329 | return ret; | 338 | return ret; |
| 330 | } | 339 | } |
| @@ -580,6 +589,7 @@ static int mei_cl_device_probe(struct device *dev) | |||
| 580 | struct mei_cl_device *cldev; | 589 | struct mei_cl_device *cldev; |
| 581 | struct mei_cl_driver *cldrv; | 590 | struct mei_cl_driver *cldrv; |
| 582 | const struct mei_cl_device_id *id; | 591 | const struct mei_cl_device_id *id; |
| 592 | int ret; | ||
| 583 | 593 | ||
| 584 | cldev = to_mei_cl_device(dev); | 594 | cldev = to_mei_cl_device(dev); |
| 585 | cldrv = to_mei_cl_driver(dev->driver); | 595 | cldrv = to_mei_cl_driver(dev->driver); |
| @@ -594,9 +604,12 @@ static int mei_cl_device_probe(struct device *dev) | |||
| 594 | if (!id) | 604 | if (!id) |
| 595 | return -ENODEV; | 605 | return -ENODEV; |
| 596 | 606 | ||
| 597 | __module_get(THIS_MODULE); | 607 | ret = cldrv->probe(cldev, id); |
| 608 | if (ret) | ||
| 609 | return ret; | ||
| 598 | 610 | ||
| 599 | return cldrv->probe(cldev, id); | 611 | __module_get(THIS_MODULE); |
| 612 | return 0; | ||
| 600 | } | 613 | } |
| 601 | 614 | ||
| 602 | /** | 615 | /** |
| @@ -634,11 +647,8 @@ static ssize_t name_show(struct device *dev, struct device_attribute *a, | |||
| 634 | char *buf) | 647 | char *buf) |
| 635 | { | 648 | { |
| 636 | struct mei_cl_device *cldev = to_mei_cl_device(dev); | 649 | struct mei_cl_device *cldev = to_mei_cl_device(dev); |
| 637 | size_t len; | ||
| 638 | |||
| 639 | len = snprintf(buf, PAGE_SIZE, "%s", cldev->name); | ||
| 640 | 650 | ||
| 641 | return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; | 651 | return scnprintf(buf, PAGE_SIZE, "%s", cldev->name); |
| 642 | } | 652 | } |
| 643 | static DEVICE_ATTR_RO(name); | 653 | static DEVICE_ATTR_RO(name); |
| 644 | 654 | ||
| @@ -647,11 +657,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *a, | |||
| 647 | { | 657 | { |
| 648 | struct mei_cl_device *cldev = to_mei_cl_device(dev); | 658 | struct mei_cl_device *cldev = to_mei_cl_device(dev); |
| 649 | const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); | 659 | const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); |
| 650 | size_t len; | ||
| 651 | 660 | ||
| 652 | len = snprintf(buf, PAGE_SIZE, "%pUl", uuid); | 661 | return scnprintf(buf, PAGE_SIZE, "%pUl", uuid); |
| 653 | |||
| 654 | return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; | ||
| 655 | } | 662 | } |
| 656 | static DEVICE_ATTR_RO(uuid); | 663 | static DEVICE_ATTR_RO(uuid); |
| 657 | 664 | ||
| @@ -660,11 +667,8 @@ static ssize_t version_show(struct device *dev, struct device_attribute *a, | |||
| 660 | { | 667 | { |
| 661 | struct mei_cl_device *cldev = to_mei_cl_device(dev); | 668 | struct mei_cl_device *cldev = to_mei_cl_device(dev); |
| 662 | u8 version = mei_me_cl_ver(cldev->me_cl); | 669 | u8 version = mei_me_cl_ver(cldev->me_cl); |
| 663 | size_t len; | ||
| 664 | |||
| 665 | len = snprintf(buf, PAGE_SIZE, "%02X", version); | ||
| 666 | 670 | ||
| 667 | return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; | 671 | return scnprintf(buf, PAGE_SIZE, "%02X", version); |
| 668 | } | 672 | } |
| 669 | static DEVICE_ATTR_RO(version); | 673 | static DEVICE_ATTR_RO(version); |
| 670 | 674 | ||
| @@ -673,10 +677,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a, | |||
| 673 | { | 677 | { |
| 674 | struct mei_cl_device *cldev = to_mei_cl_device(dev); | 678 | struct mei_cl_device *cldev = to_mei_cl_device(dev); |
| 675 | const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); | 679 | const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); |
| 676 | size_t len; | ||
| 677 | 680 | ||
| 678 | len = snprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid); | 681 | return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid); |
| 679 | return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len; | ||
| 680 | } | 682 | } |
| 681 | static DEVICE_ATTR_RO(modalias); | 683 | static DEVICE_ATTR_RO(modalias); |
| 682 | 684 | ||
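
The sysfs show() cleanups above replace the open-coded clamp around snprintf() with scnprintf(), which already returns the number of bytes actually written into the buffer (excluding the trailing NUL) and never reports more than the buffer holds. A minimal sketch of the resulting pattern; the attribute name and output are placeholders, not taken from the mei bus driver.

/*
 * Sketch of a sysfs show() using scnprintf(); the attribute name and the
 * printed string are placeholders, not taken from the mei bus driver.
 */
#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	/* scnprintf() clamps to the buffer and returns the bytes written */
	return scnprintf(buf, PAGE_SIZE, "%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(example);
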
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index bab17e4197b6..eed254da63a8 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
| @@ -727,6 +727,11 @@ static void mei_cl_wake_all(struct mei_cl *cl) | |||
| 727 | cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); | 727 | cl_dbg(dev, cl, "Waking up waiting for event clients!\n"); |
| 728 | wake_up_interruptible(&cl->ev_wait); | 728 | wake_up_interruptible(&cl->ev_wait); |
| 729 | } | 729 | } |
| 730 | /* synchronized under device mutex */ | ||
| 731 | if (waitqueue_active(&cl->wait)) { | ||
| 732 | cl_dbg(dev, cl, "Waking up ctrl write clients!\n"); | ||
| 733 | wake_up_interruptible(&cl->wait); | ||
| 734 | } | ||
| 730 | } | 735 | } |
| 731 | 736 | ||
| 732 | /** | 737 | /** |
| @@ -879,12 +884,15 @@ static int __mei_cl_disconnect(struct mei_cl *cl) | |||
| 879 | } | 884 | } |
| 880 | 885 | ||
| 881 | mutex_unlock(&dev->device_lock); | 886 | mutex_unlock(&dev->device_lock); |
| 882 | wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY, | 887 | wait_event_timeout(cl->wait, |
| 888 | cl->state == MEI_FILE_DISCONNECT_REPLY || | ||
| 889 | cl->state == MEI_FILE_DISCONNECTED, | ||
| 883 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); | 890 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); |
| 884 | mutex_lock(&dev->device_lock); | 891 | mutex_lock(&dev->device_lock); |
| 885 | 892 | ||
| 886 | rets = cl->status; | 893 | rets = cl->status; |
| 887 | if (cl->state != MEI_FILE_DISCONNECT_REPLY) { | 894 | if (cl->state != MEI_FILE_DISCONNECT_REPLY && |
| 895 | cl->state != MEI_FILE_DISCONNECTED) { | ||
| 888 | cl_dbg(dev, cl, "timeout on disconnect from FW client.\n"); | 896 | cl_dbg(dev, cl, "timeout on disconnect from FW client.\n"); |
| 889 | rets = -ETIME; | 897 | rets = -ETIME; |
| 890 | } | 898 | } |
| @@ -1085,6 +1093,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, | |||
| 1085 | mutex_unlock(&dev->device_lock); | 1093 | mutex_unlock(&dev->device_lock); |
| 1086 | wait_event_timeout(cl->wait, | 1094 | wait_event_timeout(cl->wait, |
| 1087 | (cl->state == MEI_FILE_CONNECTED || | 1095 | (cl->state == MEI_FILE_CONNECTED || |
| 1096 | cl->state == MEI_FILE_DISCONNECTED || | ||
| 1088 | cl->state == MEI_FILE_DISCONNECT_REQUIRED || | 1097 | cl->state == MEI_FILE_DISCONNECT_REQUIRED || |
| 1089 | cl->state == MEI_FILE_DISCONNECT_REPLY), | 1098 | cl->state == MEI_FILE_DISCONNECT_REPLY), |
| 1090 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); | 1099 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); |
| @@ -1333,16 +1342,13 @@ int mei_cl_notify_request(struct mei_cl *cl, | |||
| 1333 | } | 1342 | } |
| 1334 | 1343 | ||
| 1335 | mutex_unlock(&dev->device_lock); | 1344 | mutex_unlock(&dev->device_lock); |
| 1336 | wait_event_timeout(cl->wait, cl->notify_en == request, | 1345 | wait_event_timeout(cl->wait, |
| 1337 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); | 1346 | cl->notify_en == request || !mei_cl_is_connected(cl), |
| 1347 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); | ||
| 1338 | mutex_lock(&dev->device_lock); | 1348 | mutex_lock(&dev->device_lock); |
| 1339 | 1349 | ||
| 1340 | if (cl->notify_en != request) { | 1350 | if (cl->notify_en != request && !cl->status) |
| 1341 | mei_io_list_flush(&dev->ctrl_rd_list, cl); | 1351 | cl->status = -EFAULT; |
| 1342 | mei_io_list_flush(&dev->ctrl_wr_list, cl); | ||
| 1343 | if (!cl->status) | ||
| 1344 | cl->status = -EFAULT; | ||
| 1345 | } | ||
| 1346 | 1352 | ||
| 1347 | rets = cl->status; | 1353 | rets = cl->status; |
| 1348 | 1354 | ||
| @@ -1767,6 +1773,10 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) | |||
| 1767 | wake_up(&cl->wait); | 1773 | wake_up(&cl->wait); |
| 1768 | 1774 | ||
| 1769 | break; | 1775 | break; |
| 1776 | case MEI_FOP_DISCONNECT_RSP: | ||
| 1777 | mei_io_cb_free(cb); | ||
| 1778 | mei_cl_set_disconnected(cl); | ||
| 1779 | break; | ||
| 1770 | default: | 1780 | default: |
| 1771 | BUG_ON(0); | 1781 | BUG_ON(0); |
| 1772 | } | 1782 | } |
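
The client.c changes above extend the disconnect, connect and notify waits so they also complete when the client drops to MEI_FILE_DISCONNECTED, e.g. across a link reset, instead of sleeping until the timeout. Below is a self-contained sketch of that wait-for-any-terminal-state-then-recheck shape, using hypothetical types and state names rather than the mei ones.

/*
 * Self-contained sketch (hypothetical type and state names) of waiting for
 * one of several terminal states and re-checking under the lock afterwards,
 * the shape the disconnect/connect/notify waits above now follow.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/wait.h>

enum example_state { STATE_PENDING, STATE_DONE, STATE_DEAD };

struct example_client {
	struct mutex lock;			/* protects @state */
	wait_queue_head_t wait;
	enum example_state state;
};

/* called with cl->lock held; returns 0 or -ETIME */
static int example_wait_done(struct example_client *cl)
{
	int ret = 0;

	mutex_unlock(&cl->lock);
	wait_event_timeout(cl->wait,
			   cl->state == STATE_DONE || cl->state == STATE_DEAD,
			   msecs_to_jiffies(15000));
	mutex_lock(&cl->lock);

	/* neither terminal state was reached before the timeout */
	if (cl->state != STATE_DONE && cl->state != STATE_DEAD)
		ret = -ETIME;

	return ret;
}
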
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 5e305d2605f3..5aa606c8a827 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c | |||
| @@ -113,8 +113,6 @@ void mei_hbm_idle(struct mei_device *dev) | |||
| 113 | */ | 113 | */ |
| 114 | void mei_hbm_reset(struct mei_device *dev) | 114 | void mei_hbm_reset(struct mei_device *dev) |
| 115 | { | 115 | { |
| 116 | dev->me_client_index = 0; | ||
| 117 | |||
| 118 | mei_me_cl_rm_all(dev); | 116 | mei_me_cl_rm_all(dev); |
| 119 | 117 | ||
| 120 | mei_hbm_idle(dev); | 118 | mei_hbm_idle(dev); |
| @@ -530,24 +528,22 @@ static void mei_hbm_cl_notify(struct mei_device *dev, | |||
| 530 | * mei_hbm_prop_req - request property for a single client | 528 | * mei_hbm_prop_req - request property for a single client |
| 531 | * | 529 | * |
| 532 | * @dev: the device structure | 530 | * @dev: the device structure |
| 531 | * @start_idx: client index to start search | ||
| 533 | * | 532 | * |
| 534 | * Return: 0 on success and < 0 on failure | 533 | * Return: 0 on success and < 0 on failure |
| 535 | */ | 534 | */ |
| 536 | 535 | static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) | |
| 537 | static int mei_hbm_prop_req(struct mei_device *dev) | ||
| 538 | { | 536 | { |
| 539 | |||
| 540 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; | 537 | struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; |
| 541 | struct hbm_props_request *prop_req; | 538 | struct hbm_props_request *prop_req; |
| 542 | const size_t len = sizeof(struct hbm_props_request); | 539 | const size_t len = sizeof(struct hbm_props_request); |
| 543 | unsigned long next_client_index; | 540 | unsigned long addr; |
| 544 | int ret; | 541 | int ret; |
| 545 | 542 | ||
| 546 | next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, | 543 | addr = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, start_idx); |
| 547 | dev->me_client_index); | ||
| 548 | 544 | ||
| 549 | /* We got all client properties */ | 545 | /* We got all client properties */ |
| 550 | if (next_client_index == MEI_CLIENTS_MAX) { | 546 | if (addr == MEI_CLIENTS_MAX) { |
| 551 | dev->hbm_state = MEI_HBM_STARTED; | 547 | dev->hbm_state = MEI_HBM_STARTED; |
| 552 | mei_host_client_init(dev); | 548 | mei_host_client_init(dev); |
| 553 | 549 | ||
| @@ -560,7 +556,7 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
| 560 | memset(prop_req, 0, sizeof(struct hbm_props_request)); | 556 | memset(prop_req, 0, sizeof(struct hbm_props_request)); |
| 561 | 557 | ||
| 562 | prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; | 558 | prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; |
| 563 | prop_req->me_addr = next_client_index; | 559 | prop_req->me_addr = addr; |
| 564 | 560 | ||
| 565 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); | 561 | ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); |
| 566 | if (ret) { | 562 | if (ret) { |
| @@ -570,7 +566,6 @@ static int mei_hbm_prop_req(struct mei_device *dev) | |||
| 570 | } | 566 | } |
| 571 | 567 | ||
| 572 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; | 568 | dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; |
| 573 | dev->me_client_index = next_client_index; | ||
| 574 | 569 | ||
| 575 | return 0; | 570 | return 0; |
| 576 | } | 571 | } |
| @@ -882,8 +877,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev, | |||
| 882 | cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL); | 877 | cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL); |
| 883 | if (!cb) | 878 | if (!cb) |
| 884 | return -ENOMEM; | 879 | return -ENOMEM; |
| 885 | cl_dbg(dev, cl, "add disconnect response as first\n"); | 880 | list_add_tail(&cb->list, &dev->ctrl_wr_list.list); |
| 886 | list_add(&cb->list, &dev->ctrl_wr_list.list); | ||
| 887 | } | 881 | } |
| 888 | return 0; | 882 | return 0; |
| 889 | } | 883 | } |
| @@ -1152,10 +1146,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
| 1152 | 1146 | ||
| 1153 | mei_hbm_me_cl_add(dev, props_res); | 1147 | mei_hbm_me_cl_add(dev, props_res); |
| 1154 | 1148 | ||
| 1155 | dev->me_client_index++; | ||
| 1156 | |||
| 1157 | /* request property for the next client */ | 1149 | /* request property for the next client */ |
| 1158 | if (mei_hbm_prop_req(dev)) | 1150 | if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) |
| 1159 | return -EIO; | 1151 | return -EIO; |
| 1160 | 1152 | ||
| 1161 | break; | 1153 | break; |
| @@ -1181,7 +1173,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) | |||
| 1181 | dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; | 1173 | dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; |
| 1182 | 1174 | ||
| 1183 | /* first property request */ | 1175 | /* first property request */ |
| 1184 | if (mei_hbm_prop_req(dev)) | 1176 | if (mei_hbm_prop_req(dev, 0)) |
| 1185 | return -EIO; | 1177 | return -EIO; |
| 1186 | 1178 | ||
| 1187 | break; | 1179 | break; |
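
With the hbm.c change above, the enumeration cursor (me_client_index) disappears: the next client to query is derived from the address in the firmware's own reply, by searching the client bitmap from start_idx. A small sketch of that stateless lookup; MAX_CLIENTS and the bitmap argument are illustrative, while find_next_bit() is the real helper used in the patch.

/*
 * Sketch of the stateless "next set bit at or after start_idx" lookup that
 * replaces the me_client_index cursor; MAX_CLIENTS and the bitmap are
 * illustrative, find_next_bit() is the real helper used in the patch.
 */
#include <linux/bitmap.h>
#include <linux/errno.h>

#define MAX_CLIENTS 256

static int next_client(const unsigned long *clients_map, unsigned long start_idx)
{
	unsigned long addr;

	addr = find_next_bit(clients_map, MAX_CLIENTS, start_idx);
	if (addr == MAX_CLIENTS)
		return -ENODEV;		/* enumeration is complete */

	return addr;
}
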
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index 1e5cb1f704f8..3831a7ba2531 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c | |||
| @@ -76,7 +76,6 @@ static inline int mei_cl_hbm_equal(struct mei_cl *cl, | |||
| 76 | * @dev: mei device | 76 | * @dev: mei device |
| 77 | * @hdr: message header | 77 | * @hdr: message header |
| 78 | */ | 78 | */ |
| 79 | static inline | ||
| 80 | void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) | 79 | void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr) |
| 81 | { | 80 | { |
| 82 | /* | 81 | /* |
| @@ -194,10 +193,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb, | |||
| 194 | return -EMSGSIZE; | 193 | return -EMSGSIZE; |
| 195 | 194 | ||
| 196 | ret = mei_hbm_cl_disconnect_rsp(dev, cl); | 195 | ret = mei_hbm_cl_disconnect_rsp(dev, cl); |
| 197 | mei_cl_set_disconnected(cl); | 196 | list_move_tail(&cb->list, &cmpl_list->list); |
| 198 | mei_io_cb_free(cb); | ||
| 199 | mei_me_cl_put(cl->me_cl); | ||
| 200 | cl->me_cl = NULL; | ||
| 201 | 197 | ||
| 202 | return ret; | 198 | return ret; |
| 203 | } | 199 | } |
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index db78e6d99456..c9e01021eadf 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h | |||
| @@ -396,7 +396,6 @@ const char *mei_pg_state_str(enum mei_pg_state state); | |||
| 396 | * @me_clients : list of FW clients | 396 | * @me_clients : list of FW clients |
| 397 | * @me_clients_map : FW clients bit map | 397 | * @me_clients_map : FW clients bit map |
| 398 | * @host_clients_map : host clients id pool | 398 | * @host_clients_map : host clients id pool |
| 399 | * @me_client_index : last FW client index in enumeration | ||
| 400 | * | 399 | * |
| 401 | * @allow_fixed_address: allow user space to connect a fixed client | 400 | * @allow_fixed_address: allow user space to connect a fixed client |
| 402 | * @override_fixed_address: force allow fixed address behavior | 401 | * @override_fixed_address: force allow fixed address behavior |
| @@ -486,7 +485,6 @@ struct mei_device { | |||
| 486 | struct list_head me_clients; | 485 | struct list_head me_clients; |
| 487 | DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); | 486 | DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX); |
| 488 | DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); | 487 | DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX); |
| 489 | unsigned long me_client_index; | ||
| 490 | 488 | ||
| 491 | bool allow_fixed_address; | 489 | bool allow_fixed_address; |
| 492 | bool override_fixed_address; | 490 | bool override_fixed_address; |
| @@ -704,6 +702,8 @@ bool mei_hbuf_acquire(struct mei_device *dev); | |||
| 704 | 702 | ||
| 705 | bool mei_write_is_idle(struct mei_device *dev); | 703 | bool mei_write_is_idle(struct mei_device *dev); |
| 706 | 704 | ||
| 705 | void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr); | ||
| 706 | |||
| 707 | #if IS_ENABLED(CONFIG_DEBUG_FS) | 707 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
| 708 | int mei_dbgfs_register(struct mei_device *dev, const char *name); | 708 | int mei_dbgfs_register(struct mei_device *dev, const char *name); |
| 709 | void mei_dbgfs_deregister(struct mei_device *dev); | 709 | void mei_dbgfs_deregister(struct mei_device *dev); |
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig index 2e4f3ba75c8e..89e5917e1c33 100644 --- a/drivers/misc/mic/Kconfig +++ b/drivers/misc/mic/Kconfig | |||
| @@ -132,6 +132,7 @@ config VOP | |||
| 132 | tristate "VOP Driver" | 132 | tristate "VOP Driver" |
| 133 | depends on 64BIT && PCI && X86 && VOP_BUS | 133 | depends on 64BIT && PCI && X86 && VOP_BUS |
| 134 | select VHOST_RING | 134 | select VHOST_RING |
| 135 | select VIRTIO | ||
| 135 | help | 136 | help |
| 136 | This enables VOP (Virtio over PCIe) Driver support for the Intel | 137 | This enables VOP (Virtio over PCIe) Driver support for the Intel |
| 137 | Many Integrated Core (MIC) family of PCIe form factor coprocessor | 138 | Many Integrated Core (MIC) family of PCIe form factor coprocessor |
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 8c91c9950b54..e047efd83f57 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c | |||
| @@ -76,7 +76,7 @@ static void __mic_free_irq(struct vop_device *vpdev, | |||
| 76 | { | 76 | { |
| 77 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); | 77 | struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev); |
| 78 | 78 | ||
| 79 | return mic_free_irq(mdev, cookie, data); | 79 | mic_free_irq(mdev, cookie, data); |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | static void __mic_ack_interrupt(struct vop_device *vpdev, int num) | 82 | static void __mic_ack_interrupt(struct vop_device *vpdev, int num) |
| @@ -272,7 +272,7 @@ ___mic_free_irq(struct scif_hw_dev *scdev, | |||
| 272 | { | 272 | { |
| 273 | struct mic_device *mdev = scdev_to_mdev(scdev); | 273 | struct mic_device *mdev = scdev_to_mdev(scdev); |
| 274 | 274 | ||
| 275 | return mic_free_irq(mdev, cookie, data); | 275 | mic_free_irq(mdev, cookie, data); |
| 276 | } | 276 | } |
| 277 | 277 | ||
| 278 | static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num) | 278 | static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num) |
| @@ -362,7 +362,7 @@ _mic_request_threaded_irq(struct mbus_device *mbdev, | |||
| 362 | static void _mic_free_irq(struct mbus_device *mbdev, | 362 | static void _mic_free_irq(struct mbus_device *mbdev, |
| 363 | struct mic_irq *cookie, void *data) | 363 | struct mic_irq *cookie, void *data) |
| 364 | { | 364 | { |
| 365 | return mic_free_irq(mbdev_to_mdev(mbdev), cookie, data); | 365 | mic_free_irq(mbdev_to_mdev(mbdev), cookie, data); |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | static void _mic_ack_interrupt(struct mbus_device *mbdev, int num) | 368 | static void _mic_ack_interrupt(struct mbus_device *mbdev, int num) |
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c index 7f2c96f57066..cac3bcc308a7 100644 --- a/drivers/misc/mic/scif/scif_fence.c +++ b/drivers/misc/mic/scif/scif_fence.c | |||
| @@ -27,7 +27,8 @@ | |||
| 27 | void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg) | 27 | void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg) |
| 28 | { | 28 | { |
| 29 | struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; | 29 | struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0]; |
| 30 | int mark, err; | 30 | int mark = 0; |
| 31 | int err; | ||
| 31 | 32 | ||
| 32 | err = _scif_fence_mark(ep, &mark); | 33 | err = _scif_fence_mark(ep, &mark); |
| 33 | if (err) | 34 | if (err) |
diff --git a/drivers/misc/qcom-coincell.c b/drivers/misc/qcom-coincell.c index 7b4a2da487a5..829a61dbd65f 100644 --- a/drivers/misc/qcom-coincell.c +++ b/drivers/misc/qcom-coincell.c | |||
| @@ -94,7 +94,8 @@ static int qcom_coincell_probe(struct platform_device *pdev) | |||
| 94 | { | 94 | { |
| 95 | struct device_node *node = pdev->dev.of_node; | 95 | struct device_node *node = pdev->dev.of_node; |
| 96 | struct qcom_coincell chgr; | 96 | struct qcom_coincell chgr; |
| 97 | u32 rset, vset; | 97 | u32 rset = 0; |
| 98 | u32 vset = 0; | ||
| 98 | bool enable; | 99 | bool enable; |
| 99 | int rc; | 100 | int rc; |
| 100 | 101 | ||
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index 69cdabea9c03..f84b53d6ce50 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c | |||
| @@ -364,8 +364,8 @@ static int sram_probe(struct platform_device *pdev) | |||
| 364 | sram->virt_base = devm_ioremap(sram->dev, res->start, size); | 364 | sram->virt_base = devm_ioremap(sram->dev, res->start, size); |
| 365 | else | 365 | else |
| 366 | sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size); | 366 | sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size); |
| 367 | if (IS_ERR(sram->virt_base)) | 367 | if (!sram->virt_base) |
| 368 | return PTR_ERR(sram->virt_base); | 368 | return -ENOMEM; |
| 369 | 369 | ||
| 370 | sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY), | 370 | sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY), |
| 371 | NUMA_NO_NODE, NULL); | 371 | NUMA_NO_NODE, NULL); |
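
The sram.c fix above corrects the error check: devm_ioremap() and devm_ioremap_wc() return NULL on failure, not an ERR_PTR, so testing with IS_ERR() could never catch a failed mapping. A short sketch of the corrected pattern; everything except the devm_ioremap_wc() call itself is illustrative.

/*
 * Sketch of the corrected check: devm_ioremap()/devm_ioremap_wc() return
 * NULL on failure, not an ERR_PTR.  Everything except the ioremap call is
 * illustrative.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

static int example_map(struct device *dev, struct resource *res,
		       void __iomem **base)
{
	*base = devm_ioremap_wc(dev, res->start, resource_size(res));
	if (!*base)				/* NULL on failure, not IS_ERR() */
		return -ENOMEM;

	return 0;
}
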
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c index 71b64550b591..bf0d7708beac 100644 --- a/drivers/misc/ti-st/st_kim.c +++ b/drivers/misc/ti-st/st_kim.c | |||
| @@ -78,7 +78,6 @@ static void validate_firmware_response(struct kim_data_s *kim_gdata) | |||
| 78 | memcpy(kim_gdata->resp_buffer, | 78 | memcpy(kim_gdata->resp_buffer, |
| 79 | kim_gdata->rx_skb->data, | 79 | kim_gdata->rx_skb->data, |
| 80 | kim_gdata->rx_skb->len); | 80 | kim_gdata->rx_skb->len); |
| 81 | complete_all(&kim_gdata->kim_rcvd); | ||
| 82 | kim_gdata->rx_state = ST_W4_PACKET_TYPE; | 81 | kim_gdata->rx_state = ST_W4_PACKET_TYPE; |
| 83 | kim_gdata->rx_skb = NULL; | 82 | kim_gdata->rx_skb = NULL; |
| 84 | kim_gdata->rx_count = 0; | 83 | kim_gdata->rx_count = 0; |
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index ca52952d850f..3041d48e7155 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig | |||
| @@ -1,6 +1,5 @@ | |||
| 1 | menuconfig NVMEM | 1 | menuconfig NVMEM |
| 2 | tristate "NVMEM Support" | 2 | tristate "NVMEM Support" |
| 3 | select REGMAP | ||
| 4 | help | 3 | help |
| 5 | Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES... | 4 | Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES... |
| 6 | 5 | ||
| @@ -28,6 +27,7 @@ config NVMEM_IMX_OCOTP | |||
| 28 | config NVMEM_LPC18XX_EEPROM | 27 | config NVMEM_LPC18XX_EEPROM |
| 29 | tristate "NXP LPC18XX EEPROM Memory Support" | 28 | tristate "NXP LPC18XX EEPROM Memory Support" |
| 30 | depends on ARCH_LPC18XX || COMPILE_TEST | 29 | depends on ARCH_LPC18XX || COMPILE_TEST |
| 30 | depends on HAS_IOMEM | ||
| 31 | help | 31 | help |
| 32 | Say Y here to include support for NXP LPC18xx EEPROM memory found in | 32 | Say Y here to include support for NXP LPC18xx EEPROM memory found in |
| 33 | NXP LPC185x/3x and LPC435x/3x/2x/1x devices. | 33 | NXP LPC185x/3x and LPC435x/3x/2x/1x devices. |
| @@ -49,6 +49,7 @@ config NVMEM_MXS_OCOTP | |||
| 49 | config MTK_EFUSE | 49 | config MTK_EFUSE |
| 50 | tristate "Mediatek SoCs EFUSE support" | 50 | tristate "Mediatek SoCs EFUSE support" |
| 51 | depends on ARCH_MEDIATEK || COMPILE_TEST | 51 | depends on ARCH_MEDIATEK || COMPILE_TEST |
| 52 | depends on HAS_IOMEM | ||
| 52 | select REGMAP_MMIO | 53 | select REGMAP_MMIO |
| 53 | help | 54 | help |
| 54 | This is a driver to access hardware related data like sensor | 55 | This is a driver to access hardware related data like sensor |
| @@ -61,7 +62,6 @@ config QCOM_QFPROM | |||
| 61 | tristate "QCOM QFPROM Support" | 62 | tristate "QCOM QFPROM Support" |
| 62 | depends on ARCH_QCOM || COMPILE_TEST | 63 | depends on ARCH_QCOM || COMPILE_TEST |
| 63 | depends on HAS_IOMEM | 64 | depends on HAS_IOMEM |
| 64 | select REGMAP_MMIO | ||
| 65 | help | 65 | help |
| 66 | Say y here to enable QFPROM support. The QFPROM provides access | 66 | Say y here to enable QFPROM support. The QFPROM provides access |
| 67 | functions for QFPROM data to rest of the drivers via nvmem interface. | 67 | functions for QFPROM data to rest of the drivers via nvmem interface. |
| @@ -83,7 +83,6 @@ config ROCKCHIP_EFUSE | |||
| 83 | config NVMEM_SUNXI_SID | 83 | config NVMEM_SUNXI_SID |
| 84 | tristate "Allwinner SoCs SID support" | 84 | tristate "Allwinner SoCs SID support" |
| 85 | depends on ARCH_SUNXI | 85 | depends on ARCH_SUNXI |
| 86 | select REGMAP_MMIO | ||
| 87 | help | 86 | help |
| 88 | This is a driver for the 'security ID' available on various Allwinner | 87 | This is a driver for the 'security ID' available on various Allwinner |
| 89 | devices. | 88 | devices. |
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 0de3d878c439..bb4ea123547f 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c | |||
| @@ -23,12 +23,10 @@ | |||
| 23 | #include <linux/nvmem-consumer.h> | 23 | #include <linux/nvmem-consumer.h> |
| 24 | #include <linux/nvmem-provider.h> | 24 | #include <linux/nvmem-provider.h> |
| 25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
| 26 | #include <linux/regmap.h> | ||
| 27 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 28 | 27 | ||
| 29 | struct nvmem_device { | 28 | struct nvmem_device { |
| 30 | const char *name; | 29 | const char *name; |
| 31 | struct regmap *regmap; | ||
| 32 | struct module *owner; | 30 | struct module *owner; |
| 33 | struct device dev; | 31 | struct device dev; |
| 34 | int stride; | 32 | int stride; |
| @@ -41,6 +39,9 @@ struct nvmem_device { | |||
| 41 | int flags; | 39 | int flags; |
| 42 | struct bin_attribute eeprom; | 40 | struct bin_attribute eeprom; |
| 43 | struct device *base_dev; | 41 | struct device *base_dev; |
| 42 | nvmem_reg_read_t reg_read; | ||
| 43 | nvmem_reg_write_t reg_write; | ||
| 44 | void *priv; | ||
| 44 | }; | 45 | }; |
| 45 | 46 | ||
| 46 | #define FLAG_COMPAT BIT(0) | 47 | #define FLAG_COMPAT BIT(0) |
| @@ -66,6 +67,23 @@ static struct lock_class_key eeprom_lock_key; | |||
| 66 | #endif | 67 | #endif |
| 67 | 68 | ||
| 68 | #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) | 69 | #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) |
| 70 | static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, | ||
| 71 | void *val, size_t bytes) | ||
| 72 | { | ||
| 73 | if (nvmem->reg_read) | ||
| 74 | return nvmem->reg_read(nvmem->priv, offset, val, bytes); | ||
| 75 | |||
| 76 | return -EINVAL; | ||
| 77 | } | ||
| 78 | |||
| 79 | static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, | ||
| 80 | void *val, size_t bytes) | ||
| 81 | { | ||
| 82 | if (nvmem->reg_write) | ||
| 83 | return nvmem->reg_write(nvmem->priv, offset, val, bytes); | ||
| 84 | |||
| 85 | return -EINVAL; | ||
| 86 | } | ||
| 69 | 87 | ||
| 70 | static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, | 88 | static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, |
| 71 | struct bin_attribute *attr, | 89 | struct bin_attribute *attr, |
| @@ -93,7 +111,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, | |||
| 93 | 111 | ||
| 94 | count = round_down(count, nvmem->word_size); | 112 | count = round_down(count, nvmem->word_size); |
| 95 | 113 | ||
| 96 | rc = regmap_raw_read(nvmem->regmap, pos, buf, count); | 114 | rc = nvmem_reg_read(nvmem, pos, buf, count); |
| 97 | 115 | ||
| 98 | if (IS_ERR_VALUE(rc)) | 116 | if (IS_ERR_VALUE(rc)) |
| 99 | return rc; | 117 | return rc; |
| @@ -127,7 +145,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, | |||
| 127 | 145 | ||
| 128 | count = round_down(count, nvmem->word_size); | 146 | count = round_down(count, nvmem->word_size); |
| 129 | 147 | ||
| 130 | rc = regmap_raw_write(nvmem->regmap, pos, buf, count); | 148 | rc = nvmem_reg_write(nvmem, pos, buf, count); |
| 131 | 149 | ||
| 132 | if (IS_ERR_VALUE(rc)) | 150 | if (IS_ERR_VALUE(rc)) |
| 133 | return rc; | 151 | return rc; |
| @@ -421,18 +439,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) | |||
| 421 | { | 439 | { |
| 422 | struct nvmem_device *nvmem; | 440 | struct nvmem_device *nvmem; |
| 423 | struct device_node *np; | 441 | struct device_node *np; |
| 424 | struct regmap *rm; | ||
| 425 | int rval; | 442 | int rval; |
| 426 | 443 | ||
| 427 | if (!config->dev) | 444 | if (!config->dev) |
| 428 | return ERR_PTR(-EINVAL); | 445 | return ERR_PTR(-EINVAL); |
| 429 | 446 | ||
| 430 | rm = dev_get_regmap(config->dev, NULL); | ||
| 431 | if (!rm) { | ||
| 432 | dev_err(config->dev, "Regmap not found\n"); | ||
| 433 | return ERR_PTR(-EINVAL); | ||
| 434 | } | ||
| 435 | |||
| 436 | nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); | 447 | nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL); |
| 437 | if (!nvmem) | 448 | if (!nvmem) |
| 438 | return ERR_PTR(-ENOMEM); | 449 | return ERR_PTR(-ENOMEM); |
| @@ -444,14 +455,16 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) | |||
| 444 | } | 455 | } |
| 445 | 456 | ||
| 446 | nvmem->id = rval; | 457 | nvmem->id = rval; |
| 447 | nvmem->regmap = rm; | ||
| 448 | nvmem->owner = config->owner; | 458 | nvmem->owner = config->owner; |
| 449 | nvmem->stride = regmap_get_reg_stride(rm); | 459 | nvmem->stride = config->stride; |
| 450 | nvmem->word_size = regmap_get_val_bytes(rm); | 460 | nvmem->word_size = config->word_size; |
| 451 | nvmem->size = regmap_get_max_register(rm) + nvmem->stride; | 461 | nvmem->size = config->size; |
| 452 | nvmem->dev.type = &nvmem_provider_type; | 462 | nvmem->dev.type = &nvmem_provider_type; |
| 453 | nvmem->dev.bus = &nvmem_bus_type; | 463 | nvmem->dev.bus = &nvmem_bus_type; |
| 454 | nvmem->dev.parent = config->dev; | 464 | nvmem->dev.parent = config->dev; |
| 465 | nvmem->priv = config->priv; | ||
| 466 | nvmem->reg_read = config->reg_read; | ||
| 467 | nvmem->reg_write = config->reg_write; | ||
| 455 | np = config->dev->of_node; | 468 | np = config->dev->of_node; |
| 456 | nvmem->dev.of_node = np; | 469 | nvmem->dev.of_node = np; |
| 457 | dev_set_name(&nvmem->dev, "%s%d", | 470 | dev_set_name(&nvmem->dev, "%s%d", |
| @@ -948,7 +961,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem, | |||
| 948 | { | 961 | { |
| 949 | int rc; | 962 | int rc; |
| 950 | 963 | ||
| 951 | rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes); | 964 | rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes); |
| 952 | 965 | ||
| 953 | if (IS_ERR_VALUE(rc)) | 966 | if (IS_ERR_VALUE(rc)) |
| 954 | return rc; | 967 | return rc; |
| @@ -977,7 +990,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) | |||
| 977 | u8 *buf; | 990 | u8 *buf; |
| 978 | int rc; | 991 | int rc; |
| 979 | 992 | ||
| 980 | if (!nvmem || !nvmem->regmap) | 993 | if (!nvmem) |
| 981 | return ERR_PTR(-EINVAL); | 994 | return ERR_PTR(-EINVAL); |
| 982 | 995 | ||
| 983 | buf = kzalloc(cell->bytes, GFP_KERNEL); | 996 | buf = kzalloc(cell->bytes, GFP_KERNEL); |
| @@ -1014,7 +1027,7 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, | |||
| 1014 | *b <<= bit_offset; | 1027 | *b <<= bit_offset; |
| 1015 | 1028 | ||
| 1016 | /* setup the first byte with lsb bits from nvmem */ | 1029 | /* setup the first byte with lsb bits from nvmem */ |
| 1017 | rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1); | 1030 | rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); |
| 1018 | *b++ |= GENMASK(bit_offset - 1, 0) & v; | 1031 | *b++ |= GENMASK(bit_offset - 1, 0) & v; |
| 1019 | 1032 | ||
| 1020 | /* setup rest of the byte if any */ | 1033 | /* setup rest of the byte if any */ |
| @@ -1031,7 +1044,7 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, | |||
| 1031 | /* if it's not end on byte boundary */ | 1044 | /* if it's not end on byte boundary */ |
| 1032 | if ((nbits + bit_offset) % BITS_PER_BYTE) { | 1045 | if ((nbits + bit_offset) % BITS_PER_BYTE) { |
| 1033 | /* setup the last byte with msb bits from nvmem */ | 1046 | /* setup the last byte with msb bits from nvmem */ |
| 1034 | rc = regmap_raw_read(nvmem->regmap, | 1047 | rc = nvmem_reg_read(nvmem, |
| 1035 | cell->offset + cell->bytes - 1, &v, 1); | 1048 | cell->offset + cell->bytes - 1, &v, 1); |
| 1036 | *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; | 1049 | *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; |
| 1037 | 1050 | ||
| @@ -1054,7 +1067,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) | |||
| 1054 | struct nvmem_device *nvmem = cell->nvmem; | 1067 | struct nvmem_device *nvmem = cell->nvmem; |
| 1055 | int rc; | 1068 | int rc; |
| 1056 | 1069 | ||
| 1057 | if (!nvmem || !nvmem->regmap || nvmem->read_only || | 1070 | if (!nvmem || nvmem->read_only || |
| 1058 | (cell->bit_offset == 0 && len != cell->bytes)) | 1071 | (cell->bit_offset == 0 && len != cell->bytes)) |
| 1059 | return -EINVAL; | 1072 | return -EINVAL; |
| 1060 | 1073 | ||
| @@ -1064,7 +1077,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) | |||
| 1064 | return PTR_ERR(buf); | 1077 | return PTR_ERR(buf); |
| 1065 | } | 1078 | } |
| 1066 | 1079 | ||
| 1067 | rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); | 1080 | rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes); |
| 1068 | 1081 | ||
| 1069 | /* free the tmp buffer */ | 1082 | /* free the tmp buffer */ |
| 1070 | if (cell->bit_offset || cell->nbits) | 1083 | if (cell->bit_offset || cell->nbits) |
| @@ -1094,7 +1107,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, | |||
| 1094 | int rc; | 1107 | int rc; |
| 1095 | ssize_t len; | 1108 | ssize_t len; |
| 1096 | 1109 | ||
| 1097 | if (!nvmem || !nvmem->regmap) | 1110 | if (!nvmem) |
| 1098 | return -EINVAL; | 1111 | return -EINVAL; |
| 1099 | 1112 | ||
| 1100 | rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); | 1113 | rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); |
| @@ -1124,7 +1137,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem, | |||
| 1124 | struct nvmem_cell cell; | 1137 | struct nvmem_cell cell; |
| 1125 | int rc; | 1138 | int rc; |
| 1126 | 1139 | ||
| 1127 | if (!nvmem || !nvmem->regmap) | 1140 | if (!nvmem) |
| 1128 | return -EINVAL; | 1141 | return -EINVAL; |
| 1129 | 1142 | ||
| 1130 | rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); | 1143 | rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell); |
| @@ -1152,10 +1165,10 @@ int nvmem_device_read(struct nvmem_device *nvmem, | |||
| 1152 | { | 1165 | { |
| 1153 | int rc; | 1166 | int rc; |
| 1154 | 1167 | ||
| 1155 | if (!nvmem || !nvmem->regmap) | 1168 | if (!nvmem) |
| 1156 | return -EINVAL; | 1169 | return -EINVAL; |
| 1157 | 1170 | ||
| 1158 | rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes); | 1171 | rc = nvmem_reg_read(nvmem, offset, buf, bytes); |
| 1159 | 1172 | ||
| 1160 | if (IS_ERR_VALUE(rc)) | 1173 | if (IS_ERR_VALUE(rc)) |
| 1161 | return rc; | 1174 | return rc; |
| @@ -1180,10 +1193,10 @@ int nvmem_device_write(struct nvmem_device *nvmem, | |||
| 1180 | { | 1193 | { |
| 1181 | int rc; | 1194 | int rc; |
| 1182 | 1195 | ||
| 1183 | if (!nvmem || !nvmem->regmap) | 1196 | if (!nvmem) |
| 1184 | return -EINVAL; | 1197 | return -EINVAL; |
| 1185 | 1198 | ||
| 1186 | rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes); | 1199 | rc = nvmem_reg_write(nvmem, offset, buf, bytes); |
| 1187 | 1200 | ||
| 1188 | if (IS_ERR_VALUE(rc)) | 1201 | if (IS_ERR_VALUE(rc)) |
| 1189 | return rc; | 1202 | return rc; |
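The core change above removes the hard dependency on regmap: struct nvmem_config now carries reg_read/reg_write callbacks plus the size, word_size and stride that were previously derived from the regmap, and nvmem_register() copies them (together with priv) into the nvmem device. As a rough sketch of the resulting provider boilerplate — the foo_* names and MMIO layout are purely illustrative, not part of this patch — a read-only provider now looks roughly like this:

#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>

struct foo_priv {
	void __iomem *base;	/* fuse/OTP window mapped at probe time */
};

/* Matches the callback signature the core now calls: (priv, offset, val, bytes). */
static int foo_nvmem_read(void *context, unsigned int offset,
			  void *val, size_t bytes)
{
	struct foo_priv *priv = context;
	u32 *buf = val;

	while (bytes >= 4) {
		*buf++ = readl(priv->base + offset);
		offset += 4;
		bytes -= 4;
	}

	return 0;
}

static struct nvmem_config foo_nvmem_config = {
	.name		= "foo-otp",
	.owner		= THIS_MODULE,
	.read_only	= true,
	.word_size	= 4,
	.stride		= 4,
	.reg_read	= foo_nvmem_read,
};

At probe time the driver only has to fill in .dev, .size and .priv before calling nvmem_register(&foo_nvmem_config), which is the pattern the converted drivers below follow.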
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index d7796eb5421f..75e66ef5b0ec 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include <linux/of.h> | 22 | #include <linux/of.h> |
| 23 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
| 24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
| 25 | #include <linux/regmap.h> | ||
| 26 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 27 | 26 | ||
| 28 | struct ocotp_priv { | 27 | struct ocotp_priv { |
| @@ -31,59 +30,34 @@ struct ocotp_priv { | |||
| 31 | unsigned int nregs; | 30 | unsigned int nregs; |
| 32 | }; | 31 | }; |
| 33 | 32 | ||
| 34 | static int imx_ocotp_read(void *context, const void *reg, size_t reg_size, | 33 | static int imx_ocotp_read(void *context, unsigned int offset, |
| 35 | void *val, size_t val_size) | 34 | void *val, size_t bytes) |
| 36 | { | 35 | { |
| 37 | struct ocotp_priv *priv = context; | 36 | struct ocotp_priv *priv = context; |
| 38 | unsigned int offset = *(u32 *)reg; | ||
| 39 | unsigned int count; | 37 | unsigned int count; |
| 38 | u32 *buf = val; | ||
| 40 | int i; | 39 | int i; |
| 41 | u32 index; | 40 | u32 index; |
| 42 | 41 | ||
| 43 | index = offset >> 2; | 42 | index = offset >> 2; |
| 44 | count = val_size >> 2; | 43 | count = bytes >> 2; |
| 45 | 44 | ||
| 46 | if (count > (priv->nregs - index)) | 45 | if (count > (priv->nregs - index)) |
| 47 | count = priv->nregs - index; | 46 | count = priv->nregs - index; |
| 48 | 47 | ||
| 49 | for (i = index; i < (index + count); i++) { | 48 | for (i = index; i < (index + count); i++) |
| 50 | *(u32 *)val = readl(priv->base + 0x400 + i * 0x10); | 49 | *buf++ = readl(priv->base + 0x400 + i * 0x10); |
| 51 | val += 4; | ||
| 52 | } | ||
| 53 | 50 | ||
| 54 | return 0; | 51 | return 0; |
| 55 | } | 52 | } |
| 56 | 53 | ||
| 57 | static int imx_ocotp_write(void *context, const void *data, size_t count) | ||
| 58 | { | ||
| 59 | /* Not implemented */ | ||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | |||
| 63 | static struct regmap_bus imx_ocotp_bus = { | ||
| 64 | .read = imx_ocotp_read, | ||
| 65 | .write = imx_ocotp_write, | ||
| 66 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 67 | .val_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 68 | }; | ||
| 69 | |||
| 70 | static bool imx_ocotp_writeable_reg(struct device *dev, unsigned int reg) | ||
| 71 | { | ||
| 72 | return false; | ||
| 73 | } | ||
| 74 | |||
| 75 | static struct regmap_config imx_ocotp_regmap_config = { | ||
| 76 | .reg_bits = 32, | ||
| 77 | .val_bits = 32, | ||
| 78 | .reg_stride = 4, | ||
| 79 | .writeable_reg = imx_ocotp_writeable_reg, | ||
| 80 | .name = "imx-ocotp", | ||
| 81 | }; | ||
| 82 | |||
| 83 | static struct nvmem_config imx_ocotp_nvmem_config = { | 54 | static struct nvmem_config imx_ocotp_nvmem_config = { |
| 84 | .name = "imx-ocotp", | 55 | .name = "imx-ocotp", |
| 85 | .read_only = true, | 56 | .read_only = true, |
| 57 | .word_size = 4, | ||
| 58 | .stride = 4, | ||
| 86 | .owner = THIS_MODULE, | 59 | .owner = THIS_MODULE, |
| 60 | .reg_read = imx_ocotp_read, | ||
| 87 | }; | 61 | }; |
| 88 | 62 | ||
| 89 | static const struct of_device_id imx_ocotp_dt_ids[] = { | 63 | static const struct of_device_id imx_ocotp_dt_ids[] = { |
| @@ -99,7 +73,6 @@ static int imx_ocotp_probe(struct platform_device *pdev) | |||
| 99 | const struct of_device_id *of_id; | 73 | const struct of_device_id *of_id; |
| 100 | struct device *dev = &pdev->dev; | 74 | struct device *dev = &pdev->dev; |
| 101 | struct resource *res; | 75 | struct resource *res; |
| 102 | struct regmap *regmap; | ||
| 103 | struct ocotp_priv *priv; | 76 | struct ocotp_priv *priv; |
| 104 | struct nvmem_device *nvmem; | 77 | struct nvmem_device *nvmem; |
| 105 | 78 | ||
| @@ -114,15 +87,9 @@ static int imx_ocotp_probe(struct platform_device *pdev) | |||
| 114 | 87 | ||
| 115 | of_id = of_match_device(imx_ocotp_dt_ids, dev); | 88 | of_id = of_match_device(imx_ocotp_dt_ids, dev); |
| 116 | priv->nregs = (unsigned int)of_id->data; | 89 | priv->nregs = (unsigned int)of_id->data; |
| 117 | imx_ocotp_regmap_config.max_register = 4 * priv->nregs - 4; | 90 | imx_ocotp_nvmem_config.size = 4 * priv->nregs; |
| 118 | |||
| 119 | regmap = devm_regmap_init(dev, &imx_ocotp_bus, priv, | ||
| 120 | &imx_ocotp_regmap_config); | ||
| 121 | if (IS_ERR(regmap)) { | ||
| 122 | dev_err(dev, "regmap init failed\n"); | ||
| 123 | return PTR_ERR(regmap); | ||
| 124 | } | ||
| 125 | imx_ocotp_nvmem_config.dev = dev; | 91 | imx_ocotp_nvmem_config.dev = dev; |
| 92 | imx_ocotp_nvmem_config.priv = priv; | ||
| 126 | nvmem = nvmem_register(&imx_ocotp_nvmem_config); | 93 | nvmem = nvmem_register(&imx_ocotp_nvmem_config); |
| 127 | if (IS_ERR(nvmem)) | 94 | if (IS_ERR(nvmem)) |
| 128 | return PTR_ERR(nvmem); | 95 | return PTR_ERR(nvmem); |
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c index 878fce789341..c81ae4c6da74 100644 --- a/drivers/nvmem/lpc18xx_eeprom.c +++ b/drivers/nvmem/lpc18xx_eeprom.c | |||
| @@ -16,7 +16,6 @@ | |||
| 16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 17 | #include <linux/nvmem-provider.h> | 17 | #include <linux/nvmem-provider.h> |
| 18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
| 19 | #include <linux/regmap.h> | ||
| 20 | #include <linux/reset.h> | 19 | #include <linux/reset.h> |
| 21 | 20 | ||
| 22 | /* Registers */ | 21 | /* Registers */ |
| @@ -51,12 +50,7 @@ struct lpc18xx_eeprom_dev { | |||
| 51 | struct nvmem_device *nvmem; | 50 | struct nvmem_device *nvmem; |
| 52 | unsigned reg_bytes; | 51 | unsigned reg_bytes; |
| 53 | unsigned val_bytes; | 52 | unsigned val_bytes; |
| 54 | }; | 53 | int size; |
| 55 | |||
| 56 | static struct regmap_config lpc18xx_regmap_config = { | ||
| 57 | .reg_bits = 32, | ||
| 58 | .reg_stride = 4, | ||
| 59 | .val_bits = 32, | ||
| 60 | }; | 54 | }; |
| 61 | 55 | ||
| 62 | static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom, | 56 | static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom, |
| @@ -95,30 +89,35 @@ static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom) | |||
| 95 | return -ETIMEDOUT; | 89 | return -ETIMEDOUT; |
| 96 | } | 90 | } |
| 97 | 91 | ||
| 98 | static int lpc18xx_eeprom_gather_write(void *context, const void *reg, | 92 | static int lpc18xx_eeprom_gather_write(void *context, unsigned int reg, |
| 99 | size_t reg_size, const void *val, | 93 | void *val, size_t bytes) |
| 100 | size_t val_size) | ||
| 101 | { | 94 | { |
| 102 | struct lpc18xx_eeprom_dev *eeprom = context; | 95 | struct lpc18xx_eeprom_dev *eeprom = context; |
| 103 | unsigned int offset = *(u32 *)reg; | 96 | unsigned int offset = reg; |
| 104 | int ret; | 97 | int ret; |
| 105 | 98 | ||
| 106 | if (offset % lpc18xx_regmap_config.reg_stride) | 99 | /* |
| 100 | * The last page contains the EEPROM initialization data and is not | ||
| 101 | * writable. | ||
| 102 | */ | ||
| 103 | if ((reg > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE) || | ||
| 104 | (reg + bytes > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE)) | ||
| 107 | return -EINVAL; | 105 | return -EINVAL; |
| 108 | 106 | ||
| 107 | |||
| 109 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | 108 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, |
| 110 | LPC18XX_EEPROM_PWRDWN_NO); | 109 | LPC18XX_EEPROM_PWRDWN_NO); |
| 111 | 110 | ||
| 112 | /* Wait 100 us while the EEPROM wakes up */ | 111 | /* Wait 100 us while the EEPROM wakes up */ |
| 113 | usleep_range(100, 200); | 112 | usleep_range(100, 200); |
| 114 | 113 | ||
| 115 | while (val_size) { | 114 | while (bytes) { |
| 116 | writel(*(u32 *)val, eeprom->mem_base + offset); | 115 | writel(*(u32 *)val, eeprom->mem_base + offset); |
| 117 | ret = lpc18xx_eeprom_busywait_until_prog(eeprom); | 116 | ret = lpc18xx_eeprom_busywait_until_prog(eeprom); |
| 118 | if (ret < 0) | 117 | if (ret < 0) |
| 119 | return ret; | 118 | return ret; |
| 120 | 119 | ||
| 121 | val_size -= eeprom->val_bytes; | 120 | bytes -= eeprom->val_bytes; |
| 122 | val += eeprom->val_bytes; | 121 | val += eeprom->val_bytes; |
| 123 | offset += eeprom->val_bytes; | 122 | offset += eeprom->val_bytes; |
| 124 | } | 123 | } |
| @@ -129,23 +128,10 @@ static int lpc18xx_eeprom_gather_write(void *context, const void *reg, | |||
| 129 | return 0; | 128 | return 0; |
| 130 | } | 129 | } |
| 131 | 130 | ||
| 132 | static int lpc18xx_eeprom_write(void *context, const void *data, size_t count) | 131 | static int lpc18xx_eeprom_read(void *context, unsigned int offset, |
| 132 | void *val, size_t bytes) | ||
| 133 | { | 133 | { |
| 134 | struct lpc18xx_eeprom_dev *eeprom = context; | 134 | struct lpc18xx_eeprom_dev *eeprom = context; |
| 135 | unsigned int offset = eeprom->reg_bytes; | ||
| 136 | |||
| 137 | if (count <= offset) | ||
| 138 | return -EINVAL; | ||
| 139 | |||
| 140 | return lpc18xx_eeprom_gather_write(context, data, eeprom->reg_bytes, | ||
| 141 | data + offset, count - offset); | ||
| 142 | } | ||
| 143 | |||
| 144 | static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size, | ||
| 145 | void *val, size_t val_size) | ||
| 146 | { | ||
| 147 | struct lpc18xx_eeprom_dev *eeprom = context; | ||
| 148 | unsigned int offset = *(u32 *)reg; | ||
| 149 | 135 | ||
| 150 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | 136 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, |
| 151 | LPC18XX_EEPROM_PWRDWN_NO); | 137 | LPC18XX_EEPROM_PWRDWN_NO); |
| @@ -153,9 +139,9 @@ static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size, | |||
| 153 | /* Wait 100 us while the EEPROM wakes up */ | 139 | /* Wait 100 us while the EEPROM wakes up */ |
| 154 | usleep_range(100, 200); | 140 | usleep_range(100, 200); |
| 155 | 141 | ||
| 156 | while (val_size) { | 142 | while (bytes) { |
| 157 | *(u32 *)val = readl(eeprom->mem_base + offset); | 143 | *(u32 *)val = readl(eeprom->mem_base + offset); |
| 158 | val_size -= eeprom->val_bytes; | 144 | bytes -= eeprom->val_bytes; |
| 159 | val += eeprom->val_bytes; | 145 | val += eeprom->val_bytes; |
| 160 | offset += eeprom->val_bytes; | 146 | offset += eeprom->val_bytes; |
| 161 | } | 147 | } |
| @@ -166,31 +152,13 @@ static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size, | |||
| 166 | return 0; | 152 | return 0; |
| 167 | } | 153 | } |
| 168 | 154 | ||
| 169 | static struct regmap_bus lpc18xx_eeprom_bus = { | ||
| 170 | .write = lpc18xx_eeprom_write, | ||
| 171 | .gather_write = lpc18xx_eeprom_gather_write, | ||
| 172 | .read = lpc18xx_eeprom_read, | ||
| 173 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 174 | .val_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 175 | }; | ||
| 176 | |||
| 177 | static bool lpc18xx_eeprom_writeable_reg(struct device *dev, unsigned int reg) | ||
| 178 | { | ||
| 179 | /* | ||
| 180 | * The last page contains the EEPROM initialization data and is not | ||
| 181 | * writable. | ||
| 182 | */ | ||
| 183 | return reg <= lpc18xx_regmap_config.max_register - | ||
| 184 | LPC18XX_EEPROM_PAGE_SIZE; | ||
| 185 | } | ||
| 186 | |||
| 187 | static bool lpc18xx_eeprom_readable_reg(struct device *dev, unsigned int reg) | ||
| 188 | { | ||
| 189 | return reg <= lpc18xx_regmap_config.max_register; | ||
| 190 | } | ||
| 191 | 155 | ||
| 192 | static struct nvmem_config lpc18xx_nvmem_config = { | 156 | static struct nvmem_config lpc18xx_nvmem_config = { |
| 193 | .name = "lpc18xx-eeprom", | 157 | .name = "lpc18xx-eeprom", |
| 158 | .stride = 4, | ||
| 159 | .word_size = 4, | ||
| 160 | .reg_read = lpc18xx_eeprom_read, | ||
| 161 | .reg_write = lpc18xx_eeprom_gather_write, | ||
| 194 | .owner = THIS_MODULE, | 162 | .owner = THIS_MODULE, |
| 195 | }; | 163 | }; |
| 196 | 164 | ||
| @@ -200,7 +168,6 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev) | |||
| 200 | struct device *dev = &pdev->dev; | 168 | struct device *dev = &pdev->dev; |
| 201 | struct reset_control *rst; | 169 | struct reset_control *rst; |
| 202 | unsigned long clk_rate; | 170 | unsigned long clk_rate; |
| 203 | struct regmap *regmap; | ||
| 204 | struct resource *res; | 171 | struct resource *res; |
| 205 | int ret; | 172 | int ret; |
| 206 | 173 | ||
| @@ -243,8 +210,8 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev) | |||
| 243 | goto err_clk; | 210 | goto err_clk; |
| 244 | } | 211 | } |
| 245 | 212 | ||
| 246 | eeprom->val_bytes = lpc18xx_regmap_config.val_bits / BITS_PER_BYTE; | 213 | eeprom->val_bytes = 4; |
| 247 | eeprom->reg_bytes = lpc18xx_regmap_config.reg_bits / BITS_PER_BYTE; | 214 | eeprom->reg_bytes = 4; |
| 248 | 215 | ||
| 249 | /* | 216 | /* |
| 250 | * Clock rate is generated by dividing the system bus clock by the | 217 | * Clock rate is generated by dividing the system bus clock by the |
| @@ -264,19 +231,10 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev) | |||
| 264 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, | 231 | lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN, |
| 265 | LPC18XX_EEPROM_PWRDWN_YES); | 232 | LPC18XX_EEPROM_PWRDWN_YES); |
| 266 | 233 | ||
| 267 | lpc18xx_regmap_config.max_register = resource_size(res) - 1; | 234 | eeprom->size = resource_size(res); |
| 268 | lpc18xx_regmap_config.writeable_reg = lpc18xx_eeprom_writeable_reg; | 235 | lpc18xx_nvmem_config.size = resource_size(res); |
| 269 | lpc18xx_regmap_config.readable_reg = lpc18xx_eeprom_readable_reg; | ||
| 270 | |||
| 271 | regmap = devm_regmap_init(dev, &lpc18xx_eeprom_bus, eeprom, | ||
| 272 | &lpc18xx_regmap_config); | ||
| 273 | if (IS_ERR(regmap)) { | ||
| 274 | dev_err(dev, "regmap init failed: %ld\n", PTR_ERR(regmap)); | ||
| 275 | ret = PTR_ERR(regmap); | ||
| 276 | goto err_clk; | ||
| 277 | } | ||
| 278 | |||
| 279 | lpc18xx_nvmem_config.dev = dev; | 236 | lpc18xx_nvmem_config.dev = dev; |
| 237 | lpc18xx_nvmem_config.priv = eeprom; | ||
| 280 | 238 | ||
| 281 | eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config); | 239 | eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config); |
| 282 | if (IS_ERR(eeprom->nvmem)) { | 240 | if (IS_ERR(eeprom->nvmem)) { |
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index 3829e5fbf8c3..b5305f08b184 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c | |||
| @@ -13,21 +13,35 @@ | |||
| 13 | 13 | ||
| 14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | #include <linux/io.h> | ||
| 16 | #include <linux/nvmem-provider.h> | 17 | #include <linux/nvmem-provider.h> |
| 17 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
| 18 | #include <linux/regmap.h> | ||
| 19 | 19 | ||
| 20 | static struct regmap_config qfprom_regmap_config = { | 20 | static int qfprom_reg_read(void *context, |
| 21 | .reg_bits = 32, | 21 | unsigned int reg, void *_val, size_t bytes) |
| 22 | .val_bits = 8, | 22 | { |
| 23 | .reg_stride = 1, | 23 | void __iomem *base = context; |
| 24 | .val_format_endian = REGMAP_ENDIAN_LITTLE, | 24 | u32 *val = _val; |
| 25 | }; | 25 | int i = 0, words = bytes / 4; |
| 26 | 26 | ||
| 27 | static struct nvmem_config econfig = { | 27 | while (words--) |
| 28 | .name = "qfprom", | 28 | *val++ = readl(base + reg + (i++ * 4)); |
| 29 | .owner = THIS_MODULE, | 29 | |
| 30 | }; | 30 | return 0; |
| 31 | } | ||
| 32 | |||
| 33 | static int qfprom_reg_write(void *context, | ||
| 34 | unsigned int reg, void *_val, size_t bytes) | ||
| 35 | { | ||
| 36 | void __iomem *base = context; | ||
| 37 | u32 *val = _val; | ||
| 38 | int i = 0, words = bytes / 4; | ||
| 39 | |||
| 40 | while (words--) | ||
| 41 | writel(*val++, base + reg + (i++ * 4)); | ||
| 42 | |||
| 43 | return 0; | ||
| 44 | } | ||
| 31 | 45 | ||
| 32 | static int qfprom_remove(struct platform_device *pdev) | 46 | static int qfprom_remove(struct platform_device *pdev) |
| 33 | { | 47 | { |
| @@ -36,12 +50,20 @@ static int qfprom_remove(struct platform_device *pdev) | |||
| 36 | return nvmem_unregister(nvmem); | 50 | return nvmem_unregister(nvmem); |
| 37 | } | 51 | } |
| 38 | 52 | ||
| 53 | static struct nvmem_config econfig = { | ||
| 54 | .name = "qfprom", | ||
| 55 | .owner = THIS_MODULE, | ||
| 56 | .stride = 4, | ||
| 57 | .word_size = 1, | ||
| 58 | .reg_read = qfprom_reg_read, | ||
| 59 | .reg_write = qfprom_reg_write, | ||
| 60 | }; | ||
| 61 | |||
| 39 | static int qfprom_probe(struct platform_device *pdev) | 62 | static int qfprom_probe(struct platform_device *pdev) |
| 40 | { | 63 | { |
| 41 | struct device *dev = &pdev->dev; | 64 | struct device *dev = &pdev->dev; |
| 42 | struct resource *res; | 65 | struct resource *res; |
| 43 | struct nvmem_device *nvmem; | 66 | struct nvmem_device *nvmem; |
| 44 | struct regmap *regmap; | ||
| 45 | void __iomem *base; | 67 | void __iomem *base; |
| 46 | 68 | ||
| 47 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 69 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| @@ -49,14 +71,10 @@ static int qfprom_probe(struct platform_device *pdev) | |||
| 49 | if (IS_ERR(base)) | 71 | if (IS_ERR(base)) |
| 50 | return PTR_ERR(base); | 72 | return PTR_ERR(base); |
| 51 | 73 | ||
| 52 | qfprom_regmap_config.max_register = resource_size(res) - 1; | 74 | econfig.size = resource_size(res); |
| 53 | |||
| 54 | regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config); | ||
| 55 | if (IS_ERR(regmap)) { | ||
| 56 | dev_err(dev, "regmap init failed\n"); | ||
| 57 | return PTR_ERR(regmap); | ||
| 58 | } | ||
| 59 | econfig.dev = dev; | 75 | econfig.dev = dev; |
| 76 | econfig.priv = base; | ||
| 77 | |||
| 60 | nvmem = nvmem_register(&econfig); | 78 | nvmem = nvmem_register(&econfig); |
| 61 | if (IS_ERR(nvmem)) | 79 | if (IS_ERR(nvmem)) |
| 62 | return PTR_ERR(nvmem); | 80 | return PTR_ERR(nvmem); |
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c index a009795111e9..4d3f391f0a0b 100644 --- a/drivers/nvmem/rockchip-efuse.c +++ b/drivers/nvmem/rockchip-efuse.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
| 24 | #include <linux/of.h> | 24 | #include <linux/of.h> |
| 25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/regmap.h> | ||
| 27 | 26 | ||
| 28 | #define EFUSE_A_SHIFT 6 | 27 | #define EFUSE_A_SHIFT 6 |
| 29 | #define EFUSE_A_MASK 0x3ff | 28 | #define EFUSE_A_MASK 0x3ff |
| @@ -41,17 +40,9 @@ struct rockchip_efuse_chip { | |||
| 41 | struct clk *clk; | 40 | struct clk *clk; |
| 42 | }; | 41 | }; |
| 43 | 42 | ||
| 44 | static int rockchip_efuse_write(void *context, const void *data, size_t count) | 43 | static int rockchip_efuse_read(void *context, unsigned int offset, |
| 44 | void *val, size_t bytes) | ||
| 45 | { | 45 | { |
| 46 | /* Nothing TBD, Read-Only */ | ||
| 47 | return 0; | ||
| 48 | } | ||
| 49 | |||
| 50 | static int rockchip_efuse_read(void *context, | ||
| 51 | const void *reg, size_t reg_size, | ||
| 52 | void *val, size_t val_size) | ||
| 53 | { | ||
| 54 | unsigned int offset = *(u32 *)reg; | ||
| 55 | struct rockchip_efuse_chip *efuse = context; | 46 | struct rockchip_efuse_chip *efuse = context; |
| 56 | u8 *buf = val; | 47 | u8 *buf = val; |
| 57 | int ret; | 48 | int ret; |
| @@ -64,12 +55,12 @@ static int rockchip_efuse_read(void *context, | |||
| 64 | 55 | ||
| 65 | writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL); | 56 | writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL); |
| 66 | udelay(1); | 57 | udelay(1); |
| 67 | while (val_size) { | 58 | while (bytes--) { |
| 68 | writel(readl(efuse->base + REG_EFUSE_CTRL) & | 59 | writel(readl(efuse->base + REG_EFUSE_CTRL) & |
| 69 | (~(EFUSE_A_MASK << EFUSE_A_SHIFT)), | 60 | (~(EFUSE_A_MASK << EFUSE_A_SHIFT)), |
| 70 | efuse->base + REG_EFUSE_CTRL); | 61 | efuse->base + REG_EFUSE_CTRL); |
| 71 | writel(readl(efuse->base + REG_EFUSE_CTRL) | | 62 | writel(readl(efuse->base + REG_EFUSE_CTRL) | |
| 72 | ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT), | 63 | ((offset++ & EFUSE_A_MASK) << EFUSE_A_SHIFT), |
| 73 | efuse->base + REG_EFUSE_CTRL); | 64 | efuse->base + REG_EFUSE_CTRL); |
| 74 | udelay(1); | 65 | udelay(1); |
| 75 | writel(readl(efuse->base + REG_EFUSE_CTRL) | | 66 | writel(readl(efuse->base + REG_EFUSE_CTRL) | |
| @@ -79,9 +70,6 @@ static int rockchip_efuse_read(void *context, | |||
| 79 | writel(readl(efuse->base + REG_EFUSE_CTRL) & | 70 | writel(readl(efuse->base + REG_EFUSE_CTRL) & |
| 80 | (~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL); | 71 | (~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL); |
| 81 | udelay(1); | 72 | udelay(1); |
| 82 | |||
| 83 | val_size -= 1; | ||
| 84 | offset += 1; | ||
| 85 | } | 73 | } |
| 86 | 74 | ||
| 87 | /* Switch to standby mode */ | 75 | /* Switch to standby mode */ |
| @@ -92,22 +80,11 @@ static int rockchip_efuse_read(void *context, | |||
| 92 | return 0; | 80 | return 0; |
| 93 | } | 81 | } |
| 94 | 82 | ||
| 95 | static struct regmap_bus rockchip_efuse_bus = { | ||
| 96 | .read = rockchip_efuse_read, | ||
| 97 | .write = rockchip_efuse_write, | ||
| 98 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 99 | .val_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 100 | }; | ||
| 101 | |||
| 102 | static struct regmap_config rockchip_efuse_regmap_config = { | ||
| 103 | .reg_bits = 32, | ||
| 104 | .reg_stride = 1, | ||
| 105 | .val_bits = 8, | ||
| 106 | }; | ||
| 107 | |||
| 108 | static struct nvmem_config econfig = { | 83 | static struct nvmem_config econfig = { |
| 109 | .name = "rockchip-efuse", | 84 | .name = "rockchip-efuse", |
| 110 | .owner = THIS_MODULE, | 85 | .owner = THIS_MODULE, |
| 86 | .stride = 1, | ||
| 87 | .word_size = 1, | ||
| 111 | .read_only = true, | 88 | .read_only = true, |
| 112 | }; | 89 | }; |
| 113 | 90 | ||
| @@ -121,7 +98,6 @@ static int rockchip_efuse_probe(struct platform_device *pdev) | |||
| 121 | { | 98 | { |
| 122 | struct resource *res; | 99 | struct resource *res; |
| 123 | struct nvmem_device *nvmem; | 100 | struct nvmem_device *nvmem; |
| 124 | struct regmap *regmap; | ||
| 125 | struct rockchip_efuse_chip *efuse; | 101 | struct rockchip_efuse_chip *efuse; |
| 126 | 102 | ||
| 127 | efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip), | 103 | efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip), |
| @@ -139,16 +115,9 @@ static int rockchip_efuse_probe(struct platform_device *pdev) | |||
| 139 | return PTR_ERR(efuse->clk); | 115 | return PTR_ERR(efuse->clk); |
| 140 | 116 | ||
| 141 | efuse->dev = &pdev->dev; | 117 | efuse->dev = &pdev->dev; |
| 142 | 118 | econfig.size = resource_size(res); | |
| 143 | rockchip_efuse_regmap_config.max_register = resource_size(res) - 1; | 119 | econfig.reg_read = rockchip_efuse_read; |
| 144 | 120 | econfig.priv = efuse; | |
| 145 | regmap = devm_regmap_init(efuse->dev, &rockchip_efuse_bus, | ||
| 146 | efuse, &rockchip_efuse_regmap_config); | ||
| 147 | if (IS_ERR(regmap)) { | ||
| 148 | dev_err(efuse->dev, "regmap init failed\n"); | ||
| 149 | return PTR_ERR(regmap); | ||
| 150 | } | ||
| 151 | |||
| 152 | econfig.dev = efuse->dev; | 121 | econfig.dev = efuse->dev; |
| 153 | nvmem = nvmem_register(&econfig); | 122 | nvmem = nvmem_register(&econfig); |
| 154 | if (IS_ERR(nvmem)) | 123 | if (IS_ERR(nvmem)) |
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index bc88b4084055..1567ccca8de3 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c | |||
| @@ -21,13 +21,14 @@ | |||
| 21 | #include <linux/nvmem-provider.h> | 21 | #include <linux/nvmem-provider.h> |
| 22 | #include <linux/of.h> | 22 | #include <linux/of.h> |
| 23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
| 24 | #include <linux/regmap.h> | ||
| 25 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 26 | #include <linux/random.h> | 25 | #include <linux/random.h> |
| 27 | 26 | ||
| 28 | static struct nvmem_config econfig = { | 27 | static struct nvmem_config econfig = { |
| 29 | .name = "sunxi-sid", | 28 | .name = "sunxi-sid", |
| 30 | .read_only = true, | 29 | .read_only = true, |
| 30 | .stride = 4, | ||
| 31 | .word_size = 1, | ||
| 31 | .owner = THIS_MODULE, | 32 | .owner = THIS_MODULE, |
| 32 | }; | 33 | }; |
| 33 | 34 | ||
| @@ -51,54 +52,23 @@ static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid, | |||
| 51 | return sid_key; /* Only return the last byte */ | 52 | return sid_key; /* Only return the last byte */ |
| 52 | } | 53 | } |
| 53 | 54 | ||
| 54 | static int sunxi_sid_read(void *context, | 55 | static int sunxi_sid_read(void *context, unsigned int offset, |
| 55 | const void *reg, size_t reg_size, | 56 | void *val, size_t bytes) |
| 56 | void *val, size_t val_size) | ||
| 57 | { | 57 | { |
| 58 | struct sunxi_sid *sid = context; | 58 | struct sunxi_sid *sid = context; |
| 59 | unsigned int offset = *(u32 *)reg; | ||
| 60 | u8 *buf = val; | 59 | u8 *buf = val; |
| 61 | 60 | ||
| 62 | while (val_size) { | 61 | while (bytes--) |
| 63 | *buf++ = sunxi_sid_read_byte(sid, offset); | 62 | *buf++ = sunxi_sid_read_byte(sid, offset++); |
| 64 | val_size--; | ||
| 65 | offset++; | ||
| 66 | } | ||
| 67 | |||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | 63 | ||
| 71 | static int sunxi_sid_write(void *context, const void *data, size_t count) | ||
| 72 | { | ||
| 73 | /* Unimplemented, dummy to keep regmap core happy */ | ||
| 74 | return 0; | 64 | return 0; |
| 75 | } | 65 | } |
| 76 | 66 | ||
| 77 | static struct regmap_bus sunxi_sid_bus = { | ||
| 78 | .read = sunxi_sid_read, | ||
| 79 | .write = sunxi_sid_write, | ||
| 80 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 81 | .val_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 82 | }; | ||
| 83 | |||
| 84 | static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg) | ||
| 85 | { | ||
| 86 | return false; | ||
| 87 | } | ||
| 88 | |||
| 89 | static struct regmap_config sunxi_sid_regmap_config = { | ||
| 90 | .reg_bits = 32, | ||
| 91 | .val_bits = 8, | ||
| 92 | .reg_stride = 1, | ||
| 93 | .writeable_reg = sunxi_sid_writeable_reg, | ||
| 94 | }; | ||
| 95 | |||
| 96 | static int sunxi_sid_probe(struct platform_device *pdev) | 67 | static int sunxi_sid_probe(struct platform_device *pdev) |
| 97 | { | 68 | { |
| 98 | struct device *dev = &pdev->dev; | 69 | struct device *dev = &pdev->dev; |
| 99 | struct resource *res; | 70 | struct resource *res; |
| 100 | struct nvmem_device *nvmem; | 71 | struct nvmem_device *nvmem; |
| 101 | struct regmap *regmap; | ||
| 102 | struct sunxi_sid *sid; | 72 | struct sunxi_sid *sid; |
| 103 | int ret, i, size; | 73 | int ret, i, size; |
| 104 | char *randomness; | 74 | char *randomness; |
| @@ -113,16 +83,10 @@ static int sunxi_sid_probe(struct platform_device *pdev) | |||
| 113 | return PTR_ERR(sid->base); | 83 | return PTR_ERR(sid->base); |
| 114 | 84 | ||
| 115 | size = resource_size(res) - 1; | 85 | size = resource_size(res) - 1; |
| 116 | sunxi_sid_regmap_config.max_register = size; | 86 | econfig.size = resource_size(res); |
| 117 | |||
| 118 | regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid, | ||
| 119 | &sunxi_sid_regmap_config); | ||
| 120 | if (IS_ERR(regmap)) { | ||
| 121 | dev_err(dev, "regmap init failed\n"); | ||
| 122 | return PTR_ERR(regmap); | ||
| 123 | } | ||
| 124 | |||
| 125 | econfig.dev = dev; | 87 | econfig.dev = dev; |
| 88 | econfig.reg_read = sunxi_sid_read; | ||
| 89 | econfig.priv = sid; | ||
| 126 | nvmem = nvmem_register(&econfig); | 90 | nvmem = nvmem_register(&econfig); |
| 127 | if (IS_ERR(nvmem)) | 91 | if (IS_ERR(nvmem)) |
| 128 | return PTR_ERR(nvmem); | 92 | return PTR_ERR(nvmem); |
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c index 8641319efeda..72e4faabce29 100644 --- a/drivers/nvmem/vf610-ocotp.c +++ b/drivers/nvmem/vf610-ocotp.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | #include <linux/nvmem-provider.h> | 25 | #include <linux/nvmem-provider.h> |
| 26 | #include <linux/of.h> | 26 | #include <linux/of.h> |
| 27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 28 | #include <linux/regmap.h> | ||
| 29 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 30 | 29 | ||
| 31 | /* OCOTP Register Offsets */ | 30 | /* OCOTP Register Offsets */ |
| @@ -152,23 +151,16 @@ static int vf610_get_fuse_address(int base_addr_offset) | |||
| 152 | return -EINVAL; | 151 | return -EINVAL; |
| 153 | } | 152 | } |
| 154 | 153 | ||
| 155 | static int vf610_ocotp_write(void *context, const void *data, size_t count) | 154 | static int vf610_ocotp_read(void *context, unsigned int offset, |
| 156 | { | 155 | void *val, size_t bytes) |
| 157 | return 0; | ||
| 158 | } | ||
| 159 | |||
| 160 | static int vf610_ocotp_read(void *context, | ||
| 161 | const void *off, size_t reg_size, | ||
| 162 | void *val, size_t val_size) | ||
| 163 | { | 156 | { |
| 164 | struct vf610_ocotp *ocotp = context; | 157 | struct vf610_ocotp *ocotp = context; |
| 165 | void __iomem *base = ocotp->base; | 158 | void __iomem *base = ocotp->base; |
| 166 | unsigned int offset = *(u32 *)off; | ||
| 167 | u32 reg, *buf = val; | 159 | u32 reg, *buf = val; |
| 168 | int fuse_addr; | 160 | int fuse_addr; |
| 169 | int ret; | 161 | int ret; |
| 170 | 162 | ||
| 171 | while (val_size > 0) { | 163 | while (bytes > 0) { |
| 172 | fuse_addr = vf610_get_fuse_address(offset); | 164 | fuse_addr = vf610_get_fuse_address(offset); |
| 173 | if (fuse_addr > 0) { | 165 | if (fuse_addr > 0) { |
| 174 | writel(ocotp->timing, base + OCOTP_TIMING); | 166 | writel(ocotp->timing, base + OCOTP_TIMING); |
| @@ -205,29 +197,19 @@ static int vf610_ocotp_read(void *context, | |||
| 205 | } | 197 | } |
| 206 | 198 | ||
| 207 | buf++; | 199 | buf++; |
| 208 | val_size--; | 200 | bytes -= 4; |
| 209 | offset += reg_size; | 201 | offset += 4; |
| 210 | } | 202 | } |
| 211 | 203 | ||
| 212 | return 0; | 204 | return 0; |
| 213 | } | 205 | } |
| 214 | 206 | ||
| 215 | static struct regmap_bus vf610_ocotp_bus = { | ||
| 216 | .read = vf610_ocotp_read, | ||
| 217 | .write = vf610_ocotp_write, | ||
| 218 | .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 219 | .val_format_endian_default = REGMAP_ENDIAN_NATIVE, | ||
| 220 | }; | ||
| 221 | |||
| 222 | static struct regmap_config ocotp_regmap_config = { | ||
| 223 | .reg_bits = 32, | ||
| 224 | .val_bits = 32, | ||
| 225 | .reg_stride = 4, | ||
| 226 | }; | ||
| 227 | |||
| 228 | static struct nvmem_config ocotp_config = { | 207 | static struct nvmem_config ocotp_config = { |
| 229 | .name = "ocotp", | 208 | .name = "ocotp", |
| 230 | .owner = THIS_MODULE, | 209 | .owner = THIS_MODULE, |
| 210 | .stride = 4, | ||
| 211 | .word_size = 4, | ||
| 212 | .reg_read = vf610_ocotp_read, | ||
| 231 | }; | 213 | }; |
| 232 | 214 | ||
| 233 | static const struct of_device_id ocotp_of_match[] = { | 215 | static const struct of_device_id ocotp_of_match[] = { |
| @@ -247,7 +229,6 @@ static int vf610_ocotp_probe(struct platform_device *pdev) | |||
| 247 | { | 229 | { |
| 248 | struct device *dev = &pdev->dev; | 230 | struct device *dev = &pdev->dev; |
| 249 | struct resource *res; | 231 | struct resource *res; |
| 250 | struct regmap *regmap; | ||
| 251 | struct vf610_ocotp *ocotp_dev; | 232 | struct vf610_ocotp *ocotp_dev; |
| 252 | 233 | ||
| 253 | ocotp_dev = devm_kzalloc(&pdev->dev, | 234 | ocotp_dev = devm_kzalloc(&pdev->dev, |
| @@ -267,13 +248,8 @@ static int vf610_ocotp_probe(struct platform_device *pdev) | |||
| 267 | return PTR_ERR(ocotp_dev->clk); | 248 | return PTR_ERR(ocotp_dev->clk); |
| 268 | } | 249 | } |
| 269 | 250 | ||
| 270 | ocotp_regmap_config.max_register = resource_size(res); | 251 | ocotp_config.size = resource_size(res); |
| 271 | regmap = devm_regmap_init(dev, | 252 | ocotp_config.priv = ocotp_dev; |
| 272 | &vf610_ocotp_bus, ocotp_dev, &ocotp_regmap_config); | ||
| 273 | if (IS_ERR(regmap)) { | ||
| 274 | dev_err(dev, "regmap init failed\n"); | ||
| 275 | return PTR_ERR(regmap); | ||
| 276 | } | ||
| 277 | ocotp_config.dev = dev; | 253 | ocotp_config.dev = dev; |
| 278 | 254 | ||
| 279 | ocotp_dev->nvmem = nvmem_register(&ocotp_config); | 255 | ocotp_dev->nvmem = nvmem_register(&ocotp_config); |
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index c776333a68bc..74ed3e459a3e 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c | |||
| @@ -617,5 +617,5 @@ static void __exit parport_default_proc_unregister (void) | |||
| 617 | } | 617 | } |
| 618 | #endif | 618 | #endif |
| 619 | 619 | ||
| 620 | module_init(parport_default_proc_register) | 620 | subsys_initcall(parport_default_proc_register) |
| 621 | module_exit(parport_default_proc_unregister) | 621 | module_exit(parport_default_proc_unregister) |
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 58f7eeb79bb4..7e9b2de2aa24 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c | |||
| @@ -1809,14 +1809,14 @@ static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus) | |||
| 1809 | 1809 | ||
| 1810 | if (hbus->low_mmio_space && hbus->low_mmio_res) { | 1810 | if (hbus->low_mmio_space && hbus->low_mmio_res) { |
| 1811 | hbus->low_mmio_res->flags |= IORESOURCE_BUSY; | 1811 | hbus->low_mmio_res->flags |= IORESOURCE_BUSY; |
| 1812 | release_mem_region(hbus->low_mmio_res->start, | 1812 | vmbus_free_mmio(hbus->low_mmio_res->start, |
| 1813 | resource_size(hbus->low_mmio_res)); | 1813 | resource_size(hbus->low_mmio_res)); |
| 1814 | } | 1814 | } |
| 1815 | 1815 | ||
| 1816 | if (hbus->high_mmio_space && hbus->high_mmio_res) { | 1816 | if (hbus->high_mmio_space && hbus->high_mmio_res) { |
| 1817 | hbus->high_mmio_res->flags |= IORESOURCE_BUSY; | 1817 | hbus->high_mmio_res->flags |= IORESOURCE_BUSY; |
| 1818 | release_mem_region(hbus->high_mmio_res->start, | 1818 | vmbus_free_mmio(hbus->high_mmio_res->start, |
| 1819 | resource_size(hbus->high_mmio_res)); | 1819 | resource_size(hbus->high_mmio_res)); |
| 1820 | } | 1820 | } |
| 1821 | } | 1821 | } |
| 1822 | 1822 | ||
| @@ -1894,8 +1894,8 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus) | |||
| 1894 | 1894 | ||
| 1895 | release_low_mmio: | 1895 | release_low_mmio: |
| 1896 | if (hbus->low_mmio_res) { | 1896 | if (hbus->low_mmio_res) { |
| 1897 | release_mem_region(hbus->low_mmio_res->start, | 1897 | vmbus_free_mmio(hbus->low_mmio_res->start, |
| 1898 | resource_size(hbus->low_mmio_res)); | 1898 | resource_size(hbus->low_mmio_res)); |
| 1899 | } | 1899 | } |
| 1900 | 1900 | ||
| 1901 | return ret; | 1901 | return ret; |
| @@ -1938,7 +1938,7 @@ static int hv_allocate_config_window(struct hv_pcibus_device *hbus) | |||
| 1938 | 1938 | ||
| 1939 | static void hv_free_config_window(struct hv_pcibus_device *hbus) | 1939 | static void hv_free_config_window(struct hv_pcibus_device *hbus) |
| 1940 | { | 1940 | { |
| 1941 | release_mem_region(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); | 1941 | vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); |
| 1942 | } | 1942 | } |
| 1943 | 1943 | ||
| 1944 | /** | 1944 | /** |
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c index 6b3da1bb0d63..2b9b0941d9eb 100644 --- a/drivers/spmi/spmi.c +++ b/drivers/spmi/spmi.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #define CREATE_TRACE_POINTS | 25 | #define CREATE_TRACE_POINTS |
| 26 | #include <trace/events/spmi.h> | 26 | #include <trace/events/spmi.h> |
| 27 | 27 | ||
| 28 | static bool is_registered; | ||
| 28 | static DEFINE_IDA(ctrl_ida); | 29 | static DEFINE_IDA(ctrl_ida); |
| 29 | 30 | ||
| 30 | static void spmi_dev_release(struct device *dev) | 31 | static void spmi_dev_release(struct device *dev) |
| @@ -507,7 +508,7 @@ int spmi_controller_add(struct spmi_controller *ctrl) | |||
| 507 | int ret; | 508 | int ret; |
| 508 | 509 | ||
| 509 | /* Can't register until after driver model init */ | 510 | /* Can't register until after driver model init */ |
| 510 | if (WARN_ON(!spmi_bus_type.p)) | 511 | if (WARN_ON(!is_registered)) |
| 511 | return -EAGAIN; | 512 | return -EAGAIN; |
| 512 | 513 | ||
| 513 | ret = device_add(&ctrl->dev); | 514 | ret = device_add(&ctrl->dev); |
| @@ -576,7 +577,14 @@ module_exit(spmi_exit); | |||
| 576 | 577 | ||
| 577 | static int __init spmi_init(void) | 578 | static int __init spmi_init(void) |
| 578 | { | 579 | { |
| 579 | return bus_register(&spmi_bus_type); | 580 | int ret; |
| 581 | |||
| 582 | ret = bus_register(&spmi_bus_type); | ||
| 583 | if (ret) | ||
| 584 | return ret; | ||
| 585 | |||
| 586 | is_registered = true; | ||
| 587 | return 0; | ||
| 580 | } | 588 | } |
| 581 | postcore_initcall(spmi_init); | 589 | postcore_initcall(spmi_init); |
| 582 | 590 | ||
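The spmi fix stops inspecting the driver core's private spmi_bus_type.p pointer and instead keeps a module-local flag that is set only once bus_register() has succeeded, so spmi_controller_add() can refuse callers that run before the bus exists. A minimal sketch of the same guard pattern, with illustrative foo_* names that are not from this patch:

#include <linux/device.h>
#include <linux/init.h>

static struct bus_type foo_bus_type = {
	.name = "foo",
};

static bool foo_bus_registered;

static int __init foo_bus_init(void)
{
	int ret;

	ret = bus_register(&foo_bus_type);
	if (ret)
		return ret;

	/* Controllers may register from this point on */
	foo_bus_registered = true;
	return 0;
}
postcore_initcall(foo_bus_init);

int foo_controller_add(struct device *dev)
{
	/* Can't register until after the bus itself is up */
	if (WARN_ON(!foo_bus_registered))
		return -EAGAIN;

	return device_add(dev);
}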
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index bcc1fc027311..fba021f5736a 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
| @@ -271,12 +271,16 @@ static int uio_dev_add_attributes(struct uio_device *idev) | |||
| 271 | map_found = 1; | 271 | map_found = 1; |
| 272 | idev->map_dir = kobject_create_and_add("maps", | 272 | idev->map_dir = kobject_create_and_add("maps", |
| 273 | &idev->dev->kobj); | 273 | &idev->dev->kobj); |
| 274 | if (!idev->map_dir) | 274 | if (!idev->map_dir) { |
| 275 | ret = -ENOMEM; | ||
| 275 | goto err_map; | 276 | goto err_map; |
| 277 | } | ||
| 276 | } | 278 | } |
| 277 | map = kzalloc(sizeof(*map), GFP_KERNEL); | 279 | map = kzalloc(sizeof(*map), GFP_KERNEL); |
| 278 | if (!map) | 280 | if (!map) { |
| 281 | ret = -ENOMEM; | ||
| 279 | goto err_map_kobj; | 282 | goto err_map_kobj; |
| 283 | } | ||
| 280 | kobject_init(&map->kobj, &map_attr_type); | 284 | kobject_init(&map->kobj, &map_attr_type); |
| 281 | map->mem = mem; | 285 | map->mem = mem; |
| 282 | mem->map = map; | 286 | mem->map = map; |
| @@ -296,12 +300,16 @@ static int uio_dev_add_attributes(struct uio_device *idev) | |||
| 296 | portio_found = 1; | 300 | portio_found = 1; |
| 297 | idev->portio_dir = kobject_create_and_add("portio", | 301 | idev->portio_dir = kobject_create_and_add("portio", |
| 298 | &idev->dev->kobj); | 302 | &idev->dev->kobj); |
| 299 | if (!idev->portio_dir) | 303 | if (!idev->portio_dir) { |
| 304 | ret = -ENOMEM; | ||
| 300 | goto err_portio; | 305 | goto err_portio; |
| 306 | } | ||
| 301 | } | 307 | } |
| 302 | portio = kzalloc(sizeof(*portio), GFP_KERNEL); | 308 | portio = kzalloc(sizeof(*portio), GFP_KERNEL); |
| 303 | if (!portio) | 309 | if (!portio) { |
| 310 | ret = -ENOMEM; | ||
| 304 | goto err_portio_kobj; | 311 | goto err_portio_kobj; |
| 312 | } | ||
| 305 | kobject_init(&portio->kobj, &portio_attr_type); | 313 | kobject_init(&portio->kobj, &portio_attr_type); |
| 306 | portio->port = port; | 314 | portio->port = port; |
| 307 | port->portio = portio; | 315 | port->portio = portio; |
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index e2451bdb4525..2fd49b2358f8 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c | |||
| @@ -743,7 +743,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) | |||
| 743 | err3: | 743 | err3: |
| 744 | iounmap(fb_virt); | 744 | iounmap(fb_virt); |
| 745 | err2: | 745 | err2: |
| 746 | release_mem_region(par->mem->start, screen_fb_size); | 746 | vmbus_free_mmio(par->mem->start, screen_fb_size); |
| 747 | par->mem = NULL; | 747 | par->mem = NULL; |
| 748 | err1: | 748 | err1: |
| 749 | if (!gen2vm) | 749 | if (!gen2vm) |
| @@ -758,7 +758,7 @@ static void hvfb_putmem(struct fb_info *info) | |||
| 758 | struct hvfb_par *par = info->par; | 758 | struct hvfb_par *par = info->par; |
| 759 | 759 | ||
| 760 | iounmap(info->screen_base); | 760 | iounmap(info->screen_base); |
| 761 | release_mem_region(par->mem->start, screen_fb_size); | 761 | vmbus_free_mmio(par->mem->start, screen_fb_size); |
| 762 | par->mem = NULL; | 762 | par->mem = NULL; |
| 763 | } | 763 | } |
| 764 | 764 | ||
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index 5fbeab38889e..9f2c834e43e0 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c | |||
| @@ -204,10 +204,6 @@ static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge) | |||
| 204 | /* Need pdev */ | 204 | /* Need pdev */ |
| 205 | pdev = to_pci_dev(ca91cx42_bridge->parent); | 205 | pdev = to_pci_dev(ca91cx42_bridge->parent); |
| 206 | 206 | ||
| 207 | INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers); | ||
| 208 | |||
| 209 | mutex_init(&ca91cx42_bridge->irq_mtx); | ||
| 210 | |||
| 211 | /* Disable interrupts from PCI to VME */ | 207 | /* Disable interrupts from PCI to VME */ |
| 212 | iowrite32(0, bridge->base + VINT_EN); | 208 | iowrite32(0, bridge->base + VINT_EN); |
| 213 | 209 | ||
| @@ -1626,6 +1622,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1626 | retval = -ENOMEM; | 1622 | retval = -ENOMEM; |
| 1627 | goto err_struct; | 1623 | goto err_struct; |
| 1628 | } | 1624 | } |
| 1625 | vme_init_bridge(ca91cx42_bridge); | ||
| 1629 | 1626 | ||
| 1630 | ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL); | 1627 | ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL); |
| 1631 | 1628 | ||
| @@ -1686,7 +1683,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1686 | } | 1683 | } |
| 1687 | 1684 | ||
| 1688 | /* Add master windows to list */ | 1685 | /* Add master windows to list */ |
| 1689 | INIT_LIST_HEAD(&ca91cx42_bridge->master_resources); | ||
| 1690 | for (i = 0; i < CA91C142_MAX_MASTER; i++) { | 1686 | for (i = 0; i < CA91C142_MAX_MASTER; i++) { |
| 1691 | master_image = kmalloc(sizeof(struct vme_master_resource), | 1687 | master_image = kmalloc(sizeof(struct vme_master_resource), |
| 1692 | GFP_KERNEL); | 1688 | GFP_KERNEL); |
| @@ -1713,7 +1709,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1713 | } | 1709 | } |
| 1714 | 1710 | ||
| 1715 | /* Add slave windows to list */ | 1711 | /* Add slave windows to list */ |
| 1716 | INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources); | ||
| 1717 | for (i = 0; i < CA91C142_MAX_SLAVE; i++) { | 1712 | for (i = 0; i < CA91C142_MAX_SLAVE; i++) { |
| 1718 | slave_image = kmalloc(sizeof(struct vme_slave_resource), | 1713 | slave_image = kmalloc(sizeof(struct vme_slave_resource), |
| 1719 | GFP_KERNEL); | 1714 | GFP_KERNEL); |
| @@ -1741,7 +1736,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1741 | } | 1736 | } |
| 1742 | 1737 | ||
| 1743 | /* Add dma engines to list */ | 1738 | /* Add dma engines to list */ |
| 1744 | INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources); | ||
| 1745 | for (i = 0; i < CA91C142_MAX_DMA; i++) { | 1739 | for (i = 0; i < CA91C142_MAX_DMA; i++) { |
| 1746 | dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource), | 1740 | dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource), |
| 1747 | GFP_KERNEL); | 1741 | GFP_KERNEL); |
| @@ -1764,7 +1758,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1764 | } | 1758 | } |
| 1765 | 1759 | ||
| 1766 | /* Add location monitor to list */ | 1760 | /* Add location monitor to list */ |
| 1767 | INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources); | ||
| 1768 | lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL); | 1761 | lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL); |
| 1769 | if (lm == NULL) { | 1762 | if (lm == NULL) { |
| 1770 | dev_err(&pdev->dev, "Failed to allocate memory for " | 1763 | dev_err(&pdev->dev, "Failed to allocate memory for " |
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 60524834dba3..4bc5d451ec6c 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c | |||
| @@ -314,10 +314,6 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge) | |||
| 314 | 314 | ||
| 315 | bridge = tsi148_bridge->driver_priv; | 315 | bridge = tsi148_bridge->driver_priv; |
| 316 | 316 | ||
| 317 | INIT_LIST_HEAD(&tsi148_bridge->vme_error_handlers); | ||
| 318 | |||
| 319 | mutex_init(&tsi148_bridge->irq_mtx); | ||
| 320 | |||
| 321 | result = request_irq(pdev->irq, | 317 | result = request_irq(pdev->irq, |
| 322 | tsi148_irqhandler, | 318 | tsi148_irqhandler, |
| 323 | IRQF_SHARED, | 319 | IRQF_SHARED, |
| @@ -2301,6 +2297,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2301 | retval = -ENOMEM; | 2297 | retval = -ENOMEM; |
| 2302 | goto err_struct; | 2298 | goto err_struct; |
| 2303 | } | 2299 | } |
| 2300 | vme_init_bridge(tsi148_bridge); | ||
| 2304 | 2301 | ||
| 2305 | tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL); | 2302 | tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL); |
| 2306 | if (tsi148_device == NULL) { | 2303 | if (tsi148_device == NULL) { |
| @@ -2387,7 +2384,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2387 | } | 2384 | } |
| 2388 | 2385 | ||
| 2389 | /* Add master windows to list */ | 2386 | /* Add master windows to list */ |
| 2390 | INIT_LIST_HEAD(&tsi148_bridge->master_resources); | ||
| 2391 | for (i = 0; i < master_num; i++) { | 2387 | for (i = 0; i < master_num; i++) { |
| 2392 | master_image = kmalloc(sizeof(struct vme_master_resource), | 2388 | master_image = kmalloc(sizeof(struct vme_master_resource), |
| 2393 | GFP_KERNEL); | 2389 | GFP_KERNEL); |
| @@ -2417,7 +2413,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2417 | } | 2413 | } |
| 2418 | 2414 | ||
| 2419 | /* Add slave windows to list */ | 2415 | /* Add slave windows to list */ |
| 2420 | INIT_LIST_HEAD(&tsi148_bridge->slave_resources); | ||
| 2421 | for (i = 0; i < TSI148_MAX_SLAVE; i++) { | 2416 | for (i = 0; i < TSI148_MAX_SLAVE; i++) { |
| 2422 | slave_image = kmalloc(sizeof(struct vme_slave_resource), | 2417 | slave_image = kmalloc(sizeof(struct vme_slave_resource), |
| 2423 | GFP_KERNEL); | 2418 | GFP_KERNEL); |
| @@ -2442,7 +2437,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2442 | } | 2437 | } |
| 2443 | 2438 | ||
| 2444 | /* Add dma engines to list */ | 2439 | /* Add dma engines to list */ |
| 2445 | INIT_LIST_HEAD(&tsi148_bridge->dma_resources); | ||
| 2446 | for (i = 0; i < TSI148_MAX_DMA; i++) { | 2440 | for (i = 0; i < TSI148_MAX_DMA; i++) { |
| 2447 | dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource), | 2441 | dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource), |
| 2448 | GFP_KERNEL); | 2442 | GFP_KERNEL); |
| @@ -2467,7 +2461,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2467 | } | 2461 | } |
| 2468 | 2462 | ||
| 2469 | /* Add location monitor to list */ | 2463 | /* Add location monitor to list */ |
| 2470 | INIT_LIST_HEAD(&tsi148_bridge->lm_resources); | ||
| 2471 | lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL); | 2464 | lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL); |
| 2472 | if (lm == NULL) { | 2465 | if (lm == NULL) { |
| 2473 | dev_err(&pdev->dev, "Failed to allocate memory for " | 2466 | dev_err(&pdev->dev, "Failed to allocate memory for " |
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c index 72924b0632b7..37ac0a58e59a 100644 --- a/drivers/vme/vme.c +++ b/drivers/vme/vme.c | |||
| @@ -782,7 +782,7 @@ struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource) | |||
| 782 | 782 | ||
| 783 | dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL); | 783 | dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL); |
| 784 | if (dma_list == NULL) { | 784 | if (dma_list == NULL) { |
| 785 | printk(KERN_ERR "Unable to allocate memory for new dma list\n"); | 785 | printk(KERN_ERR "Unable to allocate memory for new DMA list\n"); |
| 786 | return NULL; | 786 | return NULL; |
| 787 | } | 787 | } |
| 788 | INIT_LIST_HEAD(&dma_list->entries); | 788 | INIT_LIST_HEAD(&dma_list->entries); |
| @@ -846,7 +846,7 @@ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address) | |||
| 846 | 846 | ||
| 847 | pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL); | 847 | pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL); |
| 848 | if (pci_attr == NULL) { | 848 | if (pci_attr == NULL) { |
| 849 | printk(KERN_ERR "Unable to allocate memory for pci attributes\n"); | 849 | printk(KERN_ERR "Unable to allocate memory for PCI attributes\n"); |
| 850 | goto err_pci; | 850 | goto err_pci; |
| 851 | } | 851 | } |
| 852 | 852 | ||
| @@ -884,7 +884,7 @@ struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address, | |||
| 884 | 884 | ||
| 885 | vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL); | 885 | vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL); |
| 886 | if (vme_attr == NULL) { | 886 | if (vme_attr == NULL) { |
| 887 | printk(KERN_ERR "Unable to allocate memory for vme attributes\n"); | 887 | printk(KERN_ERR "Unable to allocate memory for VME attributes\n"); |
| 888 | goto err_vme; | 888 | goto err_vme; |
| 889 | } | 889 | } |
| 890 | 890 | ||
| @@ -975,8 +975,8 @@ int vme_dma_list_free(struct vme_dma_list *list) | |||
| 975 | } | 975 | } |
| 976 | 976 | ||
| 977 | /* | 977 | /* |
| 978 | * Empty out all of the entries from the dma list. We need to go to the | 978 | * Empty out all of the entries from the DMA list. We need to go to the |
| 979 | * low level driver as dma entries are driver specific. | 979 | * low level driver as DMA entries are driver specific. |
| 980 | */ | 980 | */ |
| 981 | retval = bridge->dma_list_empty(list); | 981 | retval = bridge->dma_list_empty(list); |
| 982 | if (retval) { | 982 | if (retval) { |
| @@ -1091,7 +1091,7 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid) | |||
| 1091 | if (call != NULL) | 1091 | if (call != NULL) |
| 1092 | call(level, statid, priv_data); | 1092 | call(level, statid, priv_data); |
| 1093 | else | 1093 | else |
| 1094 | printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n", | 1094 | printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n", |
| 1095 | level, statid); | 1095 | level, statid); |
| 1096 | } | 1096 | } |
| 1097 | EXPORT_SYMBOL(vme_irq_handler); | 1097 | EXPORT_SYMBOL(vme_irq_handler); |
| @@ -1429,6 +1429,20 @@ static void vme_dev_release(struct device *dev) | |||
| 1429 | kfree(dev_to_vme_dev(dev)); | 1429 | kfree(dev_to_vme_dev(dev)); |
| 1430 | } | 1430 | } |
| 1431 | 1431 | ||
| 1432 | /* Common bridge initialization */ | ||
| 1433 | struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge) | ||
| 1434 | { | ||
| 1435 | INIT_LIST_HEAD(&bridge->vme_error_handlers); | ||
| 1436 | INIT_LIST_HEAD(&bridge->master_resources); | ||
| 1437 | INIT_LIST_HEAD(&bridge->slave_resources); | ||
| 1438 | INIT_LIST_HEAD(&bridge->dma_resources); | ||
| 1439 | INIT_LIST_HEAD(&bridge->lm_resources); | ||
| 1440 | mutex_init(&bridge->irq_mtx); | ||
| 1441 | |||
| 1442 | return bridge; | ||
| 1443 | } | ||
| 1444 | EXPORT_SYMBOL(vme_init_bridge); | ||
| 1445 | |||
| 1432 | int vme_register_bridge(struct vme_bridge *bridge) | 1446 | int vme_register_bridge(struct vme_bridge *bridge) |
| 1433 | { | 1447 | { |
| 1434 | int i; | 1448 | int i; |
diff --git a/drivers/vme/vme_bridge.h b/drivers/vme/vme_bridge.h index b59cbee231dd..cb8246fd97be 100644 --- a/drivers/vme/vme_bridge.h +++ b/drivers/vme/vme_bridge.h | |||
| @@ -177,6 +177,7 @@ void vme_bus_error_handler(struct vme_bridge *bridge, | |||
| 177 | unsigned long long address, int am); | 177 | unsigned long long address, int am); |
| 178 | void vme_irq_handler(struct vme_bridge *, int, int); | 178 | void vme_irq_handler(struct vme_bridge *, int, int); |
| 179 | 179 | ||
| 180 | struct vme_bridge *vme_init_bridge(struct vme_bridge *); | ||
| 180 | int vme_register_bridge(struct vme_bridge *); | 181 | int vme_register_bridge(struct vme_bridge *); |
| 181 | void vme_unregister_bridge(struct vme_bridge *); | 182 | void vme_unregister_bridge(struct vme_bridge *); |
| 182 | struct vme_error_handler *vme_register_error_handler( | 183 | struct vme_error_handler *vme_register_error_handler( |
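The new vme_init_bridge() helper gathers the list-head and mutex initialization that each VME bridge driver previously open-coded before calling vme_register_bridge(). A minimal sketch of how a bridge driver's probe path might use the pair; the my_bridge_probe name and the surrounding error handling are illustrative only, not taken from an existing driver:

    /* Hypothetical bridge driver probe path (illustrative only). */
    static int my_bridge_probe(struct pci_dev *pdev,
                               const struct pci_device_id *id)
    {
            struct vme_bridge *bridge;
            int ret;

            bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
            if (!bridge)
                    return -ENOMEM;

            /* Initialize the resource lists and the IRQ mutex in one place. */
            vme_init_bridge(bridge);

            /* ... fill in master/slave/DMA/LM resources and callbacks ... */

            ret = vme_register_bridge(bridge);
            if (ret)
                    kfree(bridge);
            return ret;
    }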
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c index b05e8fefbabd..2e30db1b1a43 100644 --- a/drivers/w1/masters/ds2482.c +++ b/drivers/w1/masters/ds2482.c | |||
| @@ -24,6 +24,19 @@ | |||
| 24 | #include "../w1_int.h" | 24 | #include "../w1_int.h" |
| 25 | 25 | ||
| 26 | /** | 26 | /** |
| 27 | * Allow the active pullup to be disabled; the default is enabled. | ||
| 28 | * | ||
| 29 | * Note from the DS2482 datasheet: | ||
| 30 | * The APU bit controls whether an active pullup (controlled slew-rate | ||
| 31 | * transistor) or a passive pullup (Rwpu resistor) will be used to drive | ||
| 32 | * a 1-Wire line from low to high. When APU = 0, active pullup is disabled | ||
| 33 | * (resistor mode). Active Pullup should always be selected unless there is | ||
| 34 | * only a single slave on the 1-Wire line. | ||
| 35 | */ | ||
| 36 | static int ds2482_active_pullup = 1; | ||
| 37 | module_param_named(active_pullup, ds2482_active_pullup, int, 0644); | ||
| 38 | |||
| 39 | /** | ||
| 27 | * The DS2482 registers - there are 3 registers that are addressed by a read | 40 | * The DS2482 registers - there are 3 registers that are addressed by a read |
| 28 | * pointer. The read pointer is set by the last command executed. | 41 | * pointer. The read pointer is set by the last command executed. |
| 29 | * | 42 | * |
| @@ -138,6 +151,9 @@ struct ds2482_data { | |||
| 138 | */ | 151 | */ |
| 139 | static inline u8 ds2482_calculate_config(u8 conf) | 152 | static inline u8 ds2482_calculate_config(u8 conf) |
| 140 | { | 153 | { |
| 154 | if (ds2482_active_pullup) | ||
| 155 | conf |= DS2482_REG_CFG_APU; | ||
| 156 | |||
| 141 | return conf | ((~conf & 0x0f) << 4); | 157 | return conf | ((~conf & 0x0f) << 4); |
| 142 | } | 158 | } |
| 143 | 159 | ||
| @@ -546,6 +562,8 @@ static int ds2482_remove(struct i2c_client *client) | |||
| 546 | 562 | ||
| 547 | module_i2c_driver(ds2482_driver); | 563 | module_i2c_driver(ds2482_driver); |
| 548 | 564 | ||
| 565 | MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \ | ||
| 566 | "0-disable, 1-enable (default)"); | ||
| 549 | MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>"); | 567 | MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>"); |
| 550 | MODULE_DESCRIPTION("DS2482 driver"); | 568 | MODULE_DESCRIPTION("DS2482 driver"); |
| 551 | MODULE_LICENSE("GPL"); | 569 | MODULE_LICENSE("GPL"); |
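ds2482_calculate_config() encodes the value written to the DS2482 configuration register: the low nibble holds the configuration bits and the high nibble must be written as the one's complement of that nibble, so the new active_pullup parameter only has to OR in the APU bit before the complement is formed. A standalone sketch of the same encoding; the APU bit value is assumed from the driver's existing define:

    #include <stdint.h>
    #include <stdio.h>

    #define APU_BIT 0x01    /* assumed value of DS2482_REG_CFG_APU */

    /* Low nibble = config bits, high nibble = one's complement of it. */
    static uint8_t calc_config(uint8_t conf, int active_pullup)
    {
            if (active_pullup)
                    conf |= APU_BIT;
            return conf | ((~conf & 0x0f) << 4);
    }

    int main(void)
    {
            /* APU set, all other bits clear: 0x01 encodes as 0xe1. */
            printf("0x%02x\n", calc_config(0x00, 1));
            return 0;
    }

With active_pullup left at its default of 1, every configuration write therefore requests the active pullup unless the module parameter is cleared.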
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 2f029e8f4f95..581a300fd6cd 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c | |||
| @@ -92,10 +92,13 @@ static void w1_therm_remove_slave(struct w1_slave *sl) | |||
| 92 | static ssize_t w1_slave_show(struct device *device, | 92 | static ssize_t w1_slave_show(struct device *device, |
| 93 | struct device_attribute *attr, char *buf); | 93 | struct device_attribute *attr, char *buf); |
| 94 | 94 | ||
| 95 | static ssize_t w1_slave_store(struct device *device, | ||
| 96 | struct device_attribute *attr, const char *buf, size_t size); | ||
| 97 | |||
| 95 | static ssize_t w1_seq_show(struct device *device, | 98 | static ssize_t w1_seq_show(struct device *device, |
| 96 | struct device_attribute *attr, char *buf); | 99 | struct device_attribute *attr, char *buf); |
| 97 | 100 | ||
| 98 | static DEVICE_ATTR_RO(w1_slave); | 101 | static DEVICE_ATTR_RW(w1_slave); |
| 99 | static DEVICE_ATTR_RO(w1_seq); | 102 | static DEVICE_ATTR_RO(w1_seq); |
| 100 | 103 | ||
| 101 | static struct attribute *w1_therm_attrs[] = { | 104 | static struct attribute *w1_therm_attrs[] = { |
| @@ -154,8 +157,17 @@ struct w1_therm_family_converter | |||
| 154 | u16 reserved; | 157 | u16 reserved; |
| 155 | struct w1_family *f; | 158 | struct w1_family *f; |
| 156 | int (*convert)(u8 rom[9]); | 159 | int (*convert)(u8 rom[9]); |
| 160 | int (*precision)(struct device *device, int val); | ||
| 161 | int (*eeprom)(struct device *device); | ||
| 157 | }; | 162 | }; |
| 158 | 163 | ||
| 164 | /* write configuration to eeprom */ | ||
| 165 | static inline int w1_therm_eeprom(struct device *device); | ||
| 166 | |||
| 167 | /* Set precision for conversion */ | ||
| 168 | static inline int w1_DS18B20_precision(struct device *device, int val); | ||
| 169 | static inline int w1_DS18S20_precision(struct device *device, int val); | ||
| 170 | |||
| 159 | /* The return value is millidegrees Centigrade. */ | 171 | /* The return value is millidegrees Centigrade. */ |
| 160 | static inline int w1_DS18B20_convert_temp(u8 rom[9]); | 172 | static inline int w1_DS18B20_convert_temp(u8 rom[9]); |
| 161 | static inline int w1_DS18S20_convert_temp(u8 rom[9]); | 173 | static inline int w1_DS18S20_convert_temp(u8 rom[9]); |
| @@ -163,26 +175,194 @@ static inline int w1_DS18S20_convert_temp(u8 rom[9]); | |||
| 163 | static struct w1_therm_family_converter w1_therm_families[] = { | 175 | static struct w1_therm_family_converter w1_therm_families[] = { |
| 164 | { | 176 | { |
| 165 | .f = &w1_therm_family_DS18S20, | 177 | .f = &w1_therm_family_DS18S20, |
| 166 | .convert = w1_DS18S20_convert_temp | 178 | .convert = w1_DS18S20_convert_temp, |
| 179 | .precision = w1_DS18S20_precision, | ||
| 180 | .eeprom = w1_therm_eeprom | ||
| 167 | }, | 181 | }, |
| 168 | { | 182 | { |
| 169 | .f = &w1_therm_family_DS1822, | 183 | .f = &w1_therm_family_DS1822, |
| 170 | .convert = w1_DS18B20_convert_temp | 184 | .convert = w1_DS18B20_convert_temp, |
| 185 | .precision = w1_DS18S20_precision, | ||
| 186 | .eeprom = w1_therm_eeprom | ||
| 171 | }, | 187 | }, |
| 172 | { | 188 | { |
| 173 | .f = &w1_therm_family_DS18B20, | 189 | .f = &w1_therm_family_DS18B20, |
| 174 | .convert = w1_DS18B20_convert_temp | 190 | .convert = w1_DS18B20_convert_temp, |
| 191 | .precision = w1_DS18B20_precision, | ||
| 192 | .eeprom = w1_therm_eeprom | ||
| 175 | }, | 193 | }, |
| 176 | { | 194 | { |
| 177 | .f = &w1_therm_family_DS28EA00, | 195 | .f = &w1_therm_family_DS28EA00, |
| 178 | .convert = w1_DS18B20_convert_temp | 196 | .convert = w1_DS18B20_convert_temp, |
| 197 | .precision = w1_DS18S20_precision, | ||
| 198 | .eeprom = w1_therm_eeprom | ||
| 179 | }, | 199 | }, |
| 180 | { | 200 | { |
| 181 | .f = &w1_therm_family_DS1825, | 201 | .f = &w1_therm_family_DS1825, |
| 182 | .convert = w1_DS18B20_convert_temp | 202 | .convert = w1_DS18B20_convert_temp, |
| 203 | .precision = w1_DS18S20_precision, | ||
| 204 | .eeprom = w1_therm_eeprom | ||
| 183 | } | 205 | } |
| 184 | }; | 206 | }; |
| 185 | 207 | ||
| 208 | static inline int w1_therm_eeprom(struct device *device) | ||
| 209 | { | ||
| 210 | struct w1_slave *sl = dev_to_w1_slave(device); | ||
| 211 | struct w1_master *dev = sl->master; | ||
| 212 | u8 rom[9], external_power; | ||
| 213 | int ret, max_trying = 10; | ||
| 214 | u8 *family_data = sl->family_data; | ||
| 215 | |||
| 216 | ret = mutex_lock_interruptible(&dev->bus_mutex); | ||
| 217 | if (ret != 0) | ||
| 218 | goto post_unlock; | ||
| 219 | |||
| 220 | if (!sl->family_data) { | ||
| 221 | ret = -ENODEV; | ||
| 222 | goto pre_unlock; | ||
| 223 | } | ||
| 224 | |||
| 225 | /* prevent the slave from going away in sleep */ | ||
| 226 | atomic_inc(THERM_REFCNT(family_data)); | ||
| 227 | memset(rom, 0, sizeof(rom)); | ||
| 228 | |||
| 229 | while (max_trying--) { | ||
| 230 | if (!w1_reset_select_slave(sl)) { | ||
| 231 | unsigned int tm = 10; | ||
| 232 | unsigned long sleep_rem; | ||
| 233 | |||
| 234 | /* check if in parasite mode */ | ||
| 235 | w1_write_8(dev, W1_READ_PSUPPLY); | ||
| 236 | external_power = w1_read_8(dev); | ||
| 237 | |||
| 238 | if (w1_reset_select_slave(sl)) | ||
| 239 | continue; | ||
| 240 | |||
| 241 | /* 10ms strong pullup/delay after the copy command */ | ||
| 242 | if (w1_strong_pullup == 2 || | ||
| 243 | (!external_power && w1_strong_pullup)) | ||
| 244 | w1_next_pullup(dev, tm); | ||
| 245 | |||
| 246 | w1_write_8(dev, W1_COPY_SCRATCHPAD); | ||
| 247 | |||
| 248 | if (external_power) { | ||
| 249 | mutex_unlock(&dev->bus_mutex); | ||
| 250 | |||
| 251 | sleep_rem = msleep_interruptible(tm); | ||
| 252 | if (sleep_rem != 0) { | ||
| 253 | ret = -EINTR; | ||
| 254 | goto post_unlock; | ||
| 255 | } | ||
| 256 | |||
| 257 | ret = mutex_lock_interruptible(&dev->bus_mutex); | ||
| 258 | if (ret != 0) | ||
| 259 | goto post_unlock; | ||
| 260 | } else if (!w1_strong_pullup) { | ||
| 261 | sleep_rem = msleep_interruptible(tm); | ||
| 262 | if (sleep_rem != 0) { | ||
| 263 | ret = -EINTR; | ||
| 264 | goto pre_unlock; | ||
| 265 | } | ||
| 266 | } | ||
| 267 | |||
| 268 | break; | ||
| 269 | } | ||
| 270 | } | ||
| 271 | |||
| 272 | pre_unlock: | ||
| 273 | mutex_unlock(&dev->bus_mutex); | ||
| 274 | |||
| 275 | post_unlock: | ||
| 276 | atomic_dec(THERM_REFCNT(family_data)); | ||
| 277 | return ret; | ||
| 278 | } | ||
| 279 | |||
| 280 | /* DS18S20 does not feature configuration register */ | ||
| 281 | static inline int w1_DS18S20_precision(struct device *device, int val) | ||
| 282 | { | ||
| 283 | return 0; | ||
| 284 | } | ||
| 285 | |||
| 286 | static inline int w1_DS18B20_precision(struct device *device, int val) | ||
| 287 | { | ||
| 288 | struct w1_slave *sl = dev_to_w1_slave(device); | ||
| 289 | struct w1_master *dev = sl->master; | ||
| 290 | u8 rom[9], crc; | ||
| 291 | int ret, max_trying = 10; | ||
| 292 | u8 *family_data = sl->family_data; | ||
| 293 | uint8_t precision_bits; | ||
| 294 | uint8_t mask = 0x60; | ||
| 295 | |||
| 296 | if(val > 12 || val < 9) { | ||
| 297 | pr_warn("Unsupported precision\n"); | ||
| 298 | return -1; | ||
| 299 | } | ||
| 300 | |||
| 301 | ret = mutex_lock_interruptible(&dev->bus_mutex); | ||
| 302 | if (ret != 0) | ||
| 303 | goto post_unlock; | ||
| 304 | |||
| 305 | if (!sl->family_data) { | ||
| 306 | ret = -ENODEV; | ||
| 307 | goto pre_unlock; | ||
| 308 | } | ||
| 309 | |||
| 310 | /* prevent the slave from going away in sleep */ | ||
| 311 | atomic_inc(THERM_REFCNT(family_data)); | ||
| 312 | memset(rom, 0, sizeof(rom)); | ||
| 313 | |||
| 314 | /* translate precision to bitmask (see datasheet page 9) */ | ||
| 315 | switch (val) { | ||
| 316 | case 9: | ||
| 317 | precision_bits = 0x00; | ||
| 318 | break; | ||
| 319 | case 10: | ||
| 320 | precision_bits = 0x20; | ||
| 321 | break; | ||
| 322 | case 11: | ||
| 323 | precision_bits = 0x40; | ||
| 324 | break; | ||
| 325 | case 12: | ||
| 326 | default: | ||
| 327 | precision_bits = 0x60; | ||
| 328 | break; | ||
| 329 | } | ||
| 330 | |||
| 331 | while (max_trying--) { | ||
| 332 | crc = 0; | ||
| 333 | |||
| 334 | if (!w1_reset_select_slave(sl)) { | ||
| 335 | int count = 0; | ||
| 336 | |||
| 337 | /* read values to only alter precision bits */ | ||
| 338 | w1_write_8(dev, W1_READ_SCRATCHPAD); | ||
| 339 | if ((count = w1_read_block(dev, rom, 9)) != 9) | ||
| 340 | dev_warn(device, "w1_read_block() returned %u instead of 9.\n", count); | ||
| 341 | |||
| 342 | crc = w1_calc_crc8(rom, 8); | ||
| 343 | if (rom[8] == crc) { | ||
| 344 | rom[4] = (rom[4] & ~mask) | (precision_bits & mask); | ||
| 345 | |||
| 346 | if (!w1_reset_select_slave(sl)) { | ||
| 347 | w1_write_8(dev, W1_WRITE_SCRATCHPAD); | ||
| 348 | w1_write_8(dev, rom[2]); | ||
| 349 | w1_write_8(dev, rom[3]); | ||
| 350 | w1_write_8(dev, rom[4]); | ||
| 351 | |||
| 352 | break; | ||
| 353 | } | ||
| 354 | } | ||
| 355 | } | ||
| 356 | } | ||
| 357 | |||
| 358 | pre_unlock: | ||
| 359 | mutex_unlock(&dev->bus_mutex); | ||
| 360 | |||
| 361 | post_unlock: | ||
| 362 | atomic_dec(THERM_REFCNT(family_data)); | ||
| 363 | return ret; | ||
| 364 | } | ||
| 365 | |||
| 186 | static inline int w1_DS18B20_convert_temp(u8 rom[9]) | 366 | static inline int w1_DS18B20_convert_temp(u8 rom[9]) |
| 187 | { | 367 | { |
| 188 | s16 t = le16_to_cpup((__le16 *)rom); | 368 | s16 t = le16_to_cpup((__le16 *)rom); |
| @@ -220,6 +400,30 @@ static inline int w1_convert_temp(u8 rom[9], u8 fid) | |||
| 220 | return 0; | 400 | return 0; |
| 221 | } | 401 | } |
| 222 | 402 | ||
| 403 | static ssize_t w1_slave_store(struct device *device, | ||
| 404 | struct device_attribute *attr, const char *buf, | ||
| 405 | size_t size) | ||
| 406 | { | ||
| 407 | int val, ret; | ||
| 408 | struct w1_slave *sl = dev_to_w1_slave(device); | ||
| 409 | int i; | ||
| 410 | |||
| 411 | ret = kstrtoint(buf, 0, &val); | ||
| 412 | if (ret) | ||
| 413 | return ret; | ||
| 414 | |||
| 415 | for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) { | ||
| 416 | if (w1_therm_families[i].f->fid == sl->family->fid) { | ||
| 417 | /* zero value indicates to write current configuration to eeprom */ | ||
| 418 | if (0 == val) | ||
| 419 | ret = w1_therm_families[i].eeprom(device); | ||
| 420 | else | ||
| 421 | ret = w1_therm_families[i].precision(device, val); | ||
| 422 | break; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | return ret ? : size; | ||
| 426 | } | ||
| 223 | 427 | ||
| 224 | static ssize_t w1_slave_show(struct device *device, | 428 | static ssize_t w1_slave_show(struct device *device, |
| 225 | struct device_attribute *attr, char *buf) | 429 | struct device_attribute *attr, char *buf) |
| @@ -311,7 +515,7 @@ static ssize_t w1_slave_show(struct device *device, | |||
| 311 | for (i = 0; i < 9; ++i) | 515 | for (i = 0; i < 9; ++i) |
| 312 | c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", rom[i]); | 516 | c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", rom[i]); |
| 313 | c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n", | 517 | c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n", |
| 314 | crc, (verdict) ? "YES" : "NO"); | 518 | crc, (verdict) ? "YES" : "NO"); |
| 315 | if (verdict) | 519 | if (verdict) |
| 316 | memcpy(family_data, rom, sizeof(rom)); | 520 | memcpy(family_data, rom, sizeof(rom)); |
| 317 | else | 521 | else |
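Making the w1_slave attribute writable gives the thermal slaves a simple control path: writing 0 asks the driver to copy the current scratchpad to EEPROM, while 9 to 12 selects the conversion resolution, which w1_DS18B20_precision() applies by rewriting only bits 6:5 of the configuration byte (scratchpad byte 4). A small sketch of that bit manipulation in isolation, with the bit values taken from the mapping in the patch:

    #include <stdint.h>

    /* DS18B20 configuration byte: bits 6:5 select 9..12-bit resolution. */
    static uint8_t set_resolution(uint8_t cfg, int bits)
    {
            const uint8_t mask = 0x60;
            uint8_t val;

            switch (bits) {
            case 9:
                    val = 0x00;
                    break;
            case 10:
                    val = 0x20;
                    break;
            case 11:
                    val = 0x40;
                    break;
            default:
                    val = 0x60;     /* 12 bit */
                    break;
            }
            /* Only bits 6:5 change; the rest of the byte is preserved. */
            return (cfg & ~mask) | val;
    }

From user space the same path is exercised with, for example, echo 10 > .../w1_slave to request 10-bit conversions, or echo 0 > .../w1_slave to persist the current settings to EEPROM.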
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 89a784751738..bb34362e930a 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c | |||
| @@ -335,7 +335,7 @@ static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev, | |||
| 335 | int tmp; | 335 | int tmp; |
| 336 | struct w1_master *md = dev_to_w1_master(dev); | 336 | struct w1_master *md = dev_to_w1_master(dev); |
| 337 | 337 | ||
| 338 | if (kstrtoint(buf, 0, &tmp) == -EINVAL || tmp < 1) | 338 | if (kstrtoint(buf, 0, &tmp) || tmp < 1) |
| 339 | return -EINVAL; | 339 | return -EINVAL; |
| 340 | 340 | ||
| 341 | mutex_lock(&md->mutex); | 341 | mutex_lock(&md->mutex); |
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index 56a49ba41d83..129895f562b0 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h | |||
| @@ -58,6 +58,8 @@ struct w1_reg_num | |||
| 58 | #define W1_ALARM_SEARCH 0xEC | 58 | #define W1_ALARM_SEARCH 0xEC |
| 59 | #define W1_CONVERT_TEMP 0x44 | 59 | #define W1_CONVERT_TEMP 0x44 |
| 60 | #define W1_SKIP_ROM 0xCC | 60 | #define W1_SKIP_ROM 0xCC |
| 61 | #define W1_COPY_SCRATCHPAD 0x48 | ||
| 62 | #define W1_WRITE_SCRATCHPAD 0x4E | ||
| 61 | #define W1_READ_SCRATCHPAD 0xBE | 63 | #define W1_READ_SCRATCHPAD 0xBE |
| 62 | #define W1_READ_ROM 0x33 | 64 | #define W1_READ_ROM 0x33 |
| 63 | #define W1_READ_PSUPPLY 0xB4 | 65 | #define W1_READ_PSUPPLY 0xB4 |
diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h new file mode 100644 index 000000000000..a978bb85599a --- /dev/null +++ b/include/linux/coresight-stm.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef __LINUX_CORESIGHT_STM_H_ | ||
| 2 | #define __LINUX_CORESIGHT_STM_H_ | ||
| 3 | |||
| 4 | #include <uapi/linux/coresight-stm.h> | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index aa0fadce9308..b10954a66939 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -126,6 +126,8 @@ struct hv_ring_buffer_info { | |||
| 126 | 126 | ||
| 127 | u32 ring_datasize; /* < ring_size */ | 127 | u32 ring_datasize; /* < ring_size */ |
| 128 | u32 ring_data_startoffset; | 128 | u32 ring_data_startoffset; |
| 129 | u32 priv_write_index; | ||
| 130 | u32 priv_read_index; | ||
| 129 | }; | 131 | }; |
| 130 | 132 | ||
| 131 | /* | 133 | /* |
| @@ -151,6 +153,33 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, | |||
| 151 | *read = dsize - *write; | 153 | *read = dsize - *write; |
| 152 | } | 154 | } |
| 153 | 155 | ||
| 156 | static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi) | ||
| 157 | { | ||
| 158 | u32 read_loc, write_loc, dsize, read; | ||
| 159 | |||
| 160 | dsize = rbi->ring_datasize; | ||
| 161 | read_loc = rbi->ring_buffer->read_index; | ||
| 162 | write_loc = READ_ONCE(rbi->ring_buffer->write_index); | ||
| 163 | |||
| 164 | read = write_loc >= read_loc ? (write_loc - read_loc) : | ||
| 165 | (dsize - read_loc) + write_loc; | ||
| 166 | |||
| 167 | return read; | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi) | ||
| 171 | { | ||
| 172 | u32 read_loc, write_loc, dsize, write; | ||
| 173 | |||
| 174 | dsize = rbi->ring_datasize; | ||
| 175 | read_loc = READ_ONCE(rbi->ring_buffer->read_index); | ||
| 176 | write_loc = rbi->ring_buffer->write_index; | ||
| 177 | |||
| 178 | write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : | ||
| 179 | read_loc - write_loc; | ||
| 180 | return write; | ||
| 181 | } | ||
| 182 | |||
| 154 | /* | 183 | /* |
| 155 | * VMBUS version is 32 bit entity broken up into | 184 | * VMBUS version is 32 bit entity broken up into |
| 156 | * two 16 bit quantities: major_number. minor_number. | 185 | * two 16 bit quantities: major_number. minor_number. |
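hv_get_bytes_to_read() and hv_get_bytes_to_write() are plain circular-buffer arithmetic on the read and write indices. A worked example with hypothetical index values:

    dsize = 4096, read_index = 3000, write_index = 100   (write has wrapped)
    bytes to read  = (dsize - read_index) + write_index = 1096 + 100 = 1196
    bytes to write = read_index - write_index           = 3000 - 100 = 2900
    bytes to read + bytes to write = 4096 = dsize

READ_ONCE() is applied to the index owned by the other side so the compiler cannot tear or re-load that value in the middle of the calculation.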
| @@ -1091,7 +1120,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, | |||
| 1091 | resource_size_t min, resource_size_t max, | 1120 | resource_size_t min, resource_size_t max, |
| 1092 | resource_size_t size, resource_size_t align, | 1121 | resource_size_t size, resource_size_t align, |
| 1093 | bool fb_overlap_ok); | 1122 | bool fb_overlap_ok); |
| 1094 | 1123 | void vmbus_free_mmio(resource_size_t start, resource_size_t size); | |
| 1095 | int vmbus_cpu_number_to_vp_number(int cpu_number); | 1124 | int vmbus_cpu_number_to_vp_number(int cpu_number); |
| 1096 | u64 hv_do_hypercall(u64 control, void *input, void *output); | 1125 | u64 hv_do_hypercall(u64 control, void *input, void *output); |
| 1097 | 1126 | ||
| @@ -1338,4 +1367,143 @@ extern __u32 vmbus_proto_version; | |||
| 1338 | 1367 | ||
| 1339 | int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, | 1368 | int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, |
| 1340 | const uuid_le *shv_host_servie_id); | 1369 | const uuid_le *shv_host_servie_id); |
| 1370 | void vmbus_set_event(struct vmbus_channel *channel); | ||
| 1371 | |||
| 1372 | /* Get the start of the ring buffer. */ | ||
| 1373 | static inline void * | ||
| 1374 | hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) | ||
| 1375 | { | ||
| 1376 | return (void *)ring_info->ring_buffer->buffer; | ||
| 1377 | } | ||
| 1378 | |||
| 1379 | /* | ||
| 1380 | * To optimize the flow management on the send-side, | ||
| 1381 | * when the sender is blocked because of lack of | ||
| 1382 | * sufficient space in the ring buffer, potentially the | ||
| 1383 | * consumer of the ring buffer can signal the producer. | ||
| 1384 | * This is controlled by the following parameters: | ||
| 1385 | * | ||
| 1386 | * 1. pending_send_sz: This is the size in bytes that the | ||
| 1387 | * producer is trying to send. | ||
| 1388 | * 2. The feature bit feat_pending_send_sz is set to indicate if | ||
| 1389 | * the consumer of the ring will signal when the ring | ||
| 1390 | * state transitions from being full to a state where | ||
| 1391 | * there is room for the producer to send the pending packet. | ||
| 1392 | */ | ||
| 1393 | |||
| 1394 | static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi) | ||
| 1395 | { | ||
| 1396 | u32 cur_write_sz; | ||
| 1397 | u32 pending_sz; | ||
| 1398 | |||
| 1399 | /* | ||
| 1400 | * Issue a full memory barrier before making the signaling decision. | ||
| 1401 | * Here is the reason for having this barrier: | ||
| 1402 | * If the reading of the pend_sz (in this function) | ||
| 1403 | * were to be reordered and read before we commit the new read | ||
| 1404 | * index (in the calling function) we could | ||
| 1405 | * have a problem. If the host were to set the pending_sz after we | ||
| 1406 | * have sampled pending_sz and go to sleep before we commit the | ||
| 1407 | * read index, we could miss sending the interrupt. Issue a full | ||
| 1408 | * memory barrier to address this. | ||
| 1409 | */ | ||
| 1410 | virt_mb(); | ||
| 1411 | |||
| 1412 | pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); | ||
| 1413 | /* If the other end is not blocked on write don't bother. */ | ||
| 1414 | if (pending_sz == 0) | ||
| 1415 | return false; | ||
| 1416 | |||
| 1417 | cur_write_sz = hv_get_bytes_to_write(rbi); | ||
| 1418 | |||
| 1419 | if (cur_write_sz >= pending_sz) | ||
| 1420 | return true; | ||
| 1421 | |||
| 1422 | return false; | ||
| 1423 | } | ||
| 1424 | |||
| 1425 | /* | ||
| 1426 | * An API to support in-place processing of incoming VMBUS packets. | ||
| 1427 | */ | ||
| 1428 | #define VMBUS_PKT_TRAILER 8 | ||
| 1429 | |||
| 1430 | static inline struct vmpacket_descriptor * | ||
| 1431 | get_next_pkt_raw(struct vmbus_channel *channel) | ||
| 1432 | { | ||
| 1433 | struct hv_ring_buffer_info *ring_info = &channel->inbound; | ||
| 1434 | u32 read_loc = ring_info->priv_read_index; | ||
| 1435 | void *ring_buffer = hv_get_ring_buffer(ring_info); | ||
| 1436 | struct vmpacket_descriptor *cur_desc; | ||
| 1437 | u32 packetlen; | ||
| 1438 | u32 dsize = ring_info->ring_datasize; | ||
| 1439 | u32 delta = read_loc - ring_info->ring_buffer->read_index; | ||
| 1440 | u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta); | ||
| 1441 | |||
| 1442 | if (bytes_avail_toread < sizeof(struct vmpacket_descriptor)) | ||
| 1443 | return NULL; | ||
| 1444 | |||
| 1445 | if ((read_loc + sizeof(*cur_desc)) > dsize) | ||
| 1446 | return NULL; | ||
| 1447 | |||
| 1448 | cur_desc = ring_buffer + read_loc; | ||
| 1449 | packetlen = cur_desc->len8 << 3; | ||
| 1450 | |||
| 1451 | /* | ||
| 1452 | * If the packet under consideration is wrapping around, | ||
| 1453 | * return failure. | ||
| 1454 | */ | ||
| 1455 | if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1)) | ||
| 1456 | return NULL; | ||
| 1457 | |||
| 1458 | return cur_desc; | ||
| 1459 | } | ||
| 1460 | |||
| 1461 | /* | ||
| 1462 | * A helper function to step through packets "in-place" | ||
| 1463 | * This API is to be called after each successful call | ||
| 1464 | * get_next_pkt_raw(). | ||
| 1465 | */ | ||
| 1466 | static inline void put_pkt_raw(struct vmbus_channel *channel, | ||
| 1467 | struct vmpacket_descriptor *desc) | ||
| 1468 | { | ||
| 1469 | struct hv_ring_buffer_info *ring_info = &channel->inbound; | ||
| 1470 | u32 read_loc = ring_info->priv_read_index; | ||
| 1471 | u32 packetlen = desc->len8 << 3; | ||
| 1472 | u32 dsize = ring_info->ring_datasize; | ||
| 1473 | |||
| 1474 | if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize) | ||
| 1475 | BUG(); | ||
| 1476 | /* | ||
| 1477 | * Include the packet trailer. | ||
| 1478 | */ | ||
| 1479 | ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER; | ||
| 1480 | } | ||
| 1481 | |||
| 1482 | /* | ||
| 1483 | * This call commits the read index and potentially signals the host. | ||
| 1484 | * Here is the pattern for using the "in-place" consumption APIs: | ||
| 1485 | * | ||
| 1486 | * while (get_next_pkt_raw() { | ||
| 1487 | * process the packet "in-place"; | ||
| 1488 | * put_pkt_raw(); | ||
| 1489 | * } | ||
| 1490 | * if (packets processed in place) | ||
| 1491 | * commit_rd_index(); | ||
| 1492 | */ | ||
| 1493 | static inline void commit_rd_index(struct vmbus_channel *channel) | ||
| 1494 | { | ||
| 1495 | struct hv_ring_buffer_info *ring_info = &channel->inbound; | ||
| 1496 | /* | ||
| 1497 | * Make sure all reads are done before we update the read index since | ||
| 1498 | * the writer may start writing to the read area once the read index | ||
| 1499 | * is updated. | ||
| 1500 | */ | ||
| 1501 | virt_rmb(); | ||
| 1502 | ring_info->ring_buffer->read_index = ring_info->priv_read_index; | ||
| 1503 | |||
| 1504 | if (hv_need_to_signal_on_read(ring_info)) | ||
| 1505 | vmbus_set_event(channel); | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | |||
| 1341 | #endif /* _HYPERV_H */ | 1509 | #endif /* _HYPERV_H */ |
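The comment above commit_rd_index() describes the intended calling pattern for the in-place consumption helpers. A minimal sketch of a channel callback built on that pattern; my_channel_callback() and process_payload() are hypothetical names, not part of the patch:

    /* Hypothetical VMBus channel callback using the in-place API. */
    static void my_channel_callback(void *context)
    {
            struct vmbus_channel *channel = context;
            struct vmpacket_descriptor *desc;
            bool processed = false;

            while ((desc = get_next_pkt_raw(channel)) != NULL) {
                    /* offset8/len8 are in units of 8 bytes. */
                    void *data = (char *)desc + (desc->offset8 << 3);
                    u32 datalen = (desc->len8 - desc->offset8) << 3;

                    process_payload(data, datalen);  /* hypothetical helper */

                    /* Step past this packet (including the trailer). */
                    put_pkt_raw(channel, desc);
                    processed = true;
            }

            if (processed)
                    commit_rd_index(channel);  /* publish index, maybe signal host */
    }

Note that get_next_pkt_raw() deliberately returns NULL for packets that wrap around the end of the ring, so the loop above simply stops there.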
diff --git a/include/linux/mcb.h b/include/linux/mcb.h index ed06e15a36aa..ead13d233a97 100644 --- a/include/linux/mcb.h +++ b/include/linux/mcb.h | |||
| @@ -15,22 +15,30 @@ | |||
| 15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
| 16 | #include <linux/irqreturn.h> | 16 | #include <linux/irqreturn.h> |
| 17 | 17 | ||
| 18 | #define CHAMELEON_FILENAME_LEN 12 | ||
| 19 | |||
| 18 | struct mcb_driver; | 20 | struct mcb_driver; |
| 19 | struct mcb_device; | 21 | struct mcb_device; |
| 20 | 22 | ||
| 21 | /** | 23 | /** |
| 22 | * struct mcb_bus - MEN Chameleon Bus | 24 | * struct mcb_bus - MEN Chameleon Bus |
| 23 | * | 25 | * |
| 24 | * @dev: pointer to carrier device | 26 | * @dev: bus device |
| 25 | * @children: the child busses | 27 | * @carrier: pointer to carrier device |
| 26 | * @bus_nr: mcb bus number | 28 | * @bus_nr: mcb bus number |
| 27 | * @get_irq: callback to get IRQ number | 29 | * @get_irq: callback to get IRQ number |
| 30 | * @revision: the FPGA's revision number | ||
| 31 | * @model: the FPGA's model number | ||
| 32 | * @filename: the FPGA's name | ||
| 28 | */ | 33 | */ |
| 29 | struct mcb_bus { | 34 | struct mcb_bus { |
| 30 | struct list_head children; | ||
| 31 | struct device dev; | 35 | struct device dev; |
| 32 | struct device *carrier; | 36 | struct device *carrier; |
| 33 | int bus_nr; | 37 | int bus_nr; |
| 38 | u8 revision; | ||
| 39 | char model; | ||
| 40 | u8 minor; | ||
| 41 | char name[CHAMELEON_FILENAME_LEN + 1]; | ||
| 34 | int (*get_irq)(struct mcb_device *dev); | 42 | int (*get_irq)(struct mcb_device *dev); |
| 35 | }; | 43 | }; |
| 36 | #define to_mcb_bus(b) container_of((b), struct mcb_bus, dev) | 44 | #define to_mcb_bus(b) container_of((b), struct mcb_bus, dev) |
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index a4fcc90b0f20..cd93416d762e 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h | |||
| @@ -14,6 +14,10 @@ | |||
| 14 | 14 | ||
| 15 | struct nvmem_device; | 15 | struct nvmem_device; |
| 16 | struct nvmem_cell_info; | 16 | struct nvmem_cell_info; |
| 17 | typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, | ||
| 18 | void *val, size_t bytes); | ||
| 19 | typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, | ||
| 20 | void *val, size_t bytes); | ||
| 17 | 21 | ||
| 18 | struct nvmem_config { | 22 | struct nvmem_config { |
| 19 | struct device *dev; | 23 | struct device *dev; |
| @@ -24,6 +28,12 @@ struct nvmem_config { | |||
| 24 | int ncells; | 28 | int ncells; |
| 25 | bool read_only; | 29 | bool read_only; |
| 26 | bool root_only; | 30 | bool root_only; |
| 31 | nvmem_reg_read_t reg_read; | ||
| 32 | nvmem_reg_write_t reg_write; | ||
| 33 | int size; | ||
| 34 | int word_size; | ||
| 35 | int stride; | ||
| 36 | void *priv; | ||
| 27 | /* To be only used by old driver/misc/eeprom drivers */ | 37 | /* To be only used by old driver/misc/eeprom drivers */ |
| 28 | bool compat; | 38 | bool compat; |
| 29 | struct device *base_dev; | 39 | struct device *base_dev; |
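With reg_read/reg_write in nvmem_config, a provider can register without building a regmap: it supplies the raw accessors plus size, word_size and stride, and hands its private data in through priv. A hedged sketch of a provider filling in the new fields; the accessors, the 256-byte size and the my_ prefix are illustrative only:

    #include <linux/nvmem-provider.h>
    #include <linux/string.h>

    static int my_eeprom_read(void *priv, unsigned int offset,
                              void *val, size_t bytes)
    {
            /* Stand-in for the real bus transfer into val. */
            memset(val, 0xff, bytes);
            return 0;
    }

    static int my_eeprom_write(void *priv, unsigned int offset,
                               void *val, size_t bytes)
    {
            /* Stand-in for the real bus transfer from val. */
            return 0;
    }

    static struct nvmem_config my_nvmem_config = {
            .read_only = false,
            .reg_read  = my_eeprom_read,
            .reg_write = my_eeprom_write,
            .size      = 256,   /* illustrative */
            .word_size = 1,
            .stride    = 1,
            /* .dev and .priv are filled in at probe time,
             * before calling nvmem_register(&my_nvmem_config). */
    };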
diff --git a/include/linux/stm.h b/include/linux/stm.h index 1a79ed8e43da..8369d8a8cabd 100644 --- a/include/linux/stm.h +++ b/include/linux/stm.h | |||
| @@ -50,6 +50,8 @@ struct stm_device; | |||
| 50 | * @sw_end: last STP master available to software | 50 | * @sw_end: last STP master available to software |
| 51 | * @sw_nchannels: number of STP channels per master | 51 | * @sw_nchannels: number of STP channels per master |
| 52 | * @sw_mmiosz: size of one channel's IO space, for mmap, optional | 52 | * @sw_mmiosz: size of one channel's IO space, for mmap, optional |
| 53 | * @hw_override: masters in the STP stream will not match the ones | ||
| 54 | * assigned by software, but are up to the STM hardware | ||
| 53 | * @packet: callback that sends an STP packet | 55 | * @packet: callback that sends an STP packet |
| 54 | * @mmio_addr: mmap callback, optional | 56 | * @mmio_addr: mmap callback, optional |
| 55 | * @link: called when a new stm_source gets linked to us, optional | 57 | * @link: called when a new stm_source gets linked to us, optional |
| @@ -85,6 +87,7 @@ struct stm_data { | |||
| 85 | unsigned int sw_end; | 87 | unsigned int sw_end; |
| 86 | unsigned int sw_nchannels; | 88 | unsigned int sw_nchannels; |
| 87 | unsigned int sw_mmiosz; | 89 | unsigned int sw_mmiosz; |
| 90 | unsigned int hw_override; | ||
| 88 | ssize_t (*packet)(struct stm_data *, unsigned int, | 91 | ssize_t (*packet)(struct stm_data *, unsigned int, |
| 89 | unsigned int, unsigned int, | 92 | unsigned int, unsigned int, |
| 90 | unsigned int, unsigned int, | 93 | unsigned int, unsigned int, |
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h new file mode 100644 index 000000000000..7e4272cf1fb2 --- /dev/null +++ b/include/uapi/linux/coresight-stm.h | |||
| @@ -0,0 +1,21 @@ | |||
| 1 | #ifndef __UAPI_CORESIGHT_STM_H_ | ||
| 2 | #define __UAPI_CORESIGHT_STM_H_ | ||
| 3 | |||
| 4 | #define STM_FLAG_TIMESTAMPED BIT(3) | ||
| 5 | #define STM_FLAG_GUARANTEED BIT(7) | ||
| 6 | |||
| 7 | /* | ||
| 8 | * The CoreSight STM supports guaranteed and invariant timing | ||
| 9 | * transactions. Guaranteed transactions are guaranteed to be | ||
| 10 | * traced; this might involve stalling the bus or system to | ||
| 11 | * ensure the transaction is accepted by the STM. While invariant | ||
| 12 | * timing transactions are not guaranteed to be traced, they | ||
| 13 | * will take an invariant amount of time regardless of the | ||
| 14 | * state of the STM. | ||
| 15 | */ | ||
| 16 | enum { | ||
| 17 | STM_OPTION_GUARANTEED = 0, | ||
| 18 | STM_OPTION_INVARIANT, | ||
| 19 | }; | ||
| 20 | |||
| 21 | #endif | ||
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index d8f6c094cce5..df643f60bb41 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py | |||
| @@ -89,7 +89,7 @@ def parse_options(): | |||
| 89 | 89 | ||
| 90 | if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff): | 90 | if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff): |
| 91 | sys.exit("Please specify valid input in the following format: " | 91 | sys.exit("Please specify valid input in the following format: " |
| 92 | "\'commmit1..commit2\'") | 92 | "\'commit1..commit2\'") |
| 93 | 93 | ||
| 94 | if opts.commit or opts.diff: | 94 | if opts.commit or opts.diff: |
| 95 | if not opts.force and tree_is_dirty(): | 95 | if not opts.force and tree_is_dirty(): |
diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus index 162a3784d80e..e8fecd61871f 100644 --- a/tools/hv/lsvmbus +++ b/tools/hv/lsvmbus | |||
| @@ -35,6 +35,7 @@ vmbus_dev_dict = { | |||
| 35 | '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller', | 35 | '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller', |
| 36 | '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter', | 36 | '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter', |
| 37 | '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter', | 37 | '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter', |
| 38 | '{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through', | ||
| 38 | '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]', | 39 | '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]', |
| 39 | '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]', | 40 | '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]', |
| 40 | '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]', | 41 | '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]', |
