author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-11 20:34:00 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-11 20:34:00 -0400
commit    fd9879b9bb3258ebc27a4cc6d2d29f528f71901f (patch)
tree      48b68994f5e8083aafe116533e8143cb2bf30c85
parent    81ae31d78239318610d7c2acb3e2610d622a5aa4 (diff)
parent    d53ba6b3bba33432cc37b7101a86f8f3392c46e7 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc updates from Michael Ellerman:
"Here's a first pull request for powerpc updates for 3.18.
The bulk of the additions are for the "cxl" driver, for IBM's Coherent
Accelerator Processor Interface (CAPI). Most of it's in drivers/misc,
which Greg & Arnd maintain; Greg said he was happy for us to take it
through our tree.
There's the usual minor cleanups and fixes, including a bit of noise
in drivers from some of those. A bunch of updates to our EEH code,
which has been getting more testing. Several nice speedups from
Anton, including 20% in clear_page().
And a bunch of updates for freescale from Scott"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (130 commits)
cxl: Fix afu_read() not doing finish_wait() on signal or non-blocking
cxl: Add documentation for userspace APIs
cxl: Add driver to Kbuild and Makefiles
cxl: Add userspace header file
cxl: Driver code for powernv PCIe based cards for userspace access
cxl: Add base builtin support
powerpc/mm: Add hooks for cxl
powerpc/opal: Add PHB to cxl mode call
powerpc/mm: Add new hash_page_mm()
powerpc/powerpc: Add new PCIe functions for allocating cxl interrupts
cxl: Add new header for call backs and structs
powerpc/powernv: Split out set MSI IRQ chip code
powerpc/mm: Export mmu_kernel_ssize and mmu_linear_psize
powerpc/msi: Improve IRQ bitmap allocator
powerpc/cell: Make spu_flush_all_slbs() generic
powerpc/cell: Move data segment faulting code out of cell platform
powerpc/cell: Move spu_handle_mm_fault() out of cell platform
powerpc/pseries: Use new defines when calling H_SET_MODE
powerpc: Update contact info in Documentation files
powerpc/perf/hv-24x7: Simplify catalog_read()
...
236 files changed, 8633 insertions, 1566 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 index e78ee798d7bd..32f3f5f8bba2 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 | |||
@@ -1,6 +1,6 @@ | |||
1 | What: /sys/bus/event_source/devices/hv_24x7/interface/catalog | 1 | What: /sys/bus/event_source/devices/hv_24x7/interface/catalog |
2 | Date: February 2014 | 2 | Date: February 2014 |
3 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 3 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
4 | Description: | 4 | Description: |
5 | Provides access to the binary "24x7 catalog" provided by the | 5 | Provides access to the binary "24x7 catalog" provided by the |
6 | hypervisor on POWER7 and 8 systems. This catalog lists events | 6 | hypervisor on POWER7 and 8 systems. This catalog lists events |
@@ -10,14 +10,14 @@ Description: | |||
10 | 10 | ||
11 | What: /sys/bus/event_source/devices/hv_24x7/interface/catalog_length | 11 | What: /sys/bus/event_source/devices/hv_24x7/interface/catalog_length |
12 | Date: February 2014 | 12 | Date: February 2014 |
13 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 13 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
14 | Description: | 14 | Description: |
15 | A number equal to the length in bytes of the catalog. This is | 15 | A number equal to the length in bytes of the catalog. This is |
16 | also extractable from the provided binary "catalog" sysfs entry. | 16 | also extractable from the provided binary "catalog" sysfs entry. |
17 | 17 | ||
18 | What: /sys/bus/event_source/devices/hv_24x7/interface/catalog_version | 18 | What: /sys/bus/event_source/devices/hv_24x7/interface/catalog_version |
19 | Date: February 2014 | 19 | Date: February 2014 |
20 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 20 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
21 | Description: | 21 | Description: |
22 | Exposes the "version" field of the 24x7 catalog. This is also | 22 | Exposes the "version" field of the 24x7 catalog. This is also |
23 | extractable from the provided binary "catalog" sysfs entry. | 23 | extractable from the provided binary "catalog" sysfs entry. |
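
As an illustration of how userspace might consume the catalog and catalog_length attributes above, here is a minimal C sketch (assumptions: the documented sysfs paths, trimmed error handling, and that the caller only wants the raw catalog in memory):

    /*
     * Sketch: read the binary 24x7 catalog using the two attributes above.
     * Paths are as documented; error handling and cleanup are trimmed.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define HV24X7 "/sys/bus/event_source/devices/hv_24x7/interface"

    int main(void)
    {
            FILE *f = fopen(HV24X7 "/catalog_length", "r");
            long len = 0;
            char *buf;

            if (!f || fscanf(f, "%ld", &len) != 1 || len <= 0)
                    return 1;
            fclose(f);

            buf = malloc(len);
            f = fopen(HV24X7 "/catalog", "rb");
            if (!buf || !f || fread(buf, 1, len, f) != (size_t)len)
                    return 1;

            /* buf now holds the hypervisor-provided catalog blob */
            free(buf);
            fclose(f);
            return 0;
    }
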
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci index 3fa58c23f13b..3ca4e554d2f9 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_gpci | |||
@@ -1,6 +1,6 @@ | |||
1 | What: /sys/bus/event_source/devices/hv_gpci/interface/collect_privileged | 1 | What: /sys/bus/event_source/devices/hv_gpci/interface/collect_privileged |
2 | Date: February 2014 | 2 | Date: February 2014 |
3 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 3 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
4 | Description: | 4 | Description: |
5 | '0' if the hypervisor is configured to forbid access to event | 5 | '0' if the hypervisor is configured to forbid access to event |
6 | counters being accumulated by other guests and to physical | 6 | counters being accumulated by other guests and to physical |
@@ -9,35 +9,35 @@ Description: | |||
9 | 9 | ||
10 | What: /sys/bus/event_source/devices/hv_gpci/interface/ga | 10 | What: /sys/bus/event_source/devices/hv_gpci/interface/ga |
11 | Date: February 2014 | 11 | Date: February 2014 |
12 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 12 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
13 | Description: | 13 | Description: |
14 | 0 or 1. Indicates whether we have access to "GA" events (listed | 14 | 0 or 1. Indicates whether we have access to "GA" events (listed |
15 | in arch/powerpc/perf/hv-gpci.h). | 15 | in arch/powerpc/perf/hv-gpci.h). |
16 | 16 | ||
17 | What: /sys/bus/event_source/devices/hv_gpci/interface/expanded | 17 | What: /sys/bus/event_source/devices/hv_gpci/interface/expanded |
18 | Date: February 2014 | 18 | Date: February 2014 |
19 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 19 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
20 | Description: | 20 | Description: |
21 | 0 or 1. Indicates whether we have access to "EXPANDED" events (listed | 21 | 0 or 1. Indicates whether we have access to "EXPANDED" events (listed |
22 | in arch/powerpc/perf/hv-gpci.h). | 22 | in arch/powerpc/perf/hv-gpci.h). |
23 | 23 | ||
24 | What: /sys/bus/event_source/devices/hv_gpci/interface/lab | 24 | What: /sys/bus/event_source/devices/hv_gpci/interface/lab |
25 | Date: February 2014 | 25 | Date: February 2014 |
26 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 26 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
27 | Description: | 27 | Description: |
28 | 0 or 1. Indicates whether we have access to "LAB" events (listed | 28 | 0 or 1. Indicates whether we have access to "LAB" events (listed |
29 | in arch/powerpc/perf/hv-gpci.h). | 29 | in arch/powerpc/perf/hv-gpci.h). |
30 | 30 | ||
31 | What: /sys/bus/event_source/devices/hv_gpci/interface/version | 31 | What: /sys/bus/event_source/devices/hv_gpci/interface/version |
32 | Date: February 2014 | 32 | Date: February 2014 |
33 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 33 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
34 | Description: | 34 | Description: |
35 | A number indicating the version of the gpci interface that the | 35 | A number indicating the version of the gpci interface that the |
36 | hypervisor reports supporting. | 36 | hypervisor reports supporting. |
37 | 37 | ||
38 | What: /sys/bus/event_source/devices/hv_gpci/interface/kernel_version | 38 | What: /sys/bus/event_source/devices/hv_gpci/interface/kernel_version |
39 | Date: February 2014 | 39 | Date: February 2014 |
40 | Contact: Cody P Schafer <cody@linux.vnet.ibm.com> | 40 | Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org> |
41 | Description: | 41 | Description: |
42 | A number indicating the latest version of the gpci interface | 42 | A number indicating the latest version of the gpci interface |
43 | that the kernel is aware of. | 43 | that the kernel is aware of. |
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl new file mode 100644 index 000000000000..554405ec1955 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-cxl | |||
@@ -0,0 +1,129 @@ | |||
1 | Slave contexts (eg. /sys/class/cxl/afu0.0s): | ||
2 | |||
3 | What: /sys/class/cxl/<afu>/irqs_max | ||
4 | Date: September 2014 | ||
5 | Contact: linuxppc-dev@lists.ozlabs.org | ||
6 | Description: read/write | ||
7 | Decimal value of maximum number of interrupts that can be | ||
8 | requested by userspace. The default on probe is the maximum | ||
9 | that hardware can support (eg. 2037). Write values will limit | ||
10 | userspace applications to that many userspace interrupts. Must | ||
11 | be >= irqs_min. | ||
12 | |||
13 | What: /sys/class/cxl/<afu>/irqs_min | ||
14 | Date: September 2014 | ||
15 | Contact: linuxppc-dev@lists.ozlabs.org | ||
16 | Description: read only | ||
17 | Decimal value of the minimum number of interrupts that | ||
18 | userspace must request on a CXL_START_WORK ioctl. Userspace may | ||
19 | omit the num_interrupts field in the START_WORK IOCTL to get | ||
20 | this minimum automatically. | ||
21 | |||
22 | What: /sys/class/cxl/<afu>/mmio_size | ||
23 | Date: September 2014 | ||
24 | Contact: linuxppc-dev@lists.ozlabs.org | ||
25 | Description: read only | ||
26 | Decimal value of the size of the MMIO space that may be mmaped | ||
27 | by userspace. | ||
28 | |||
29 | What: /sys/class/cxl/<afu>/modes_supported | ||
30 | Date: September 2014 | ||
31 | Contact: linuxppc-dev@lists.ozlabs.org | ||
32 | Description: read only | ||
33 | List of the modes this AFU supports. One per line. | ||
34 | Valid entries are: "dedicated_process" and "afu_directed" | ||
35 | |||
36 | What: /sys/class/cxl/<afu>/mode | ||
37 | Date: September 2014 | ||
38 | Contact: linuxppc-dev@lists.ozlabs.org | ||
39 | Description: read/write | ||
40 | The current mode the AFU is using. Will be one of the modes | ||
41 | given in modes_supported. Writing will change the mode | ||
42 | provided that no user contexts are attached. | ||
43 | |||
44 | |||
45 | What: /sys/class/cxl/<afu>/prefault_mode | ||
46 | Date: September 2014 | ||
47 | Contact: linuxppc-dev@lists.ozlabs.org | ||
48 | Description: read/write | ||
49 | Set the mode for prefaulting in segments into the segment table | ||
50 | when performing the START_WORK ioctl. Possible values: | ||
51 | none: No prefaulting (default) | ||
52 | work_element_descriptor: Treat the work element | ||
53 | descriptor as an effective address and | ||
54 | prefault what it points to. | ||
55 | all: all segments the process calling START_WORK maps. | ||
56 | |||
57 | What: /sys/class/cxl/<afu>/reset | ||
58 | Date: September 2014 | ||
59 | Contact: linuxppc-dev@lists.ozlabs.org | ||
60 | Description: write only | ||
61 | Writing 1 here will reset the AFU provided there are no | ||
62 | contexts active on the AFU. | ||
63 | |||
64 | What: /sys/class/cxl/<afu>/api_version | ||
65 | Date: September 2014 | ||
66 | Contact: linuxppc-dev@lists.ozlabs.org | ||
67 | Description: read only | ||
68 | Decimal value of the current version of the kernel/user API. | ||
69 | |||
70 | What: /sys/class/cxl/<afu>/api_version_compatible | ||
71 | Date: September 2014 | ||
72 | Contact: linuxppc-dev@lists.ozlabs.org | ||
73 | Description: read only | ||
74 | Decimal value of the lowest version of the userspace API | ||
75 | this kernel supports. | ||
76 | |||
77 | |||
78 | |||
79 | Master contexts (eg. /sys/class/cxl/afu0.0m) | ||
80 | |||
81 | What: /sys/class/cxl/<afu>m/mmio_size | ||
82 | Date: September 2014 | ||
83 | Contact: linuxppc-dev@lists.ozlabs.org | ||
84 | Description: read only | ||
85 | Decimal value of the size of the MMIO space that may be mmaped | ||
86 | by userspace. This includes all slave contexts space also. | ||
87 | |||
88 | What: /sys/class/cxl/<afu>m/pp_mmio_len | ||
89 | Date: September 2014 | ||
90 | Contact: linuxppc-dev@lists.ozlabs.org | ||
91 | Description: read only | ||
92 | Decimal value of the Per Process MMIO space length. | ||
93 | |||
94 | What: /sys/class/cxl/<afu>m/pp_mmio_off | ||
95 | Date: September 2014 | ||
96 | Contact: linuxppc-dev@lists.ozlabs.org | ||
97 | Description: read only | ||
98 | Decimal value of the Per Process MMIO space offset. | ||
99 | |||
100 | |||
101 | Card info (eg. /sys/class/cxl/card0) | ||
102 | |||
103 | What: /sys/class/cxl/<card>/caia_version | ||
104 | Date: September 2014 | ||
105 | Contact: linuxppc-dev@lists.ozlabs.org | ||
106 | Description: read only | ||
107 | Identifies the CAIA Version the card implements. | ||
108 | |||
109 | What: /sys/class/cxl/<card>/psl_version | ||
110 | Date: September 2014 | ||
111 | Contact: linuxppc-dev@lists.ozlabs.org | ||
112 | Description: read only | ||
113 | Identifies the revision level of the PSL. | ||
114 | |||
115 | What: /sys/class/cxl/<card>/base_image | ||
116 | Date: September 2014 | ||
117 | Contact: linuxppc-dev@lists.ozlabs.org | ||
118 | Description: read only | ||
119 | Identifies the revision level of the base image for devices | ||
120 | that support loadable PSLs. For FPGAs this field identifies | ||
121 | the image contained in the on-adapter flash which is loaded | ||
122 | during the initial program load. | ||
123 | |||
124 | What: /sys/class/cxl/<card>/image_loaded | ||
125 | Date: September 2014 | ||
126 | Contact: linuxppc-dev@lists.ozlabs.org | ||
127 | Description: read only | ||
128 | Will return "user" or "factory" depending on the image loaded | ||
129 | onto the card. | ||
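
To make the attributes above concrete, here is a minimal C sketch that reads two of them. The afu0.0s device name is only the example used in this file and is an assumption; real systems may enumerate AFUs differently:

    /*
     * Sketch: read two of the attributes described above. The afu0.0s
     * name is the example used in this file; adjust for your system.
     */
    #include <stdio.h>

    #define AFU_SYSFS "/sys/class/cxl/afu0.0s"

    static int read_attr(const char *path, char *buf, size_t len)
    {
            FILE *f = fopen(path, "r");

            if (!f || !fgets(buf, len, f)) {
                    if (f)
                            fclose(f);
                    return -1;
            }
            fclose(f);
            return 0;
    }

    int main(void)
    {
            char mode[64], irqs[64];

            if (!read_attr(AFU_SYSFS "/mode", mode, sizeof(mode)))
                    printf("mode:     %s", mode);        /* e.g. "afu_directed" */
            if (!read_attr(AFU_SYSFS "/irqs_max", irqs, sizeof(irqs)))
                    printf("irqs_max: %s", irqs);        /* e.g. "2037" */
            return 0;
    }
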
diff --git a/Documentation/devicetree/bindings/pci/fsl,pci.txt b/Documentation/devicetree/bindings/pci/fsl,pci.txt new file mode 100644 index 000000000000..d8ac4a768e7e --- /dev/null +++ b/Documentation/devicetree/bindings/pci/fsl,pci.txt | |||
@@ -0,0 +1,27 @@ | |||
1 | * Bus Enumeration by Freescale PCI-X Agent | ||
2 | |||
3 | Typically any Freescale PCI-X bridge hardware strapped into Agent mode | ||
4 | is prevented from enumerating the bus. The PrPMC form-factor requires | ||
5 | all mezzanines to be PCI-X Agents, but one per system may still | ||
6 | enumerate the bus. | ||
7 | |||
8 | The property defined below will allow a PCI-X bridge to be used for bus | ||
9 | enumeration despite being strapped into Agent mode. | ||
10 | |||
11 | Required properties: | ||
12 | - fsl,pci-agent-force-enum : There is no value associated with this | ||
13 | property. The property itself is treated as a boolean. | ||
14 | |||
15 | Example: | ||
16 | |||
17 | /* PCI-X bridge known to be PrPMC Monarch */ | ||
18 | pci0: pci@ef008000 { | ||
19 | fsl,pci-agent-force-enum; | ||
20 | #interrupt-cells = <1>; | ||
21 | #size-cells = <2>; | ||
22 | #address-cells = <3>; | ||
23 | compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci"; | ||
24 | device_type = "pci"; | ||
25 | ... | ||
26 | ... | ||
27 | }; | ||
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 7e240a7c9ab1..8136e1fd30fd 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt | |||
@@ -313,6 +313,7 @@ Code Seq#(hex) Include File Comments | |||
313 | 0xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca> | 313 | 0xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca> |
314 | 0xB3 00 linux/mmc/ioctl.h | 314 | 0xB3 00 linux/mmc/ioctl.h |
315 | 0xC0 00-0F linux/usb/iowarrior.h | 315 | 0xC0 00-0F linux/usb/iowarrior.h |
316 | 0xCA 00-0F uapi/misc/cxl.h | ||
316 | 0xCB 00-1F CBM serial IEC bus in development: | 317 | 0xCB 00-1F CBM serial IEC bus in development: |
317 | <mailto:michael.klein@puffin.lb.shuttle.de> | 318 | <mailto:michael.klein@puffin.lb.shuttle.de> |
318 | 0xCD 01 linux/reiserfs_fs.h | 319 | 0xCD 01 linux/reiserfs_fs.h |
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX index a68784d0a1ee..6fd0e8bb8140 100644 --- a/Documentation/powerpc/00-INDEX +++ b/Documentation/powerpc/00-INDEX | |||
@@ -11,6 +11,8 @@ bootwrapper.txt | |||
11 | cpu_features.txt | 11 | cpu_features.txt |
12 | - info on how we support a variety of CPUs with minimal compile-time | 12 | - info on how we support a variety of CPUs with minimal compile-time |
13 | options. | 13 | options. |
14 | cxl.txt | ||
15 | - Overview of the CXL driver. | ||
14 | eeh-pci-error-recovery.txt | 16 | eeh-pci-error-recovery.txt |
15 | - info on PCI Bus EEH Error Recovery | 17 | - info on PCI Bus EEH Error Recovery |
16 | firmware-assisted-dump.txt | 18 | firmware-assisted-dump.txt |
diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.txt new file mode 100644 index 000000000000..2c71ecc519d9 --- /dev/null +++ b/Documentation/powerpc/cxl.txt | |||
@@ -0,0 +1,379 @@ | |||
1 | Coherent Accelerator Interface (CXL) | ||
2 | ==================================== | ||
3 | |||
4 | Introduction | ||
5 | ============ | ||
6 | |||
7 | The coherent accelerator interface is designed to allow the | ||
8 | coherent connection of accelerators (FPGAs and other devices) to a | ||
9 | POWER system. These devices need to adhere to the Coherent | ||
10 | Accelerator Interface Architecture (CAIA). | ||
11 | |||
12 | IBM refers to this as the Coherent Accelerator Processor Interface | ||
13 | or CAPI. In the kernel it's referred to by the name CXL to avoid | ||
14 | confusion with the ISDN CAPI subsystem. | ||
15 | |||
16 | Coherent in this context means that the accelerator and CPUs can | ||
17 | both access system memory directly and with the same effective | ||
18 | addresses. | ||
19 | |||
20 | |||
21 | Hardware overview | ||
22 | ================= | ||
23 | |||
24 | POWER8 FPGA | ||
25 | +----------+ +---------+ | ||
26 | | | | | | ||
27 | | CPU | | AFU | | ||
28 | | | | | | ||
29 | | | | | | ||
30 | | | | | | ||
31 | +----------+ +---------+ | ||
32 | | PHB | | | | ||
33 | | +------+ | PSL | | ||
34 | | | CAPP |<------>| | | ||
35 | +---+------+ PCIE +---------+ | ||
36 | |||
37 | The POWER8 chip has a Coherently Attached Processor Proxy (CAPP) | ||
38 | unit which is part of the PCIe Host Bridge (PHB). This is managed | ||
39 | by Linux by calls into OPAL. Linux doesn't directly program the | ||
40 | CAPP. | ||
41 | |||
42 | The FPGA (or coherently attached device) consists of two parts. | ||
43 | The POWER Service Layer (PSL) and the Accelerator Function Unit | ||
44 | (AFU). The AFU is used to implement specific functionality behind | ||
45 | the PSL. The PSL, among other things, provides memory address | ||
46 | translation services to allow each AFU direct access to userspace | ||
47 | memory. | ||
48 | |||
49 | The AFU is the core part of the accelerator (eg. the compression, | ||
50 | crypto etc function). The kernel has no knowledge of the function | ||
51 | of the AFU. Only userspace interacts directly with the AFU. | ||
52 | |||
53 | The PSL provides the translation and interrupt services that the | ||
54 | AFU needs. This is what the kernel interacts with. For example, if | ||
55 | the AFU needs to read a particular effective address, it sends | ||
56 | that address to the PSL, the PSL then translates it, fetches the | ||
57 | data from memory and returns it to the AFU. If the PSL has a | ||
58 | translation miss, it interrupts the kernel and the kernel services | ||
59 | the fault. The context in which this fault is serviced depends on | ||
60 | who owns that acceleration function. | ||
61 | |||
62 | |||
63 | AFU Modes | ||
64 | ========= | ||
65 | |||
66 | There are two programming modes supported by the AFU. Dedicated | ||
67 | and AFU directed. An AFU may support one or both modes. | ||
68 | |||
69 | When using dedicated mode only one MMU context is supported. In | ||
70 | this mode, only one userspace process can use the accelerator at a | ||
71 | time. | ||
72 | |||
73 | When using AFU directed mode, up to 16K simultaneous contexts can | ||
74 | be supported. This means up to 16K simultaneous userspace | ||
75 | applications may use the accelerator (although specific AFUs may | ||
76 | support fewer). In this mode, the AFU sends a 16 bit context ID | ||
77 | with each of its requests. This tells the PSL which context is | ||
78 | associated with each operation. If the PSL can't translate an | ||
79 | operation, the ID can also be accessed by the kernel so it can | ||
80 | determine the userspace context associated with an operation. | ||
81 | |||
82 | |||
83 | MMIO space | ||
84 | ========== | ||
85 | |||
86 | A portion of the accelerator MMIO space can be directly mapped | ||
87 | from the AFU to userspace. Either the whole space can be mapped or | ||
88 | just a per context portion. The hardware is self describing, hence | ||
89 | the kernel can determine the offset and size of the per context | ||
90 | portion. | ||
91 | |||
92 | |||
93 | Interrupts | ||
94 | ========== | ||
95 | |||
96 | AFUs may generate interrupts that are destined for userspace. These | ||
97 | are received by the kernel as hardware interrupts and passed onto | ||
98 | userspace by a read syscall documented below. | ||
99 | |||
100 | Data storage faults and error interrupts are handled by the kernel | ||
101 | driver. | ||
102 | |||
103 | |||
104 | Work Element Descriptor (WED) | ||
105 | ============================= | ||
106 | |||
107 | The WED is a 64-bit parameter passed to the AFU when a context is | ||
108 | started. Its format is up to the AFU hence the kernel has no | ||
109 | knowledge of what it represents. Typically it will be the | ||
110 | effective address of a work queue or status block where the AFU | ||
111 | and userspace can share control and status information. | ||
112 | |||
113 | |||
114 | |||
115 | |||
116 | User API | ||
117 | ======== | ||
118 | |||
119 | For AFUs operating in AFU directed mode, two character device | ||
120 | files will be created. /dev/cxl/afu0.0m will correspond to a | ||
121 | master context and /dev/cxl/afu0.0s will correspond to a slave | ||
122 | context. Master contexts have access to the full MMIO space an | ||
123 | AFU provides. Slave contexts have access to only the per process | ||
124 | MMIO space an AFU provides. | ||
125 | |||
126 | For AFUs operating in dedicated process mode, the driver will | ||
127 | only create a single character device per AFU called | ||
128 | /dev/cxl/afu0.0d. This will have access to the entire MMIO space | ||
129 | that the AFU provides (like master contexts in AFU directed). | ||
130 | |||
131 | The types described below are defined in include/uapi/misc/cxl.h | ||
132 | |||
133 | The following file operations are supported on both slave and | ||
134 | master devices. | ||
135 | |||
136 | |||
137 | open | ||
138 | ---- | ||
139 | |||
140 | Opens the device and allocates a file descriptor to be used with | ||
141 | the rest of the API. | ||
142 | |||
143 | A dedicated mode AFU only has one context and only allows the | ||
144 | device to be opened once. | ||
145 | |||
146 | An AFU directed mode AFU can have many contexts; the device can be | ||
147 | opened once for each context that is available. | ||
148 | |||
149 | When all available contexts are allocated the open call will fail | ||
150 | and return -ENOSPC. | ||
151 | |||
152 | Note: IRQs need to be allocated for each context, which may limit | ||
153 | the number of contexts that can be created, and therefore | ||
154 | how many times the device can be opened. The POWER8 CAPP | ||
155 | supports 2040 IRQs and 3 are used by the kernel, so 2037 are | ||
156 | left. If 1 IRQ is needed per context, then only 2037 | ||
157 | contexts can be allocated. If 4 IRQs are needed per context, | ||
158 | then only 2037/4 = 509 contexts can be allocated. | ||
159 | |||
160 | |||
161 | ioctl | ||
162 | ----- | ||
163 | |||
164 | CXL_IOCTL_START_WORK: | ||
165 | Starts the AFU context and associates it with the current | ||
166 | process. Once this ioctl is successfully executed, all memory | ||
167 | mapped into this process is accessible to this AFU context | ||
168 | using the same effective addresses. No additional calls are | ||
169 | required to map/unmap memory. The AFU memory context will be | ||
170 | updated as userspace allocates and frees memory. This ioctl | ||
171 | returns once the AFU context is started. | ||
172 | |||
173 | Takes a pointer to a struct cxl_ioctl_start_work: | ||
174 | |||
175 | struct cxl_ioctl_start_work { | ||
176 | __u64 flags; | ||
177 | __u64 work_element_descriptor; | ||
178 | __u64 amr; | ||
179 | __s16 num_interrupts; | ||
180 | __s16 reserved1; | ||
181 | __s32 reserved2; | ||
182 | __u64 reserved3; | ||
183 | __u64 reserved4; | ||
184 | __u64 reserved5; | ||
185 | __u64 reserved6; | ||
186 | }; | ||
187 | |||
188 | flags: | ||
189 | Indicates which optional fields in the structure are | ||
190 | valid. | ||
191 | |||
192 | work_element_descriptor: | ||
193 | The Work Element Descriptor (WED) is a 64-bit argument | ||
194 | defined by the AFU. Typically this is an effective | ||
195 | address pointing to an AFU specific structure | ||
196 | describing what work to perform. | ||
197 | |||
198 | amr: | ||
199 | Authority Mask Register (AMR), same as the powerpc | ||
200 | AMR. This field is only used by the kernel when the | ||
201 | corresponding CXL_START_WORK_AMR value is specified in | ||
202 | flags. If not specified the kernel will use a default | ||
203 | value of 0. | ||
204 | |||
205 | num_interrupts: | ||
206 | Number of userspace interrupts to request. This field | ||
207 | is only used by the kernel when the corresponding | ||
208 | CXL_START_WORK_NUM_IRQS value is specified in flags. | ||
209 | If not specified the minimum number required by the | ||
210 | AFU will be allocated. The min and max number can be | ||
211 | obtained from sysfs. | ||
212 | |||
213 | reserved fields: | ||
214 | For ABI padding and future extensions | ||
215 | |||
216 | CXL_IOCTL_GET_PROCESS_ELEMENT: | ||
217 | Get the current context id, also known as the process element. | ||
218 | The value is returned from the kernel as a __u32. | ||
219 | |||
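
A minimal sketch of the open + START_WORK + GET_PROCESS_ELEMENT sequence described above follows. It assumes the uapi header from include/uapi/misc/cxl.h is installed as <misc/cxl.h>, uses the /dev/cxl/afu0.0s slave device named earlier as an example, and leaves the WED at 0 because its meaning is AFU defined:

    /*
     * Sketch: start an AFU context on a slave device. Assumptions: the
     * uapi header is installed as <misc/cxl.h> and the device is the
     * /dev/cxl/afu0.0s example from the text above; the WED is left at 0
     * here because its format is defined by the AFU.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/types.h>
    #include <misc/cxl.h>

    int main(void)
    {
            struct cxl_ioctl_start_work work;
            __u32 pe;
            int fd;

            fd = open("/dev/cxl/afu0.0s", O_RDWR);
            if (fd < 0) {
                    perror("open");                 /* ENOSPC: no free contexts */
                    return 1;
            }

            memset(&work, 0, sizeof(work));
            work.work_element_descriptor = 0;       /* AFU defined, e.g. a work queue address */
            /* flags left 0: kernel uses the default AMR and the minimum IRQ count */

            if (ioctl(fd, CXL_IOCTL_START_WORK, &work)) {
                    perror("CXL_IOCTL_START_WORK");
                    return 1;
            }

            if (ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe) == 0)
                    printf("process element (context id): %u\n", pe);

            close(fd);
            return 0;
    }
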
220 | |||
221 | mmap | ||
222 | ---- | ||
223 | |||
224 | An AFU may have an MMIO space to facilitate communication with the | ||
225 | AFU. If it does, the MMIO space can be accessed via mmap. The size | ||
226 | and contents of this area are specific to the particular AFU. The | ||
227 | size can be discovered via sysfs. | ||
228 | |||
229 | In AFU directed mode, master contexts are allowed to map all of | ||
230 | the MMIO space and slave contexts are allowed to only map the per | ||
231 | process MMIO space associated with the context. In dedicated | ||
232 | process mode the entire MMIO space can always be mapped. | ||
233 | |||
234 | This mmap call must be done after the START_WORK ioctl. | ||
235 | |||
236 | Care should be taken when accessing MMIO space. Only 32 and 64-bit | ||
237 | accesses are supported by POWER8. Also, the AFU will be designed | ||
238 | with a specific endianness, so all MMIO accesses should consider | ||
239 | endianness (recommend endian(3) variants like: le64toh(), | ||
240 | be64toh() etc). These endian issues equally apply to shared memory | ||
241 | queues the WED may describe. | ||
242 | |||
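
The following sketch shows the mmap usage described above. It assumes fd is a context that has already completed START_WORK (as in the earlier ioctl sketch), that size was read from the sysfs mmio_size or pp_mmio_len attribute, and that this AFU's registers are big endian; register offset 0 is purely illustrative:

    /*
     * Sketch: map the MMIO space after START_WORK and do one endian-aware
     * 64-bit read. Assumptions: fd has already completed START_WORK, size
     * comes from the sysfs mmio_size (or pp_mmio_len) attribute, the AFU's
     * registers are big endian, and register offset 0 is made up.
     */
    #include <endian.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static uint64_t read_afu_reg0(int fd, size_t size)
    {
            volatile uint64_t *mmio;
            uint64_t raw;

            mmio = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (mmio == MAP_FAILED)
                    return 0;

            raw = mmio[0];          /* 64-bit access; POWER8 supports only 32/64-bit MMIO */
            munmap((void *)mmio, size);

            return be64toh(raw);    /* convert from the AFU's endianness */
    }
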
243 | |||
244 | read | ||
245 | ---- | ||
246 | |||
247 | Reads events from the AFU. Blocks if no events are pending | ||
248 | (unless O_NONBLOCK is supplied). Returns -EIO in the case of an | ||
249 | unrecoverable error or if the card is removed. | ||
250 | |||
251 | read() will always return an integral number of events. | ||
252 | |||
253 | The buffer passed to read() must be at least 4K bytes. | ||
254 | |||
255 | The result of the read will be a buffer of one or more events, | ||
256 | each event is of type struct cxl_event, of varying size. | ||
257 | |||
258 | struct cxl_event { | ||
259 | struct cxl_event_header header; | ||
260 | union { | ||
261 | struct cxl_event_afu_interrupt irq; | ||
262 | struct cxl_event_data_storage fault; | ||
263 | struct cxl_event_afu_error afu_error; | ||
264 | }; | ||
265 | }; | ||
266 | |||
267 | The struct cxl_event_header is defined as: | ||
268 | |||
269 | struct cxl_event_header { | ||
270 | __u16 type; | ||
271 | __u16 size; | ||
272 | __u16 process_element; | ||
273 | __u16 reserved1; | ||
274 | }; | ||
275 | |||
276 | type: | ||
277 | This defines the type of event. The type determines how | ||
278 | the rest of the event is structured. These types are | ||
279 | described below and defined by enum cxl_event_type. | ||
280 | |||
281 | size: | ||
282 | This is the size of the event in bytes including the | ||
283 | struct cxl_event_header. The start of the next event can | ||
284 | be found at this offset from the start of the current | ||
285 | event. | ||
286 | |||
287 | process_element: | ||
288 | Context ID of the event. | ||
289 | |||
290 | reserved field: | ||
291 | For future extensions and padding. | ||
292 | |||
293 | If the event type is CXL_EVENT_AFU_INTERRUPT then the event | ||
294 | structure is defined as: | ||
295 | |||
296 | struct cxl_event_afu_interrupt { | ||
297 | __u16 flags; | ||
298 | __u16 irq; /* Raised AFU interrupt number */ | ||
299 | __u32 reserved1; | ||
300 | }; | ||
301 | |||
302 | flags: | ||
303 | These flags indicate which optional fields are present | ||
304 | in this struct. Currently all fields are mandatory. | ||
305 | |||
306 | irq: | ||
307 | The IRQ number sent by the AFU. | ||
308 | |||
309 | reserved field: | ||
310 | For future extensions and padding. | ||
311 | |||
312 | If the event type is CXL_EVENT_DATA_STORAGE then the event | ||
313 | structure is defined as: | ||
314 | |||
315 | struct cxl_event_data_storage { | ||
316 | __u16 flags; | ||
317 | __u16 reserved1; | ||
318 | __u32 reserved2; | ||
319 | __u64 addr; | ||
320 | __u64 dsisr; | ||
321 | __u64 reserved3; | ||
322 | }; | ||
323 | |||
324 | flags: | ||
325 | These flags indicate which optional fields are present in | ||
326 | this struct. Currently all fields are mandatory. | ||
327 | |||
328 | address: | ||
329 | The address that the AFU unsuccessfully attempted to | ||
330 | access. Valid accesses will be handled transparently by the | ||
331 | kernel but invalid accesses will generate this event. | ||
332 | |||
333 | dsisr: | ||
334 | This field gives information on the type of fault. It is a | ||
335 | copy of the DSISR from the PSL hardware when the address | ||
336 | fault occurred. The form of the DSISR is as defined in the | ||
337 | CAIA. | ||
338 | |||
339 | reserved fields: | ||
340 | For future extensions | ||
341 | |||
342 | If the event type is CXL_EVENT_AFU_ERROR then the event structure | ||
343 | is defined as: | ||
344 | |||
345 | struct cxl_event_afu_error { | ||
346 | __u16 flags; | ||
347 | __u16 reserved1; | ||
348 | __u32 reserved2; | ||
349 | __u64 error; | ||
350 | }; | ||
351 | |||
352 | flags: | ||
353 | These flags indicate which optional fields are present in | ||
354 | this struct. Currently all fields are mandatory. | ||
355 | |||
356 | error: | ||
357 | Error status from the AFU. Defined by the AFU. | ||
358 | |||
359 | reserved fields: | ||
360 | For future extensions and padding | ||
361 | |||
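
The event structures above can be consumed with a simple read loop. This sketch assumes <misc/cxl.h> provides struct cxl_event and the CXL_EVENT_* type constants as described, and that fd is an already-started context:

    /*
     * Sketch: drain events from a started context. Assumptions: fd has
     * completed START_WORK and <misc/cxl.h> provides struct cxl_event and
     * the CXL_EVENT_* constants described above.
     */
    #include <stdio.h>
    #include <unistd.h>
    #include <misc/cxl.h>

    static void handle_events(int fd)
    {
            char buf[4096];         /* the read() buffer must be at least 4K */
            ssize_t n;

            while ((n = read(fd, buf, sizeof(buf))) > 0) {
                    ssize_t off = 0;

                    while (off < n) {
                            struct cxl_event *ev = (struct cxl_event *)(buf + off);

                            switch (ev->header.type) {
                            case CXL_EVENT_AFU_INTERRUPT:
                                    printf("AFU interrupt %u\n", ev->irq.irq);
                                    break;
                            case CXL_EVENT_DATA_STORAGE:
                                    printf("fault at 0x%llx\n",
                                           (unsigned long long)ev->fault.addr);
                                    break;
                            case CXL_EVENT_AFU_ERROR:
                                    printf("AFU error 0x%llx\n",
                                           (unsigned long long)ev->afu_error.error);
                                    break;
                            default:
                                    break;
                            }
                            off += ev->header.size; /* size includes the header */
                    }
            }
            /* read() returns -1/EIO on an unrecoverable error or card removal */
    }
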
362 | Sysfs Class | ||
363 | =========== | ||
364 | |||
365 | A cxl sysfs class is added under /sys/class/cxl to facilitate | ||
366 | enumeration and tuning of the accelerators. Its layout is | ||
367 | described in Documentation/ABI/testing/sysfs-class-cxl | ||
368 | |||
369 | Udev rules | ||
370 | ========== | ||
371 | |||
372 | The following udev rules could be used to create a symlink to the | ||
373 | most logical chardev to use in any programming mode (afuX.Yd for | ||
374 | dedicated, afuX.Ys for afu directed), since the API is virtually | ||
375 | identical for each: | ||
376 | |||
377 | SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b" | ||
378 | SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \ | ||
379 | KERNEL=="afu[0-9]*.[0-9]*s", SYMLINK="cxl/%b" | ||
diff --git a/MAINTAINERS b/MAINTAINERS index f8d882e13200..1e53b32fa07b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2761,6 +2761,18 @@ W: http://www.chelsio.com | |||
2761 | S: Supported | 2761 | S: Supported |
2762 | F: drivers/net/ethernet/chelsio/cxgb4vf/ | 2762 | F: drivers/net/ethernet/chelsio/cxgb4vf/ |
2763 | 2763 | ||
2764 | CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER | ||
2765 | M: Ian Munsie <imunsie@au1.ibm.com> | ||
2766 | M: Michael Neuling <mikey@neuling.org> | ||
2767 | L: linuxppc-dev@lists.ozlabs.org | ||
2768 | S: Supported | ||
2769 | F: drivers/misc/cxl/ | ||
2770 | F: include/misc/cxl.h | ||
2771 | F: include/uapi/misc/cxl.h | ||
2772 | F: Documentation/powerpc/cxl.txt | ||
2773 | F: Documentation/powerpc/cxl.txt | ||
2774 | F: Documentation/ABI/testing/sysfs-class-cxl | ||
2775 | |||
2764 | STMMAC ETHERNET DRIVER | 2776 | STMMAC ETHERNET DRIVER |
2765 | M: Giuseppe Cavallaro <peppe.cavallaro@st.com> | 2777 | M: Giuseppe Cavallaro <peppe.cavallaro@st.com> |
2766 | L: netdev@vger.kernel.org | 2778 | L: netdev@vger.kernel.org |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 4bc7b62fb4b6..88eace4e28c3 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -147,6 +147,7 @@ config PPC | |||
147 | select ARCH_USE_CMPXCHG_LOCKREF if PPC64 | 147 | select ARCH_USE_CMPXCHG_LOCKREF if PPC64 |
148 | select HAVE_ARCH_AUDITSYSCALL | 148 | select HAVE_ARCH_AUDITSYSCALL |
149 | select ARCH_SUPPORTS_ATOMIC_RMW | 149 | select ARCH_SUPPORTS_ATOMIC_RMW |
150 | select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN | ||
150 | 151 | ||
151 | config GENERIC_CSUM | 152 | config GENERIC_CSUM |
152 | def_bool CPU_LITTLE_ENDIAN | 153 | def_bool CPU_LITTLE_ENDIAN |
@@ -182,7 +183,7 @@ config SCHED_OMIT_FRAME_POINTER | |||
182 | 183 | ||
183 | config ARCH_MAY_HAVE_PC_FDC | 184 | config ARCH_MAY_HAVE_PC_FDC |
184 | bool | 185 | bool |
185 | default !PPC_PSERIES || PCI | 186 | default PCI |
186 | 187 | ||
187 | config PPC_OF | 188 | config PPC_OF |
188 | def_bool y | 189 | def_bool y |
@@ -287,6 +288,10 @@ config PPC_EMULATE_SSTEP | |||
287 | bool | 288 | bool |
288 | default y if KPROBES || UPROBES || XMON || HAVE_HW_BREAKPOINT | 289 | default y if KPROBES || UPROBES || XMON || HAVE_HW_BREAKPOINT |
289 | 290 | ||
291 | config ZONE_DMA32 | ||
292 | bool | ||
293 | default y if PPC64 | ||
294 | |||
290 | source "init/Kconfig" | 295 | source "init/Kconfig" |
291 | 296 | ||
292 | source "kernel/Kconfig.freezer" | 297 | source "kernel/Kconfig.freezer" |
@@ -603,6 +608,10 @@ config PPC_SUBPAGE_PROT | |||
603 | to set access permissions (read/write, readonly, or no access) | 608 | to set access permissions (read/write, readonly, or no access) |
604 | on the 4k subpages of each 64k page. | 609 | on the 4k subpages of each 64k page. |
605 | 610 | ||
611 | config PPC_COPRO_BASE | ||
612 | bool | ||
613 | default n | ||
614 | |||
606 | config SCHED_SMT | 615 | config SCHED_SMT |
607 | bool "SMT (Hyperthreading) scheduler support" | 616 | bool "SMT (Hyperthreading) scheduler support" |
608 | depends on PPC64 && SMP | 617 | depends on PPC64 && SMP |
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 5687e299d0a5..132d9c681d6a 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile | |||
@@ -135,6 +135,7 @@ CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4) | |||
135 | CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5) | 135 | CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5) |
136 | CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) | 136 | CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) |
137 | CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) | 137 | CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) |
138 | CFLAGS-$(CONFIG_POWER8_CPU) += $(call cc-option,-mcpu=power8) | ||
138 | 139 | ||
139 | # Altivec option not allowed with e500mc64 in GCC. | 140 | # Altivec option not allowed with e500mc64 in GCC. |
140 | ifeq ($(CONFIG_ALTIVEC),y) | 141 | ifeq ($(CONFIG_ALTIVEC),y) |
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index ccc25eddbcb8..8a5bc1cfc6aa 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile | |||
@@ -389,7 +389,12 @@ $(obj)/zImage: $(addprefix $(obj)/, $(image-y)) | |||
389 | $(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y)) | 389 | $(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y)) |
390 | @rm -f $@; ln $< $@ | 390 | @rm -f $@; ln $< $@ |
391 | 391 | ||
392 | # Only install the vmlinux | ||
392 | install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y)) | 393 | install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y)) |
394 | sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" | ||
395 | |||
396 | # Install the vmlinux and other built boot targets. | ||
397 | zInstall: $(CONFIGURE) $(addprefix $(obj)/, $(image-y)) | ||
393 | sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^ | 398 | sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^ |
394 | 399 | ||
395 | # anything not in $(targets) | 400 | # anything not in $(targets) |
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi index 97479f0ce630..aecee9690a88 100644 --- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi | |||
@@ -410,7 +410,7 @@ | |||
410 | /include/ "qoriq-gpio-3.dtsi" | 410 | /include/ "qoriq-gpio-3.dtsi" |
411 | /include/ "qoriq-usb2-mph-0.dtsi" | 411 | /include/ "qoriq-usb2-mph-0.dtsi" |
412 | usb0: usb@210000 { | 412 | usb0: usb@210000 { |
413 | compatible = "fsl-usb2-mph-v2.4", "fsl-usb2-mph"; | 413 | compatible = "fsl-usb2-mph-v2.5", "fsl-usb2-mph"; |
414 | fsl,iommu-parent = <&pamu1>; | 414 | fsl,iommu-parent = <&pamu1>; |
415 | fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */ | 415 | fsl,liodn-reg = <&guts 0x520>; /* USB1LIODNR */ |
416 | phy_type = "utmi"; | 416 | phy_type = "utmi"; |
@@ -418,7 +418,7 @@ | |||
418 | }; | 418 | }; |
419 | /include/ "qoriq-usb2-dr-0.dtsi" | 419 | /include/ "qoriq-usb2-dr-0.dtsi" |
420 | usb1: usb@211000 { | 420 | usb1: usb@211000 { |
421 | compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr"; | 421 | compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr"; |
422 | fsl,iommu-parent = <&pamu1>; | 422 | fsl,iommu-parent = <&pamu1>; |
423 | fsl,liodn-reg = <&guts 0x524>; /* USB1LIODNR */ | 423 | fsl,liodn-reg = <&guts 0x524>; /* USB1LIODNR */ |
424 | dr_mode = "host"; | 424 | dr_mode = "host"; |
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi index a3d582e0361a..7e2fc7cdce48 100644 --- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi | |||
@@ -498,13 +498,13 @@ | |||
498 | /include/ "qoriq-gpio-3.dtsi" | 498 | /include/ "qoriq-gpio-3.dtsi" |
499 | /include/ "qoriq-usb2-mph-0.dtsi" | 499 | /include/ "qoriq-usb2-mph-0.dtsi" |
500 | usb0: usb@210000 { | 500 | usb0: usb@210000 { |
501 | compatible = "fsl-usb2-mph-v2.4", "fsl-usb2-mph"; | 501 | compatible = "fsl-usb2-mph-v2.5", "fsl-usb2-mph"; |
502 | phy_type = "utmi"; | 502 | phy_type = "utmi"; |
503 | port0; | 503 | port0; |
504 | }; | 504 | }; |
505 | /include/ "qoriq-usb2-dr-0.dtsi" | 505 | /include/ "qoriq-usb2-dr-0.dtsi" |
506 | usb1: usb@211000 { | 506 | usb1: usb@211000 { |
507 | compatible = "fsl-usb2-dr-v2.4", "fsl-usb2-dr"; | 507 | compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr"; |
508 | dr_mode = "host"; | 508 | dr_mode = "host"; |
509 | phy_type = "utmi"; | 509 | phy_type = "utmi"; |
510 | }; | 510 | }; |
diff --git a/arch/powerpc/boot/dts/t1040rdb.dts b/arch/powerpc/boot/dts/t1040rdb.dts new file mode 100644 index 000000000000..79a0bed04c1a --- /dev/null +++ b/arch/powerpc/boot/dts/t1040rdb.dts | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * T1040RDB Device Tree Source | ||
3 | * | ||
4 | * Copyright 2014 Freescale Semiconductor Inc. | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are met: | ||
8 | * * Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * * Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * * Neither the name of Freescale Semiconductor nor the | ||
14 | * names of its contributors may be used to endorse or promote products | ||
15 | * derived from this software without specific prior written permission. | ||
16 | * | ||
17 | * | ||
18 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") as published by the Free Software | ||
20 | * Foundation, either version 2 of that License or (at your option) any | ||
21 | * later version. | ||
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY | ||
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | /include/ "fsl/t104xsi-pre.dtsi" | ||
36 | /include/ "t104xrdb.dtsi" | ||
37 | |||
38 | / { | ||
39 | model = "fsl,T1040RDB"; | ||
40 | compatible = "fsl,T1040RDB"; | ||
41 | ifc: localbus@ffe124000 { | ||
42 | cpld@3,0 { | ||
43 | compatible = "fsl,t1040rdb-cpld"; | ||
44 | }; | ||
45 | }; | ||
46 | }; | ||
47 | |||
48 | /include/ "fsl/t1040si-post.dtsi" | ||
diff --git a/arch/powerpc/boot/dts/t1042rdb.dts b/arch/powerpc/boot/dts/t1042rdb.dts new file mode 100644 index 000000000000..738c23790e94 --- /dev/null +++ b/arch/powerpc/boot/dts/t1042rdb.dts | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * T1042RDB Device Tree Source | ||
3 | * | ||
4 | * Copyright 2014 Freescale Semiconductor Inc. | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are met: | ||
8 | * * Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * * Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * * Neither the name of Freescale Semiconductor nor the | ||
14 | * names of its contributors may be used to endorse or promote products | ||
15 | * derived from this software without specific prior written permission. | ||
16 | * | ||
17 | * | ||
18 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") as published by the Free Software | ||
20 | * Foundation, either version 2 of that License or (at your option) any | ||
21 | * later version. | ||
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY | ||
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | /include/ "fsl/t104xsi-pre.dtsi" | ||
36 | /include/ "t104xrdb.dtsi" | ||
37 | |||
38 | / { | ||
39 | model = "fsl,T1042RDB"; | ||
40 | compatible = "fsl,T1042RDB"; | ||
41 | ifc: localbus@ffe124000 { | ||
42 | cpld@3,0 { | ||
43 | compatible = "fsl,t1042rdb-cpld"; | ||
44 | }; | ||
45 | }; | ||
46 | }; | ||
47 | |||
48 | /include/ "fsl/t1042si-post.dtsi" | ||
diff --git a/arch/powerpc/boot/dts/t1042rdb_pi.dts b/arch/powerpc/boot/dts/t1042rdb_pi.dts new file mode 100644 index 000000000000..634f751fa6d3 --- /dev/null +++ b/arch/powerpc/boot/dts/t1042rdb_pi.dts | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * T1042RDB_PI Device Tree Source | ||
3 | * | ||
4 | * Copyright 2014 Freescale Semiconductor Inc. | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are met: | ||
8 | * * Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * * Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * * Neither the name of Freescale Semiconductor nor the | ||
14 | * names of its contributors may be used to endorse or promote products | ||
15 | * derived from this software without specific prior written permission. | ||
16 | * | ||
17 | * | ||
18 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") as published by the Free Software | ||
20 | * Foundation, either version 2 of that License or (at your option) any | ||
21 | * later version. | ||
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY | ||
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | /include/ "fsl/t104xsi-pre.dtsi" | ||
36 | /include/ "t104xrdb.dtsi" | ||
37 | |||
38 | / { | ||
39 | model = "fsl,T1042RDB_PI"; | ||
40 | compatible = "fsl,T1042RDB_PI"; | ||
41 | ifc: localbus@ffe124000 { | ||
42 | cpld@3,0 { | ||
43 | compatible = "fsl,t1042rdb_pi-cpld"; | ||
44 | }; | ||
45 | }; | ||
46 | soc: soc@ffe000000 { | ||
47 | i2c@118000 { | ||
48 | rtc@68 { | ||
49 | compatible = "dallas,ds1337"; | ||
50 | reg = <0x68>; | ||
51 | interrupts = <0x2 0x1 0 0>; | ||
52 | }; | ||
53 | }; | ||
54 | }; | ||
55 | }; | ||
56 | |||
57 | /include/ "fsl/t1042si-post.dtsi" | ||
diff --git a/arch/powerpc/boot/dts/t104xrdb.dtsi b/arch/powerpc/boot/dts/t104xrdb.dtsi new file mode 100644 index 000000000000..1cf0f3c5f7e5 --- /dev/null +++ b/arch/powerpc/boot/dts/t104xrdb.dtsi | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * T1040RDB/T1042RDB Device Tree Source | ||
3 | * | ||
4 | * Copyright 2014 Freescale Semiconductor Inc. | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are met: | ||
8 | * * Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * * Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * * Neither the name of Freescale Semiconductor nor the | ||
14 | * names of its contributors may be used to endorse or promote products | ||
15 | * derived from this software without specific prior written permission. | ||
16 | * | ||
17 | * | ||
18 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") as published by the Free Software | ||
20 | * Foundation, either version 2 of that License or (at your option) any | ||
21 | * later version. | ||
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY | ||
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | / { | ||
36 | |||
37 | ifc: localbus@ffe124000 { | ||
38 | reg = <0xf 0xfe124000 0 0x2000>; | ||
39 | ranges = <0 0 0xf 0xe8000000 0x08000000 | ||
40 | 2 0 0xf 0xff800000 0x00010000 | ||
41 | 3 0 0xf 0xffdf0000 0x00008000>; | ||
42 | |||
43 | nor@0,0 { | ||
44 | #address-cells = <1>; | ||
45 | #size-cells = <1>; | ||
46 | compatible = "cfi-flash"; | ||
47 | reg = <0x0 0x0 0x8000000>; | ||
48 | bank-width = <2>; | ||
49 | device-width = <1>; | ||
50 | }; | ||
51 | |||
52 | nand@2,0 { | ||
53 | #address-cells = <1>; | ||
54 | #size-cells = <1>; | ||
55 | compatible = "fsl,ifc-nand"; | ||
56 | reg = <0x2 0x0 0x10000>; | ||
57 | }; | ||
58 | |||
59 | cpld@3,0 { | ||
60 | reg = <3 0 0x300>; | ||
61 | }; | ||
62 | }; | ||
63 | |||
64 | memory { | ||
65 | device_type = "memory"; | ||
66 | }; | ||
67 | |||
68 | dcsr: dcsr@f00000000 { | ||
69 | ranges = <0x00000000 0xf 0x00000000 0x01072000>; | ||
70 | }; | ||
71 | |||
72 | soc: soc@ffe000000 { | ||
73 | ranges = <0x00000000 0xf 0xfe000000 0x1000000>; | ||
74 | reg = <0xf 0xfe000000 0 0x00001000>; | ||
75 | |||
76 | spi@110000 { | ||
77 | flash@0 { | ||
78 | #address-cells = <1>; | ||
79 | #size-cells = <1>; | ||
80 | compatible = "micron,n25q512a"; | ||
81 | reg = <0>; | ||
82 | spi-max-frequency = <10000000>; /* input clock */ | ||
83 | }; | ||
84 | }; | ||
85 | |||
86 | i2c@118100 { | ||
87 | pca9546@77 { | ||
88 | compatible = "nxp,pca9546"; | ||
89 | reg = <0x77>; | ||
90 | #address-cells = <1>; | ||
91 | #size-cells = <0>; | ||
92 | }; | ||
93 | }; | ||
94 | |||
95 | }; | ||
96 | |||
97 | pci0: pcie@ffe240000 { | ||
98 | reg = <0xf 0xfe240000 0 0x10000>; | ||
99 | ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x10000000 | ||
100 | 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>; | ||
101 | pcie@0 { | ||
102 | ranges = <0x02000000 0 0xe0000000 | ||
103 | 0x02000000 0 0xe0000000 | ||
104 | 0 0x10000000 | ||
105 | |||
106 | 0x01000000 0 0x00000000 | ||
107 | 0x01000000 0 0x00000000 | ||
108 | 0 0x00010000>; | ||
109 | }; | ||
110 | }; | ||
111 | |||
112 | pci1: pcie@ffe250000 { | ||
113 | reg = <0xf 0xfe250000 0 0x10000>; | ||
114 | ranges = <0x02000000 0x0 0xe0000000 0xc 0x10000000 0x0 0x10000000 | ||
115 | 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>; | ||
116 | pcie@0 { | ||
117 | ranges = <0x02000000 0 0xe0000000 | ||
118 | 0x02000000 0 0xe0000000 | ||
119 | 0 0x10000000 | ||
120 | |||
121 | 0x01000000 0 0x00000000 | ||
122 | 0x01000000 0 0x00000000 | ||
123 | 0 0x00010000>; | ||
124 | }; | ||
125 | }; | ||
126 | |||
127 | pci2: pcie@ffe260000 { | ||
128 | reg = <0xf 0xfe260000 0 0x10000>; | ||
129 | ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x10000000 | ||
130 | 0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>; | ||
131 | pcie@0 { | ||
132 | ranges = <0x02000000 0 0xe0000000 | ||
133 | 0x02000000 0 0xe0000000 | ||
134 | 0 0x10000000 | ||
135 | |||
136 | 0x01000000 0 0x00000000 | ||
137 | 0x01000000 0 0x00000000 | ||
138 | 0 0x00010000>; | ||
139 | }; | ||
140 | }; | ||
141 | |||
142 | pci3: pcie@ffe270000 { | ||
143 | reg = <0xf 0xfe270000 0 0x10000>; | ||
144 | ranges = <0x02000000 0 0xe0000000 0xc 0x30000000 0 0x10000000 | ||
145 | 0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>; | ||
146 | pcie@0 { | ||
147 | ranges = <0x02000000 0 0xe0000000 | ||
148 | 0x02000000 0 0xe0000000 | ||
149 | 0 0x10000000 | ||
150 | |||
151 | 0x01000000 0 0x00000000 | ||
152 | 0x01000000 0 0x00000000 | ||
153 | 0 0x00010000>; | ||
154 | }; | ||
155 | }; | ||
156 | }; | ||
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 45fd06cdc3e8..7a7b3c879f96 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig | |||
@@ -18,6 +18,7 @@ CONFIG_OPROFILE=m | |||
18 | CONFIG_MODULES=y | 18 | CONFIG_MODULES=y |
19 | CONFIG_MODULE_UNLOAD=y | 19 | CONFIG_MODULE_UNLOAD=y |
20 | # CONFIG_BLK_DEV_BSG is not set | 20 | # CONFIG_BLK_DEV_BSG is not set |
21 | # CONFIG_PPC_POWERNV is not set | ||
21 | # CONFIG_PPC_PSERIES is not set | 22 | # CONFIG_PPC_PSERIES is not set |
22 | # CONFIG_PPC_PMAC is not set | 23 | # CONFIG_PPC_PMAC is not set |
23 | CONFIG_PPC_PS3=y | 24 | CONFIG_PPC_PS3=y |
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig index 77d7bf3ca2ac..acccbfde8a50 100644 --- a/arch/powerpc/configs/celleb_defconfig +++ b/arch/powerpc/configs/celleb_defconfig | |||
@@ -15,6 +15,7 @@ CONFIG_MODULES=y | |||
15 | CONFIG_MODULE_UNLOAD=y | 15 | CONFIG_MODULE_UNLOAD=y |
16 | CONFIG_MODVERSIONS=y | 16 | CONFIG_MODVERSIONS=y |
17 | CONFIG_MODULE_SRCVERSION_ALL=y | 17 | CONFIG_MODULE_SRCVERSION_ALL=y |
18 | # CONFIG_PPC_POWERNV is not set | ||
18 | # CONFIG_PPC_PSERIES is not set | 19 | # CONFIG_PPC_PSERIES is not set |
19 | # CONFIG_PPC_PMAC is not set | 20 | # CONFIG_PPC_PMAC is not set |
20 | CONFIG_PPC_CELLEB=y | 21 | CONFIG_PPC_CELLEB=y |
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig index 6a3c58adf253..688e9e4d29a1 100644 --- a/arch/powerpc/configs/corenet32_smp_defconfig +++ b/arch/powerpc/configs/corenet32_smp_defconfig | |||
@@ -165,6 +165,8 @@ CONFIG_NFS_FS=y | |||
165 | CONFIG_NFS_V4=y | 165 | CONFIG_NFS_V4=y |
166 | CONFIG_ROOT_NFS=y | 166 | CONFIG_ROOT_NFS=y |
167 | CONFIG_NFSD=m | 167 | CONFIG_NFSD=m |
168 | CONFIG_NLS_CODEPAGE_437=y | ||
169 | CONFIG_NLS_CODEPAGE_850=y | ||
168 | CONFIG_NLS_ISO8859_1=y | 170 | CONFIG_NLS_ISO8859_1=y |
169 | CONFIG_NLS_UTF8=m | 171 | CONFIG_NLS_UTF8=m |
170 | CONFIG_MAGIC_SYSRQ=y | 172 | CONFIG_MAGIC_SYSRQ=y |
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index 269d6e47c67d..6db97e4414b2 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig | |||
@@ -50,7 +50,6 @@ CONFIG_NET_IPIP=y | |||
50 | CONFIG_IP_MROUTE=y | 50 | CONFIG_IP_MROUTE=y |
51 | CONFIG_IP_PIMSM_V1=y | 51 | CONFIG_IP_PIMSM_V1=y |
52 | CONFIG_IP_PIMSM_V2=y | 52 | CONFIG_IP_PIMSM_V2=y |
53 | CONFIG_ARPD=y | ||
54 | CONFIG_INET_ESP=y | 53 | CONFIG_INET_ESP=y |
55 | # CONFIG_INET_XFRM_MODE_BEET is not set | 54 | # CONFIG_INET_XFRM_MODE_BEET is not set |
56 | # CONFIG_INET_LRO is not set | 55 | # CONFIG_INET_LRO is not set |
@@ -60,33 +59,17 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | |||
60 | CONFIG_DEVTMPFS=y | 59 | CONFIG_DEVTMPFS=y |
61 | CONFIG_DEVTMPFS_MOUNT=y | 60 | CONFIG_DEVTMPFS_MOUNT=y |
62 | CONFIG_MTD=y | 61 | CONFIG_MTD=y |
63 | CONFIG_MTD_OF_PARTS=y | ||
64 | CONFIG_MTD_CMDLINE_PARTS=y | 62 | CONFIG_MTD_CMDLINE_PARTS=y |
65 | CONFIG_MTD_CHAR=y | ||
66 | CONFIG_MTD_BLKDEVS=y | ||
67 | CONFIG_MTD_BLOCK=y | 63 | CONFIG_MTD_BLOCK=y |
68 | CONFIG_FTL=y | 64 | CONFIG_FTL=y |
69 | CONFIG_MTD_CFI=y | 65 | CONFIG_MTD_CFI=y |
70 | CONFIG_MTD_GEN_PROBE=y | ||
71 | CONFIG_MTD_MAP_BANK_WIDTH_1=y | ||
72 | CONFIG_MTD_MAP_BANK_WIDTH_2=y | ||
73 | CONFIG_MTD_MAP_BANK_WIDTH_4=y | ||
74 | CONFIG_MTD_CFI_I1=y | ||
75 | CONFIG_MTD_CFI_I2=y | ||
76 | CONFIG_MTD_CFI_INTELEXT=y | 66 | CONFIG_MTD_CFI_INTELEXT=y |
77 | CONFIG_MTD_CFI_AMDSTD=y | 67 | CONFIG_MTD_CFI_AMDSTD=y |
78 | CONFIG_MTD_PHYSMAP_OF=y | 68 | CONFIG_MTD_PHYSMAP_OF=y |
79 | CONFIG_MTD_M25P80=y | ||
80 | CONFIG_MTD_CFI_UTIL=y | ||
81 | CONFIG_MTD_NAND_ECC=y | ||
82 | CONFIG_MTD_NAND=y | 69 | CONFIG_MTD_NAND=y |
83 | CONFIG_MTD_NAND_IDS=y | ||
84 | CONFIG_MTD_NAND_FSL_ELBC=y | 70 | CONFIG_MTD_NAND_FSL_ELBC=y |
85 | CONFIG_MTD_NAND_FSL_IFC=y | 71 | CONFIG_MTD_NAND_FSL_IFC=y |
86 | CONFIG_MTD_UBI=y | 72 | CONFIG_MTD_UBI=y |
87 | CONFIG_MTD_UBI_WL_THRESHOLD=4096 | ||
88 | CONFIG_MTD_UBI_BEB_RESERVE=1 | ||
89 | CONFIG_PROC_DEVICETREE=y | ||
90 | CONFIG_BLK_DEV_LOOP=y | 73 | CONFIG_BLK_DEV_LOOP=y |
91 | CONFIG_BLK_DEV_RAM=y | 74 | CONFIG_BLK_DEV_RAM=y |
92 | CONFIG_BLK_DEV_RAM_SIZE=131072 | 75 | CONFIG_BLK_DEV_RAM_SIZE=131072 |
@@ -102,6 +85,7 @@ CONFIG_INPUT_FF_MEMLESS=m | |||
102 | # CONFIG_INPUT_KEYBOARD is not set | 85 | # CONFIG_INPUT_KEYBOARD is not set |
103 | # CONFIG_INPUT_MOUSE is not set | 86 | # CONFIG_INPUT_MOUSE is not set |
104 | CONFIG_SERIO_LIBPS2=y | 87 | CONFIG_SERIO_LIBPS2=y |
88 | CONFIG_PPC_EPAPR_HV_BYTECHAN=y | ||
105 | CONFIG_SERIAL_8250=y | 89 | CONFIG_SERIAL_8250=y |
106 | CONFIG_SERIAL_8250_CONSOLE=y | 90 | CONFIG_SERIAL_8250_CONSOLE=y |
107 | CONFIG_SERIAL_8250_MANY_PORTS=y | 91 | CONFIG_SERIAL_8250_MANY_PORTS=y |
@@ -115,7 +99,6 @@ CONFIG_SPI_GPIO=y | |||
115 | CONFIG_SPI_FSL_SPI=y | 99 | CONFIG_SPI_FSL_SPI=y |
116 | CONFIG_SPI_FSL_ESPI=y | 100 | CONFIG_SPI_FSL_ESPI=y |
117 | # CONFIG_HWMON is not set | 101 | # CONFIG_HWMON is not set |
118 | CONFIG_VIDEO_OUTPUT_CONTROL=y | ||
119 | CONFIG_USB_HID=m | 102 | CONFIG_USB_HID=m |
120 | CONFIG_USB=y | 103 | CONFIG_USB=y |
121 | CONFIG_USB_MON=y | 104 | CONFIG_USB_MON=y |
@@ -124,14 +107,17 @@ CONFIG_USB_EHCI_FSL=y | |||
124 | CONFIG_USB_STORAGE=y | 107 | CONFIG_USB_STORAGE=y |
125 | CONFIG_MMC=y | 108 | CONFIG_MMC=y |
126 | CONFIG_MMC_SDHCI=y | 109 | CONFIG_MMC_SDHCI=y |
110 | CONFIG_EDAC=y | ||
111 | CONFIG_EDAC_MM_EDAC=y | ||
127 | CONFIG_RTC_CLASS=y | 112 | CONFIG_RTC_CLASS=y |
128 | CONFIG_RTC_DRV_DS1307=y | 113 | CONFIG_RTC_DRV_DS1307=y |
129 | CONFIG_RTC_DRV_DS1374=y | 114 | CONFIG_RTC_DRV_DS1374=y |
130 | CONFIG_RTC_DRV_DS3232=y | 115 | CONFIG_RTC_DRV_DS3232=y |
131 | CONFIG_EDAC=y | ||
132 | CONFIG_EDAC_MM_EDAC=y | ||
133 | CONFIG_DMADEVICES=y | 116 | CONFIG_DMADEVICES=y |
134 | CONFIG_FSL_DMA=y | 117 | CONFIG_FSL_DMA=y |
118 | CONFIG_VIRT_DRIVERS=y | ||
119 | CONFIG_FSL_HV_MANAGER=y | ||
120 | CONFIG_FSL_CORENET_CF=y | ||
135 | CONFIG_EXT2_FS=y | 121 | CONFIG_EXT2_FS=y |
136 | CONFIG_EXT3_FS=y | 122 | CONFIG_EXT3_FS=y |
137 | CONFIG_ISO9660_FS=m | 123 | CONFIG_ISO9660_FS=m |
@@ -144,35 +130,24 @@ CONFIG_NTFS_FS=y | |||
144 | CONFIG_PROC_KCORE=y | 130 | CONFIG_PROC_KCORE=y |
145 | CONFIG_TMPFS=y | 131 | CONFIG_TMPFS=y |
146 | CONFIG_HUGETLBFS=y | 132 | CONFIG_HUGETLBFS=y |
147 | CONFIG_MISC_FILESYSTEMS=y | ||
148 | CONFIG_JFFS2_FS=y | 133 | CONFIG_JFFS2_FS=y |
149 | CONFIG_JFFS2_FS_DEBUG=1 | 134 | CONFIG_JFFS2_FS_DEBUG=1 |
150 | CONFIG_JFFS2_FS_WRITEBUFFER=y | ||
151 | CONFIG_JFFS2_ZLIB=y | ||
152 | CONFIG_JFFS2_RTIME=y | ||
153 | CONFIG_UBIFS_FS=y | 135 | CONFIG_UBIFS_FS=y |
154 | CONFIG_UBIFS_FS_XATTR=y | ||
155 | CONFIG_UBIFS_FS_LZO=y | ||
156 | CONFIG_UBIFS_FS_ZLIB=y | ||
157 | CONFIG_NFS_FS=y | 136 | CONFIG_NFS_FS=y |
158 | CONFIG_NFS_V4=y | 137 | CONFIG_NFS_V4=y |
159 | CONFIG_ROOT_NFS=y | 138 | CONFIG_ROOT_NFS=y |
160 | CONFIG_NFSD=m | 139 | CONFIG_NFSD=m |
140 | CONFIG_NLS_CODEPAGE_437=y | ||
141 | CONFIG_NLS_CODEPAGE_850=y | ||
161 | CONFIG_NLS_ISO8859_1=y | 142 | CONFIG_NLS_ISO8859_1=y |
162 | CONFIG_NLS_UTF8=m | 143 | CONFIG_NLS_UTF8=m |
163 | CONFIG_CRC_T10DIF=y | 144 | CONFIG_CRC_T10DIF=y |
164 | CONFIG_CRC16=y | 145 | CONFIG_DEBUG_INFO=y |
165 | CONFIG_ZLIB_DEFLATE=y | ||
166 | CONFIG_LZO_COMPRESS=y | ||
167 | CONFIG_LZO_DECOMPRESS=y | ||
168 | CONFIG_CRYPTO_DEFLATE=y | ||
169 | CONFIG_CRYPTO_LZO=y | ||
170 | CONFIG_FRAME_WARN=1024 | 146 | CONFIG_FRAME_WARN=1024 |
171 | CONFIG_MAGIC_SYSRQ=y | ||
172 | CONFIG_DEBUG_FS=y | 147 | CONFIG_DEBUG_FS=y |
148 | CONFIG_MAGIC_SYSRQ=y | ||
173 | CONFIG_DEBUG_SHIRQ=y | 149 | CONFIG_DEBUG_SHIRQ=y |
174 | CONFIG_DETECT_HUNG_TASK=y | 150 | CONFIG_DETECT_HUNG_TASK=y |
175 | CONFIG_DEBUG_INFO=y | ||
176 | CONFIG_CRYPTO_NULL=y | 151 | CONFIG_CRYPTO_NULL=y |
177 | CONFIG_CRYPTO_PCBC=m | 152 | CONFIG_CRYPTO_PCBC=m |
178 | CONFIG_CRYPTO_MD4=y | 153 | CONFIG_CRYPTO_MD4=y |
@@ -180,4 +155,3 @@ CONFIG_CRYPTO_SHA256=y | |||
180 | CONFIG_CRYPTO_SHA512=y | 155 | CONFIG_CRYPTO_SHA512=y |
181 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 156 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
182 | CONFIG_CRYPTO_DEV_FSL_CAAM=y | 157 | CONFIG_CRYPTO_DEV_FSL_CAAM=y |
183 | CONFIG_FSL_CORENET_CF=y | ||
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 7594c5ac6481..6fab06f7f411 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig | |||
@@ -16,6 +16,7 @@ CONFIG_MODULES=y | |||
16 | CONFIG_MODULE_UNLOAD=y | 16 | CONFIG_MODULE_UNLOAD=y |
17 | CONFIG_MODVERSIONS=y | 17 | CONFIG_MODVERSIONS=y |
18 | CONFIG_MODULE_SRCVERSION_ALL=y | 18 | CONFIG_MODULE_SRCVERSION_ALL=y |
19 | # CONFIG_PPC_POWERNV is not set | ||
19 | # CONFIG_PPC_PSERIES is not set | 20 | # CONFIG_PPC_PSERIES is not set |
20 | CONFIG_CPU_FREQ=y | 21 | CONFIG_CPU_FREQ=y |
21 | CONFIG_CPU_FREQ_GOV_POWERSAVE=y | 22 | CONFIG_CPU_FREQ_GOV_POWERSAVE=y |
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index c8b6a9ddb21b..fbd9e4163311 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig | |||
@@ -16,6 +16,7 @@ CONFIG_MODULE_UNLOAD=y | |||
16 | CONFIG_MODVERSIONS=y | 16 | CONFIG_MODVERSIONS=y |
17 | CONFIG_MODULE_SRCVERSION_ALL=y | 17 | CONFIG_MODULE_SRCVERSION_ALL=y |
18 | # CONFIG_BLK_DEV_BSG is not set | 18 | # CONFIG_BLK_DEV_BSG is not set |
19 | # CONFIG_PPC_POWERNV is not set | ||
19 | # CONFIG_PPC_PSERIES is not set | 20 | # CONFIG_PPC_PSERIES is not set |
20 | # CONFIG_PPC_PMAC is not set | 21 | # CONFIG_PPC_PMAC is not set |
21 | CONFIG_PPC_MAPLE=y | 22 | CONFIG_PPC_MAPLE=y |
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index fa1bfd37f1ec..d2c415489f72 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig | |||
@@ -213,7 +213,6 @@ CONFIG_RTC_DRV_DS1307=y | |||
213 | CONFIG_RTC_DRV_DS1374=y | 213 | CONFIG_RTC_DRV_DS1374=y |
214 | CONFIG_RTC_DRV_DS3232=y | 214 | CONFIG_RTC_DRV_DS3232=y |
215 | CONFIG_RTC_DRV_CMOS=y | 215 | CONFIG_RTC_DRV_CMOS=y |
216 | CONFIG_RTC_DRV_DS1307=y | ||
217 | CONFIG_DMADEVICES=y | 216 | CONFIG_DMADEVICES=y |
218 | CONFIG_FSL_DMA=y | 217 | CONFIG_FSL_DMA=y |
219 | # CONFIG_NET_DMA is not set | 218 | # CONFIG_NET_DMA is not set |
@@ -227,6 +226,9 @@ CONFIG_UDF_FS=m | |||
227 | CONFIG_MSDOS_FS=m | 226 | CONFIG_MSDOS_FS=m |
228 | CONFIG_VFAT_FS=y | 227 | CONFIG_VFAT_FS=y |
229 | CONFIG_NTFS_FS=y | 228 | CONFIG_NTFS_FS=y |
229 | CONFIG_NLS_CODEPAGE_437=y | ||
230 | CONFIG_NLS_CODEPAGE_850=y | ||
231 | CONFIG_NLS_ISO8859_1=y | ||
230 | CONFIG_PROC_KCORE=y | 232 | CONFIG_PROC_KCORE=y |
231 | CONFIG_TMPFS=y | 233 | CONFIG_TMPFS=y |
232 | CONFIG_HUGETLBFS=y | 234 | CONFIG_HUGETLBFS=y |
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index 0b452ebd8b3d..87460083dbc7 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig | |||
@@ -214,7 +214,6 @@ CONFIG_RTC_DRV_DS1307=y | |||
214 | CONFIG_RTC_DRV_DS1374=y | 214 | CONFIG_RTC_DRV_DS1374=y |
215 | CONFIG_RTC_DRV_DS3232=y | 215 | CONFIG_RTC_DRV_DS3232=y |
216 | CONFIG_RTC_DRV_CMOS=y | 216 | CONFIG_RTC_DRV_CMOS=y |
217 | CONFIG_RTC_DRV_DS1307=y | ||
218 | CONFIG_DMADEVICES=y | 217 | CONFIG_DMADEVICES=y |
219 | CONFIG_FSL_DMA=y | 218 | CONFIG_FSL_DMA=y |
220 | # CONFIG_NET_DMA is not set | 219 | # CONFIG_NET_DMA is not set |
@@ -228,6 +227,9 @@ CONFIG_UDF_FS=m | |||
228 | CONFIG_MSDOS_FS=m | 227 | CONFIG_MSDOS_FS=m |
229 | CONFIG_VFAT_FS=y | 228 | CONFIG_VFAT_FS=y |
230 | CONFIG_NTFS_FS=y | 229 | CONFIG_NTFS_FS=y |
230 | CONFIG_NLS_CODEPAGE_437=y | ||
231 | CONFIG_NLS_CODEPAGE_850=y | ||
232 | CONFIG_NLS_ISO8859_1=y | ||
231 | CONFIG_PROC_KCORE=y | 233 | CONFIG_PROC_KCORE=y |
232 | CONFIG_TMPFS=y | 234 | CONFIG_TMPFS=y |
233 | CONFIG_HUGETLBFS=y | 235 | CONFIG_HUGETLBFS=y |
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig index 35595ea74ff4..fc58aa8a89e4 100644 --- a/arch/powerpc/configs/mpc86xx_defconfig +++ b/arch/powerpc/configs/mpc86xx_defconfig | |||
@@ -145,6 +145,9 @@ CONFIG_UDF_FS=m | |||
145 | CONFIG_MSDOS_FS=m | 145 | CONFIG_MSDOS_FS=m |
146 | CONFIG_VFAT_FS=y | 146 | CONFIG_VFAT_FS=y |
147 | CONFIG_NTFS_FS=y | 147 | CONFIG_NTFS_FS=y |
148 | CONFIG_NLS_CODEPAGE_437=y | ||
149 | CONFIG_NLS_CODEPAGE_850=y | ||
150 | CONFIG_NLS_ISO8859_1=y | ||
148 | CONFIG_PROC_KCORE=y | 151 | CONFIG_PROC_KCORE=y |
149 | CONFIG_TMPFS=y | 152 | CONFIG_TMPFS=y |
150 | CONFIG_ADFS_FS=m | 153 | CONFIG_ADFS_FS=m |
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index e5e7838af008..3e72c8c06a0d 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig | |||
@@ -14,6 +14,7 @@ CONFIG_MODULE_UNLOAD=y | |||
14 | # CONFIG_BLK_DEV_BSG is not set | 14 | # CONFIG_BLK_DEV_BSG is not set |
15 | CONFIG_PARTITION_ADVANCED=y | 15 | CONFIG_PARTITION_ADVANCED=y |
16 | CONFIG_MAC_PARTITION=y | 16 | CONFIG_MAC_PARTITION=y |
17 | # CONFIG_PPC_POWERNV is not set | ||
17 | # CONFIG_PPC_PSERIES is not set | 18 | # CONFIG_PPC_PSERIES is not set |
18 | # CONFIG_PPC_PMAC is not set | 19 | # CONFIG_PPC_PMAC is not set |
19 | CONFIG_PPC_PASEMI=y | 20 | CONFIG_PPC_PASEMI=y |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 36518870e6b2..20bc5e2d368d 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
@@ -50,6 +50,7 @@ CONFIG_HZ_100=y | |||
50 | CONFIG_BINFMT_MISC=m | 50 | CONFIG_BINFMT_MISC=m |
51 | CONFIG_PPC_TRANSACTIONAL_MEM=y | 51 | CONFIG_PPC_TRANSACTIONAL_MEM=y |
52 | CONFIG_KEXEC=y | 52 | CONFIG_KEXEC=y |
53 | CONFIG_CRASH_DUMP=y | ||
53 | CONFIG_IRQ_ALL_CPUS=y | 54 | CONFIG_IRQ_ALL_CPUS=y |
54 | CONFIG_MEMORY_HOTREMOVE=y | 55 | CONFIG_MEMORY_HOTREMOVE=y |
55 | CONFIG_SCHED_SMT=y | 56 | CONFIG_SCHED_SMT=y |
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 3eb53d741070..3a39283333c3 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h | |||
@@ -133,7 +133,6 @@ extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); | |||
133 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); | 133 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); |
134 | extern void _exception(int, struct pt_regs *, int, unsigned long); | 134 | extern void _exception(int, struct pt_regs *, int, unsigned long); |
135 | extern void die(const char *, struct pt_regs *, long); | 135 | extern void die(const char *, struct pt_regs *, long); |
136 | extern void print_backtrace(unsigned long *); | ||
137 | 136 | ||
138 | #endif /* !__ASSEMBLY__ */ | 137 | #endif /* !__ASSEMBLY__ */ |
139 | 138 | ||
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h new file mode 100644 index 000000000000..ce216df31381 --- /dev/null +++ b/arch/powerpc/include/asm/copro.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_POWERPC_COPRO_H | ||
11 | #define _ASM_POWERPC_COPRO_H | ||
12 | |||
13 | struct copro_slb | ||
14 | { | ||
15 | u64 esid, vsid; | ||
16 | }; | ||
17 | |||
18 | int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, | ||
19 | unsigned long dsisr, unsigned *flt); | ||
20 | |||
21 | int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb); | ||
22 | |||
23 | |||
24 | #ifdef CONFIG_PPC_COPRO_BASE | ||
25 | void copro_flush_all_slbs(struct mm_struct *mm); | ||
26 | #else | ||
27 | static inline void copro_flush_all_slbs(struct mm_struct *mm) {} | ||
28 | #endif | ||
29 | #endif /* _ASM_POWERPC_COPRO_H */ | ||
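The new copro.h factors the coprocessor fault and SLB handling out of the Cell platform code so both spufs and the new cxl driver can share it. Below is a minimal, hedged sketch of how a coprocessor driver might use these helpers when the device reports a translation fault; the wrapper function itself and its arguments are hypothetical, only the two copro_* calls come from this header.

/* Hedged sketch: resolving a coprocessor-reported translation fault. */
static int example_handle_copro_fault(struct mm_struct *mm, unsigned long ea,
				      unsigned long dsisr)
{
	struct copro_slb slb;
	unsigned int flt;
	int ret;

	/* Fault the page in on behalf of the coprocessor. */
	ret = copro_handle_mm_fault(mm, ea, dsisr, &flt);
	if (ret)
		return ret;

	/* Compute the ESID/VSID pair for the faulting effective address. */
	ret = copro_calculate_slb(mm, ea, &slb);
	if (ret)
		return ret;

	/* The caller would now program slb.esid / slb.vsid into the device. */
	return 0;
}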
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 150866b2a3fe..894d538f3567 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -135,6 +135,7 @@ static inline int dma_supported(struct device *dev, u64 mask) | |||
135 | 135 | ||
136 | extern int dma_set_mask(struct device *dev, u64 dma_mask); | 136 | extern int dma_set_mask(struct device *dev, u64 dma_mask); |
137 | extern int __dma_set_mask(struct device *dev, u64 dma_mask); | 137 | extern int __dma_set_mask(struct device *dev, u64 dma_mask); |
138 | extern u64 __dma_get_required_mask(struct device *dev); | ||
138 | 139 | ||
139 | #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) | 140 | #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) |
140 | 141 | ||
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 9983c3d26bca..3b260efbfbf9 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h | |||
@@ -146,6 +146,11 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev) | |||
146 | return edev ? edev->pdev : NULL; | 146 | return edev ? edev->pdev : NULL; |
147 | } | 147 | } |
148 | 148 | ||
149 | static inline struct eeh_pe *eeh_dev_to_pe(struct eeh_dev* edev) | ||
150 | { | ||
151 | return edev ? edev->pe : NULL; | ||
152 | } | ||
153 | |||
149 | /* Return values from eeh_ops::next_error */ | 154 | /* Return values from eeh_ops::next_error */ |
150 | enum { | 155 | enum { |
151 | EEH_NEXT_ERR_NONE = 0, | 156 | EEH_NEXT_ERR_NONE = 0, |
@@ -167,6 +172,7 @@ enum { | |||
167 | #define EEH_OPT_ENABLE 1 /* EEH enable */ | 172 | #define EEH_OPT_ENABLE 1 /* EEH enable */ |
168 | #define EEH_OPT_THAW_MMIO 2 /* MMIO enable */ | 173 | #define EEH_OPT_THAW_MMIO 2 /* MMIO enable */ |
169 | #define EEH_OPT_THAW_DMA 3 /* DMA enable */ | 174 | #define EEH_OPT_THAW_DMA 3 /* DMA enable */ |
175 | #define EEH_OPT_FREEZE_PE 4 /* Freeze PE */ | ||
170 | #define EEH_STATE_UNAVAILABLE (1 << 0) /* State unavailable */ | 176 | #define EEH_STATE_UNAVAILABLE (1 << 0) /* State unavailable */ |
171 | #define EEH_STATE_NOT_SUPPORT (1 << 1) /* EEH not supported */ | 177 | #define EEH_STATE_NOT_SUPPORT (1 << 1) /* EEH not supported */ |
172 | #define EEH_STATE_RESET_ACTIVE (1 << 2) /* Active reset */ | 178 | #define EEH_STATE_RESET_ACTIVE (1 << 2) /* Active reset */ |
@@ -198,6 +204,8 @@ struct eeh_ops { | |||
198 | int (*wait_state)(struct eeh_pe *pe, int max_wait); | 204 | int (*wait_state)(struct eeh_pe *pe, int max_wait); |
199 | int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len); | 205 | int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len); |
200 | int (*configure_bridge)(struct eeh_pe *pe); | 206 | int (*configure_bridge)(struct eeh_pe *pe); |
207 | int (*err_inject)(struct eeh_pe *pe, int type, int func, | ||
208 | unsigned long addr, unsigned long mask); | ||
201 | int (*read_config)(struct device_node *dn, int where, int size, u32 *val); | 209 | int (*read_config)(struct device_node *dn, int where, int size, u32 *val); |
202 | int (*write_config)(struct device_node *dn, int where, int size, u32 val); | 210 | int (*write_config)(struct device_node *dn, int where, int size, u32 val); |
203 | int (*next_error)(struct eeh_pe **pe); | 211 | int (*next_error)(struct eeh_pe **pe); |
@@ -269,8 +277,7 @@ void eeh_dev_phb_init_dynamic(struct pci_controller *phb); | |||
269 | int eeh_init(void); | 277 | int eeh_init(void); |
270 | int __init eeh_ops_register(struct eeh_ops *ops); | 278 | int __init eeh_ops_register(struct eeh_ops *ops); |
271 | int __exit eeh_ops_unregister(const char *name); | 279 | int __exit eeh_ops_unregister(const char *name); |
272 | unsigned long eeh_check_failure(const volatile void __iomem *token, | 280 | int eeh_check_failure(const volatile void __iomem *token); |
273 | unsigned long val); | ||
274 | int eeh_dev_check_failure(struct eeh_dev *edev); | 281 | int eeh_dev_check_failure(struct eeh_dev *edev); |
275 | void eeh_addr_cache_build(void); | 282 | void eeh_addr_cache_build(void); |
276 | void eeh_add_device_early(struct device_node *); | 283 | void eeh_add_device_early(struct device_node *); |
@@ -279,6 +286,8 @@ void eeh_add_device_late(struct pci_dev *); | |||
279 | void eeh_add_device_tree_late(struct pci_bus *); | 286 | void eeh_add_device_tree_late(struct pci_bus *); |
280 | void eeh_add_sysfs_files(struct pci_bus *); | 287 | void eeh_add_sysfs_files(struct pci_bus *); |
281 | void eeh_remove_device(struct pci_dev *); | 288 | void eeh_remove_device(struct pci_dev *); |
289 | int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state); | ||
290 | int eeh_pe_reset_and_recover(struct eeh_pe *pe); | ||
282 | int eeh_dev_open(struct pci_dev *pdev); | 291 | int eeh_dev_open(struct pci_dev *pdev); |
283 | void eeh_dev_release(struct pci_dev *pdev); | 292 | void eeh_dev_release(struct pci_dev *pdev); |
284 | struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group); | 293 | struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group); |
@@ -321,9 +330,9 @@ static inline void *eeh_dev_init(struct device_node *dn, void *data) | |||
321 | 330 | ||
322 | static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { } | 331 | static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { } |
323 | 332 | ||
324 | static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) | 333 | static inline int eeh_check_failure(const volatile void __iomem *token) |
325 | { | 334 | { |
326 | return val; | 335 | return 0; |
327 | } | 336 | } |
328 | 337 | ||
329 | #define eeh_dev_check_failure(x) (0) | 338 | #define eeh_dev_check_failure(x) (0) |
@@ -354,7 +363,7 @@ static inline u8 eeh_readb(const volatile void __iomem *addr) | |||
354 | { | 363 | { |
355 | u8 val = in_8(addr); | 364 | u8 val = in_8(addr); |
356 | if (EEH_POSSIBLE_ERROR(val, u8)) | 365 | if (EEH_POSSIBLE_ERROR(val, u8)) |
357 | return eeh_check_failure(addr, val); | 366 | eeh_check_failure(addr); |
358 | return val; | 367 | return val; |
359 | } | 368 | } |
360 | 369 | ||
@@ -362,7 +371,7 @@ static inline u16 eeh_readw(const volatile void __iomem *addr) | |||
362 | { | 371 | { |
363 | u16 val = in_le16(addr); | 372 | u16 val = in_le16(addr); |
364 | if (EEH_POSSIBLE_ERROR(val, u16)) | 373 | if (EEH_POSSIBLE_ERROR(val, u16)) |
365 | return eeh_check_failure(addr, val); | 374 | eeh_check_failure(addr); |
366 | return val; | 375 | return val; |
367 | } | 376 | } |
368 | 377 | ||
@@ -370,7 +379,7 @@ static inline u32 eeh_readl(const volatile void __iomem *addr) | |||
370 | { | 379 | { |
371 | u32 val = in_le32(addr); | 380 | u32 val = in_le32(addr); |
372 | if (EEH_POSSIBLE_ERROR(val, u32)) | 381 | if (EEH_POSSIBLE_ERROR(val, u32)) |
373 | return eeh_check_failure(addr, val); | 382 | eeh_check_failure(addr); |
374 | return val; | 383 | return val; |
375 | } | 384 | } |
376 | 385 | ||
@@ -378,7 +387,7 @@ static inline u64 eeh_readq(const volatile void __iomem *addr) | |||
378 | { | 387 | { |
379 | u64 val = in_le64(addr); | 388 | u64 val = in_le64(addr); |
380 | if (EEH_POSSIBLE_ERROR(val, u64)) | 389 | if (EEH_POSSIBLE_ERROR(val, u64)) |
381 | return eeh_check_failure(addr, val); | 390 | eeh_check_failure(addr); |
382 | return val; | 391 | return val; |
383 | } | 392 | } |
384 | 393 | ||
@@ -386,7 +395,7 @@ static inline u16 eeh_readw_be(const volatile void __iomem *addr) | |||
386 | { | 395 | { |
387 | u16 val = in_be16(addr); | 396 | u16 val = in_be16(addr); |
388 | if (EEH_POSSIBLE_ERROR(val, u16)) | 397 | if (EEH_POSSIBLE_ERROR(val, u16)) |
389 | return eeh_check_failure(addr, val); | 398 | eeh_check_failure(addr); |
390 | return val; | 399 | return val; |
391 | } | 400 | } |
392 | 401 | ||
@@ -394,7 +403,7 @@ static inline u32 eeh_readl_be(const volatile void __iomem *addr) | |||
394 | { | 403 | { |
395 | u32 val = in_be32(addr); | 404 | u32 val = in_be32(addr); |
396 | if (EEH_POSSIBLE_ERROR(val, u32)) | 405 | if (EEH_POSSIBLE_ERROR(val, u32)) |
397 | return eeh_check_failure(addr, val); | 406 | eeh_check_failure(addr); |
398 | return val; | 407 | return val; |
399 | } | 408 | } |
400 | 409 | ||
@@ -402,7 +411,7 @@ static inline u64 eeh_readq_be(const volatile void __iomem *addr) | |||
402 | { | 411 | { |
403 | u64 val = in_be64(addr); | 412 | u64 val = in_be64(addr); |
404 | if (EEH_POSSIBLE_ERROR(val, u64)) | 413 | if (EEH_POSSIBLE_ERROR(val, u64)) |
405 | return eeh_check_failure(addr, val); | 414 | eeh_check_failure(addr); |
406 | return val; | 415 | return val; |
407 | } | 416 | } |
408 | 417 | ||
@@ -416,7 +425,7 @@ static inline void eeh_memcpy_fromio(void *dest, const | |||
416 | * were copied. Check all four bytes. | 425 | * were copied. Check all four bytes. |
417 | */ | 426 | */ |
418 | if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32)) | 427 | if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32)) |
419 | eeh_check_failure(src, *((u32 *)(dest + n - 4))); | 428 | eeh_check_failure(src); |
420 | } | 429 | } |
421 | 430 | ||
422 | /* in-string eeh macros */ | 431 | /* in-string eeh macros */ |
@@ -425,7 +434,7 @@ static inline void eeh_readsb(const volatile void __iomem *addr, void * buf, | |||
425 | { | 434 | { |
426 | _insb(addr, buf, ns); | 435 | _insb(addr, buf, ns); |
427 | if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8)) | 436 | if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8)) |
428 | eeh_check_failure(addr, *(u8*)buf); | 437 | eeh_check_failure(addr); |
429 | } | 438 | } |
430 | 439 | ||
431 | static inline void eeh_readsw(const volatile void __iomem *addr, void * buf, | 440 | static inline void eeh_readsw(const volatile void __iomem *addr, void * buf, |
@@ -433,7 +442,7 @@ static inline void eeh_readsw(const volatile void __iomem *addr, void * buf, | |||
433 | { | 442 | { |
434 | _insw(addr, buf, ns); | 443 | _insw(addr, buf, ns); |
435 | if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16)) | 444 | if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16)) |
436 | eeh_check_failure(addr, *(u16*)buf); | 445 | eeh_check_failure(addr); |
437 | } | 446 | } |
438 | 447 | ||
439 | static inline void eeh_readsl(const volatile void __iomem *addr, void * buf, | 448 | static inline void eeh_readsl(const volatile void __iomem *addr, void * buf, |
@@ -441,7 +450,7 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf, | |||
441 | { | 450 | { |
442 | _insl(addr, buf, nl); | 451 | _insl(addr, buf, nl); |
443 | if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32)) | 452 | if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32)) |
444 | eeh_check_failure(addr, *(u32*)buf); | 453 | eeh_check_failure(addr); |
445 | } | 454 | } |
446 | 455 | ||
447 | #endif /* CONFIG_PPC64 */ | 456 | #endif /* CONFIG_PPC64 */ |
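With this change eeh_check_failure() no longer manufactures a return value; it just reports a suspected frozen PE, and the accessors return whatever the MMIO read produced. A hedged sketch of the resulting pattern in a caller follows; the helper and its register offset are made up, only eeh_readl()/eeh_check_failure() are from this header.

/* Hedged sketch: reading a device register through the EEH-aware accessor. */
static u32 example_read_reg(void __iomem *regs, unsigned int off)
{
	u32 val = eeh_readl(regs + off);

	/*
	 * An all-ones value may mean a frozen PE rather than real data;
	 * eeh_readl() has already called eeh_check_failure() in that case,
	 * so the caller only needs to treat the value as suspect.
	 */
	if (val == 0xffffffff)
		pr_debug("possible EEH freeze at offset %#x\n", off);
	return val;
}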
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h index 5b0c98bd46ab..1cb39c96d155 100644 --- a/arch/powerpc/include/asm/hydra.h +++ b/arch/powerpc/include/asm/hydra.h | |||
@@ -95,7 +95,6 @@ extern volatile struct Hydra __iomem *Hydra; | |||
95 | #define HYDRA_INT_SPARE 19 | 95 | #define HYDRA_INT_SPARE 19 |
96 | 96 | ||
97 | extern int hydra_init(void); | 97 | extern int hydra_init(void); |
98 | extern void macio_adb_init(void); | ||
99 | 98 | ||
100 | #endif /* __KERNEL__ */ | 99 | #endif /* __KERNEL__ */ |
101 | 100 | ||
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 41f13cec8a8f..e8e3a0a04eb0 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h | |||
@@ -31,11 +31,6 @@ extern atomic_t ppc_n_lost_interrupts; | |||
31 | 31 | ||
32 | extern irq_hw_number_t virq_to_hw(unsigned int virq); | 32 | extern irq_hw_number_t virq_to_hw(unsigned int virq); |
33 | 33 | ||
34 | /** | ||
35 | * irq_early_init - Init irq remapping subsystem | ||
36 | */ | ||
37 | extern void irq_early_init(void); | ||
38 | |||
39 | static __inline__ int irq_canonicalize(int irq) | 34 | static __inline__ int irq_canonicalize(int irq) |
40 | { | 35 | { |
41 | return irq; | 36 | return irq; |
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 16d7e33d35e9..19c36cba37c4 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h | |||
@@ -81,7 +81,6 @@ extern void default_machine_crash_shutdown(struct pt_regs *regs); | |||
81 | extern int crash_shutdown_register(crash_shutdown_t handler); | 81 | extern int crash_shutdown_register(crash_shutdown_t handler); |
82 | extern int crash_shutdown_unregister(crash_shutdown_t handler); | 82 | extern int crash_shutdown_unregister(crash_shutdown_t handler); |
83 | 83 | ||
84 | extern void machine_kexec_simple(struct kimage *image); | ||
85 | extern void crash_kexec_secondary(struct pt_regs *regs); | 84 | extern void crash_kexec_secondary(struct pt_regs *regs); |
86 | extern int overlaps_crashkernel(unsigned long start, unsigned long size); | 85 | extern int overlaps_crashkernel(unsigned long start, unsigned long size); |
87 | extern void reserve_crashkernel(void); | 86 | extern void reserve_crashkernel(void); |
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 3af721633618..307347f8ddbd 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h | |||
@@ -328,8 +328,6 @@ extern struct machdep_calls *machine_id; | |||
328 | 328 | ||
329 | extern void probe_machine(void); | 329 | extern void probe_machine(void); |
330 | 330 | ||
331 | extern char cmd_line[COMMAND_LINE_SIZE]; | ||
332 | |||
333 | #ifdef CONFIG_PPC_PMAC | 331 | #ifdef CONFIG_PPC_PMAC |
334 | /* | 332 | /* |
335 | * Power macintoshes have either a CUDA, PMU or SMU controlling | 333 | * Power macintoshes have either a CUDA, PMU or SMU controlling |
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index d76514487d6f..aeebc94b2bce 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h | |||
@@ -190,6 +190,13 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) | |||
190 | 190 | ||
191 | #ifndef __ASSEMBLY__ | 191 | #ifndef __ASSEMBLY__ |
192 | 192 | ||
193 | static inline int slb_vsid_shift(int ssize) | ||
194 | { | ||
195 | if (ssize == MMU_SEGSIZE_256M) | ||
196 | return SLB_VSID_SHIFT; | ||
197 | return SLB_VSID_SHIFT_1T; | ||
198 | } | ||
199 | |||
193 | static inline int segment_shift(int ssize) | 200 | static inline int segment_shift(int ssize) |
194 | { | 201 | { |
195 | if (ssize == MMU_SEGSIZE_256M) | 202 | if (ssize == MMU_SEGSIZE_256M) |
@@ -317,6 +324,7 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access, | |||
317 | unsigned int local, int ssize); | 324 | unsigned int local, int ssize); |
318 | struct mm_struct; | 325 | struct mm_struct; |
319 | unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap); | 326 | unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap); |
327 | extern int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap); | ||
320 | extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); | 328 | extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); |
321 | int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, | 329 | int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, |
322 | pte_t *ptep, unsigned long trap, int local, int ssize, | 330 | pte_t *ptep, unsigned long trap, int local, int ssize, |
@@ -342,6 +350,8 @@ extern void hash_failure_debug(unsigned long ea, unsigned long access, | |||
342 | extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | 350 | extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, |
343 | unsigned long pstart, unsigned long prot, | 351 | unsigned long pstart, unsigned long prot, |
344 | int psize, int ssize); | 352 | int psize, int ssize); |
353 | int htab_remove_mapping(unsigned long vstart, unsigned long vend, | ||
354 | int psize, int ssize); | ||
345 | extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages); | 355 | extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages); |
346 | extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr); | 356 | extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr); |
347 | 357 | ||
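slb_vsid_shift() and hash_page_mm() are exposed here so code outside the core hash MMU (such as the copro/cxl paths) can build SLB entries and hash in pages for an arbitrary mm. A rough sketch of packing a VSID with the new helper is shown below; the wrapper and its flags argument (intended to carry the usual SLB_VSID_* bits) are assumptions, not part of this header.

/* Hedged sketch: packing a VSID into the data half of an SLB entry. */
static unsigned long example_slb_vsid(unsigned long vsid, int ssize,
				      unsigned long flags)
{
	/* The shift depends on whether this is a 256M or 1T segment. */
	return (vsid << slb_vsid_shift(ssize)) | flags;
}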
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 86055e598269..9124b0ede1fc 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
@@ -135,6 +135,7 @@ struct opal_sg_list { | |||
135 | #define OPAL_FLASH_MANAGE 77 | 135 | #define OPAL_FLASH_MANAGE 77 |
136 | #define OPAL_FLASH_UPDATE 78 | 136 | #define OPAL_FLASH_UPDATE 78 |
137 | #define OPAL_RESYNC_TIMEBASE 79 | 137 | #define OPAL_RESYNC_TIMEBASE 79 |
138 | #define OPAL_CHECK_TOKEN 80 | ||
138 | #define OPAL_DUMP_INIT 81 | 139 | #define OPAL_DUMP_INIT 81 |
139 | #define OPAL_DUMP_INFO 82 | 140 | #define OPAL_DUMP_INFO 82 |
140 | #define OPAL_DUMP_READ 83 | 141 | #define OPAL_DUMP_READ 83 |
@@ -146,7 +147,9 @@ struct opal_sg_list { | |||
146 | #define OPAL_GET_PARAM 89 | 147 | #define OPAL_GET_PARAM 89 |
147 | #define OPAL_SET_PARAM 90 | 148 | #define OPAL_SET_PARAM 90 |
148 | #define OPAL_DUMP_RESEND 91 | 149 | #define OPAL_DUMP_RESEND 91 |
150 | #define OPAL_PCI_SET_PHB_CXL_MODE 93 | ||
149 | #define OPAL_DUMP_INFO2 94 | 151 | #define OPAL_DUMP_INFO2 94 |
152 | #define OPAL_PCI_ERR_INJECT 96 | ||
150 | #define OPAL_PCI_EEH_FREEZE_SET 97 | 153 | #define OPAL_PCI_EEH_FREEZE_SET 97 |
151 | #define OPAL_HANDLE_HMI 98 | 154 | #define OPAL_HANDLE_HMI 98 |
152 | #define OPAL_REGISTER_DUMP_REGION 101 | 155 | #define OPAL_REGISTER_DUMP_REGION 101 |
@@ -199,6 +202,35 @@ enum OpalPciErrorSeverity { | |||
199 | OPAL_EEH_SEV_INF = 5 | 202 | OPAL_EEH_SEV_INF = 5 |
200 | }; | 203 | }; |
201 | 204 | ||
205 | enum OpalErrinjectType { | ||
206 | OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0, | ||
207 | OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1, | ||
208 | }; | ||
209 | |||
210 | enum OpalErrinjectFunc { | ||
211 | /* IOA bus specific errors */ | ||
212 | OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0, | ||
213 | OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1, | ||
214 | OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2, | ||
215 | OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3, | ||
216 | OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4, | ||
217 | OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5, | ||
218 | OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6, | ||
219 | OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7, | ||
220 | OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8, | ||
221 | OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9, | ||
222 | OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10, | ||
223 | OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11, | ||
224 | OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12, | ||
225 | OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13, | ||
226 | OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14, | ||
227 | OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15, | ||
228 | OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16, | ||
229 | OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17, | ||
230 | OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18, | ||
231 | OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19, | ||
232 | }; | ||
233 | |||
202 | enum OpalShpcAction { | 234 | enum OpalShpcAction { |
203 | OPAL_SHPC_GET_LINK_STATE = 0, | 235 | OPAL_SHPC_GET_LINK_STATE = 0, |
204 | OPAL_SHPC_GET_SLOT_STATE = 1 | 236 | OPAL_SHPC_GET_SLOT_STATE = 1 |
@@ -356,9 +388,12 @@ enum OpalM64EnableAction { | |||
356 | }; | 388 | }; |
357 | 389 | ||
358 | enum OpalPciResetScope { | 390 | enum OpalPciResetScope { |
359 | OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3, | 391 | OPAL_RESET_PHB_COMPLETE = 1, |
360 | OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5, | 392 | OPAL_RESET_PCI_LINK = 2, |
361 | OPAL_PCI_IODA_TABLE_RESET = 6, | 393 | OPAL_RESET_PHB_ERROR = 3, |
394 | OPAL_RESET_PCI_HOT = 4, | ||
395 | OPAL_RESET_PCI_FUNDAMENTAL = 5, | ||
396 | OPAL_RESET_PCI_IODA_TABLE = 6 | ||
362 | }; | 397 | }; |
363 | 398 | ||
364 | enum OpalPciReinitScope { | 399 | enum OpalPciReinitScope { |
@@ -819,6 +854,8 @@ int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number, | |||
819 | uint64_t eeh_action_token); | 854 | uint64_t eeh_action_token); |
820 | int64_t opal_pci_eeh_freeze_set(uint64_t phb_id, uint64_t pe_number, | 855 | int64_t opal_pci_eeh_freeze_set(uint64_t phb_id, uint64_t pe_number, |
821 | uint64_t eeh_action_token); | 856 | uint64_t eeh_action_token); |
857 | int64_t opal_pci_err_inject(uint64_t phb_id, uint32_t pe_no, uint32_t type, | ||
858 | uint32_t func, uint64_t addr, uint64_t mask); | ||
822 | int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state); | 859 | int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state); |
823 | 860 | ||
824 | 861 | ||
@@ -887,6 +924,7 @@ int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe, | |||
887 | __be16 *pci_error_type, __be16 *severity); | 924 | __be16 *pci_error_type, __be16 *severity); |
888 | int64_t opal_pci_poll(uint64_t phb_id); | 925 | int64_t opal_pci_poll(uint64_t phb_id); |
889 | int64_t opal_return_cpu(void); | 926 | int64_t opal_return_cpu(void); |
927 | int64_t opal_check_token(uint64_t token); | ||
890 | int64_t opal_reinit_cpus(uint64_t flags); | 928 | int64_t opal_reinit_cpus(uint64_t flags); |
891 | 929 | ||
892 | int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); | 930 | int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); |
@@ -924,6 +962,7 @@ int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data); | |||
924 | int64_t opal_handle_hmi(void); | 962 | int64_t opal_handle_hmi(void); |
925 | int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end); | 963 | int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end); |
926 | int64_t opal_unregister_dump_region(uint32_t id); | 964 | int64_t opal_unregister_dump_region(uint32_t id); |
965 | int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number); | ||
927 | 966 | ||
928 | /* Internal functions */ | 967 | /* Internal functions */ |
929 | extern int early_init_dt_scan_opal(unsigned long node, const char *uname, | 968 | extern int early_init_dt_scan_opal(unsigned long node, const char *uname, |
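The new OPAL_PCI_ERR_INJECT token and the OpalErrinject* enums back the eeh_ops->err_inject hook added above, letting the EEH code ask firmware to inject PCI errors for testing. A hedged sketch of a call site, with a hypothetical wrapper and caller-supplied PHB id, PE number, address and mask, is:

/* Hedged sketch: asking OPAL to inject a config-space load address error. */
static int64_t example_inject_cfg_err(uint64_t phb_id, uint32_t pe_no,
				      uint64_t addr, uint64_t mask)
{
	return opal_pci_err_inject(phb_id, pe_no,
				   OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR,
				   OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR,
				   addr, mask);
}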
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 88693cef4f3d..d908a46d05c0 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h | |||
@@ -42,20 +42,40 @@ | |||
42 | 42 | ||
43 | typedef unsigned long pte_basic_t; | 43 | typedef unsigned long pte_basic_t; |
44 | 44 | ||
45 | static __inline__ void clear_page(void *addr) | 45 | static inline void clear_page(void *addr) |
46 | { | 46 | { |
47 | unsigned long lines, line_size; | 47 | unsigned long iterations; |
48 | 48 | unsigned long onex, twox, fourx, eightx; | |
49 | line_size = ppc64_caches.dline_size; | 49 | |
50 | lines = ppc64_caches.dlines_per_page; | 50 | iterations = ppc64_caches.dlines_per_page / 8; |
51 | 51 | ||
52 | __asm__ __volatile__( | 52 | /* |
53 | * Some versions of gcc use multiply instructions to | ||
54 | * calculate the offsets so let's give it a hand to | ||
55 | * do better. | ||
56 | */ | ||
57 | onex = ppc64_caches.dline_size; | ||
58 | twox = onex << 1; | ||
59 | fourx = onex << 2; | ||
60 | eightx = onex << 3; | ||
61 | |||
62 | asm volatile( | ||
53 | "mtctr %1 # clear_page\n\ | 63 | "mtctr %1 # clear_page\n\ |
54 | 1: dcbz 0,%0\n\ | 64 | .balign 16\n\ |
55 | add %0,%0,%3\n\ | 65 | 1: dcbz 0,%0\n\ |
66 | dcbz %3,%0\n\ | ||
67 | dcbz %4,%0\n\ | ||
68 | dcbz %5,%0\n\ | ||
69 | dcbz %6,%0\n\ | ||
70 | dcbz %7,%0\n\ | ||
71 | dcbz %8,%0\n\ | ||
72 | dcbz %9,%0\n\ | ||
73 | add %0,%0,%10\n\ | ||
56 | bdnz+ 1b" | 74 | bdnz+ 1b" |
57 | : "=r" (addr) | 75 | : "=&r" (addr) |
58 | : "r" (lines), "0" (addr), "r" (line_size) | 76 | : "r" (iterations), "0" (addr), "b" (onex), "b" (twox), |
77 | "b" (twox+onex), "b" (fourx), "b" (fourx+onex), | ||
78 | "b" (twox+fourx), "b" (eightx-onex), "r" (eightx) | ||
59 | : "ctr", "memory"); | 79 | : "ctr", "memory"); |
60 | } | 80 | } |
61 | 81 | ||
@@ -104,7 +124,6 @@ extern unsigned long slice_get_unmapped_area(unsigned long addr, | |||
104 | extern unsigned int get_slice_psize(struct mm_struct *mm, | 124 | extern unsigned int get_slice_psize(struct mm_struct *mm, |
105 | unsigned long addr); | 125 | unsigned long addr); |
106 | 126 | ||
107 | extern void slice_init_context(struct mm_struct *mm, unsigned int psize); | ||
108 | extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); | 127 | extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); |
109 | extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, | 128 | extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, |
110 | unsigned long len, unsigned int psize); | 129 | unsigned long len, unsigned int psize); |
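The rewritten clear_page() zeroes eight cache lines per loop iteration with dcbz, cutting the mtctr/branch overhead of the old one-line-per-iteration loop (this is the clear_page() speedup mentioned in the merge description). The sketch below shows the loop shape in plain C for illustration only; the real code must use dcbz rather than stores, and the helper name is invented.

/* Hedged sketch: the loop shape of the unrolled clear_page(). */
static void example_clear_page_shape(void *addr, unsigned long line_size,
				     unsigned long lines_per_page)
{
	char *p = addr;
	unsigned long i, j;

	for (i = 0; i < lines_per_page / 8; i++) {	/* one CTR iteration */
		for (j = 0; j < 8; j++)			/* eight dcbz's worth */
			memset(p + j * line_size, 0, line_size);
		p += 8 * line_size;
	}
}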
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index 47edde8c3556..945e47adf7db 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h | |||
@@ -8,8 +8,6 @@ | |||
8 | #include <linux/threads.h> | 8 | #include <linux/threads.h> |
9 | #include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */ | 9 | #include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */ |
10 | 10 | ||
11 | extern unsigned long va_to_phys(unsigned long address); | ||
12 | extern pte_t *va_to_pte(unsigned long address); | ||
13 | extern unsigned long ioremap_bot; | 11 | extern unsigned long ioremap_bot; |
14 | 12 | ||
15 | #ifdef CONFIG_44x | 13 | #ifdef CONFIG_44x |
@@ -50,10 +48,10 @@ extern int icache_44x_need_flush; | |||
50 | #define FIRST_USER_ADDRESS 0 | 48 | #define FIRST_USER_ADDRESS 0 |
51 | 49 | ||
52 | #define pte_ERROR(e) \ | 50 | #define pte_ERROR(e) \ |
53 | printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ | 51 | pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ |
54 | (unsigned long long)pte_val(e)) | 52 | (unsigned long long)pte_val(e)) |
55 | #define pgd_ERROR(e) \ | 53 | #define pgd_ERROR(e) \ |
56 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | 54 | pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) |
57 | 55 | ||
58 | /* | 56 | /* |
59 | * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary | 57 | * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary |
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h index 12798c9d4b4b..7b935683f268 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h +++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h | |||
@@ -64,7 +64,7 @@ | |||
64 | (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) | 64 | (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) |
65 | 65 | ||
66 | #define pud_ERROR(e) \ | 66 | #define pud_ERROR(e) \ |
67 | printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) | 67 | pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ | 70 | * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ |
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 7b3d54fae46f..ae153c40ab7c 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h | |||
@@ -328,11 +328,11 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) | |||
328 | #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) | 328 | #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) |
329 | 329 | ||
330 | #define pte_ERROR(e) \ | 330 | #define pte_ERROR(e) \ |
331 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | 331 | pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) |
332 | #define pmd_ERROR(e) \ | 332 | #define pmd_ERROR(e) \ |
333 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) | 333 | pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) |
334 | #define pgd_ERROR(e) \ | 334 | #define pgd_ERROR(e) \ |
335 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | 335 | pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) |
336 | 336 | ||
337 | /* Encode and de-code a swap entry */ | 337 | /* Encode and de-code a swap entry */ |
338 | #define __swp_type(entry) (((entry).val >> 1) & 0x3f) | 338 | #define __swp_type(entry) (((entry).val >> 1) & 0x3f) |
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index f60d4ea8b50c..316f9a5da173 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #ifndef __ASSEMBLY__ | 5 | #ifndef __ASSEMBLY__ |
6 | #include <linux/mmdebug.h> | 6 | #include <linux/mmdebug.h> |
7 | #include <linux/mmzone.h> | ||
7 | #include <asm/processor.h> /* For TASK_SIZE */ | 8 | #include <asm/processor.h> /* For TASK_SIZE */ |
8 | #include <asm/mmu.h> | 9 | #include <asm/mmu.h> |
9 | #include <asm/page.h> | 10 | #include <asm/page.h> |
@@ -248,6 +249,8 @@ extern unsigned long empty_zero_page[]; | |||
248 | 249 | ||
249 | extern pgd_t swapper_pg_dir[]; | 250 | extern pgd_t swapper_pg_dir[]; |
250 | 251 | ||
252 | void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn); | ||
253 | int dma_pfn_limit_to_zone(u64 pfn_limit); | ||
251 | extern void paging_init(void); | 254 | extern void paging_init(void); |
252 | 255 | ||
253 | /* | 256 | /* |
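limit_zone_pfn() and dma_pfn_limit_to_zone() support devices whose DMA mask cannot cover all of memory: platform setup records a PFN limit for a zone, and the DMA code later maps a device's mask back to a suitable zone. A hedged sketch of a platform capping ZONE_DMA during setup is shown below; the 2 GB limit and the helper name are purely illustrative.

/* Hedged sketch: capping ZONE_DMA during platform setup (2GB is an example). */
static void example_setup_dma_zone(void)
{
	unsigned long dma_limit_pfn = (2UL << 30) >> PAGE_SHIFT;

	limit_zone_pfn(ZONE_DMA, dma_limit_pfn);
}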
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 12c32c5f533d..67859edbf8fd 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h | |||
@@ -273,7 +273,7 @@ static inline long plpar_set_mode(unsigned long mflags, unsigned long resource, | |||
273 | static inline long enable_reloc_on_exceptions(void) | 273 | static inline long enable_reloc_on_exceptions(void) |
274 | { | 274 | { |
275 | /* mflags = 3: Exceptions at 0xC000000000004000 */ | 275 | /* mflags = 3: Exceptions at 0xC000000000004000 */ |
276 | return plpar_set_mode(3, 3, 0, 0); | 276 | return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0); |
277 | } | 277 | } |
278 | 278 | ||
279 | /* | 279 | /* |
@@ -284,7 +284,7 @@ static inline long enable_reloc_on_exceptions(void) | |||
284 | * returns H_SUCCESS. | 284 | * returns H_SUCCESS. |
285 | */ | 285 | */ |
286 | static inline long disable_reloc_on_exceptions(void) { | 286 | static inline long disable_reloc_on_exceptions(void) { |
287 | return plpar_set_mode(0, 3, 0, 0); | 287 | return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0); |
288 | } | 288 | } |
289 | 289 | ||
290 | /* | 290 | /* |
@@ -297,7 +297,7 @@ static inline long disable_reloc_on_exceptions(void) { | |||
297 | static inline long enable_big_endian_exceptions(void) | 297 | static inline long enable_big_endian_exceptions(void) |
298 | { | 298 | { |
299 | /* mflags = 0: big endian exceptions */ | 299 | /* mflags = 0: big endian exceptions */ |
300 | return plpar_set_mode(0, 4, 0, 0); | 300 | return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0); |
301 | } | 301 | } |
302 | 302 | ||
303 | /* | 303 | /* |
@@ -310,17 +310,17 @@ static inline long enable_big_endian_exceptions(void) | |||
310 | static inline long enable_little_endian_exceptions(void) | 310 | static inline long enable_little_endian_exceptions(void) |
311 | { | 311 | { |
312 | /* mflags = 1: little endian exceptions */ | 312 | /* mflags = 1: little endian exceptions */ |
313 | return plpar_set_mode(1, 4, 0, 0); | 313 | return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0); |
314 | } | 314 | } |
315 | 315 | ||
316 | static inline long plapr_set_ciabr(unsigned long ciabr) | 316 | static inline long plapr_set_ciabr(unsigned long ciabr) |
317 | { | 317 | { |
318 | return plpar_set_mode(0, 1, ciabr, 0); | 318 | return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0); |
319 | } | 319 | } |
320 | 320 | ||
321 | static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0) | 321 | static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0) |
322 | { | 322 | { |
323 | return plpar_set_mode(0, 2, dawr0, dawrx0); | 323 | return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0); |
324 | } | 324 | } |
325 | 325 | ||
326 | #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ | 326 | #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ |
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h new file mode 100644 index 000000000000..f09a22fa1bd7 --- /dev/null +++ b/arch/powerpc/include/asm/pnv-pci.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_PNV_PCI_H | ||
11 | #define _ASM_PNV_PCI_H | ||
12 | |||
13 | #include <linux/pci.h> | ||
14 | #include <misc/cxl.h> | ||
15 | |||
16 | int pnv_phb_to_cxl(struct pci_dev *dev); | ||
17 | int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, | ||
18 | unsigned int virq); | ||
19 | int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num); | ||
20 | void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num); | ||
21 | int pnv_cxl_get_irq_count(struct pci_dev *dev); | ||
22 | struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev); | ||
23 | |||
24 | #ifdef CONFIG_CXL_BASE | ||
25 | int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs, | ||
26 | struct pci_dev *dev, int num); | ||
27 | void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs, | ||
28 | struct pci_dev *dev); | ||
29 | #endif | ||
30 | |||
31 | #endif | ||
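pnv-pci.h exposes the PowerNV PHB services the cxl driver builds on: hardware IRQ allocation and MSI setup against the PHB that owns a CAPI-capable slot. A hedged sketch of grabbing one hwirq and binding it to an already-mapped Linux virq follows; the wrapper is hypothetical, the three pnv_cxl_* calls are from this header.

/* Hedged sketch: allocating a PHB hwirq and binding it to an existing virq. */
static int example_setup_one_irq(struct pci_dev *dev, unsigned int virq)
{
	int hwirq, ret;

	hwirq = pnv_cxl_alloc_hwirqs(dev, 1);
	if (hwirq < 0)
		return hwirq;

	/* virq is assumed to already be mapped onto hwirq by the caller. */
	ret = pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
	if (ret)
		pnv_cxl_release_hwirqs(dev, hwirq, 1);
	return ret;
}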
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 74b79f07f041..7f436ba1b56f 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h | |||
@@ -76,8 +76,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window, | |||
76 | unsigned long *busno, unsigned long *phys, | 76 | unsigned long *busno, unsigned long *phys, |
77 | unsigned long *size); | 77 | unsigned long *size); |
78 | 78 | ||
79 | extern void kdump_move_device_tree(void); | ||
80 | |||
81 | extern void of_instantiate_rtc(void); | 79 | extern void of_instantiate_rtc(void); |
82 | 80 | ||
83 | extern int of_get_ibm_chip_id(struct device_node *np); | 81 | extern int of_get_ibm_chip_id(struct device_node *np); |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 0c0505956a29..fe3f9488f321 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -947,7 +947,7 @@ | |||
947 | * 32-bit 8xx: | 947 | * 32-bit 8xx: |
948 | * - SPRG0 scratch for exception vectors | 948 | * - SPRG0 scratch for exception vectors |
949 | * - SPRG1 scratch for exception vectors | 949 | * - SPRG1 scratch for exception vectors |
950 | * - SPRG2 apparently unused but initialized | 950 | * - SPRG2 scratch for exception vectors |
951 | * | 951 | * |
952 | */ | 952 | */ |
953 | #ifdef CONFIG_PPC64 | 953 | #ifdef CONFIG_PPC64 |
@@ -1057,6 +1057,7 @@ | |||
1057 | #ifdef CONFIG_8xx | 1057 | #ifdef CONFIG_8xx |
1058 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 | 1058 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 |
1059 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 | 1059 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 |
1060 | #define SPRN_SPRG_SCRATCH2 SPRN_SPRG2 | ||
1060 | #endif | 1061 | #endif |
1061 | 1062 | ||
1062 | 1063 | ||
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h index b1d2deceeedb..ec800f28fec5 100644 --- a/arch/powerpc/include/asm/rio.h +++ b/arch/powerpc/include/asm/rio.h | |||
@@ -13,7 +13,6 @@ | |||
13 | #ifndef ASM_PPC_RIO_H | 13 | #ifndef ASM_PPC_RIO_H |
14 | #define ASM_PPC_RIO_H | 14 | #define ASM_PPC_RIO_H |
15 | 15 | ||
16 | extern void platform_rio_init(void); | ||
17 | #ifdef CONFIG_FSL_RIO | 16 | #ifdef CONFIG_FSL_RIO |
18 | extern int fsl_rio_mcheck_exception(struct pt_regs *); | 17 | extern int fsl_rio_mcheck_exception(struct pt_regs *); |
19 | #else | 18 | #else |
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h index 37b7ca39ec9f..a6e6e2bf9d15 100644 --- a/arch/powerpc/include/asm/spu.h +++ b/arch/powerpc/include/asm/spu.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | #include <linux/mutex.h> | 29 | #include <linux/mutex.h> |
30 | #include <asm/reg.h> | ||
31 | #include <asm/copro.h> | ||
30 | 32 | ||
31 | #define LS_SIZE (256 * 1024) | 33 | #define LS_SIZE (256 * 1024) |
32 | #define LS_ADDR_MASK (LS_SIZE - 1) | 34 | #define LS_ADDR_MASK (LS_SIZE - 1) |
@@ -277,9 +279,6 @@ void spu_remove_dev_attr(struct device_attribute *attr); | |||
277 | int spu_add_dev_attr_group(struct attribute_group *attrs); | 279 | int spu_add_dev_attr_group(struct attribute_group *attrs); |
278 | void spu_remove_dev_attr_group(struct attribute_group *attrs); | 280 | void spu_remove_dev_attr_group(struct attribute_group *attrs); |
279 | 281 | ||
280 | int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, | ||
281 | unsigned long dsisr, unsigned *flt); | ||
282 | |||
283 | /* | 282 | /* |
284 | * Notifier blocks: | 283 | * Notifier blocks: |
285 | * | 284 | * |
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index f593b0f9b627..d3a42cc45a82 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h | |||
@@ -25,3 +25,65 @@ struct pt_regs; | |||
25 | 25 | ||
26 | /* Emulate instructions that cause a transfer of control. */ | 26 | /* Emulate instructions that cause a transfer of control. */ |
27 | extern int emulate_step(struct pt_regs *regs, unsigned int instr); | 27 | extern int emulate_step(struct pt_regs *regs, unsigned int instr); |
28 | |||
29 | enum instruction_type { | ||
30 | COMPUTE, /* arith/logical/CR op, etc. */ | ||
31 | LOAD, | ||
32 | LOAD_MULTI, | ||
33 | LOAD_FP, | ||
34 | LOAD_VMX, | ||
35 | LOAD_VSX, | ||
36 | STORE, | ||
37 | STORE_MULTI, | ||
38 | STORE_FP, | ||
39 | STORE_VMX, | ||
40 | STORE_VSX, | ||
41 | LARX, | ||
42 | STCX, | ||
43 | BRANCH, | ||
44 | MFSPR, | ||
45 | MTSPR, | ||
46 | CACHEOP, | ||
47 | BARRIER, | ||
48 | SYSCALL, | ||
49 | MFMSR, | ||
50 | MTMSR, | ||
51 | RFI, | ||
52 | INTERRUPT, | ||
53 | UNKNOWN | ||
54 | }; | ||
55 | |||
56 | #define INSTR_TYPE_MASK 0x1f | ||
57 | |||
58 | /* Load/store flags, ORed in with type */ | ||
59 | #define SIGNEXT 0x20 | ||
60 | #define UPDATE 0x40 /* matches bit in opcode 31 instructions */ | ||
61 | #define BYTEREV 0x80 | ||
62 | |||
63 | /* Cacheop values, ORed in with type */ | ||
64 | #define CACHEOP_MASK 0x700 | ||
65 | #define DCBST 0 | ||
66 | #define DCBF 0x100 | ||
67 | #define DCBTST 0x200 | ||
68 | #define DCBT 0x300 | ||
69 | #define ICBI 0x400 | ||
70 | |||
71 | /* Size field in type word */ | ||
72 | #define SIZE(n) ((n) << 8) | ||
73 | #define GETSIZE(w) ((w) >> 8) | ||
74 | |||
75 | #define MKOP(t, f, s) ((t) | (f) | SIZE(s)) | ||
76 | |||
77 | struct instruction_op { | ||
78 | int type; | ||
79 | int reg; | ||
80 | unsigned long val; | ||
81 | /* For LOAD/STORE/LARX/STCX */ | ||
82 | unsigned long ea; | ||
83 | int update_reg; | ||
84 | /* For MFSPR */ | ||
85 | int spr; | ||
86 | }; | ||
87 | |||
88 | extern int analyse_instr(struct instruction_op *op, struct pt_regs *regs, | ||
89 | unsigned int instr); | ||
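The new analyse_instr() interface splits emulation into a decode step (filling in struct instruction_op) and a separate execute step that the caller performs. A hedged sketch of consuming the op word is shown below; the helper is invented and the return-value convention is an assumption noted in the comment, while INSTR_TYPE_MASK, GETSIZE() and the field names come from this header.

/* Hedged sketch: decoding the op word filled in by analyse_instr(). */
static void example_decode(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;

	/* Assumption: a non-zero return means the decoder handled it itself. */
	if (analyse_instr(&op, regs, instr) != 0)
		return;

	switch (op.type & INSTR_TYPE_MASK) {
	case LOAD:
	case STORE:
		/* GETSIZE() recovers the width that MKOP() packed into type. */
		pr_debug("mem op at ea=%lx, size=%d\n", op.ea, GETSIZE(op.type));
		break;
	default:
		break;
	}
}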
diff --git a/arch/powerpc/include/asm/tsi108.h b/arch/powerpc/include/asm/tsi108.h index f8b60793b7a9..d531d9e173ef 100644 --- a/arch/powerpc/include/asm/tsi108.h +++ b/arch/powerpc/include/asm/tsi108.h | |||
@@ -84,10 +84,6 @@ | |||
84 | extern u32 tsi108_pci_cfg_base; | 84 | extern u32 tsi108_pci_cfg_base; |
85 | /* Exported functions */ | 85 | /* Exported functions */ |
86 | 86 | ||
87 | extern int tsi108_bridge_init(struct pci_controller *hose, uint phys_csr_base); | ||
88 | extern unsigned long tsi108_get_mem_size(void); | ||
89 | extern unsigned long tsi108_get_cpu_clk(void); | ||
90 | extern unsigned long tsi108_get_sdc_clk(void); | ||
91 | extern int tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfn, | 87 | extern int tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfn, |
92 | int offset, int len, u32 val); | 88 | int offset, int len, u32 val); |
93 | extern int tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn, | 89 | extern int tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn, |
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index b51fba10e733..78f2675f2aac 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h | |||
@@ -52,7 +52,6 @@ extern void __init udbg_init_44x_as1(void); | |||
52 | extern void __init udbg_init_40x_realmode(void); | 52 | extern void __init udbg_init_40x_realmode(void); |
53 | extern void __init udbg_init_cpm(void); | 53 | extern void __init udbg_init_cpm(void); |
54 | extern void __init udbg_init_usbgecko(void); | 54 | extern void __init udbg_init_usbgecko(void); |
55 | extern void __init udbg_init_wsp(void); | ||
56 | extern void __init udbg_init_memcons(void); | 55 | extern void __init udbg_init_memcons(void); |
57 | extern void __init udbg_init_ehv_bc(void); | 56 | extern void __init udbg_init_ehv_bc(void); |
58 | extern void __init udbg_init_ps3gelic(void); | 57 | extern void __init udbg_init_ps3gelic(void); |
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 9a5c928bb3c6..5b3a903adae6 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h | |||
@@ -42,32 +42,65 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct | |||
42 | 42 | ||
43 | #else | 43 | #else |
44 | 44 | ||
45 | #ifdef CONFIG_64BIT | ||
46 | |||
47 | /* unused */ | ||
45 | struct word_at_a_time { | 48 | struct word_at_a_time { |
46 | const unsigned long one_bits, high_bits; | ||
47 | }; | 49 | }; |
48 | 50 | ||
49 | #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } | 51 | #define WORD_AT_A_TIME_CONSTANTS { } |
50 | 52 | ||
51 | #ifdef CONFIG_64BIT | 53 | /* This will give us 0xff for a NULL char and 0x00 elsewhere */ |
54 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) | ||
55 | { | ||
56 | unsigned long ret; | ||
57 | unsigned long zero = 0; | ||
52 | 58 | ||
53 | /* Alan Modra's little-endian strlen tail for 64-bit */ | 59 | asm("cmpb %0,%1,%2" : "=r" (ret) : "r" (a), "r" (zero)); |
54 | #define create_zero_mask(mask) (mask) | 60 | *bits = ret; |
55 | 61 | ||
56 | static inline unsigned long find_zero(unsigned long mask) | 62 | return ret; |
63 | } | ||
64 | |||
65 | static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) | ||
66 | { | ||
67 | return bits; | ||
68 | } | ||
69 | |||
70 | /* Alan Modra's little-endian strlen tail for 64-bit */ | ||
71 | static inline unsigned long create_zero_mask(unsigned long bits) | ||
57 | { | 72 | { |
58 | unsigned long leading_zero_bits; | 73 | unsigned long leading_zero_bits; |
59 | long trailing_zero_bit_mask; | 74 | long trailing_zero_bit_mask; |
60 | 75 | ||
61 | asm ("addi %1,%2,-1\n\t" | 76 | asm("addi %1,%2,-1\n\t" |
62 | "andc %1,%1,%2\n\t" | 77 | "andc %1,%1,%2\n\t" |
63 | "popcntd %0,%1" | 78 | "popcntd %0,%1" |
64 | : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) | 79 | : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) |
65 | : "r" (mask)); | 80 | : "r" (bits)); |
66 | return leading_zero_bits >> 3; | 81 | |
82 | return leading_zero_bits; | ||
83 | } | ||
84 | |||
85 | static inline unsigned long find_zero(unsigned long mask) | ||
86 | { | ||
87 | return mask >> 3; | ||
88 | } | ||
89 | |||
90 | /* This assumes that we never ask for an all 1s bitmask */ | ||
91 | static inline unsigned long zero_bytemask(unsigned long mask) | ||
92 | { | ||
93 | return (1UL << mask) - 1; | ||
67 | } | 94 | } |
68 | 95 | ||
69 | #else /* 32-bit case */ | 96 | #else /* 32-bit case */ |
70 | 97 | ||
98 | struct word_at_a_time { | ||
99 | const unsigned long one_bits, high_bits; | ||
100 | }; | ||
101 | |||
102 | #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } | ||
103 | |||
71 | /* | 104 | /* |
72 | * This is largely generic for little-endian machines, but the | 105 | * This is largely generic for little-endian machines, but the |
73 | * optimal byte mask counting is probably going to be something | 106 | * optimal byte mask counting is probably going to be something |
@@ -96,8 +129,6 @@ static inline unsigned long find_zero(unsigned long mask) | |||
96 | return count_masked_bytes(mask); | 129 | return count_masked_bytes(mask); |
97 | } | 130 | } |
98 | 131 | ||
99 | #endif | ||
100 | |||
101 | /* Return nonzero if it has a zero */ | 132 | /* Return nonzero if it has a zero */ |
102 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) | 133 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) |
103 | { | 134 | { |
@@ -114,6 +145,59 @@ static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, | |||
114 | /* The mask we created is directly usable as a bytemask */ | 145 | /* The mask we created is directly usable as a bytemask */ |
115 | #define zero_bytemask(mask) (mask) | 146 | #define zero_bytemask(mask) (mask) |
116 | 147 | ||
148 | #endif /* CONFIG_64BIT */ | ||
149 | |||
150 | #endif /* __BIG_ENDIAN__ */ | ||
151 | |||
152 | /* | ||
153 |  	 * We use load_unaligned_zeropad() in a selftest, which builds a userspace | ||
	153 	 * We use load_unaligned_zeropad() in a selftest, which builds a userspace
154 | * program. Some linker scripts seem to discard the .fixup section, so allow | ||
155 | * the test code to use a different section name. | ||
156 | */ | ||
157 | #ifndef FIXUP_SECTION | ||
158 | #define FIXUP_SECTION ".fixup" | ||
159 | #endif | ||
160 | |||
161 | static inline unsigned long load_unaligned_zeropad(const void *addr) | ||
162 | { | ||
163 | unsigned long ret, offset, tmp; | ||
164 | |||
165 | asm( | ||
166 | "1: " PPC_LL "%[ret], 0(%[addr])\n" | ||
167 | "2:\n" | ||
168 | ".section " FIXUP_SECTION ",\"ax\"\n" | ||
169 | "3: " | ||
170 | #ifdef __powerpc64__ | ||
171 | "clrrdi %[tmp], %[addr], 3\n\t" | ||
172 | "clrlsldi %[offset], %[addr], 61, 3\n\t" | ||
173 | "ld %[ret], 0(%[tmp])\n\t" | ||
174 | #ifdef __BIG_ENDIAN__ | ||
175 | "sld %[ret], %[ret], %[offset]\n\t" | ||
176 | #else | ||
177 | "srd %[ret], %[ret], %[offset]\n\t" | ||
117 | #endif | 178 | #endif |
179 | #else | ||
180 | "clrrwi %[tmp], %[addr], 2\n\t" | ||
181 | "clrlslwi %[offset], %[addr], 30, 3\n\t" | ||
182 | "lwz %[ret], 0(%[tmp])\n\t" | ||
183 | #ifdef __BIG_ENDIAN__ | ||
184 | "slw %[ret], %[ret], %[offset]\n\t" | ||
185 | #else | ||
186 | "srw %[ret], %[ret], %[offset]\n\t" | ||
187 | #endif | ||
188 | #endif | ||
189 | "b 2b\n" | ||
190 | ".previous\n" | ||
191 | ".section __ex_table,\"a\"\n\t" | ||
192 | PPC_LONG_ALIGN "\n\t" | ||
193 | PPC_LONG "1b,3b\n" | ||
194 | ".previous" | ||
195 | : [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret) | ||
196 | : [addr] "b" (addr), "m" (*(unsigned long *)addr)); | ||
197 | |||
198 | return ret; | ||
199 | } | ||
200 | |||
201 | #undef FIXUP_SECTION | ||
118 | 202 | ||
119 | #endif /* _ASM_WORD_AT_A_TIME_H */ | 203 | #endif /* _ASM_WORD_AT_A_TIME_H */ |
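On 64-bit little-endian the has_zero() above gets its per-byte result directly from the cmpb instruction, while the 32-bit branch keeps the classic one_bits/high_bits trick. A small userspace sketch of that generic trick, (x - 0x01..01) & ~x & 0x80..80, with an example string chosen only for the demo:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Generic zero-byte test used by the 32-bit branch above: the result is
 * nonzero iff at least one byte of x is zero.  Bytes past the first zero
 * may also be flagged, which is why the kernel post-processes the mask
 * with prep_zero_mask()/create_zero_mask() before using it.
 */
static uint64_t zero_byte_mask(uint64_t x)
{
	const uint64_t ones  = 0x0101010101010101ULL;	/* REPEAT_BYTE(0x01) */
	const uint64_t highs = 0x8080808080808080ULL;	/* REPEAT_BYTE(0x80) */

	return (x - ones) & ~x & highs;
}

int main(void)
{
	const char buf[8] = "abc";	/* NUL-terminated, rest zero-filled */
	uint64_t word;

	memcpy(&word, buf, sizeof(word));
	printf("mask = 0x%016llx\n", (unsigned long long)zero_byte_mask(word));
	return 0;
}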
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index 282d43a0c855..0d050ea37a04 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h | |||
@@ -29,6 +29,7 @@ | |||
29 | /* Native ICP */ | 29 | /* Native ICP */ |
30 | #ifdef CONFIG_PPC_ICP_NATIVE | 30 | #ifdef CONFIG_PPC_ICP_NATIVE |
31 | extern int icp_native_init(void); | 31 | extern int icp_native_init(void); |
32 | extern void icp_native_flush_interrupt(void); | ||
32 | #else | 33 | #else |
33 | static inline int icp_native_init(void) { return -ENODEV; } | 34 | static inline int icp_native_init(void) { return -ENODEV; } |
34 | #endif | 35 | #endif |
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 670c312d914e..502cf69b6c89 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -93,6 +93,9 @@ obj-$(CONFIG_PPC32) += entry_32.o setup_32.o | |||
93 | obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o | 93 | obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o |
94 | obj-$(CONFIG_KGDB) += kgdb.o | 94 | obj-$(CONFIG_KGDB) += kgdb.o |
95 | obj-$(CONFIG_MODULES) += ppc_ksyms.o | 95 | obj-$(CONFIG_MODULES) += ppc_ksyms.o |
96 | ifeq ($(CONFIG_PPC32),y) | ||
97 | obj-$(CONFIG_MODULES) += ppc_ksyms_32.o | ||
98 | endif | ||
96 | obj-$(CONFIG_BOOTX_TEXT) += btext.o | 99 | obj-$(CONFIG_BOOTX_TEXT) += btext.o |
97 | obj-$(CONFIG_SMP) += smp.o | 100 | obj-$(CONFIG_SMP) += smp.o |
98 | obj-$(CONFIG_KPROBES) += kprobes.o | 101 | obj-$(CONFIG_KPROBES) += kprobes.o |
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 7a13f378ca2c..c78e6dac4d7d 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/crash_dump.h> | 14 | #include <linux/crash_dump.h> |
15 | #include <linux/bootmem.h> | 15 | #include <linux/bootmem.h> |
16 | #include <linux/io.h> | ||
16 | #include <linux/memblock.h> | 17 | #include <linux/memblock.h> |
17 | #include <asm/code-patching.h> | 18 | #include <asm/code-patching.h> |
18 | #include <asm/kdump.h> | 19 | #include <asm/kdump.h> |
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index bd1a2aba599f..735979764cd4 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c | |||
@@ -106,10 +106,14 @@ int __init swiotlb_setup_bus_notifier(void) | |||
106 | return 0; | 106 | return 0; |
107 | } | 107 | } |
108 | 108 | ||
109 | void swiotlb_detect_4g(void) | 109 | void __init swiotlb_detect_4g(void) |
110 | { | 110 | { |
111 | if ((memblock_end_of_DRAM() - 1) > 0xffffffff) | 111 | if ((memblock_end_of_DRAM() - 1) > 0xffffffff) { |
112 | ppc_swiotlb_enable = 1; | 112 | ppc_swiotlb_enable = 1; |
113 | #ifdef CONFIG_ZONE_DMA32 | ||
114 | limit_zone_pfn(ZONE_DMA32, (1ULL << 32) >> PAGE_SHIFT); | ||
115 | #endif | ||
116 | } | ||
113 | } | 117 | } |
114 | 118 | ||
115 | static int __init swiotlb_late_init(void) | 119 | static int __init swiotlb_late_init(void) |
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index ee78f6e49d64..adac9dc54aee 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/vio.h> | 15 | #include <asm/vio.h> |
16 | #include <asm/bug.h> | 16 | #include <asm/bug.h> |
17 | #include <asm/machdep.h> | 17 | #include <asm/machdep.h> |
18 | #include <asm/swiotlb.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * Generic direct DMA implementation | 21 | * Generic direct DMA implementation |
@@ -25,6 +26,18 @@ | |||
25 | * default the offset is PCI_DRAM_OFFSET. | 26 | * default the offset is PCI_DRAM_OFFSET. |
26 | */ | 27 | */ |
27 | 28 | ||
29 | static u64 __maybe_unused get_pfn_limit(struct device *dev) | ||
30 | { | ||
31 | u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1; | ||
32 | struct dev_archdata __maybe_unused *sd = &dev->archdata; | ||
33 | |||
34 | #ifdef CONFIG_SWIOTLB | ||
35 | if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops) | ||
36 | pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT); | ||
37 | #endif | ||
38 | |||
39 | return pfn; | ||
40 | } | ||
28 | 41 | ||
29 | void *dma_direct_alloc_coherent(struct device *dev, size_t size, | 42 | void *dma_direct_alloc_coherent(struct device *dev, size_t size, |
30 | dma_addr_t *dma_handle, gfp_t flag, | 43 | dma_addr_t *dma_handle, gfp_t flag, |
@@ -40,6 +53,26 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, | |||
40 | #else | 53 | #else |
41 | struct page *page; | 54 | struct page *page; |
42 | int node = dev_to_node(dev); | 55 | int node = dev_to_node(dev); |
56 | u64 pfn = get_pfn_limit(dev); | ||
57 | int zone; | ||
58 | |||
59 | zone = dma_pfn_limit_to_zone(pfn); | ||
60 | if (zone < 0) { | ||
61 | dev_err(dev, "%s: No suitable zone for pfn %#llx\n", | ||
62 | __func__, pfn); | ||
63 | return NULL; | ||
64 | } | ||
65 | |||
66 | switch (zone) { | ||
67 | case ZONE_DMA: | ||
68 | flag |= GFP_DMA; | ||
69 | break; | ||
70 | #ifdef CONFIG_ZONE_DMA32 | ||
71 | case ZONE_DMA32: | ||
72 | flag |= GFP_DMA32; | ||
73 | break; | ||
74 | #endif | ||
75 | }; | ||
43 | 76 | ||
44 | /* ignore region specifiers */ | 77 | /* ignore region specifiers */ |
45 | flag &= ~(__GFP_HIGHMEM); | 78 | flag &= ~(__GFP_HIGHMEM); |
@@ -202,6 +235,7 @@ int __dma_set_mask(struct device *dev, u64 dma_mask) | |||
202 | *dev->dma_mask = dma_mask; | 235 | *dev->dma_mask = dma_mask; |
203 | return 0; | 236 | return 0; |
204 | } | 237 | } |
238 | |||
205 | int dma_set_mask(struct device *dev, u64 dma_mask) | 239 | int dma_set_mask(struct device *dev, u64 dma_mask) |
206 | { | 240 | { |
207 | if (ppc_md.dma_set_mask) | 241 | if (ppc_md.dma_set_mask) |
@@ -210,13 +244,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask) | |||
210 | } | 244 | } |
211 | EXPORT_SYMBOL(dma_set_mask); | 245 | EXPORT_SYMBOL(dma_set_mask); |
212 | 246 | ||
213 | u64 dma_get_required_mask(struct device *dev) | 247 | u64 __dma_get_required_mask(struct device *dev) |
214 | { | 248 | { |
215 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | 249 | struct dma_map_ops *dma_ops = get_dma_ops(dev); |
216 | 250 | ||
217 | if (ppc_md.dma_get_required_mask) | ||
218 | return ppc_md.dma_get_required_mask(dev); | ||
219 | |||
220 | if (unlikely(dma_ops == NULL)) | 251 | if (unlikely(dma_ops == NULL)) |
221 | return 0; | 252 | return 0; |
222 | 253 | ||
@@ -225,6 +256,14 @@ u64 dma_get_required_mask(struct device *dev) | |||
225 | 256 | ||
226 | return DMA_BIT_MASK(8 * sizeof(dma_addr_t)); | 257 | return DMA_BIT_MASK(8 * sizeof(dma_addr_t)); |
227 | } | 258 | } |
259 | |||
260 | u64 dma_get_required_mask(struct device *dev) | ||
261 | { | ||
262 | if (ppc_md.dma_get_required_mask) | ||
263 | return ppc_md.dma_get_required_mask(dev); | ||
264 | |||
265 | return __dma_get_required_mask(dev); | ||
266 | } | ||
228 | EXPORT_SYMBOL_GPL(dma_get_required_mask); | 267 | EXPORT_SYMBOL_GPL(dma_get_required_mask); |
229 | 268 | ||
230 | static int __init dma_init(void) | 269 | static int __init dma_init(void) |
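get_pfn_limit() above turns a device's coherent DMA mask into the highest page frame number it can address, and the allocation path then maps that limit onto GFP_DMA or GFP_DMA32. A rough userspace sketch of the same decision; the zone boundaries here are invented for the example, whereas the real code asks dma_pfn_limit_to_zone() for the platform's actual limits:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12

/* Invented zone limits (in page frames) purely for illustration */
#define ZONE_DMA_LIMIT_PFN	((1ULL << 31) >> PAGE_SHIFT)	/* 2GB */
#define ZONE_DMA32_LIMIT_PFN	((1ULL << 32) >> PAGE_SHIFT)	/* 4GB */

static const char *zone_for_mask(uint64_t coherent_dma_mask)
{
	/* Same conversion as get_pfn_limit() above */
	uint64_t pfn_limit = (coherent_dma_mask >> PAGE_SHIFT) + 1;

	if (pfn_limit <= ZONE_DMA_LIMIT_PFN)
		return "GFP_DMA";
	if (pfn_limit <= ZONE_DMA32_LIMIT_PFN)
		return "GFP_DMA32";
	return "no zone restriction";
}

int main(void)
{
	printf("31-bit mask -> %s\n", zone_for_mask((1ULL << 31) - 1));
	printf("32-bit mask -> %s\n", zone_for_mask((1ULL << 32) - 1));
	printf("64-bit mask -> %s\n", zone_for_mask(~0ULL));
	return 0;
}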
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 59a64f8dc85f..d543e4179c18 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -117,7 +117,7 @@ static DEFINE_MUTEX(eeh_dev_mutex); | |||
117 | * not dynamically alloced, so that it ends up in RMO where RTAS | 117 | * not dynamically alloced, so that it ends up in RMO where RTAS |
118 | * can access it. | 118 | * can access it. |
119 | */ | 119 | */ |
120 | #define EEH_PCI_REGS_LOG_LEN 4096 | 120 | #define EEH_PCI_REGS_LOG_LEN 8192 |
121 | static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN]; | 121 | static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN]; |
122 | 122 | ||
123 | /* | 123 | /* |
@@ -148,16 +148,12 @@ static int __init eeh_setup(char *str) | |||
148 | } | 148 | } |
149 | __setup("eeh=", eeh_setup); | 149 | __setup("eeh=", eeh_setup); |
150 | 150 | ||
151 | /** | 151 | /* |
152 | * eeh_gather_pci_data - Copy assorted PCI config space registers to buff | 152 | * This routine captures assorted PCI configuration space data |
153 | * @edev: device to report data for | 153 | * for the indicated PCI device, and puts them into a buffer |
154 | * @buf: point to buffer in which to log | 154 | * for RTAS error logging. |
155 | * @len: amount of room in buffer | ||
156 | * | ||
157 | * This routine captures assorted PCI configuration space data, | ||
158 | * and puts them into a buffer for RTAS error logging. | ||
159 | */ | 155 | */ |
160 | static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len) | 156 | static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len) |
161 | { | 157 | { |
162 | struct device_node *dn = eeh_dev_to_of_node(edev); | 158 | struct device_node *dn = eeh_dev_to_of_node(edev); |
163 | u32 cfg; | 159 | u32 cfg; |
@@ -255,6 +251,19 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len) | |||
255 | return n; | 251 | return n; |
256 | } | 252 | } |
257 | 253 | ||
254 | static void *eeh_dump_pe_log(void *data, void *flag) | ||
255 | { | ||
256 | struct eeh_pe *pe = data; | ||
257 | struct eeh_dev *edev, *tmp; | ||
258 | size_t *plen = flag; | ||
259 | |||
260 | eeh_pe_for_each_dev(pe, edev, tmp) | ||
261 | *plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen, | ||
262 | EEH_PCI_REGS_LOG_LEN - *plen); | ||
263 | |||
264 | return NULL; | ||
265 | } | ||
266 | |||
258 | /** | 267 | /** |
259 | * eeh_slot_error_detail - Generate combined log including driver log and error log | 268 | * eeh_slot_error_detail - Generate combined log including driver log and error log |
260 | * @pe: EEH PE | 269 | * @pe: EEH PE |
@@ -268,7 +277,6 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char *buf, size_t len) | |||
268 | void eeh_slot_error_detail(struct eeh_pe *pe, int severity) | 277 | void eeh_slot_error_detail(struct eeh_pe *pe, int severity) |
269 | { | 278 | { |
270 | size_t loglen = 0; | 279 | size_t loglen = 0; |
271 | struct eeh_dev *edev, *tmp; | ||
272 | 280 | ||
273 | /* | 281 | /* |
274 | * When the PHB is fenced or dead, it's pointless to collect | 282 | * When the PHB is fenced or dead, it's pointless to collect |
@@ -286,10 +294,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) | |||
286 | eeh_pe_restore_bars(pe); | 294 | eeh_pe_restore_bars(pe); |
287 | 295 | ||
288 | pci_regs_buf[0] = 0; | 296 | pci_regs_buf[0] = 0; |
289 | eeh_pe_for_each_dev(pe, edev, tmp) { | 297 | eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen); |
290 | loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen, | ||
291 | EEH_PCI_REGS_LOG_LEN - loglen); | ||
292 | } | ||
293 | } | 298 | } |
294 | 299 | ||
295 | eeh_ops->get_log(pe, severity, pci_regs_buf, loglen); | 300 | eeh_ops->get_log(pe, severity, pci_regs_buf, loglen); |
@@ -410,7 +415,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) | |||
410 | } | 415 | } |
411 | dn = eeh_dev_to_of_node(edev); | 416 | dn = eeh_dev_to_of_node(edev); |
412 | dev = eeh_dev_to_pci_dev(edev); | 417 | dev = eeh_dev_to_pci_dev(edev); |
413 | pe = edev->pe; | 418 | pe = eeh_dev_to_pe(edev); |
414 | 419 | ||
415 | /* Access to IO BARs might get this far and still not want checking. */ | 420 | /* Access to IO BARs might get this far and still not want checking. */ |
416 | if (!pe) { | 421 | if (!pe) { |
@@ -542,17 +547,16 @@ EXPORT_SYMBOL_GPL(eeh_dev_check_failure); | |||
542 | 547 | ||
543 | /** | 548 | /** |
544 | * eeh_check_failure - Check if all 1's data is due to EEH slot freeze | 549 | * eeh_check_failure - Check if all 1's data is due to EEH slot freeze |
545 | * @token: I/O token, should be address in the form 0xA.... | 550 | * @token: I/O address |
546 | * @val: value, should be all 1's (XXX why do we need this arg??) | ||
547 | * | 551 | * |
548 | * Check for an EEH failure at the given token address. Call this | 552 | * Check for an EEH failure at the given I/O address. Call this |
549 | * routine if the result of a read was all 0xff's and you want to | 553 | * routine if the result of a read was all 0xff's and you want to |
550 | * find out if this is due to an EEH slot freeze event. This routine | 554 | * find out if this is due to an EEH slot freeze event. This routine |
551 | * will query firmware for the EEH status. | 555 | * will query firmware for the EEH status. |
552 | * | 556 | * |
553 | * Note this routine is safe to call in an interrupt context. | 557 | * Note this routine is safe to call in an interrupt context. |
554 | */ | 558 | */ |
555 | unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) | 559 | int eeh_check_failure(const volatile void __iomem *token) |
556 | { | 560 | { |
557 | unsigned long addr; | 561 | unsigned long addr; |
558 | struct eeh_dev *edev; | 562 | struct eeh_dev *edev; |
@@ -562,13 +566,11 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon | |||
562 | edev = eeh_addr_cache_get_dev(addr); | 566 | edev = eeh_addr_cache_get_dev(addr); |
563 | if (!edev) { | 567 | if (!edev) { |
564 | eeh_stats.no_device++; | 568 | eeh_stats.no_device++; |
565 | return val; | 569 | return 0; |
566 | } | 570 | } |
567 | 571 | ||
568 | eeh_dev_check_failure(edev); | 572 | return eeh_dev_check_failure(edev); |
569 | return val; | ||
570 | } | 573 | } |
571 | |||
572 | EXPORT_SYMBOL(eeh_check_failure); | 574 | EXPORT_SYMBOL(eeh_check_failure); |
573 | 575 | ||
574 | 576 | ||
@@ -582,25 +584,51 @@ EXPORT_SYMBOL(eeh_check_failure); | |||
582 | */ | 584 | */ |
583 | int eeh_pci_enable(struct eeh_pe *pe, int function) | 585 | int eeh_pci_enable(struct eeh_pe *pe, int function) |
584 | { | 586 | { |
585 | int rc, flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); | 587 | int active_flag, rc; |
586 | 588 | ||
587 | /* | 589 | /* |
588 | * pHyp doesn't allow to enable IO or DMA on unfrozen PE. | 590 | * pHyp doesn't allow to enable IO or DMA on unfrozen PE. |
589 | * Also, it's pointless to enable them on unfrozen PE. So | 591 | * Also, it's pointless to enable them on unfrozen PE. So |
590 | * we have the check here. | 592 | * we have to check before enabling IO or DMA. |
591 | */ | 593 | */ |
592 | if (function == EEH_OPT_THAW_MMIO || | 594 | switch (function) { |
593 | function == EEH_OPT_THAW_DMA) { | 595 | case EEH_OPT_THAW_MMIO: |
596 | active_flag = EEH_STATE_MMIO_ACTIVE; | ||
597 | break; | ||
598 | case EEH_OPT_THAW_DMA: | ||
599 | active_flag = EEH_STATE_DMA_ACTIVE; | ||
600 | break; | ||
601 | case EEH_OPT_DISABLE: | ||
602 | case EEH_OPT_ENABLE: | ||
603 | case EEH_OPT_FREEZE_PE: | ||
604 | active_flag = 0; | ||
605 | break; | ||
606 | default: | ||
607 | pr_warn("%s: Invalid function %d\n", | ||
608 | __func__, function); | ||
609 | return -EINVAL; | ||
610 | } | ||
611 | |||
612 | /* | ||
613 | * Check if IO or DMA has been enabled before | ||
614 | * enabling them. | ||
615 | */ | ||
616 | if (active_flag) { | ||
594 | rc = eeh_ops->get_state(pe, NULL); | 617 | rc = eeh_ops->get_state(pe, NULL); |
595 | if (rc < 0) | 618 | if (rc < 0) |
596 | return rc; | 619 | return rc; |
597 | 620 | ||
598 | /* Needn't to enable or already enabled */ | 621 | /* Needn't enable it at all */ |
599 | if ((rc == EEH_STATE_NOT_SUPPORT) || | 622 | if (rc == EEH_STATE_NOT_SUPPORT) |
600 | ((rc & flags) == flags)) | 623 | return 0; |
624 | |||
625 | /* It's already enabled */ | ||
626 | if (rc & active_flag) | ||
601 | return 0; | 627 | return 0; |
602 | } | 628 | } |
603 | 629 | ||
630 | |||
631 | /* Issue the request */ | ||
604 | rc = eeh_ops->set_option(pe, function); | 632 | rc = eeh_ops->set_option(pe, function); |
605 | if (rc) | 633 | if (rc) |
606 | pr_warn("%s: Unexpected state change %d on " | 634 | pr_warn("%s: Unexpected state change %d on " |
@@ -608,17 +636,17 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) | |||
608 | __func__, function, pe->phb->global_number, | 636 | __func__, function, pe->phb->global_number, |
609 | pe->addr, rc); | 637 | pe->addr, rc); |
610 | 638 | ||
611 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); | 639 | /* Check if the request is finished successfully */ |
612 | if (rc <= 0) | 640 | if (active_flag) { |
613 | return rc; | 641 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); |
642 | if (rc <= 0) | ||
643 | return rc; | ||
614 | 644 | ||
615 | if ((function == EEH_OPT_THAW_MMIO) && | 645 | if (rc & active_flag) |
616 | (rc & EEH_STATE_MMIO_ENABLED)) | 646 | return 0; |
617 | return 0; | ||
618 | 647 | ||
619 | if ((function == EEH_OPT_THAW_DMA) && | 648 | return -EIO; |
620 | (rc & EEH_STATE_DMA_ENABLED)) | 649 | } |
621 | return 0; | ||
622 | 650 | ||
623 | return rc; | 651 | return rc; |
624 | } | 652 | } |
@@ -634,7 +662,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function) | |||
634 | int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) | 662 | int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) |
635 | { | 663 | { |
636 | struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); | 664 | struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); |
637 | struct eeh_pe *pe = edev->pe; | 665 | struct eeh_pe *pe = eeh_dev_to_pe(edev); |
638 | 666 | ||
639 | if (!pe) { | 667 | if (!pe) { |
640 | pr_err("%s: No PE found on PCI device %s\n", | 668 | pr_err("%s: No PE found on PCI device %s\n", |
@@ -645,14 +673,18 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat | |||
645 | switch (state) { | 673 | switch (state) { |
646 | case pcie_deassert_reset: | 674 | case pcie_deassert_reset: |
647 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); | 675 | eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); |
676 | eeh_pe_state_clear(pe, EEH_PE_RESET); | ||
648 | break; | 677 | break; |
649 | case pcie_hot_reset: | 678 | case pcie_hot_reset: |
679 | eeh_pe_state_mark(pe, EEH_PE_RESET); | ||
650 | eeh_ops->reset(pe, EEH_RESET_HOT); | 680 | eeh_ops->reset(pe, EEH_RESET_HOT); |
651 | break; | 681 | break; |
652 | case pcie_warm_reset: | 682 | case pcie_warm_reset: |
683 | eeh_pe_state_mark(pe, EEH_PE_RESET); | ||
653 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); | 684 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); |
654 | break; | 685 | break; |
655 | default: | 686 | default: |
687 | eeh_pe_state_clear(pe, EEH_PE_RESET); | ||
656 | return -EINVAL; | 688 | return -EINVAL; |
657 | }; | 689 | }; |
658 | 690 | ||
@@ -1141,6 +1173,85 @@ void eeh_remove_device(struct pci_dev *dev) | |||
1141 | edev->mode &= ~EEH_DEV_SYSFS; | 1173 | edev->mode &= ~EEH_DEV_SYSFS; |
1142 | } | 1174 | } |
1143 | 1175 | ||
1176 | int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state) | ||
1177 | { | ||
1178 | int ret; | ||
1179 | |||
1180 | ret = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); | ||
1181 | if (ret) { | ||
1182 | pr_warn("%s: Failure %d enabling IO on PHB#%x-PE#%x\n", | ||
1183 | __func__, ret, pe->phb->global_number, pe->addr); | ||
1184 | return ret; | ||
1185 | } | ||
1186 | |||
1187 | ret = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); | ||
1188 | if (ret) { | ||
1189 | pr_warn("%s: Failure %d enabling DMA on PHB#%x-PE#%x\n", | ||
1190 | __func__, ret, pe->phb->global_number, pe->addr); | ||
1191 | return ret; | ||
1192 | } | ||
1193 | |||
1194 | /* Clear software isolated state */ | ||
1195 | if (sw_state && (pe->state & EEH_PE_ISOLATED)) | ||
1196 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); | ||
1197 | |||
1198 | return ret; | ||
1199 | } | ||
1200 | |||
1201 | |||
1202 | static struct pci_device_id eeh_reset_ids[] = { | ||
1203 | { PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */ | ||
1204 | { PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */ | ||
1205 | { 0 } | ||
1206 | }; | ||
1207 | |||
1208 | static int eeh_pe_change_owner(struct eeh_pe *pe) | ||
1209 | { | ||
1210 | struct eeh_dev *edev, *tmp; | ||
1211 | struct pci_dev *pdev; | ||
1212 | struct pci_device_id *id; | ||
1213 | int flags, ret; | ||
1214 | |||
1215 | /* Check PE state */ | ||
1216 | flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); | ||
1217 | ret = eeh_ops->get_state(pe, NULL); | ||
1218 | if (ret < 0 || ret == EEH_STATE_NOT_SUPPORT) | ||
1219 | return 0; | ||
1220 | |||
1221 | /* Unfrozen PE, nothing to do */ | ||
1222 | if ((ret & flags) == flags) | ||
1223 | return 0; | ||
1224 | |||
1225 | /* Frozen PE, check if it needs PE level reset */ | ||
1226 | eeh_pe_for_each_dev(pe, edev, tmp) { | ||
1227 | pdev = eeh_dev_to_pci_dev(edev); | ||
1228 | if (!pdev) | ||
1229 | continue; | ||
1230 | |||
1231 | for (id = &eeh_reset_ids[0]; id->vendor != 0; id++) { | ||
1232 | if (id->vendor != PCI_ANY_ID && | ||
1233 | id->vendor != pdev->vendor) | ||
1234 | continue; | ||
1235 | if (id->device != PCI_ANY_ID && | ||
1236 | id->device != pdev->device) | ||
1237 | continue; | ||
1238 | if (id->subvendor != PCI_ANY_ID && | ||
1239 | id->subvendor != pdev->subsystem_vendor) | ||
1240 | continue; | ||
1241 | if (id->subdevice != PCI_ANY_ID && | ||
1242 | id->subdevice != pdev->subsystem_device) | ||
1243 | continue; | ||
1244 | |||
1245 | goto reset; | ||
1246 | } | ||
1247 | } | ||
1248 | |||
1249 | return eeh_unfreeze_pe(pe, true); | ||
1250 | |||
1251 | reset: | ||
1252 | return eeh_pe_reset_and_recover(pe); | ||
1253 | } | ||
1254 | |||
1144 | /** | 1255 | /** |
1145 | * eeh_dev_open - Increase count of pass through devices for PE | 1256 | * eeh_dev_open - Increase count of pass through devices for PE |
1146 | * @pdev: PCI device | 1257 | * @pdev: PCI device |
@@ -1153,6 +1264,7 @@ void eeh_remove_device(struct pci_dev *dev) | |||
1153 | int eeh_dev_open(struct pci_dev *pdev) | 1264 | int eeh_dev_open(struct pci_dev *pdev) |
1154 | { | 1265 | { |
1155 | struct eeh_dev *edev; | 1266 | struct eeh_dev *edev; |
1267 | int ret = -ENODEV; | ||
1156 | 1268 | ||
1157 | mutex_lock(&eeh_dev_mutex); | 1269 | mutex_lock(&eeh_dev_mutex); |
1158 | 1270 | ||
@@ -1165,6 +1277,16 @@ int eeh_dev_open(struct pci_dev *pdev) | |||
1165 | if (!edev || !edev->pe) | 1277 | if (!edev || !edev->pe) |
1166 | goto out; | 1278 | goto out; |
1167 | 1279 | ||
1280 | /* | ||
1281 | * The PE might have been put into frozen state, but we | ||
1282 |  	 * didn't detect that yet. PCI devices passed through in a | ||
1283 |  	 * frozen PE won't work properly. Clear the frozen state | ||
1284 | * in advance. | ||
1285 | */ | ||
1286 | ret = eeh_pe_change_owner(edev->pe); | ||
1287 | if (ret) | ||
1288 | goto out; | ||
1289 | |||
1168 | /* Increase PE's pass through count */ | 1290 | /* Increase PE's pass through count */ |
1169 | atomic_inc(&edev->pe->pass_dev_cnt); | 1291 | atomic_inc(&edev->pe->pass_dev_cnt); |
1170 | mutex_unlock(&eeh_dev_mutex); | 1292 | mutex_unlock(&eeh_dev_mutex); |
@@ -1172,7 +1294,7 @@ int eeh_dev_open(struct pci_dev *pdev) | |||
1172 | return 0; | 1294 | return 0; |
1173 | out: | 1295 | out: |
1174 | mutex_unlock(&eeh_dev_mutex); | 1296 | mutex_unlock(&eeh_dev_mutex); |
1175 | return -ENODEV; | 1297 | return ret; |
1176 | } | 1298 | } |
1177 | EXPORT_SYMBOL_GPL(eeh_dev_open); | 1299 | EXPORT_SYMBOL_GPL(eeh_dev_open); |
1178 | 1300 | ||
@@ -1202,6 +1324,7 @@ void eeh_dev_release(struct pci_dev *pdev) | |||
1202 | /* Decrease PE's pass through count */ | 1324 | /* Decrease PE's pass through count */ |
1203 | atomic_dec(&edev->pe->pass_dev_cnt); | 1325 | atomic_dec(&edev->pe->pass_dev_cnt); |
1204 | WARN_ON(atomic_read(&edev->pe->pass_dev_cnt) < 0); | 1326 | WARN_ON(atomic_read(&edev->pe->pass_dev_cnt) < 0); |
1327 | eeh_pe_change_owner(edev->pe); | ||
1205 | out: | 1328 | out: |
1206 | mutex_unlock(&eeh_dev_mutex); | 1329 | mutex_unlock(&eeh_dev_mutex); |
1207 | } | 1330 | } |
@@ -1281,8 +1404,10 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option) | |||
1281 | */ | 1404 | */ |
1282 | switch (option) { | 1405 | switch (option) { |
1283 | case EEH_OPT_ENABLE: | 1406 | case EEH_OPT_ENABLE: |
1284 | if (eeh_enabled()) | 1407 | if (eeh_enabled()) { |
1408 | ret = eeh_pe_change_owner(pe); | ||
1285 | break; | 1409 | break; |
1410 | } | ||
1286 | ret = -EIO; | 1411 | ret = -EIO; |
1287 | break; | 1412 | break; |
1288 | case EEH_OPT_DISABLE: | 1413 | case EEH_OPT_DISABLE: |
@@ -1294,7 +1419,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option) | |||
1294 | break; | 1419 | break; |
1295 | } | 1420 | } |
1296 | 1421 | ||
1297 | ret = eeh_ops->set_option(pe, option); | 1422 | ret = eeh_pci_enable(pe, option); |
1298 | break; | 1423 | break; |
1299 | default: | 1424 | default: |
1300 | pr_debug("%s: Option %d out of range (%d, %d)\n", | 1425 | pr_debug("%s: Option %d out of range (%d, %d)\n", |
@@ -1345,6 +1470,36 @@ int eeh_pe_get_state(struct eeh_pe *pe) | |||
1345 | } | 1470 | } |
1346 | EXPORT_SYMBOL_GPL(eeh_pe_get_state); | 1471 | EXPORT_SYMBOL_GPL(eeh_pe_get_state); |
1347 | 1472 | ||
1473 | static int eeh_pe_reenable_devices(struct eeh_pe *pe) | ||
1474 | { | ||
1475 | struct eeh_dev *edev, *tmp; | ||
1476 | struct pci_dev *pdev; | ||
1477 | int ret = 0; | ||
1478 | |||
1479 | /* Restore config space */ | ||
1480 | eeh_pe_restore_bars(pe); | ||
1481 | |||
1482 | /* | ||
1483 | * Reenable PCI devices as the devices passed | ||
1484 | * through are always enabled before the reset. | ||
1485 | */ | ||
1486 | eeh_pe_for_each_dev(pe, edev, tmp) { | ||
1487 | pdev = eeh_dev_to_pci_dev(edev); | ||
1488 | if (!pdev) | ||
1489 | continue; | ||
1490 | |||
1491 | ret = pci_reenable_device(pdev); | ||
1492 | if (ret) { | ||
1493 | pr_warn("%s: Failure %d reenabling %s\n", | ||
1494 | __func__, ret, pci_name(pdev)); | ||
1495 | return ret; | ||
1496 | } | ||
1497 | } | ||
1498 | |||
1499 | /* The PE is still in frozen state */ | ||
1500 | return eeh_unfreeze_pe(pe, true); | ||
1501 | } | ||
1502 | |||
1348 | /** | 1503 | /** |
1349 | * eeh_pe_reset - Issue PE reset according to specified type | 1504 | * eeh_pe_reset - Issue PE reset according to specified type |
1350 | * @pe: EEH PE | 1505 | * @pe: EEH PE |
@@ -1368,23 +1523,22 @@ int eeh_pe_reset(struct eeh_pe *pe, int option) | |||
1368 | switch (option) { | 1523 | switch (option) { |
1369 | case EEH_RESET_DEACTIVATE: | 1524 | case EEH_RESET_DEACTIVATE: |
1370 | ret = eeh_ops->reset(pe, option); | 1525 | ret = eeh_ops->reset(pe, option); |
1526 | eeh_pe_state_clear(pe, EEH_PE_RESET); | ||
1371 | if (ret) | 1527 | if (ret) |
1372 | break; | 1528 | break; |
1373 | 1529 | ||
1374 | /* | 1530 | ret = eeh_pe_reenable_devices(pe); |
1375 | * The PE is still in frozen state and we need to clear | ||
1376 | * that. It's good to clear frozen state after deassert | ||
1377 | * to avoid messy IO access during reset, which might | ||
1378 | * cause recursive frozen PE. | ||
1379 | */ | ||
1380 | ret = eeh_ops->set_option(pe, EEH_OPT_THAW_MMIO); | ||
1381 | if (!ret) | ||
1382 | ret = eeh_ops->set_option(pe, EEH_OPT_THAW_DMA); | ||
1383 | if (!ret) | ||
1384 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); | ||
1385 | break; | 1531 | break; |
1386 | case EEH_RESET_HOT: | 1532 | case EEH_RESET_HOT: |
1387 | case EEH_RESET_FUNDAMENTAL: | 1533 | case EEH_RESET_FUNDAMENTAL: |
1534 | /* | ||
1535 | * Proactively freeze the PE to drop all MMIO access | ||
1536 |  	 * during reset; such access must be avoided as it always | ||
1537 |  	 * causes a recursive EEH error. | ||
1538 | */ | ||
1539 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); | ||
1540 | |||
1541 | eeh_pe_state_mark(pe, EEH_PE_RESET); | ||
1388 | ret = eeh_ops->reset(pe, option); | 1542 | ret = eeh_ops->reset(pe, option); |
1389 | break; | 1543 | break; |
1390 | default: | 1544 | default: |
@@ -1413,9 +1567,6 @@ int eeh_pe_configure(struct eeh_pe *pe) | |||
1413 | if (!pe) | 1567 | if (!pe) |
1414 | return -ENODEV; | 1568 | return -ENODEV; |
1415 | 1569 | ||
1416 | /* Restore config space for the affected devices */ | ||
1417 | eeh_pe_restore_bars(pe); | ||
1418 | |||
1419 | return ret; | 1570 | return ret; |
1420 | } | 1571 | } |
1421 | EXPORT_SYMBOL_GPL(eeh_pe_configure); | 1572 | EXPORT_SYMBOL_GPL(eeh_pe_configure); |
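eeh_pe_change_owner() above decides between a plain unfreeze and a full PE reset by matching every device in the PE against a small ID table, with PCI_ANY_ID acting as a wildcard. A self-contained sketch of that match loop; struct dev_id and ANY_ID are simplified stand-ins for the kernel's pci_device_id and PCI_ANY_ID:

#include <stdio.h>

#define ANY_ID	0xffffffffu	/* stand-in for PCI_ANY_ID */

struct dev_id {
	unsigned int vendor, device;
};

/* Devices assumed (for this sketch) to need a full PE reset rather
 * than a simple unfreeze, mirroring eeh_reset_ids[] above.
 */
static const struct dev_id reset_ids[] = {
	{ 0x19a2, 0x0710 },	/* Emulex, BE */
	{ 0x10df, 0xe220 },	/* Emulex, Lancer */
	{ 0, 0 }		/* terminator */
};

static int needs_reset(unsigned int vendor, unsigned int device)
{
	const struct dev_id *id;

	for (id = &reset_ids[0]; id->vendor != 0; id++) {
		if (id->vendor != ANY_ID && id->vendor != vendor)
			continue;
		if (id->device != ANY_ID && id->device != device)
			continue;
		return 1;	/* matched: do a PE-level reset */
	}
	return 0;		/* no match: just unfreeze */
}

int main(void)
{
	printf("0x10df:0xe220 -> %s\n",
	       needs_reset(0x10df, 0xe220) ? "reset" : "unfreeze");
	printf("0x1234:0x5678 -> %s\n",
	       needs_reset(0x1234, 0x5678) ? "reset" : "unfreeze");
	return 0;
}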
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 6a0dcee8e931..3fd514f8e4b2 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -180,6 +180,22 @@ static bool eeh_dev_removed(struct eeh_dev *edev) | |||
180 | return false; | 180 | return false; |
181 | } | 181 | } |
182 | 182 | ||
183 | static void *eeh_dev_save_state(void *data, void *userdata) | ||
184 | { | ||
185 | struct eeh_dev *edev = data; | ||
186 | struct pci_dev *pdev; | ||
187 | |||
188 | if (!edev) | ||
189 | return NULL; | ||
190 | |||
191 | pdev = eeh_dev_to_pci_dev(edev); | ||
192 | if (!pdev) | ||
193 | return NULL; | ||
194 | |||
195 | pci_save_state(pdev); | ||
196 | return NULL; | ||
197 | } | ||
198 | |||
183 | /** | 199 | /** |
184 | * eeh_report_error - Report pci error to each device driver | 200 | * eeh_report_error - Report pci error to each device driver |
185 | * @data: eeh device | 201 | * @data: eeh device |
@@ -303,6 +319,22 @@ static void *eeh_report_reset(void *data, void *userdata) | |||
303 | return NULL; | 319 | return NULL; |
304 | } | 320 | } |
305 | 321 | ||
322 | static void *eeh_dev_restore_state(void *data, void *userdata) | ||
323 | { | ||
324 | struct eeh_dev *edev = data; | ||
325 | struct pci_dev *pdev; | ||
326 | |||
327 | if (!edev) | ||
328 | return NULL; | ||
329 | |||
330 | pdev = eeh_dev_to_pci_dev(edev); | ||
331 | if (!pdev) | ||
332 | return NULL; | ||
333 | |||
334 | pci_restore_state(pdev); | ||
335 | return NULL; | ||
336 | } | ||
337 | |||
306 | /** | 338 | /** |
307 | * eeh_report_resume - Tell device to resume normal operations | 339 | * eeh_report_resume - Tell device to resume normal operations |
308 | * @data: eeh device | 340 | * @data: eeh device |
@@ -450,38 +482,82 @@ static void *eeh_pe_detach_dev(void *data, void *userdata) | |||
450 | static void *__eeh_clear_pe_frozen_state(void *data, void *flag) | 482 | static void *__eeh_clear_pe_frozen_state(void *data, void *flag) |
451 | { | 483 | { |
452 | struct eeh_pe *pe = (struct eeh_pe *)data; | 484 | struct eeh_pe *pe = (struct eeh_pe *)data; |
453 | int i, rc; | 485 | bool *clear_sw_state = flag; |
486 | int i, rc = 1; | ||
454 | 487 | ||
455 | for (i = 0; i < 3; i++) { | 488 | for (i = 0; rc && i < 3; i++) |
456 | rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); | 489 | rc = eeh_unfreeze_pe(pe, clear_sw_state); |
457 | if (rc) | ||
458 | continue; | ||
459 | rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); | ||
460 | if (!rc) | ||
461 | break; | ||
462 | } | ||
463 | 490 | ||
464 | /* The PE has been isolated, clear it */ | 491 | /* Stop immediately on any errors */ |
465 | if (rc) { | 492 | if (rc) { |
466 | pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n", | 493 | pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n", |
467 | __func__, pe->phb->global_number, pe->addr, rc); | 494 | __func__, rc, pe->phb->global_number, pe->addr); |
468 | return (void *)pe; | 495 | return (void *)pe; |
469 | } | 496 | } |
470 | 497 | ||
471 | return NULL; | 498 | return NULL; |
472 | } | 499 | } |
473 | 500 | ||
474 | static int eeh_clear_pe_frozen_state(struct eeh_pe *pe) | 501 | static int eeh_clear_pe_frozen_state(struct eeh_pe *pe, |
502 | bool clear_sw_state) | ||
475 | { | 503 | { |
476 | void *rc; | 504 | void *rc; |
477 | 505 | ||
478 | rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL); | 506 | rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state); |
479 | if (!rc) | 507 | if (!rc) |
480 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); | 508 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); |
481 | 509 | ||
482 | return rc ? -EIO : 0; | 510 | return rc ? -EIO : 0; |
483 | } | 511 | } |
484 | 512 | ||
513 | int eeh_pe_reset_and_recover(struct eeh_pe *pe) | ||
514 | { | ||
515 | int result, ret; | ||
516 | |||
517 | /* Bail if the PE is being recovered */ | ||
518 | if (pe->state & EEH_PE_RECOVERING) | ||
519 | return 0; | ||
520 | |||
521 | /* Put the PE into recovery mode */ | ||
522 | eeh_pe_state_mark(pe, EEH_PE_RECOVERING); | ||
523 | |||
524 | /* Save states */ | ||
525 | eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL); | ||
526 | |||
527 | /* Report error */ | ||
528 | eeh_pe_dev_traverse(pe, eeh_report_error, &result); | ||
529 | |||
530 | /* Issue reset */ | ||
531 | eeh_pe_state_mark(pe, EEH_PE_RESET); | ||
532 | ret = eeh_reset_pe(pe); | ||
533 | if (ret) { | ||
534 | eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET); | ||
535 | return ret; | ||
536 | } | ||
537 | eeh_pe_state_clear(pe, EEH_PE_RESET); | ||
538 | |||
539 | /* Unfreeze the PE */ | ||
540 | ret = eeh_clear_pe_frozen_state(pe, true); | ||
541 | if (ret) { | ||
542 | eeh_pe_state_clear(pe, EEH_PE_RECOVERING); | ||
543 | return ret; | ||
544 | } | ||
545 | |||
546 | /* Notify completion of reset */ | ||
547 | eeh_pe_dev_traverse(pe, eeh_report_reset, &result); | ||
548 | |||
549 | /* Restore device state */ | ||
550 | eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL); | ||
551 | |||
552 | /* Resume */ | ||
553 | eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); | ||
554 | |||
555 | /* Clear recovery mode */ | ||
556 | eeh_pe_state_clear(pe, EEH_PE_RECOVERING); | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
485 | /** | 561 | /** |
486 | * eeh_reset_device - Perform actual reset of a pci slot | 562 | * eeh_reset_device - Perform actual reset of a pci slot |
487 | * @pe: EEH PE | 563 | * @pe: EEH PE |
@@ -540,7 +616,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) | |||
540 | eeh_pe_state_clear(pe, EEH_PE_RESET); | 616 | eeh_pe_state_clear(pe, EEH_PE_RESET); |
541 | 617 | ||
542 | /* Clear frozen state */ | 618 | /* Clear frozen state */ |
543 | rc = eeh_clear_pe_frozen_state(pe); | 619 | rc = eeh_clear_pe_frozen_state(pe, false); |
544 | if (rc) | 620 | if (rc) |
545 | return rc; | 621 | return rc; |
546 | 622 | ||
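The recovery sequence above leans on eeh_pe_dev_traverse(), which walks every device in the PE and hands each one to a callback such as eeh_dev_save_state() or eeh_dev_restore_state(); a non-NULL return value stops the walk early. A toy model of that callback-driven traversal, with an invented fake_dev type standing in for eeh_dev:

#include <stdio.h>

struct fake_dev {
	const char *name;
	int saved;
};

/* Callback in the same shape as eeh_dev_save_state() above */
static void *save_state(void *data, void *userdata)
{
	struct fake_dev *dev = data;

	if (!dev)
		return NULL;
	dev->saved = 1;			/* stands in for pci_save_state() */
	printf("saved state of %s\n", dev->name);
	return NULL;			/* NULL means "keep walking" */
}

/* Minimal traversal: apply fn to each device, stop on non-NULL */
static void traverse(struct fake_dev *devs, int n,
		     void *(*fn)(void *, void *), void *userdata)
{
	int i;

	for (i = 0; i < n; i++)
		if (fn(&devs[i], userdata))
			break;
}

int main(void)
{
	struct fake_dev devs[] = { { "dev0", 0 }, { "dev1", 0 } };

	traverse(devs, 2, save_state, NULL);
	return 0;
}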
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 00e3844525a6..53dd0915e690 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c | |||
@@ -428,7 +428,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev) | |||
428 | } | 428 | } |
429 | 429 | ||
430 | /* Remove the EEH device */ | 430 | /* Remove the EEH device */ |
431 | pe = edev->pe; | 431 | pe = eeh_dev_to_pe(edev); |
432 | edev->pe = NULL; | 432 | edev->pe = NULL; |
433 | list_del(&edev->list); | 433 | list_del(&edev->list); |
434 | 434 | ||
@@ -584,6 +584,8 @@ static void *__eeh_pe_state_clear(void *data, void *flag) | |||
584 | { | 584 | { |
585 | struct eeh_pe *pe = (struct eeh_pe *)data; | 585 | struct eeh_pe *pe = (struct eeh_pe *)data; |
586 | int state = *((int *)flag); | 586 | int state = *((int *)flag); |
587 | struct eeh_dev *edev, *tmp; | ||
588 | struct pci_dev *pdev; | ||
587 | 589 | ||
588 | /* Keep the state of permanently removed PE intact */ | 590 | /* Keep the state of permanently removed PE intact */ |
589 | if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) && | 591 | if ((pe->freeze_count > EEH_MAX_ALLOWED_FREEZES) && |
@@ -592,9 +594,22 @@ static void *__eeh_pe_state_clear(void *data, void *flag) | |||
592 | 594 | ||
593 | pe->state &= ~state; | 595 | pe->state &= ~state; |
594 | 596 | ||
595 | /* Clear check count since last isolation */ | 597 | /* |
596 | if (state & EEH_PE_ISOLATED) | 598 | * Special treatment on clearing isolated state. Clear |
597 | pe->check_count = 0; | 599 | * check count since last isolation and put all affected |
600 | * devices to normal state. | ||
601 | */ | ||
602 | if (!(state & EEH_PE_ISOLATED)) | ||
603 | return NULL; | ||
604 | |||
605 | pe->check_count = 0; | ||
606 | eeh_pe_for_each_dev(pe, edev, tmp) { | ||
607 | pdev = eeh_dev_to_pci_dev(edev); | ||
608 | if (!pdev) | ||
609 | continue; | ||
610 | |||
611 | pdev->error_state = pci_channel_io_normal; | ||
612 | } | ||
598 | 613 | ||
599 | return NULL; | 614 | return NULL; |
600 | } | 615 | } |
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index e2595ba4b720..f19b1e5cb060 100644 --- a/arch/powerpc/kernel/eeh_sysfs.c +++ b/arch/powerpc/kernel/eeh_sysfs.c | |||
@@ -54,6 +54,43 @@ EEH_SHOW_ATTR(eeh_mode, mode, "0x%x"); | |||
54 | EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); | 54 | EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x"); |
55 | EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); | 55 | EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x"); |
56 | 56 | ||
57 | static ssize_t eeh_pe_state_show(struct device *dev, | ||
58 | struct device_attribute *attr, char *buf) | ||
59 | { | ||
60 | struct pci_dev *pdev = to_pci_dev(dev); | ||
61 | struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); | ||
62 | int state; | ||
63 | |||
64 | if (!edev || !edev->pe) | ||
65 | return -ENODEV; | ||
66 | |||
67 | state = eeh_ops->get_state(edev->pe, NULL); | ||
68 |  	return sprintf(buf, "0x%08x 0x%08x\n", | ||
69 | state, edev->pe->state); | ||
70 | } | ||
71 | |||
72 | static ssize_t eeh_pe_state_store(struct device *dev, | ||
73 | struct device_attribute *attr, | ||
74 | const char *buf, size_t count) | ||
75 | { | ||
76 | struct pci_dev *pdev = to_pci_dev(dev); | ||
77 | struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); | ||
78 | |||
79 | if (!edev || !edev->pe) | ||
80 | return -ENODEV; | ||
81 | |||
82 | /* Nothing to do if it's not frozen */ | ||
83 | if (!(edev->pe->state & EEH_PE_ISOLATED)) | ||
84 | return count; | ||
85 | |||
86 | if (eeh_unfreeze_pe(edev->pe, true)) | ||
87 | return -EIO; | ||
88 | |||
89 | return count; | ||
90 | } | ||
91 | |||
92 | static DEVICE_ATTR_RW(eeh_pe_state); | ||
93 | |||
57 | void eeh_sysfs_add_device(struct pci_dev *pdev) | 94 | void eeh_sysfs_add_device(struct pci_dev *pdev) |
58 | { | 95 | { |
59 | struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); | 96 | struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); |
@@ -68,9 +105,10 @@ void eeh_sysfs_add_device(struct pci_dev *pdev) | |||
68 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); | 105 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); |
69 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); | 106 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); |
70 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); | 107 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); |
108 | rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_state); | ||
71 | 109 | ||
72 | if (rc) | 110 | if (rc) |
73 | printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); | 111 | pr_warn("EEH: Unable to create sysfs entries\n"); |
74 | else if (edev) | 112 | else if (edev) |
75 | edev->mode |= EEH_DEV_SYSFS; | 113 | edev->mode |= EEH_DEV_SYSFS; |
76 | } | 114 | } |
@@ -92,6 +130,7 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev) | |||
92 | device_remove_file(&pdev->dev, &dev_attr_eeh_mode); | 130 | device_remove_file(&pdev->dev, &dev_attr_eeh_mode); |
93 | device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); | 131 | device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); |
94 | device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); | 132 | device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); |
133 | device_remove_file(&pdev->dev, &dev_attr_eeh_pe_state); | ||
95 | 134 | ||
96 | if (edev) | 135 | if (edev) |
97 | edev->mode &= ~EEH_DEV_SYSFS; | 136 | edev->mode &= ~EEH_DEV_SYSFS; |
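The new eeh_pe_state attribute reads back the firmware-reported PE state next to the kernel's own PE flags, and any write asks the kernel to unfreeze the PE when it is currently isolated. A small userspace sketch of driving it; the PCI address in the path is hypothetical and must be replaced with a real device:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical PCI address; substitute a device that exists */
	const char *path = "/sys/bus/pci/devices/0000:01:00.0/eeh_pe_state";
	char buf[64];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read: firmware PE state and the kernel's PE state flags */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("eeh_pe_state: %s", buf);
	}

	/* Write: request an unfreeze of an isolated PE */
	if (write(fd, "1", 1) < 0)
		perror("write");

	close(fd);
	return 0;
}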
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 7ee876d2adb5..fafff8dbd5d9 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S | |||
@@ -104,12 +104,15 @@ turn_on_mmu: | |||
104 | * task's thread_struct. | 104 | * task's thread_struct. |
105 | */ | 105 | */ |
106 | #define EXCEPTION_PROLOG \ | 106 | #define EXCEPTION_PROLOG \ |
107 | mtspr SPRN_SPRG_SCRATCH0,r10; \ | 107 | EXCEPTION_PROLOG_0; \ |
108 | mtspr SPRN_SPRG_SCRATCH1,r11; \ | ||
109 | mfcr r10; \ | ||
110 | EXCEPTION_PROLOG_1; \ | 108 | EXCEPTION_PROLOG_1; \ |
111 | EXCEPTION_PROLOG_2 | 109 | EXCEPTION_PROLOG_2 |
112 | 110 | ||
111 | #define EXCEPTION_PROLOG_0 \ | ||
112 | mtspr SPRN_SPRG_SCRATCH0,r10; \ | ||
113 | mtspr SPRN_SPRG_SCRATCH1,r11; \ | ||
114 | mfcr r10 | ||
115 | |||
113 | #define EXCEPTION_PROLOG_1 \ | 116 | #define EXCEPTION_PROLOG_1 \ |
114 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ | 117 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \ |
115 | andi. r11,r11,MSR_PR; \ | 118 | andi. r11,r11,MSR_PR; \ |
@@ -145,6 +148,14 @@ turn_on_mmu: | |||
145 | SAVE_2GPRS(7, r11) | 148 | SAVE_2GPRS(7, r11) |
146 | 149 | ||
147 | /* | 150 | /* |
151 | * Exception exit code. | ||
152 | */ | ||
153 | #define EXCEPTION_EPILOG_0 \ | ||
154 | mtcr r10; \ | ||
155 | mfspr r10,SPRN_SPRG_SCRATCH0; \ | ||
156 | mfspr r11,SPRN_SPRG_SCRATCH1 | ||
157 | |||
158 | /* | ||
148 | * Note: code which follows this uses cr0.eq (set if from kernel), | 159 | * Note: code which follows this uses cr0.eq (set if from kernel), |
149 | * r11, r12 (SRR0), and r9 (SRR1). | 160 | * r11, r12 (SRR0), and r9 (SRR1). |
150 | * | 161 | * |
@@ -293,16 +304,8 @@ InstructionTLBMiss: | |||
293 | #ifdef CONFIG_8xx_CPU6 | 304 | #ifdef CONFIG_8xx_CPU6 |
294 | stw r3, 8(r0) | 305 | stw r3, 8(r0) |
295 | #endif | 306 | #endif |
296 | DO_8xx_CPU6(0x3f80, r3) | 307 | EXCEPTION_PROLOG_0 |
297 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | 308 | mtspr SPRN_SPRG_SCRATCH2, r10 |
298 | mfcr r10 | ||
299 | #ifdef CONFIG_8xx_CPU6 | ||
300 | stw r10, 0(r0) | ||
301 | stw r11, 4(r0) | ||
302 | #else | ||
303 | mtspr SPRN_DAR, r10 | ||
304 | mtspr SPRN_SPRG2, r11 | ||
305 | #endif | ||
306 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ | 309 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ |
307 | #ifdef CONFIG_8xx_CPU15 | 310 | #ifdef CONFIG_8xx_CPU15 |
308 | addi r11, r10, 0x1000 | 311 | addi r11, r10, 0x1000 |
@@ -359,18 +362,11 @@ InstructionTLBMiss: | |||
359 | mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ | 362 | mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ |
360 | 363 | ||
361 | /* Restore registers */ | 364 | /* Restore registers */ |
362 | #ifndef CONFIG_8xx_CPU6 | 365 | #ifdef CONFIG_8xx_CPU6 |
363 | mfspr r10, SPRN_DAR | ||
364 | mtcr r10 | ||
365 | mtspr SPRN_DAR, r11 /* Tag DAR */ | ||
366 | mfspr r11, SPRN_SPRG2 | ||
367 | #else | ||
368 | lwz r11, 0(r0) | ||
369 | mtcr r11 | ||
370 | lwz r11, 4(r0) | ||
371 | lwz r3, 8(r0) | 366 | lwz r3, 8(r0) |
372 | #endif | 367 | #endif |
373 | mfspr r10, SPRN_M_TW | 368 | mfspr r10, SPRN_SPRG_SCRATCH2 |
369 | EXCEPTION_EPILOG_0 | ||
374 | rfi | 370 | rfi |
375 | 2: | 371 | 2: |
376 | mfspr r11, SPRN_SRR1 | 372 | mfspr r11, SPRN_SRR1 |
@@ -381,19 +377,11 @@ InstructionTLBMiss: | |||
381 | mtspr SPRN_SRR1, r11 | 377 | mtspr SPRN_SRR1, r11 |
382 | 378 | ||
383 | /* Restore registers */ | 379 | /* Restore registers */ |
384 | #ifndef CONFIG_8xx_CPU6 | 380 | #ifdef CONFIG_8xx_CPU6 |
385 | mfspr r10, SPRN_DAR | ||
386 | mtcr r10 | ||
387 | li r11, 0x00f0 | ||
388 | mtspr SPRN_DAR, r11 /* Tag DAR */ | ||
389 | mfspr r11, SPRN_SPRG2 | ||
390 | #else | ||
391 | lwz r11, 0(r0) | ||
392 | mtcr r11 | ||
393 | lwz r11, 4(r0) | ||
394 | lwz r3, 8(r0) | 381 | lwz r3, 8(r0) |
395 | #endif | 382 | #endif |
396 | mfspr r10, SPRN_M_TW | 383 | mfspr r10, SPRN_SPRG_SCRATCH2 |
384 | EXCEPTION_EPILOG_0 | ||
397 | b InstructionAccess | 385 | b InstructionAccess |
398 | 386 | ||
399 | . = 0x1200 | 387 | . = 0x1200 |
@@ -401,16 +389,8 @@ DataStoreTLBMiss: | |||
401 | #ifdef CONFIG_8xx_CPU6 | 389 | #ifdef CONFIG_8xx_CPU6 |
402 | stw r3, 8(r0) | 390 | stw r3, 8(r0) |
403 | #endif | 391 | #endif |
404 | DO_8xx_CPU6(0x3f80, r3) | 392 | EXCEPTION_PROLOG_0 |
405 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | 393 | mtspr SPRN_SPRG_SCRATCH2, r10 |
406 | mfcr r10 | ||
407 | #ifdef CONFIG_8xx_CPU6 | ||
408 | stw r10, 0(r0) | ||
409 | stw r11, 4(r0) | ||
410 | #else | ||
411 | mtspr SPRN_DAR, r10 | ||
412 | mtspr SPRN_SPRG2, r11 | ||
413 | #endif | ||
414 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | 394 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ |
415 | 395 | ||
416 | /* If we are faulting a kernel address, we have to use the | 396 | /* If we are faulting a kernel address, we have to use the |
@@ -483,19 +463,12 @@ DataStoreTLBMiss: | |||
483 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ | 463 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ |
484 | 464 | ||
485 | /* Restore registers */ | 465 | /* Restore registers */ |
486 | #ifndef CONFIG_8xx_CPU6 | 466 | #ifdef CONFIG_8xx_CPU6 |
487 | mfspr r10, SPRN_DAR | ||
488 | mtcr r10 | ||
489 | mtspr SPRN_DAR, r11 /* Tag DAR */ | ||
490 | mfspr r11, SPRN_SPRG2 | ||
491 | #else | ||
492 | mtspr SPRN_DAR, r11 /* Tag DAR */ | ||
493 | lwz r11, 0(r0) | ||
494 | mtcr r11 | ||
495 | lwz r11, 4(r0) | ||
496 | lwz r3, 8(r0) | 467 | lwz r3, 8(r0) |
497 | #endif | 468 | #endif |
498 | mfspr r10, SPRN_M_TW | 469 | mtspr SPRN_DAR, r11 /* Tag DAR */ |
470 | mfspr r10, SPRN_SPRG_SCRATCH2 | ||
471 | EXCEPTION_EPILOG_0 | ||
499 | rfi | 472 | rfi |
500 | 473 | ||
501 | /* This is an instruction TLB error on the MPC8xx. This could be due | 474 | /* This is an instruction TLB error on the MPC8xx. This could be due |
@@ -507,35 +480,18 @@ InstructionTLBError: | |||
507 | b InstructionAccess | 480 | b InstructionAccess |
508 | 481 | ||
509 | /* This is the data TLB error on the MPC8xx. This could be due to | 482 | /* This is the data TLB error on the MPC8xx. This could be due to |
510 | * many reasons, including a dirty update to a pte. We can catch that | 483 | * many reasons, including a dirty update to a pte. We bail out to |
511 | * one here, but anything else is an error. First, we track down the | 484 | * a higher level function that can handle it. |
512 | * Linux pte. If it is valid, write access is allowed, but the | ||
513 | * page dirty bit is not set, we will set it and reload the TLB. For | ||
514 | * any other case, we bail out to a higher level function that can | ||
515 | * handle it. | ||
516 | */ | 485 | */ |
517 | . = 0x1400 | 486 | . = 0x1400 |
518 | DataTLBError: | 487 | DataTLBError: |
519 | #ifdef CONFIG_8xx_CPU6 | 488 | EXCEPTION_PROLOG_0 |
520 | stw r3, 8(r0) | ||
521 | #endif | ||
522 | DO_8xx_CPU6(0x3f80, r3) | ||
523 | mtspr SPRN_M_TW, r10 /* Save a couple of working registers */ | ||
524 | mfcr r10 | ||
525 | stw r10, 0(r0) | ||
526 | stw r11, 4(r0) | ||
527 | 489 | ||
528 | mfspr r10, SPRN_DAR | 490 | mfspr r11, SPRN_DAR |
529 | cmpwi cr0, r10, 0x00f0 | 491 | cmpwi cr0, r11, 0x00f0 |
530 | beq- FixupDAR /* must be a buggy dcbX, icbi insn. */ | 492 | beq- FixupDAR /* must be a buggy dcbX, icbi insn. */ |
531 | DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR */ | 493 | DARFixed:/* Return from dcbx instruction bug workaround */ |
532 | mfspr r10, SPRN_M_TW /* Restore registers */ | 494 | EXCEPTION_EPILOG_0 |
533 | lwz r11, 0(r0) | ||
534 | mtcr r11 | ||
535 | lwz r11, 4(r0) | ||
536 | #ifdef CONFIG_8xx_CPU6 | ||
537 | lwz r3, 8(r0) | ||
538 | #endif | ||
539 | b DataAccess | 495 | b DataAccess |
540 | 496 | ||
541 | EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) | 497 | EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) |
@@ -559,11 +515,15 @@ DARFixed:/* Return from dcbx instruction bug workaround, r10 holds value of DAR | |||
559 | 515 | ||
560 | /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions | 516 | /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions |
561 | * by decoding the registers used by the dcbx instruction and adding them. | 517 | * by decoding the registers used by the dcbx instruction and adding them. |
562 | * DAR is set to the calculated address and r10 also holds the EA on exit. | 518 | * DAR is set to the calculated address. |
563 | */ | 519 | */ |
564 | /* define if you don't want to use self modifying code */ | 520 | /* define if you don't want to use self modifying code */ |
565 | #define NO_SELF_MODIFYING_CODE | 521 | #define NO_SELF_MODIFYING_CODE |
566 | FixupDAR:/* Entry point for dcbx workaround. */ | 522 | FixupDAR:/* Entry point for dcbx workaround. */ |
523 | #ifdef CONFIG_8xx_CPU6 | ||
524 | stw r3, 8(r0) | ||
525 | #endif | ||
526 | mtspr SPRN_SPRG_SCRATCH2, r10 | ||
567 | /* fetch instruction from memory. */ | 527 | /* fetch instruction from memory. */ |
568 | mfspr r10, SPRN_SRR0 | 528 | mfspr r10, SPRN_SRR0 |
569 | andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ | 529 | andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ |
@@ -579,16 +539,17 @@ FixupDAR:/* Entry point for dcbx workaround. */ | |||
579 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | 539 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ |
580 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ | 540 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ |
581 | lwz r11, 0(r11) /* Get the pte */ | 541 | lwz r11, 0(r11) /* Get the pte */ |
542 | #ifdef CONFIG_8xx_CPU6 | ||
543 | lwz r3, 8(r0) /* restore r3 from memory */ | ||
544 | #endif | ||
582 | /* concat physical page address(r11) and page offset(r10) */ | 545 | /* concat physical page address(r11) and page offset(r10) */ |
583 | rlwimi r11, r10, 0, 20, 31 | 546 | rlwimi r11, r10, 0, 20, 31 |
584 | lwz r11,0(r11) | 547 | lwz r11,0(r11) |
585 | /* Check if it really is a dcbx instruction. */ | 548 | /* Check if it really is a dcbx instruction. */ |
586 | /* dcbt and dcbtst does not generate DTLB Misses/Errors, | 549 | /* dcbt and dcbtst does not generate DTLB Misses/Errors, |
587 | * no need to include them here */ | 550 | * no need to include them here */ |
588 | srwi r10, r11, 26 /* check if major OP code is 31 */ | 551 | xoris r10, r11, 0x7c00 /* check if major OP code is 31 */ |
589 | cmpwi cr0, r10, 31 | 552 | rlwinm r10, r10, 0, 21, 5 |
590 | bne- 141f | ||
591 | rlwinm r10, r11, 0, 21, 30 | ||
592 | cmpwi cr0, r10, 2028 /* Is dcbz? */ | 553 | cmpwi cr0, r10, 2028 /* Is dcbz? */ |
593 | beq+ 142f | 554 | beq+ 142f |
594 | cmpwi cr0, r10, 940 /* Is dcbi? */ | 555 | cmpwi cr0, r10, 940 /* Is dcbi? */ |
@@ -599,16 +560,13 @@ FixupDAR:/* Entry point for dcbx workaround. */ | |||
599 | beq+ 142f | 560 | beq+ 142f |
600 | cmpwi cr0, r10, 1964 /* Is icbi? */ | 561 | cmpwi cr0, r10, 1964 /* Is icbi? */ |
601 | beq+ 142f | 562 | beq+ 142f |
602 | 141: mfspr r10, SPRN_DAR /* r10 must hold DAR at exit */ | 563 | 141: mfspr r10,SPRN_SPRG_SCRATCH2 |
603 | b DARFixed /* Nope, go back to normal TLB processing */ | 564 | b DARFixed /* Nope, go back to normal TLB processing */ |
604 | 565 | ||
605 | 144: mfspr r10, SPRN_DSISR | 566 | 144: mfspr r10, SPRN_DSISR |
606 | rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ | 567 | rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */ |
607 | mtspr SPRN_DSISR, r10 | 568 | mtspr SPRN_DSISR, r10 |
608 | 142: /* continue, it was a dcbx, dcbi instruction. */ | 569 | 142: /* continue, it was a dcbx, dcbi instruction. */ |
609 | #ifdef CONFIG_8xx_CPU6 | ||
610 | lwz r3, 8(r0) /* restore r3 from memory */ | ||
611 | #endif | ||
612 | #ifndef NO_SELF_MODIFYING_CODE | 570 | #ifndef NO_SELF_MODIFYING_CODE |
613 | andis. r10,r11,0x1f /* test if reg RA is r0 */ | 571 | andis. r10,r11,0x1f /* test if reg RA is r0 */ |
614 | li r10,modified_instr@l | 572 | li r10,modified_instr@l |
@@ -619,14 +577,15 @@ FixupDAR:/* Entry point for dcbx workaround. */ | |||
619 | stw r11,0(r10) /* store add/and instruction */ | 577 | stw r11,0(r10) /* store add/and instruction */ |
620 | dcbf 0,r10 /* flush new instr. to memory. */ | 578 | dcbf 0,r10 /* flush new instr. to memory. */ |
621 | icbi 0,r10 /* invalidate instr. cache line */ | 579 | icbi 0,r10 /* invalidate instr. cache line */ |
622 | lwz r11, 4(r0) /* restore r11 from memory */ | 580 | mfspr r11, SPRN_SPRG_SCRATCH1 /* restore r11 */ |
623 | mfspr r10, SPRN_M_TW /* restore r10 from M_TW */ | 581 | mfspr r10, SPRN_SPRG_SCRATCH0 /* restore r10 */ |
624 | isync /* Wait until new instr is loaded from memory */ | 582 | isync /* Wait until new instr is loaded from memory */ |
625 | modified_instr: | 583 | modified_instr: |
626 | .space 4 /* this is where the add instr. is stored */ | 584 | .space 4 /* this is where the add instr. is stored */ |
627 | bne+ 143f | 585 | bne+ 143f |
628 | subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */ | 586 | subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */ |
629 | 143: mtdar r10 /* store faulting EA in DAR */ | 587 | 143: mtdar r10 /* store faulting EA in DAR */ |
588 | mfspr r10,SPRN_SPRG_SCRATCH2 | ||
630 | b DARFixed /* Go back to normal TLB handling */ | 589 | b DARFixed /* Go back to normal TLB handling */ |
631 | #else | 590 | #else |
632 | mfctr r10 | 591 | mfctr r10 |
@@ -680,13 +639,16 @@ modified_instr: | |||
680 | mfdar r11 | 639 | mfdar r11 |
681 | mtctr r11 /* restore ctr reg from DAR */ | 640 | mtctr r11 /* restore ctr reg from DAR */ |
682 | mtdar r10 /* save fault EA to DAR */ | 641 | mtdar r10 /* save fault EA to DAR */ |
642 | mfspr r10,SPRN_SPRG_SCRATCH2 | ||
683 | b DARFixed /* Go back to normal TLB handling */ | 643 | b DARFixed /* Go back to normal TLB handling */ |
684 | 644 | ||
685 | /* special handling for r10,r11 since these are modified already */ | 645 | /* special handling for r10,r11 since these are modified already */ |
686 | 153: lwz r11, 4(r0) /* load r11 from memory */ | 646 | 153: mfspr r11, SPRN_SPRG_SCRATCH1 /* load r11 from SPRN_SPRG_SCRATCH1 */ |
687 | b 155f | 647 | add r10, r10, r11 /* add it */ |
688 | 154: mfspr r11, SPRN_M_TW /* load r10 from M_TW */ | 648 | mfctr r11 /* restore r11 */ |
689 | 155: add r10, r10, r11 /* add it */ | 649 | b 151b |
650 | 154: mfspr r11, SPRN_SPRG_SCRATCH0 /* load r10 from SPRN_SPRG_SCRATCH0 */ | ||
651 | add r10, r10, r11 /* add it */ | ||
690 | mfctr r11 /* restore r11 */ | 652 | mfctr r11 /* restore r11 */ |
691 | b 151b | 653 | b 151b |
692 | #endif | 654 | #endif |
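
The FixupDAR rework above does two things: it keeps r10/r11/r3 in SPRG scratch registers instead of spilling them to low memory, and it folds the separate primary-opcode check (srwi/cmpwi/bne) plus mask into a single xoris/rlwinm pair before comparing against the dcbx extended opcodes. A standalone C sketch of the test being performed, not kernel code; the constants 2028, 940 and 1964 in the assembly are the extended opcodes shifted left by one, as they sit in bits 21-30 of the instruction word:

#include <stdint.h>
#include <stdbool.h>

/* Editor's sketch of the dcbx detection done by FixupDAR. */
static bool is_dcbx_like(uint32_t insn)
{
	uint32_t major = insn >> 26;           /* primary opcode, bits 0-5    */
	uint32_t xo    = (insn >> 1) & 0x3ff;  /* extended opcode, bits 21-30 */

	if (major != 31)        /* every cache-op form uses primary opcode 31 */
		return false;

	switch (xo) {
	case 1014:  /* dcbz  (1014 << 1 == 2028)                          */
	case 470:   /* dcbi  ( 470 << 1 ==  940)                          */
	case 982:   /* icbi  ( 982 << 1 == 1964)                          */
	case 54:    /* dcbst, dcbf: apparently checked in the lines       */
	case 86:    /*              elided from this hunk                  */
		return true;
	default:    /* dcbt/dcbtst never fault, so they are not checked   */
		return false;
	}
}
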
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 0bb5918faaaf..1f7d84e2e8b2 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c | |||
@@ -293,7 +293,7 @@ out: | |||
293 | /* | 293 | /* |
294 | * Handle single-step exceptions following a DABR hit. | 294 | * Handle single-step exceptions following a DABR hit. |
295 | */ | 295 | */ |
296 | int __kprobes single_step_dabr_instruction(struct die_args *args) | 296 | static int __kprobes single_step_dabr_instruction(struct die_args *args) |
297 | { | 297 | { |
298 | struct pt_regs *regs = args->regs; | 298 | struct pt_regs *regs = args->regs; |
299 | struct perf_event *bp = NULL; | 299 | struct perf_event *bp = NULL; |
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 1114d13ac19f..ac86c53e2542 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -55,7 +55,7 @@ static struct device ibmebus_bus_device = { /* fake "parent" device */ | |||
55 | struct bus_type ibmebus_bus_type; | 55 | struct bus_type ibmebus_bus_type; |
56 | 56 | ||
57 | /* These devices will automatically be added to the bus during init */ | 57 | /* These devices will automatically be added to the bus during init */ |
58 | static struct of_device_id __initdata ibmebus_matches[] = { | 58 | static const struct of_device_id ibmebus_matches[] __initconst = { |
59 | { .compatible = "IBM,lhca" }, | 59 | { .compatible = "IBM,lhca" }, |
60 | { .compatible = "IBM,lhea" }, | 60 | { .compatible = "IBM,lhea" }, |
61 | {}, | 61 | {}, |
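
This and several later hunks (legacy_serial.c, of_platform.c) constify of_device_id match tables; tables only referenced during init are additionally marked __initconst so they can be discarded with the init sections. The general shape, with placeholder compatible strings:

#include <linux/init.h>
#include <linux/of.h>

/* Illustrative table only; the strings below are made up. */
static const struct of_device_id example_matches[] __initconst = {
	{ .compatible = "vendor,example-device" },
	{ .type = "soc" },
	{ /* sentinel */ },
};
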
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index be05841396cf..c0754bbf8118 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -73,7 +73,7 @@ _GLOBAL(power7_powersave_common) | |||
73 | 73 | ||
74 | /* Check if something happened while soft-disabled */ | 74 | /* Check if something happened while soft-disabled */ |
75 | lbz r0,PACAIRQHAPPENED(r13) | 75 | lbz r0,PACAIRQHAPPENED(r13) |
76 | cmpwi cr0,r0,0 | 76 | andi. r0,r0,~PACA_IRQ_HARD_DIS@l |
77 | beq 1f | 77 | beq 1f |
78 | cmpwi cr0,r4,0 | 78 | cmpwi cr0,r4,0 |
79 | beq 1f | 79 | beq 1f |
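
The idle_power7.S hunk changes the soft-disabled check from "anything in irq_happened" to "anything other than the hard-disable marker", since PACA_IRQ_HARD_DIS by itself does not mean an interrupt is pending. A standalone sketch of the intended logic; the 0x01 value is illustrative, the real mask comes from hw_irq.h:

/* Editor's sketch, not kernel code. */
#define PACA_IRQ_HARD_DIS 0x01	/* assumed: "EE was hard-disabled" marker */

static int must_abort_powersave(unsigned char irq_happened)
{
	/* Any other bit (external, decrementer, doorbell, ...) means a real
	 * interrupt arrived while soft-disabled, so skip nap/sleep. */
	return (irq_happened & ~PACA_IRQ_HARD_DIS) != 0;
}
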
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 4c5891de162e..8eb857f216c1 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -444,13 +444,13 @@ void migrate_irqs(void) | |||
444 | 444 | ||
445 | cpumask_and(mask, data->affinity, map); | 445 | cpumask_and(mask, data->affinity, map); |
446 | if (cpumask_any(mask) >= nr_cpu_ids) { | 446 | if (cpumask_any(mask) >= nr_cpu_ids) { |
447 | printk("Breaking affinity for irq %i\n", irq); | 447 | pr_warn("Breaking affinity for irq %i\n", irq); |
448 | cpumask_copy(mask, map); | 448 | cpumask_copy(mask, map); |
449 | } | 449 | } |
450 | if (chip->irq_set_affinity) | 450 | if (chip->irq_set_affinity) |
451 | chip->irq_set_affinity(data, mask, true); | 451 | chip->irq_set_affinity(data, mask, true); |
452 | else if (desc->action && !(warned++)) | 452 | else if (desc->action && !(warned++)) |
453 | printk("Cannot set affinity for irq %i\n", irq); | 453 | pr_err("Cannot set affinity for irq %i\n", irq); |
454 | } | 454 | } |
455 | 455 | ||
456 | free_cpumask_var(mask); | 456 | free_cpumask_var(mask); |
@@ -470,7 +470,7 @@ static inline void check_stack_overflow(void) | |||
470 | 470 | ||
471 | /* check for stack overflow: is there less than 2KB free? */ | 471 | /* check for stack overflow: is there less than 2KB free? */ |
472 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { | 472 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { |
473 | printk("do_IRQ: stack overflow: %ld\n", | 473 | pr_err("do_IRQ: stack overflow: %ld\n", |
474 | sp - sizeof(struct thread_info)); | 474 | sp - sizeof(struct thread_info)); |
475 | dump_stack(); | 475 | dump_stack(); |
476 | } | 476 | } |
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 936258881c98..7b750c4ed5c7 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c | |||
@@ -35,7 +35,7 @@ static struct legacy_serial_info { | |||
35 | phys_addr_t taddr; | 35 | phys_addr_t taddr; |
36 | } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; | 36 | } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; |
37 | 37 | ||
38 | static struct of_device_id legacy_serial_parents[] __initdata = { | 38 | static const struct of_device_id legacy_serial_parents[] __initconst = { |
39 | {.type = "soc",}, | 39 | {.type = "soc",}, |
40 | {.type = "tsi-bridge",}, | 40 | {.type = "tsi-bridge",}, |
41 | {.type = "opb", }, | 41 | {.type = "opb", }, |
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 6cff040bf456..c94d2e018d84 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c | |||
@@ -15,6 +15,9 @@ | |||
15 | along with this program; if not, write to the Free Software | 15 | along with this program; if not, write to the Free Software |
16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | */ | 17 | */ |
18 | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | |||
18 | #include <linux/module.h> | 21 | #include <linux/module.h> |
19 | #include <linux/moduleloader.h> | 22 | #include <linux/moduleloader.h> |
20 | #include <linux/elf.h> | 23 | #include <linux/elf.h> |
@@ -28,12 +31,6 @@ | |||
28 | #include <linux/sort.h> | 31 | #include <linux/sort.h> |
29 | #include <asm/setup.h> | 32 | #include <asm/setup.h> |
30 | 33 | ||
31 | #if 0 | ||
32 | #define DEBUGP printk | ||
33 | #else | ||
34 | #define DEBUGP(fmt , ...) | ||
35 | #endif | ||
36 | |||
37 | /* Count how many different relocations (different symbol, different | 34 | /* Count how many different relocations (different symbol, different |
38 | addend) */ | 35 | addend) */ |
39 | static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) | 36 | static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num) |
@@ -121,8 +118,8 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr, | |||
121 | continue; | 118 | continue; |
122 | 119 | ||
123 | if (sechdrs[i].sh_type == SHT_RELA) { | 120 | if (sechdrs[i].sh_type == SHT_RELA) { |
124 | DEBUGP("Found relocations in section %u\n", i); | 121 | pr_debug("Found relocations in section %u\n", i); |
125 | DEBUGP("Ptr: %p. Number: %u\n", | 122 | pr_debug("Ptr: %p. Number: %u\n", |
126 | (void *)hdr + sechdrs[i].sh_offset, | 123 | (void *)hdr + sechdrs[i].sh_offset, |
127 | sechdrs[i].sh_size / sizeof(Elf32_Rela)); | 124 | sechdrs[i].sh_size / sizeof(Elf32_Rela)); |
128 | 125 | ||
@@ -161,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, | |||
161 | me->arch.core_plt_section = i; | 158 | me->arch.core_plt_section = i; |
162 | } | 159 | } |
163 | if (!me->arch.core_plt_section || !me->arch.init_plt_section) { | 160 | if (!me->arch.core_plt_section || !me->arch.init_plt_section) { |
164 | printk("Module doesn't contain .plt or .init.plt sections.\n"); | 161 | pr_err("Module doesn't contain .plt or .init.plt sections.\n"); |
165 | return -ENOEXEC; | 162 | return -ENOEXEC; |
166 | } | 163 | } |
167 | 164 | ||
@@ -189,7 +186,7 @@ static uint32_t do_plt_call(void *location, | |||
189 | { | 186 | { |
190 | struct ppc_plt_entry *entry; | 187 | struct ppc_plt_entry *entry; |
191 | 188 | ||
192 | DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); | 189 | pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); |
193 | /* Init, or core PLT? */ | 190 | /* Init, or core PLT? */ |
194 | if (location >= mod->module_core | 191 | if (location >= mod->module_core |
195 | && location < mod->module_core + mod->core_size) | 192 | && location < mod->module_core + mod->core_size) |
@@ -208,7 +205,7 @@ static uint32_t do_plt_call(void *location, | |||
208 | entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ | 205 | entry->jump[2] = 0x7d8903a6; /* mtctr r12 */ |
209 | entry->jump[3] = 0x4e800420; /* bctr */ | 206 | entry->jump[3] = 0x4e800420; /* bctr */ |
210 | 207 | ||
211 | DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); | 208 | pr_debug("Initialized plt for 0x%x at %p\n", val, entry); |
212 | return (uint32_t)entry; | 209 | return (uint32_t)entry; |
213 | } | 210 | } |
214 | 211 | ||
@@ -224,7 +221,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
224 | uint32_t *location; | 221 | uint32_t *location; |
225 | uint32_t value; | 222 | uint32_t value; |
226 | 223 | ||
227 | DEBUGP("Applying ADD relocate section %u to %u\n", relsec, | 224 | pr_debug("Applying ADD relocate section %u to %u\n", relsec, |
228 | sechdrs[relsec].sh_info); | 225 | sechdrs[relsec].sh_info); |
229 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { | 226 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { |
230 | /* This is where to make the change */ | 227 | /* This is where to make the change */ |
@@ -268,17 +265,17 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
268 | sechdrs, module); | 265 | sechdrs, module); |
269 | 266 | ||
270 | /* Only replace bits 2 through 26 */ | 267 | /* Only replace bits 2 through 26 */ |
271 | DEBUGP("REL24 value = %08X. location = %08X\n", | 268 | pr_debug("REL24 value = %08X. location = %08X\n", |
272 | value, (uint32_t)location); | 269 | value, (uint32_t)location); |
273 | DEBUGP("Location before: %08X.\n", | 270 | pr_debug("Location before: %08X.\n", |
274 | *(uint32_t *)location); | 271 | *(uint32_t *)location); |
275 | *(uint32_t *)location | 272 | *(uint32_t *)location |
276 | = (*(uint32_t *)location & ~0x03fffffc) | 273 | = (*(uint32_t *)location & ~0x03fffffc) |
277 | | ((value - (uint32_t)location) | 274 | | ((value - (uint32_t)location) |
278 | & 0x03fffffc); | 275 | & 0x03fffffc); |
279 | DEBUGP("Location after: %08X.\n", | 276 | pr_debug("Location after: %08X.\n", |
280 | *(uint32_t *)location); | 277 | *(uint32_t *)location); |
281 | DEBUGP("ie. jump to %08X+%08X = %08X\n", | 278 | pr_debug("ie. jump to %08X+%08X = %08X\n", |
282 | *(uint32_t *)location & 0x03fffffc, | 279 | *(uint32_t *)location & 0x03fffffc, |
283 | (uint32_t)location, | 280 | (uint32_t)location, |
284 | (*(uint32_t *)location & 0x03fffffc) | 281 | (*(uint32_t *)location & 0x03fffffc) |
@@ -291,7 +288,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
291 | break; | 288 | break; |
292 | 289 | ||
293 | default: | 290 | default: |
294 | printk("%s: unknown ADD relocation: %u\n", | 291 | pr_err("%s: unknown ADD relocation: %u\n", |
295 | module->name, | 292 | module->name, |
296 | ELF32_R_TYPE(rela[i].r_info)); | 293 | ELF32_R_TYPE(rela[i].r_info)); |
297 | return -ENOEXEC; | 294 | return -ENOEXEC; |
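
module_32.c (and module_64.c below) drop the local DEBUGP() macro in favour of pr_debug()/pr_err(), with pr_fmt() prefixing every message with the file's KBUILD_MODNAME; irq.c above gets the same printk to pr_warn/pr_err treatment. The pattern, sketched with a made-up message:

/* pr_fmt() must be defined before the first include that pulls in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void report(unsigned int relsec, unsigned int info)
{
	/* Compiled away unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled. */
	pr_debug("Applying ADD relocate section %u to %u\n", relsec, info);

	/* Always emitted, at KERN_ERR, with the same prefix. */
	pr_err("unknown relocation type %u\n", info);
}
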
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index d807ee626af9..68384514506b 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c | |||
@@ -15,6 +15,9 @@ | |||
15 | along with this program; if not, write to the Free Software | 15 | along with this program; if not, write to the Free Software |
16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | */ | 17 | */ |
18 | |||
19 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
20 | |||
18 | #include <linux/module.h> | 21 | #include <linux/module.h> |
19 | #include <linux/elf.h> | 22 | #include <linux/elf.h> |
20 | #include <linux/moduleloader.h> | 23 | #include <linux/moduleloader.h> |
@@ -36,11 +39,6 @@ | |||
36 | Using a magic allocator which places modules within 32MB solves | 39 | Using a magic allocator which places modules within 32MB solves |
37 | this, and makes other things simpler. Anton? | 40 | this, and makes other things simpler. Anton? |
38 | --RR. */ | 41 | --RR. */ |
39 | #if 0 | ||
40 | #define DEBUGP printk | ||
41 | #else | ||
42 | #define DEBUGP(fmt , ...) | ||
43 | #endif | ||
44 | 42 | ||
45 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | 43 | #if defined(_CALL_ELF) && _CALL_ELF == 2 |
46 | #define R2_STACK_OFFSET 24 | 44 | #define R2_STACK_OFFSET 24 |
@@ -279,8 +277,8 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, | |||
279 | /* Every relocated section... */ | 277 | /* Every relocated section... */ |
280 | for (i = 1; i < hdr->e_shnum; i++) { | 278 | for (i = 1; i < hdr->e_shnum; i++) { |
281 | if (sechdrs[i].sh_type == SHT_RELA) { | 279 | if (sechdrs[i].sh_type == SHT_RELA) { |
282 | DEBUGP("Found relocations in section %u\n", i); | 280 | pr_debug("Found relocations in section %u\n", i); |
283 | DEBUGP("Ptr: %p. Number: %lu\n", | 281 | pr_debug("Ptr: %p. Number: %Lu\n", |
284 | (void *)sechdrs[i].sh_addr, | 282 | (void *)sechdrs[i].sh_addr, |
285 | sechdrs[i].sh_size / sizeof(Elf64_Rela)); | 283 | sechdrs[i].sh_size / sizeof(Elf64_Rela)); |
286 | 284 | ||
@@ -304,7 +302,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, | |||
304 | relocs++; | 302 | relocs++; |
305 | #endif | 303 | #endif |
306 | 304 | ||
307 | DEBUGP("Looks like a total of %lu stubs, max\n", relocs); | 305 | pr_debug("Looks like a total of %lu stubs, max\n", relocs); |
308 | return relocs * sizeof(struct ppc64_stub_entry); | 306 | return relocs * sizeof(struct ppc64_stub_entry); |
309 | } | 307 | } |
310 | 308 | ||
@@ -390,7 +388,7 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, | |||
390 | } | 388 | } |
391 | 389 | ||
392 | if (!me->arch.stubs_section) { | 390 | if (!me->arch.stubs_section) { |
393 | printk("%s: doesn't contain .stubs.\n", me->name); | 391 | pr_err("%s: doesn't contain .stubs.\n", me->name); |
394 | return -ENOEXEC; | 392 | return -ENOEXEC; |
395 | } | 393 | } |
396 | 394 | ||
@@ -434,11 +432,11 @@ static inline int create_stub(Elf64_Shdr *sechdrs, | |||
434 | /* Stub uses address relative to r2. */ | 432 | /* Stub uses address relative to r2. */ |
435 | reladdr = (unsigned long)entry - my_r2(sechdrs, me); | 433 | reladdr = (unsigned long)entry - my_r2(sechdrs, me); |
436 | if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { | 434 | if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { |
437 | printk("%s: Address %p of stub out of range of %p.\n", | 435 | pr_err("%s: Address %p of stub out of range of %p.\n", |
438 | me->name, (void *)reladdr, (void *)my_r2); | 436 | me->name, (void *)reladdr, (void *)my_r2); |
439 | return 0; | 437 | return 0; |
440 | } | 438 | } |
441 | DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr); | 439 | pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr); |
442 | 440 | ||
443 | entry->jump[0] |= PPC_HA(reladdr); | 441 | entry->jump[0] |= PPC_HA(reladdr); |
444 | entry->jump[1] |= PPC_LO(reladdr); | 442 | entry->jump[1] |= PPC_LO(reladdr); |
@@ -477,7 +475,7 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs, | |||
477 | static int restore_r2(u32 *instruction, struct module *me) | 475 | static int restore_r2(u32 *instruction, struct module *me) |
478 | { | 476 | { |
479 | if (*instruction != PPC_INST_NOP) { | 477 | if (*instruction != PPC_INST_NOP) { |
480 | printk("%s: Expect noop after relocate, got %08x\n", | 478 | pr_err("%s: Expect noop after relocate, got %08x\n", |
481 | me->name, *instruction); | 479 | me->name, *instruction); |
482 | return 0; | 480 | return 0; |
483 | } | 481 | } |
@@ -498,7 +496,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
498 | unsigned long *location; | 496 | unsigned long *location; |
499 | unsigned long value; | 497 | unsigned long value; |
500 | 498 | ||
501 | DEBUGP("Applying ADD relocate section %u to %u\n", relsec, | 499 | pr_debug("Applying ADD relocate section %u to %u\n", relsec, |
502 | sechdrs[relsec].sh_info); | 500 | sechdrs[relsec].sh_info); |
503 | 501 | ||
504 | /* First time we're called, we can fix up .TOC. */ | 502 | /* First time we're called, we can fix up .TOC. */ |
@@ -519,7 +517,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
519 | sym = (Elf64_Sym *)sechdrs[symindex].sh_addr | 517 | sym = (Elf64_Sym *)sechdrs[symindex].sh_addr |
520 | + ELF64_R_SYM(rela[i].r_info); | 518 | + ELF64_R_SYM(rela[i].r_info); |
521 | 519 | ||
522 | DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n", | 520 | pr_debug("RELOC at %p: %li-type as %s (0x%lx) + %li\n", |
523 | location, (long)ELF64_R_TYPE(rela[i].r_info), | 521 | location, (long)ELF64_R_TYPE(rela[i].r_info), |
524 | strtab + sym->st_name, (unsigned long)sym->st_value, | 522 | strtab + sym->st_name, (unsigned long)sym->st_value, |
525 | (long)rela[i].r_addend); | 523 | (long)rela[i].r_addend); |
@@ -546,7 +544,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
546 | /* Subtract TOC pointer */ | 544 | /* Subtract TOC pointer */ |
547 | value -= my_r2(sechdrs, me); | 545 | value -= my_r2(sechdrs, me); |
548 | if (value + 0x8000 > 0xffff) { | 546 | if (value + 0x8000 > 0xffff) { |
549 | printk("%s: bad TOC16 relocation (%lu)\n", | 547 | pr_err("%s: bad TOC16 relocation (0x%lx)\n", |
550 | me->name, value); | 548 | me->name, value); |
551 | return -ENOEXEC; | 549 | return -ENOEXEC; |
552 | } | 550 | } |
@@ -567,7 +565,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
567 | /* Subtract TOC pointer */ | 565 | /* Subtract TOC pointer */ |
568 | value -= my_r2(sechdrs, me); | 566 | value -= my_r2(sechdrs, me); |
569 | if ((value & 3) != 0 || value + 0x8000 > 0xffff) { | 567 | if ((value & 3) != 0 || value + 0x8000 > 0xffff) { |
570 | printk("%s: bad TOC16_DS relocation (%lu)\n", | 568 | pr_err("%s: bad TOC16_DS relocation (0x%lx)\n", |
571 | me->name, value); | 569 | me->name, value); |
572 | return -ENOEXEC; | 570 | return -ENOEXEC; |
573 | } | 571 | } |
@@ -580,7 +578,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
580 | /* Subtract TOC pointer */ | 578 | /* Subtract TOC pointer */ |
581 | value -= my_r2(sechdrs, me); | 579 | value -= my_r2(sechdrs, me); |
582 | if ((value & 3) != 0) { | 580 | if ((value & 3) != 0) { |
583 | printk("%s: bad TOC16_LO_DS relocation (%lu)\n", | 581 | pr_err("%s: bad TOC16_LO_DS relocation (0x%lx)\n", |
584 | me->name, value); | 582 | me->name, value); |
585 | return -ENOEXEC; | 583 | return -ENOEXEC; |
586 | } | 584 | } |
@@ -613,7 +611,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
613 | /* Convert value to relative */ | 611 | /* Convert value to relative */ |
614 | value -= (unsigned long)location; | 612 | value -= (unsigned long)location; |
615 | if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){ | 613 | if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0){ |
616 | printk("%s: REL24 %li out of range!\n", | 614 | pr_err("%s: REL24 %li out of range!\n", |
617 | me->name, (long int)value); | 615 | me->name, (long int)value); |
618 | return -ENOEXEC; | 616 | return -ENOEXEC; |
619 | } | 617 | } |
@@ -655,7 +653,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, | |||
655 | break; | 653 | break; |
656 | 654 | ||
657 | default: | 655 | default: |
658 | printk("%s: Unknown ADD relocation: %lu\n", | 656 | pr_err("%s: Unknown ADD relocation: %lu\n", |
659 | me->name, | 657 | me->name, |
660 | (unsigned long)ELF64_R_TYPE(rela[i].r_info)); | 658 | (unsigned long)ELF64_R_TYPE(rela[i].r_info)); |
661 | return -ENOEXEC; | 659 | return -ENOEXEC; |
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 28b898e68185..34f7c9b7cd96 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c | |||
@@ -567,7 +567,7 @@ static int __init nvram_init(void) | |||
567 | return rc; | 567 | return rc; |
568 | } | 568 | } |
569 | 569 | ||
570 | void __exit nvram_cleanup(void) | 570 | static void __exit nvram_cleanup(void) |
571 | { | 571 | { |
572 | misc_deregister( &nvram_dev ); | 572 | misc_deregister( &nvram_dev ); |
573 | } | 573 | } |
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index a7b743076720..f87bc1b4bdda 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c | |||
@@ -97,7 +97,7 @@ static int of_pci_phb_probe(struct platform_device *dev) | |||
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | static struct of_device_id of_pci_phb_ids[] = { | 100 | static const struct of_device_id of_pci_phb_ids[] = { |
101 | { .type = "pci", }, | 101 | { .type = "pci", }, |
102 | { .type = "pcix", }, | 102 | { .type = "pcix", }, |
103 | { .type = "pcie", }, | 103 | { .type = "pcie", }, |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index b2814e23e1ed..bd70a51d5747 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -1140,7 +1140,7 @@ static int reparent_resources(struct resource *parent, | |||
1140 | * as well. | 1140 | * as well. |
1141 | */ | 1141 | */ |
1142 | 1142 | ||
1143 | void pcibios_allocate_bus_resources(struct pci_bus *bus) | 1143 | static void pcibios_allocate_bus_resources(struct pci_bus *bus) |
1144 | { | 1144 | { |
1145 | struct pci_bus *b; | 1145 | struct pci_bus *b; |
1146 | int i; | 1146 | int i; |
@@ -1561,7 +1561,6 @@ EARLY_PCI_OP(write, byte, u8) | |||
1561 | EARLY_PCI_OP(write, word, u16) | 1561 | EARLY_PCI_OP(write, word, u16) |
1562 | EARLY_PCI_OP(write, dword, u32) | 1562 | EARLY_PCI_OP(write, dword, u32) |
1563 | 1563 | ||
1564 | extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap); | ||
1565 | int early_find_capability(struct pci_controller *hose, int bus, int devfn, | 1564 | int early_find_capability(struct pci_controller *hose, int bus, int devfn, |
1566 | int cap) | 1565 | int cap) |
1567 | { | 1566 | { |
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 44562aa97f16..e6245e9c7d8d 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -38,7 +38,7 @@ static u32 get_int_prop(struct device_node *np, const char *name, u32 def) | |||
38 | * @addr0: value of 1st cell of a device tree PCI address. | 38 | * @addr0: value of 1st cell of a device tree PCI address. |
39 | * @bridge: Set this flag if the address is from a bridge 'ranges' property | 39 | * @bridge: Set this flag if the address is from a bridge 'ranges' property |
40 | */ | 40 | */ |
41 | unsigned int pci_parse_of_flags(u32 addr0, int bridge) | 41 | static unsigned int pci_parse_of_flags(u32 addr0, int bridge) |
42 | { | 42 | { |
43 | unsigned int flags = 0; | 43 | unsigned int flags = 0; |
44 | 44 | ||
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 48d17d6fca5b..c4dfff6c2719 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -1,207 +1,41 @@ | |||
1 | #include <linux/export.h> | 1 | #include <linux/ftrace.h> |
2 | #include <linux/threads.h> | 2 | #include <linux/mm.h> |
3 | #include <linux/smp.h> | ||
4 | #include <linux/sched.h> | ||
5 | #include <linux/elfcore.h> | ||
6 | #include <linux/string.h> | ||
7 | #include <linux/interrupt.h> | ||
8 | #include <linux/screen_info.h> | ||
9 | #include <linux/vt_kern.h> | ||
10 | #include <linux/nvram.h> | ||
11 | #include <linux/irq.h> | ||
12 | #include <linux/pci.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/bitops.h> | ||
15 | 3 | ||
16 | #include <asm/page.h> | ||
17 | #include <asm/processor.h> | 4 | #include <asm/processor.h> |
18 | #include <asm/cacheflush.h> | ||
19 | #include <asm/uaccess.h> | ||
20 | #include <asm/io.h> | ||
21 | #include <linux/atomic.h> | ||
22 | #include <asm/checksum.h> | ||
23 | #include <asm/pgtable.h> | ||
24 | #include <asm/tlbflush.h> | ||
25 | #include <linux/adb.h> | ||
26 | #include <linux/cuda.h> | ||
27 | #include <linux/pmu.h> | ||
28 | #include <asm/prom.h> | ||
29 | #include <asm/pci-bridge.h> | ||
30 | #include <asm/irq.h> | ||
31 | #include <asm/pmac_feature.h> | ||
32 | #include <asm/dma.h> | ||
33 | #include <asm/machdep.h> | ||
34 | #include <asm/hw_irq.h> | ||
35 | #include <asm/nvram.h> | ||
36 | #include <asm/mmu_context.h> | ||
37 | #include <asm/backlight.h> | ||
38 | #include <asm/time.h> | ||
39 | #include <asm/cputable.h> | ||
40 | #include <asm/btext.h> | ||
41 | #include <asm/div64.h> | ||
42 | #include <asm/signal.h> | ||
43 | #include <asm/dcr.h> | ||
44 | #include <asm/ftrace.h> | ||
45 | #include <asm/switch_to.h> | 5 | #include <asm/switch_to.h> |
6 | #include <asm/cacheflush.h> | ||
46 | #include <asm/epapr_hcalls.h> | 7 | #include <asm/epapr_hcalls.h> |
47 | 8 | ||
48 | #ifdef CONFIG_PPC32 | 9 | EXPORT_SYMBOL(flush_dcache_range); |
49 | extern void transfer_to_handler(void); | 10 | EXPORT_SYMBOL(flush_icache_range); |
50 | extern void do_IRQ(struct pt_regs *regs); | ||
51 | extern void machine_check_exception(struct pt_regs *regs); | ||
52 | extern void alignment_exception(struct pt_regs *regs); | ||
53 | extern void program_check_exception(struct pt_regs *regs); | ||
54 | extern void single_step_exception(struct pt_regs *regs); | ||
55 | extern int sys_sigreturn(struct pt_regs *regs); | ||
56 | 11 | ||
57 | EXPORT_SYMBOL(clear_pages); | 12 | EXPORT_SYMBOL(empty_zero_page); |
58 | EXPORT_SYMBOL(ISA_DMA_THRESHOLD); | ||
59 | EXPORT_SYMBOL(DMA_MODE_READ); | ||
60 | EXPORT_SYMBOL(DMA_MODE_WRITE); | ||
61 | 13 | ||
62 | EXPORT_SYMBOL(transfer_to_handler); | 14 | long long __bswapdi2(long long); |
63 | EXPORT_SYMBOL(do_IRQ); | 15 | EXPORT_SYMBOL(__bswapdi2); |
64 | EXPORT_SYMBOL(machine_check_exception); | ||
65 | EXPORT_SYMBOL(alignment_exception); | ||
66 | EXPORT_SYMBOL(program_check_exception); | ||
67 | EXPORT_SYMBOL(single_step_exception); | ||
68 | EXPORT_SYMBOL(sys_sigreturn); | ||
69 | #endif | ||
70 | 16 | ||
71 | #ifdef CONFIG_FUNCTION_TRACER | 17 | #ifdef CONFIG_FUNCTION_TRACER |
72 | EXPORT_SYMBOL(_mcount); | 18 | EXPORT_SYMBOL(_mcount); |
73 | #endif | 19 | #endif |
74 | 20 | ||
75 | EXPORT_SYMBOL(strcpy); | ||
76 | EXPORT_SYMBOL(strncpy); | ||
77 | EXPORT_SYMBOL(strcat); | ||
78 | EXPORT_SYMBOL(strlen); | ||
79 | EXPORT_SYMBOL(strcmp); | ||
80 | EXPORT_SYMBOL(strncmp); | ||
81 | |||
82 | #ifndef CONFIG_GENERIC_CSUM | ||
83 | EXPORT_SYMBOL(csum_partial); | ||
84 | EXPORT_SYMBOL(csum_partial_copy_generic); | ||
85 | EXPORT_SYMBOL(ip_fast_csum); | ||
86 | EXPORT_SYMBOL(csum_tcpudp_magic); | ||
87 | #endif | ||
88 | |||
89 | EXPORT_SYMBOL(__copy_tofrom_user); | ||
90 | EXPORT_SYMBOL(__clear_user); | ||
91 | EXPORT_SYMBOL(copy_page); | ||
92 | |||
93 | #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) | ||
94 | EXPORT_SYMBOL(isa_io_base); | ||
95 | EXPORT_SYMBOL(isa_mem_base); | ||
96 | EXPORT_SYMBOL(pci_dram_offset); | ||
97 | #endif /* CONFIG_PCI */ | ||
98 | |||
99 | EXPORT_SYMBOL(start_thread); | ||
100 | |||
101 | #ifdef CONFIG_PPC_FPU | 21 | #ifdef CONFIG_PPC_FPU |
102 | EXPORT_SYMBOL(giveup_fpu); | 22 | EXPORT_SYMBOL(giveup_fpu); |
103 | EXPORT_SYMBOL(load_fp_state); | 23 | EXPORT_SYMBOL(load_fp_state); |
104 | EXPORT_SYMBOL(store_fp_state); | 24 | EXPORT_SYMBOL(store_fp_state); |
105 | #endif | 25 | #endif |
26 | |||
106 | #ifdef CONFIG_ALTIVEC | 27 | #ifdef CONFIG_ALTIVEC |
107 | EXPORT_SYMBOL(giveup_altivec); | 28 | EXPORT_SYMBOL(giveup_altivec); |
108 | EXPORT_SYMBOL(load_vr_state); | 29 | EXPORT_SYMBOL(load_vr_state); |
109 | EXPORT_SYMBOL(store_vr_state); | 30 | EXPORT_SYMBOL(store_vr_state); |
110 | #endif /* CONFIG_ALTIVEC */ | ||
111 | #ifdef CONFIG_VSX | ||
112 | EXPORT_SYMBOL(giveup_vsx); | ||
113 | EXPORT_SYMBOL_GPL(__giveup_vsx); | ||
114 | #endif /* CONFIG_VSX */ | ||
115 | #ifdef CONFIG_SPE | ||
116 | EXPORT_SYMBOL(giveup_spe); | ||
117 | #endif /* CONFIG_SPE */ | ||
118 | |||
119 | #ifndef CONFIG_PPC64 | ||
120 | EXPORT_SYMBOL(flush_instruction_cache); | ||
121 | #endif | 31 | #endif |
122 | EXPORT_SYMBOL(flush_dcache_range); | ||
123 | EXPORT_SYMBOL(flush_icache_range); | ||
124 | 32 | ||
125 | #ifdef CONFIG_SMP | 33 | #ifdef CONFIG_VSX |
126 | #ifdef CONFIG_PPC32 | 34 | EXPORT_SYMBOL_GPL(__giveup_vsx); |
127 | EXPORT_SYMBOL(smp_hw_index); | ||
128 | #endif | ||
129 | #endif | ||
130 | |||
131 | #ifdef CONFIG_ADB | ||
132 | EXPORT_SYMBOL(adb_request); | ||
133 | EXPORT_SYMBOL(adb_register); | ||
134 | EXPORT_SYMBOL(adb_unregister); | ||
135 | EXPORT_SYMBOL(adb_poll); | ||
136 | EXPORT_SYMBOL(adb_try_handler_change); | ||
137 | #endif /* CONFIG_ADB */ | ||
138 | #ifdef CONFIG_ADB_CUDA | ||
139 | EXPORT_SYMBOL(cuda_request); | ||
140 | EXPORT_SYMBOL(cuda_poll); | ||
141 | #endif /* CONFIG_ADB_CUDA */ | ||
142 | EXPORT_SYMBOL(to_tm); | ||
143 | |||
144 | #ifdef CONFIG_PPC32 | ||
145 | long long __ashrdi3(long long, int); | ||
146 | long long __ashldi3(long long, int); | ||
147 | long long __lshrdi3(long long, int); | ||
148 | EXPORT_SYMBOL(__ashrdi3); | ||
149 | EXPORT_SYMBOL(__ashldi3); | ||
150 | EXPORT_SYMBOL(__lshrdi3); | ||
151 | int __ucmpdi2(unsigned long long, unsigned long long); | ||
152 | EXPORT_SYMBOL(__ucmpdi2); | ||
153 | int __cmpdi2(long long, long long); | ||
154 | EXPORT_SYMBOL(__cmpdi2); | ||
155 | #endif | ||
156 | long long __bswapdi2(long long); | ||
157 | EXPORT_SYMBOL(__bswapdi2); | ||
158 | EXPORT_SYMBOL(memcpy); | ||
159 | EXPORT_SYMBOL(memset); | ||
160 | EXPORT_SYMBOL(memmove); | ||
161 | EXPORT_SYMBOL(memcmp); | ||
162 | EXPORT_SYMBOL(memchr); | ||
163 | |||
164 | #if defined(CONFIG_FB_VGA16_MODULE) | ||
165 | EXPORT_SYMBOL(screen_info); | ||
166 | #endif | ||
167 | |||
168 | #ifdef CONFIG_PPC32 | ||
169 | EXPORT_SYMBOL(timer_interrupt); | ||
170 | EXPORT_SYMBOL(tb_ticks_per_jiffy); | ||
171 | EXPORT_SYMBOL(cacheable_memcpy); | ||
172 | EXPORT_SYMBOL(cacheable_memzero); | ||
173 | #endif | ||
174 | |||
175 | #ifdef CONFIG_PPC32 | ||
176 | EXPORT_SYMBOL(switch_mmu_context); | ||
177 | #endif | ||
178 | |||
179 | #ifdef CONFIG_PPC_STD_MMU_32 | ||
180 | extern long mol_trampoline; | ||
181 | EXPORT_SYMBOL(mol_trampoline); /* For MOL */ | ||
182 | EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ | ||
183 | #ifdef CONFIG_SMP | ||
184 | extern int mmu_hash_lock; | ||
185 | EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ | ||
186 | #endif /* CONFIG_SMP */ | ||
187 | extern long *intercept_table; | ||
188 | EXPORT_SYMBOL(intercept_table); | ||
189 | #endif /* CONFIG_PPC_STD_MMU_32 */ | ||
190 | #ifdef CONFIG_PPC_DCR_NATIVE | ||
191 | EXPORT_SYMBOL(__mtdcr); | ||
192 | EXPORT_SYMBOL(__mfdcr); | ||
193 | #endif | ||
194 | EXPORT_SYMBOL(empty_zero_page); | ||
195 | |||
196 | #ifdef CONFIG_PPC64 | ||
197 | EXPORT_SYMBOL(__arch_hweight8); | ||
198 | EXPORT_SYMBOL(__arch_hweight16); | ||
199 | EXPORT_SYMBOL(__arch_hweight32); | ||
200 | EXPORT_SYMBOL(__arch_hweight64); | ||
201 | #endif | 35 | #endif |
202 | 36 | ||
203 | #ifdef CONFIG_PPC_BOOK3S_64 | 37 | #ifdef CONFIG_SPE |
204 | EXPORT_SYMBOL_GPL(mmu_psize_defs); | 38 | EXPORT_SYMBOL(giveup_spe); |
205 | #endif | 39 | #endif |
206 | 40 | ||
207 | #ifdef CONFIG_EPAPR_PARAVIRT | 41 | #ifdef CONFIG_EPAPR_PARAVIRT |
diff --git a/arch/powerpc/kernel/ppc_ksyms_32.c b/arch/powerpc/kernel/ppc_ksyms_32.c new file mode 100644 index 000000000000..30ddd8a24eee --- /dev/null +++ b/arch/powerpc/kernel/ppc_ksyms_32.c | |||
@@ -0,0 +1,61 @@ | |||
1 | #include <linux/export.h> | ||
2 | #include <linux/smp.h> | ||
3 | |||
4 | #include <asm/page.h> | ||
5 | #include <asm/dma.h> | ||
6 | #include <asm/io.h> | ||
7 | #include <asm/hw_irq.h> | ||
8 | #include <asm/time.h> | ||
9 | #include <asm/mmu_context.h> | ||
10 | #include <asm/pgtable.h> | ||
11 | #include <asm/dcr.h> | ||
12 | |||
13 | EXPORT_SYMBOL(clear_pages); | ||
14 | EXPORT_SYMBOL(ISA_DMA_THRESHOLD); | ||
15 | EXPORT_SYMBOL(DMA_MODE_READ); | ||
16 | EXPORT_SYMBOL(DMA_MODE_WRITE); | ||
17 | |||
18 | #if defined(CONFIG_PCI) | ||
19 | EXPORT_SYMBOL(isa_io_base); | ||
20 | EXPORT_SYMBOL(isa_mem_base); | ||
21 | EXPORT_SYMBOL(pci_dram_offset); | ||
22 | #endif | ||
23 | |||
24 | #ifdef CONFIG_SMP | ||
25 | EXPORT_SYMBOL(smp_hw_index); | ||
26 | #endif | ||
27 | |||
28 | long long __ashrdi3(long long, int); | ||
29 | long long __ashldi3(long long, int); | ||
30 | long long __lshrdi3(long long, int); | ||
31 | int __ucmpdi2(unsigned long long, unsigned long long); | ||
32 | int __cmpdi2(long long, long long); | ||
33 | EXPORT_SYMBOL(__ashrdi3); | ||
34 | EXPORT_SYMBOL(__ashldi3); | ||
35 | EXPORT_SYMBOL(__lshrdi3); | ||
36 | EXPORT_SYMBOL(__ucmpdi2); | ||
37 | EXPORT_SYMBOL(__cmpdi2); | ||
38 | |||
39 | EXPORT_SYMBOL(timer_interrupt); | ||
40 | EXPORT_SYMBOL(tb_ticks_per_jiffy); | ||
41 | |||
42 | EXPORT_SYMBOL(switch_mmu_context); | ||
43 | |||
44 | #ifdef CONFIG_PPC_STD_MMU_32 | ||
45 | extern long mol_trampoline; | ||
46 | EXPORT_SYMBOL(mol_trampoline); /* For MOL */ | ||
47 | EXPORT_SYMBOL(flush_hash_pages); /* For MOL */ | ||
48 | #ifdef CONFIG_SMP | ||
49 | extern int mmu_hash_lock; | ||
50 | EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */ | ||
51 | #endif /* CONFIG_SMP */ | ||
52 | extern long *intercept_table; | ||
53 | EXPORT_SYMBOL(intercept_table); | ||
54 | #endif /* CONFIG_PPC_STD_MMU_32 */ | ||
55 | |||
56 | #ifdef CONFIG_PPC_DCR_NATIVE | ||
57 | EXPORT_SYMBOL(__mtdcr); | ||
58 | EXPORT_SYMBOL(__mfdcr); | ||
59 | #endif | ||
60 | |||
61 | EXPORT_SYMBOL(flush_instruction_cache); | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index bf44ae962ab8..aa1df89c8b2a 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -228,6 +228,7 @@ void giveup_vsx(struct task_struct *tsk) | |||
228 | giveup_altivec_maybe_transactional(tsk); | 228 | giveup_altivec_maybe_transactional(tsk); |
229 | __giveup_vsx(tsk); | 229 | __giveup_vsx(tsk); |
230 | } | 230 | } |
231 | EXPORT_SYMBOL(giveup_vsx); | ||
231 | 232 | ||
232 | void flush_vsx_to_thread(struct task_struct *tsk) | 233 | void flush_vsx_to_thread(struct task_struct *tsk) |
233 | { | 234 | { |
@@ -1316,6 +1317,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) | |||
1316 | current->thread.tm_tfiar = 0; | 1317 | current->thread.tm_tfiar = 0; |
1317 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 1318 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
1318 | } | 1319 | } |
1320 | EXPORT_SYMBOL(start_thread); | ||
1319 | 1321 | ||
1320 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ | 1322 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ |
1321 | | PR_FP_EXC_RES | PR_FP_EXC_INV) | 1323 | | PR_FP_EXC_RES | PR_FP_EXC_INV) |
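
These two one-line additions are part of the ppc_ksyms.c diet above: most EXPORT_SYMBOL() lines move next to the functions they export (giveup_vsx and start_thread here, to_tm in time.c below), which is the preferred kernel style. For an assumed function:

#include <linux/export.h>

void example_helper(void)
{
	/* ... */
}
EXPORT_SYMBOL(example_helper);	/* export sits directly after the definition */
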
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 4e139f8a69ef..099f27e6d1b0 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -386,8 +386,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
386 | return 0; | 386 | return 0; |
387 | } | 387 | } |
388 | 388 | ||
389 | int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname, | 389 | static int __init early_init_dt_scan_chosen_ppc(unsigned long node, |
390 | int depth, void *data) | 390 | const char *uname, |
391 | int depth, void *data) | ||
391 | { | 392 | { |
392 | const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */ | 393 | const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */ |
393 | 394 | ||
@@ -641,6 +642,10 @@ void __init early_init_devtree(void *params) | |||
641 | 642 | ||
642 | DBG(" -> early_init_devtree(%p)\n", params); | 643 | DBG(" -> early_init_devtree(%p)\n", params); |
643 | 644 | ||
645 | /* Too early to BUG_ON(), do it by hand */ | ||
646 | if (!early_init_dt_verify(params)) | ||
647 | panic("BUG: Failed verifying flat device tree, bad version?"); | ||
648 | |||
644 | /* Setup flat device-tree pointer */ | 649 | /* Setup flat device-tree pointer */ |
645 | initial_boot_params = params; | 650 | initial_boot_params = params; |
646 | 651 | ||
@@ -663,14 +668,12 @@ void __init early_init_devtree(void *params) | |||
663 | * device-tree, including the platform type, initrd location and | 668 | * device-tree, including the platform type, initrd location and |
664 | * size, TCE reserve, and more ... | 669 | * size, TCE reserve, and more ... |
665 | */ | 670 | */ |
666 | of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line); | 671 | of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line); |
667 | 672 | ||
668 | /* Scan memory nodes and rebuild MEMBLOCKs */ | 673 | /* Scan memory nodes and rebuild MEMBLOCKs */ |
669 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | 674 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
670 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); | 675 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); |
671 | 676 | ||
672 | /* Save command line for /proc/cmdline and then parse parameters */ | ||
673 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); | ||
674 | parse_early_param(); | 677 | parse_early_param(); |
675 | 678 | ||
676 | /* make sure we've parsed cmdline for mem= before this */ | 679 | /* make sure we've parsed cmdline for mem= before this */ |
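
Two related cleanups meet in prom.c: the flat device tree is verified with early_init_dt_verify() before anything walks it, and the powerpc-private cmd_line buffer disappears in favour of the generic boot_command_line (setup-common.c, setup_32.c and setup_64.c below follow suit). A condensed sketch of the resulting ordering, not the full function:

void __init early_init_devtree(void *params)
{
	/* Too early to BUG_ON(), so panic by hand on a bad FDT header. */
	if (!early_init_dt_verify(params))
		panic("BUG: Failed verifying flat device tree, bad version?");

	initial_boot_params = params;

	/* /chosen is parsed straight into the generic boot_command_line, */
	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
	/* ... so the old strlcpy() into a private cmd_line is unnecessary. */
	parse_early_param();
}
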
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index fe8e54b9ef7d..12640f7e726b 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh | |||
@@ -50,24 +50,14 @@ do | |||
50 | done | 50 | done |
51 | 51 | ||
52 | # ignore register save/restore funcitons | 52 | # ignore register save/restore funcitons |
53 | if [ "${UNDEF:0:9}" = "_restgpr_" ]; then | 53 | case $UNDEF in |
54 | _restgpr_*|_restgpr0_*|_rest32gpr_*) | ||
54 | OK=1 | 55 | OK=1 |
55 | fi | 56 | ;; |
56 | if [ "${UNDEF:0:10}" = "_restgpr0_" ]; then | 57 | _savegpr_*|_savegpr0_*|_save32gpr_*) |
57 | OK=1 | ||
58 | fi | ||
59 | if [ "${UNDEF:0:11}" = "_rest32gpr_" ]; then | ||
60 | OK=1 | ||
61 | fi | ||
62 | if [ "${UNDEF:0:9}" = "_savegpr_" ]; then | ||
63 | OK=1 | 58 | OK=1 |
64 | fi | 59 | ;; |
65 | if [ "${UNDEF:0:10}" = "_savegpr0_" ]; then | 60 | esac |
66 | OK=1 | ||
67 | fi | ||
68 | if [ "${UNDEF:0:11}" = "_save32gpr_" ]; then | ||
69 | OK=1 | ||
70 | fi | ||
71 | 61 | ||
72 | if [ $OK -eq 0 ]; then | 62 | if [ $OK -eq 0 ]; then |
73 | ERROR=1 | 63 | ERROR=1 |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 2e3d2bf536c5..cdb404ea3468 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -932,7 +932,7 @@ void ptrace_triggered(struct perf_event *bp, | |||
932 | } | 932 | } |
933 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 933 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
934 | 934 | ||
935 | int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | 935 | static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, |
936 | unsigned long data) | 936 | unsigned long data) |
937 | { | 937 | { |
938 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 938 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index e736387fee6a..5a2c049c1c61 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c | |||
@@ -286,7 +286,7 @@ static void prrn_work_fn(struct work_struct *work) | |||
286 | 286 | ||
287 | static DECLARE_WORK(prrn_work, prrn_work_fn); | 287 | static DECLARE_WORK(prrn_work, prrn_work_fn); |
288 | 288 | ||
289 | void prrn_schedule_update(u32 scope) | 289 | static void prrn_schedule_update(u32 scope) |
290 | { | 290 | { |
291 | flush_work(&prrn_work); | 291 | flush_work(&prrn_work); |
292 | prrn_update_scope = scope; | 292 | prrn_update_scope = scope; |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 1b0e26013a62..1362cd62b3fa 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -81,8 +81,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid); | |||
81 | 81 | ||
82 | unsigned long klimit = (unsigned long) _end; | 82 | unsigned long klimit = (unsigned long) _end; |
83 | 83 | ||
84 | char cmd_line[COMMAND_LINE_SIZE]; | ||
85 | |||
86 | /* | 84 | /* |
87 | * This still seems to be needed... -- paulus | 85 | * This still seems to be needed... -- paulus |
88 | */ | 86 | */ |
@@ -94,6 +92,9 @@ struct screen_info screen_info = { | |||
94 | .orig_video_isVGA = 1, | 92 | .orig_video_isVGA = 1, |
95 | .orig_video_points = 16 | 93 | .orig_video_points = 16 |
96 | }; | 94 | }; |
95 | #if defined(CONFIG_FB_VGA16_MODULE) | ||
96 | EXPORT_SYMBOL(screen_info); | ||
97 | #endif | ||
97 | 98 | ||
98 | /* Variables required to store legacy IO irq routing */ | 99 | /* Variables required to store legacy IO irq routing */ |
99 | int of_i8042_kbd_irq; | 100 | int of_i8042_kbd_irq; |
@@ -382,7 +383,7 @@ void __init check_for_initrd(void) | |||
382 | initrd_start = initrd_end = 0; | 383 | initrd_start = initrd_end = 0; |
383 | 384 | ||
384 | if (initrd_start) | 385 | if (initrd_start) |
385 | printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end); | 386 | pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end); |
386 | 387 | ||
387 | DBG(" <- check_for_initrd()\n"); | 388 | DBG(" <- check_for_initrd()\n"); |
388 | #endif /* CONFIG_BLK_DEV_INITRD */ | 389 | #endif /* CONFIG_BLK_DEV_INITRD */ |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index ea4fda60e57b..07831ed0d9ef 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -268,7 +268,7 @@ static void __init exc_lvl_early_init(void) | |||
268 | /* Warning, IO base is not yet inited */ | 268 | /* Warning, IO base is not yet inited */ |
269 | void __init setup_arch(char **cmdline_p) | 269 | void __init setup_arch(char **cmdline_p) |
270 | { | 270 | { |
271 | *cmdline_p = cmd_line; | 271 | *cmdline_p = boot_command_line; |
272 | 272 | ||
273 | /* so udelay does something sensible, assume <= 1000 bogomips */ | 273 | /* so udelay does something sensible, assume <= 1000 bogomips */ |
274 | loops_per_jiffy = 500000000 / HZ; | 274 | loops_per_jiffy = 500000000 / HZ; |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 75d62d63fe68..cd07d79ad21c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -525,21 +525,31 @@ void __init setup_system(void) | |||
525 | printk("Starting Linux PPC64 %s\n", init_utsname()->version); | 525 | printk("Starting Linux PPC64 %s\n", init_utsname()->version); |
526 | 526 | ||
527 | printk("-----------------------------------------------------\n"); | 527 | printk("-----------------------------------------------------\n"); |
528 | printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); | 528 | printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); |
529 | printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size()); | 529 | printk("phys_mem_size = 0x%llx\n", memblock_phys_mem_size()); |
530 | |||
530 | if (ppc64_caches.dline_size != 0x80) | 531 | if (ppc64_caches.dline_size != 0x80) |
531 | printk("ppc64_caches.dcache_line_size = 0x%x\n", | 532 | printk("dcache_line_size = 0x%x\n", ppc64_caches.dline_size); |
532 | ppc64_caches.dline_size); | ||
533 | if (ppc64_caches.iline_size != 0x80) | 533 | if (ppc64_caches.iline_size != 0x80) |
534 | printk("ppc64_caches.icache_line_size = 0x%x\n", | 534 | printk("icache_line_size = 0x%x\n", ppc64_caches.iline_size); |
535 | ppc64_caches.iline_size); | 535 | |
536 | printk("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features); | ||
537 | printk(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE); | ||
538 | printk(" always = 0x%016lx\n", CPU_FTRS_ALWAYS); | ||
539 | printk("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features, | ||
540 | cur_cpu_spec->cpu_user_features2); | ||
541 | printk("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features); | ||
542 | printk("firmware_features = 0x%016lx\n", powerpc_firmware_features); | ||
543 | |||
536 | #ifdef CONFIG_PPC_STD_MMU_64 | 544 | #ifdef CONFIG_PPC_STD_MMU_64 |
537 | if (htab_address) | 545 | if (htab_address) |
538 | printk("htab_address = 0x%p\n", htab_address); | 546 | printk("htab_address = 0x%p\n", htab_address); |
539 | printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); | 547 | |
540 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 548 | printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); |
549 | #endif | ||
550 | |||
541 | if (PHYSICAL_START > 0) | 551 | if (PHYSICAL_START > 0) |
542 | printk("physical_start = 0x%llx\n", | 552 | printk("physical_start = 0x%llx\n", |
543 | (unsigned long long)PHYSICAL_START); | 553 | (unsigned long long)PHYSICAL_START); |
544 | printk("-----------------------------------------------------\n"); | 554 | printk("-----------------------------------------------------\n"); |
545 | 555 | ||
@@ -657,7 +667,7 @@ void __init setup_arch(char **cmdline_p) | |||
657 | { | 667 | { |
658 | ppc64_boot_msg(0x12, "Setup Arch"); | 668 | ppc64_boot_msg(0x12, "Setup Arch"); |
659 | 669 | ||
660 | *cmdline_p = cmd_line; | 670 | *cmdline_p = boot_command_line; |
661 | 671 | ||
662 | /* | 672 | /* |
663 | * Set cache line size based on type of cpu as a default. | 673 | * Set cache line size based on type of cpu as a default. |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a0738af4aba6..71e186d5f331 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #endif | 52 | #endif |
53 | #include <asm/vdso.h> | 53 | #include <asm/vdso.h> |
54 | #include <asm/debug.h> | 54 | #include <asm/debug.h> |
55 | #include <asm/kexec.h> | ||
55 | 56 | ||
56 | #ifdef DEBUG | 57 | #ifdef DEBUG |
57 | #include <asm/udbg.h> | 58 | #include <asm/udbg.h> |
@@ -379,8 +380,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
379 | /* | 380 | /* |
380 | * numa_node_id() works after this. | 381 | * numa_node_id() works after this. |
381 | */ | 382 | */ |
382 | set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); | 383 | if (cpu_present(cpu)) { |
383 | set_cpu_numa_mem(cpu, local_memory_node(numa_cpu_lookup_table[cpu])); | 384 | set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); |
385 | set_cpu_numa_mem(cpu, | ||
386 | local_memory_node(numa_cpu_lookup_table[cpu])); | ||
387 | } | ||
384 | } | 388 | } |
385 | 389 | ||
386 | cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); | 390 | cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); |
@@ -728,6 +732,9 @@ void start_secondary(void *unused) | |||
728 | } | 732 | } |
729 | traverse_core_siblings(cpu, true); | 733 | traverse_core_siblings(cpu, true); |
730 | 734 | ||
735 | set_numa_node(numa_cpu_lookup_table[cpu]); | ||
736 | set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); | ||
737 | |||
731 | smp_wmb(); | 738 | smp_wmb(); |
732 | notify_cpu_starting(cpu); | 739 | notify_cpu_starting(cpu); |
733 | set_cpu_online(cpu, true); | 740 | set_cpu_online(cpu, true); |
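
The smp.c hunks restrict the boot-time NUMA setup to CPUs that are actually present, presumably because numa_cpu_lookup_table holds no valid node for possible-but-absent CPUs; each secondary now repeats the setup for itself in start_secondary(). The boot-time side amounts to:

/* Editor's sketch of the smp_prepare_cpus() loop after this change. */
for_each_possible_cpu(cpu) {
	if (cpu_present(cpu)) {
		set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
		set_cpu_numa_mem(cpu,
			local_memory_node(numa_cpu_lookup_table[cpu]));
	}
}
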
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 368ab374d33c..7505599c2593 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -479,7 +479,7 @@ void arch_irq_work_raise(void) | |||
479 | 479 | ||
480 | #endif /* CONFIG_IRQ_WORK */ | 480 | #endif /* CONFIG_IRQ_WORK */ |
481 | 481 | ||
482 | void __timer_interrupt(void) | 482 | static void __timer_interrupt(void) |
483 | { | 483 | { |
484 | struct pt_regs *regs = get_irq_regs(); | 484 | struct pt_regs *regs = get_irq_regs(); |
485 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); | 485 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); |
@@ -643,7 +643,7 @@ static int __init get_freq(char *name, int cells, unsigned long *val) | |||
643 | return found; | 643 | return found; |
644 | } | 644 | } |
645 | 645 | ||
646 | void start_cpu_decrementer(void) | 646 | static void start_cpu_decrementer(void) |
647 | { | 647 | { |
648 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | 648 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) |
649 | /* Clear any pending timer interrupts */ | 649 | /* Clear any pending timer interrupts */ |
@@ -1024,6 +1024,7 @@ void to_tm(int tim, struct rtc_time * tm) | |||
1024 | */ | 1024 | */ |
1025 | GregorianDay(tm); | 1025 | GregorianDay(tm); |
1026 | } | 1026 | } |
1027 | EXPORT_SYMBOL(to_tm); | ||
1027 | 1028 | ||
1028 | /* | 1029 | /* |
1029 | * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit | 1030 | * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit |
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 59fa2de9546d..9f342f134ae4 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile | |||
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_code-patching.o = -pg | |||
10 | CFLAGS_REMOVE_feature-fixups.o = -pg | 10 | CFLAGS_REMOVE_feature-fixups.o = -pg |
11 | 11 | ||
12 | obj-y := string.o alloc.o \ | 12 | obj-y := string.o alloc.o \ |
13 | crtsavres.o | 13 | crtsavres.o ppc_ksyms.o |
14 | obj-$(CONFIG_PPC32) += div64.o copy_32.o | 14 | obj-$(CONFIG_PPC32) += div64.o copy_32.o |
15 | obj-$(CONFIG_HAS_IOMEM) += devres.o | 15 | obj-$(CONFIG_HAS_IOMEM) += devres.o |
16 | 16 | ||
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 7a8a7487cee8..7ce3870d7ddd 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c | |||
@@ -164,7 +164,7 @@ static long calc_offset(struct fixup_entry *entry, unsigned int *p) | |||
164 | return (unsigned long)p - (unsigned long)entry; | 164 | return (unsigned long)p - (unsigned long)entry; |
165 | } | 165 | } |
166 | 166 | ||
167 | void test_basic_patching(void) | 167 | static void test_basic_patching(void) |
168 | { | 168 | { |
169 | extern unsigned int ftr_fixup_test1; | 169 | extern unsigned int ftr_fixup_test1; |
170 | extern unsigned int end_ftr_fixup_test1; | 170 | extern unsigned int end_ftr_fixup_test1; |
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c new file mode 100644 index 000000000000..f993959647b5 --- /dev/null +++ b/arch/powerpc/lib/ppc_ksyms.c | |||
@@ -0,0 +1,39 @@ | |||
1 | #include <linux/string.h> | ||
2 | #include <linux/uaccess.h> | ||
3 | #include <linux/bitops.h> | ||
4 | #include <net/checksum.h> | ||
5 | |||
6 | EXPORT_SYMBOL(memcpy); | ||
7 | EXPORT_SYMBOL(memset); | ||
8 | EXPORT_SYMBOL(memmove); | ||
9 | EXPORT_SYMBOL(memcmp); | ||
10 | EXPORT_SYMBOL(memchr); | ||
11 | #ifdef CONFIG_PPC32 | ||
12 | EXPORT_SYMBOL(cacheable_memcpy); | ||
13 | EXPORT_SYMBOL(cacheable_memzero); | ||
14 | #endif | ||
15 | |||
16 | EXPORT_SYMBOL(strcpy); | ||
17 | EXPORT_SYMBOL(strncpy); | ||
18 | EXPORT_SYMBOL(strcat); | ||
19 | EXPORT_SYMBOL(strlen); | ||
20 | EXPORT_SYMBOL(strcmp); | ||
21 | EXPORT_SYMBOL(strncmp); | ||
22 | |||
23 | #ifndef CONFIG_GENERIC_CSUM | ||
24 | EXPORT_SYMBOL(csum_partial); | ||
25 | EXPORT_SYMBOL(csum_partial_copy_generic); | ||
26 | EXPORT_SYMBOL(ip_fast_csum); | ||
27 | EXPORT_SYMBOL(csum_tcpudp_magic); | ||
28 | #endif | ||
29 | |||
30 | EXPORT_SYMBOL(__copy_tofrom_user); | ||
31 | EXPORT_SYMBOL(__clear_user); | ||
32 | EXPORT_SYMBOL(copy_page); | ||
33 | |||
34 | #ifdef CONFIG_PPC64 | ||
35 | EXPORT_SYMBOL(__arch_hweight8); | ||
36 | EXPORT_SYMBOL(__arch_hweight16); | ||
37 | EXPORT_SYMBOL(__arch_hweight32); | ||
38 | EXPORT_SYMBOL(__arch_hweight64); | ||
39 | #endif | ||
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 5c09f365c842..54651fc2d412 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c | |||
@@ -98,13 +98,8 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs | |||
98 | 98 | ||
99 | ra = (instr >> 16) & 0x1f; | 99 | ra = (instr >> 16) & 0x1f; |
100 | ea = (signed short) instr; /* sign-extend */ | 100 | ea = (signed short) instr; /* sign-extend */ |
101 | if (ra) { | 101 | if (ra) |
102 | ea += regs->gpr[ra]; | 102 | ea += regs->gpr[ra]; |
103 | if (instr & 0x04000000) { /* update forms */ | ||
104 | if ((instr>>26) != 47) /* stmw is not an update form */ | ||
105 | regs->gpr[ra] = ea; | ||
106 | } | ||
107 | } | ||
108 | 103 | ||
109 | return truncate_if_32bit(regs->msr, ea); | 104 | return truncate_if_32bit(regs->msr, ea); |
110 | } | 105 | } |
@@ -120,11 +115,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg | |||
120 | 115 | ||
121 | ra = (instr >> 16) & 0x1f; | 116 | ra = (instr >> 16) & 0x1f; |
122 | ea = (signed short) (instr & ~3); /* sign-extend */ | 117 | ea = (signed short) (instr & ~3); /* sign-extend */ |
123 | if (ra) { | 118 | if (ra) |
124 | ea += regs->gpr[ra]; | 119 | ea += regs->gpr[ra]; |
125 | if ((instr & 3) == 1) /* update forms */ | ||
126 | regs->gpr[ra] = ea; | ||
127 | } | ||
128 | 120 | ||
129 | return truncate_if_32bit(regs->msr, ea); | 121 | return truncate_if_32bit(regs->msr, ea); |
130 | } | 122 | } |
@@ -133,8 +125,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg | |||
133 | /* | 125 | /* |
134 | * Calculate effective address for an X-form instruction | 126 | * Calculate effective address for an X-form instruction |
135 | */ | 127 | */ |
136 | static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs, | 128 | static unsigned long __kprobes xform_ea(unsigned int instr, |
137 | int do_update) | 129 | struct pt_regs *regs) |
138 | { | 130 | { |
139 | int ra, rb; | 131 | int ra, rb; |
140 | unsigned long ea; | 132 | unsigned long ea; |
@@ -142,11 +134,8 @@ static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs | |||
142 | ra = (instr >> 16) & 0x1f; | 134 | ra = (instr >> 16) & 0x1f; |
143 | rb = (instr >> 11) & 0x1f; | 135 | rb = (instr >> 11) & 0x1f; |
144 | ea = regs->gpr[rb]; | 136 | ea = regs->gpr[rb]; |
145 | if (ra) { | 137 | if (ra) |
146 | ea += regs->gpr[ra]; | 138 | ea += regs->gpr[ra]; |
147 | if (do_update) /* update forms */ | ||
148 | regs->gpr[ra] = ea; | ||
149 | } | ||
150 | 139 | ||
151 | return truncate_if_32bit(regs->msr, ea); | 140 | return truncate_if_32bit(regs->msr, ea); |
152 | } | 141 | } |
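
dform_ea(), dsform_ea() and xform_ea() stop writing the updated address back into GPR[RA], so effective-address calculation becomes a pure function; that matters once analyse_instr() (below) may be called just to decode an instruction. Presumably the update-form write-back now happens in the caller only after the access succeeds, roughly like this (the helper names here are invented for illustration):

unsigned long ea = dform_ea(instr, regs);	/* no side effects now */

if (perform_access(ea) == 0 && is_update_form(instr) && ra != 0)
	regs->gpr[ra] = ea;	/* commit the RA update only on success */
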
@@ -611,6 +600,23 @@ static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1, | |||
611 | regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift); | 600 | regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift); |
612 | } | 601 | } |
613 | 602 | ||
603 | static int __kprobes trap_compare(long v1, long v2) | ||
604 | { | ||
605 | int ret = 0; | ||
606 | |||
607 | if (v1 < v2) | ||
608 | ret |= 0x10; | ||
609 | else if (v1 > v2) | ||
610 | ret |= 0x08; | ||
611 | else | ||
612 | ret |= 0x04; | ||
613 | if ((unsigned long)v1 < (unsigned long)v2) | ||
614 | ret |= 0x02; | ||
615 | else if ((unsigned long)v1 > (unsigned long)v2) | ||
616 | ret |= 0x01; | ||
617 | return ret; | ||
618 | } | ||
619 | |||
614 | /* | 620 | /* |
615 | * Elements of 32-bit rotate and mask instructions. | 621 | * Elements of 32-bit rotate and mask instructions. |
616 | */ | 622 | */ |
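
trap_compare() packs the five trap conditions into the same bit layout as the TO field of tw/td (0x10 less-than signed, 0x08 greater-than signed, 0x04 equal, 0x02 less-than unsigned, 0x01 greater-than unsigned). A worked example and an assumed use, not shown in this hunk:

/* trap_compare(-1, 1):
 *   signed:    -1 <  1   -> ret |= 0x10
 *   unsigned:  ~0UL > 1  -> ret |= 0x01
 *   returns 0x11
 */
unsigned int to = (instr >> 21) & 0x1f;	/* TO field, instruction bits 6-10 */
if (trap_compare(regs->gpr[ra], regs->gpr[rb]) & to)
	/* trap condition satisfied: emulate the trap */ ;
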
@@ -627,26 +633,27 @@ static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1, | |||
627 | #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x)) | 633 | #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x)) |
628 | 634 | ||
629 | /* | 635 | /* |
630 | * Emulate instructions that cause a transfer of control, | 636 | * Decode an instruction, and execute it if that can be done just by |
631 | * loads and stores, and a few other instructions. | 637 | * modifying *regs (i.e. integer arithmetic and logical instructions, |
632 | * Returns 1 if the step was emulated, 0 if not, | 638 | * branches, and barrier instructions). |
633 | * or -1 if the instruction is one that should not be stepped, | 639 | * Returns 1 if the instruction has been executed, or 0 if not. |
634 | * such as an rfid, or a mtmsrd that would clear MSR_RI. | 640 | * Sets *op to indicate what the instruction does. |
635 | */ | 641 | */ |
636 | int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | 642 | int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs, |
643 | unsigned int instr) | ||
637 | { | 644 | { |
638 | unsigned int opcode, ra, rb, rd, spr, u; | 645 | unsigned int opcode, ra, rb, rd, spr, u; |
639 | unsigned long int imm; | 646 | unsigned long int imm; |
640 | unsigned long int val, val2; | 647 | unsigned long int val, val2; |
641 | unsigned long int ea; | 648 | unsigned int mb, me, sh; |
642 | unsigned int cr, mb, me, sh; | ||
643 | int err; | ||
644 | unsigned long old_ra, val3; | ||
645 | long ival; | 649 | long ival; |
646 | 650 | ||
651 | op->type = COMPUTE; | ||
652 | |||
647 | opcode = instr >> 26; | 653 | opcode = instr >> 26; |
648 | switch (opcode) { | 654 | switch (opcode) { |
649 | case 16: /* bc */ | 655 | case 16: /* bc */ |
656 | op->type = BRANCH; | ||
650 | imm = (signed short)(instr & 0xfffc); | 657 | imm = (signed short)(instr & 0xfffc); |
651 | if ((instr & 2) == 0) | 658 | if ((instr & 2) == 0) |
652 | imm += regs->nip; | 659 | imm += regs->nip; |
@@ -659,26 +666,14 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
659 | return 1; | 666 | return 1; |
660 | #ifdef CONFIG_PPC64 | 667 | #ifdef CONFIG_PPC64 |
661 | case 17: /* sc */ | 668 | case 17: /* sc */ |
662 | /* | 669 | if ((instr & 0xfe2) == 2) |
663 | * N.B. this uses knowledge about how the syscall | 670 | op->type = SYSCALL; |
664 | * entry code works. If that is changed, this will | 671 | else |
665 | * need to be changed also. | 672 | op->type = UNKNOWN; |
666 | */ | 673 | return 0; |
667 | if (regs->gpr[0] == 0x1ebe && | ||
668 | cpu_has_feature(CPU_FTR_REAL_LE)) { | ||
669 | regs->msr ^= MSR_LE; | ||
670 | goto instr_done; | ||
671 | } | ||
672 | regs->gpr[9] = regs->gpr[13]; | ||
673 | regs->gpr[10] = MSR_KERNEL; | ||
674 | regs->gpr[11] = regs->nip + 4; | ||
675 | regs->gpr[12] = regs->msr & MSR_MASK; | ||
676 | regs->gpr[13] = (unsigned long) get_paca(); | ||
677 | regs->nip = (unsigned long) &system_call_common; | ||
678 | regs->msr = MSR_KERNEL; | ||
679 | return 1; | ||
680 | #endif | 674 | #endif |
681 | case 18: /* b */ | 675 | case 18: /* b */ |
676 | op->type = BRANCH; | ||
682 | imm = instr & 0x03fffffc; | 677 | imm = instr & 0x03fffffc; |
683 | if (imm & 0x02000000) | 678 | if (imm & 0x02000000) |
684 | imm -= 0x04000000; | 679 | imm -= 0x04000000; |
@@ -691,8 +686,16 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
691 | return 1; | 686 | return 1; |
692 | case 19: | 687 | case 19: |
693 | switch ((instr >> 1) & 0x3ff) { | 688 | switch ((instr >> 1) & 0x3ff) { |
689 | case 0: /* mcrf */ | ||
690 | rd = (instr >> 21) & 0x1c; | ||
691 | ra = (instr >> 16) & 0x1c; | ||
692 | val = (regs->ccr >> ra) & 0xf; | ||
693 | regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd); | ||
694 | goto instr_done; | ||
695 | |||
694 | case 16: /* bclr */ | 696 | case 16: /* bclr */ |
695 | case 528: /* bcctr */ | 697 | case 528: /* bcctr */ |
698 | op->type = BRANCH; | ||
696 | imm = (instr & 0x400)? regs->ctr: regs->link; | 699 | imm = (instr & 0x400)? regs->ctr: regs->link; |
697 | regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); | 700 | regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); |
698 | imm = truncate_if_32bit(regs->msr, imm); | 701 | imm = truncate_if_32bit(regs->msr, imm); |
@@ -703,9 +706,13 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
703 | return 1; | 706 | return 1; |
704 | 707 | ||
705 | case 18: /* rfid, scary */ | 708 | case 18: /* rfid, scary */ |
706 | return -1; | 709 | if (regs->msr & MSR_PR) |
710 | goto priv; | ||
711 | op->type = RFI; | ||
712 | return 0; | ||
707 | 713 | ||
708 | case 150: /* isync */ | 714 | case 150: /* isync */ |
715 | op->type = BARRIER; | ||
709 | isync(); | 716 | isync(); |
710 | goto instr_done; | 717 | goto instr_done; |
711 | 718 | ||
@@ -731,6 +738,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
731 | case 31: | 738 | case 31: |
732 | switch ((instr >> 1) & 0x3ff) { | 739 | switch ((instr >> 1) & 0x3ff) { |
733 | case 598: /* sync */ | 740 | case 598: /* sync */ |
741 | op->type = BARRIER; | ||
734 | #ifdef __powerpc64__ | 742 | #ifdef __powerpc64__ |
735 | switch ((instr >> 21) & 3) { | 743 | switch ((instr >> 21) & 3) { |
736 | case 1: /* lwsync */ | 744 | case 1: /* lwsync */ |
@@ -745,6 +753,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
745 | goto instr_done; | 753 | goto instr_done; |
746 | 754 | ||
747 | case 854: /* eieio */ | 755 | case 854: /* eieio */ |
756 | op->type = BARRIER; | ||
748 | eieio(); | 757 | eieio(); |
749 | goto instr_done; | 758 | goto instr_done; |
750 | } | 759 | } |
@@ -760,6 +769,17 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
760 | rb = (instr >> 11) & 0x1f; | 769 | rb = (instr >> 11) & 0x1f; |
761 | 770 | ||
762 | switch (opcode) { | 771 | switch (opcode) { |
772 | #ifdef __powerpc64__ | ||
773 | case 2: /* tdi */ | ||
774 | if (rd & trap_compare(regs->gpr[ra], (short) instr)) | ||
775 | goto trap; | ||
776 | goto instr_done; | ||
777 | #endif | ||
778 | case 3: /* twi */ | ||
779 | if (rd & trap_compare((int)regs->gpr[ra], (short) instr)) | ||
780 | goto trap; | ||
781 | goto instr_done; | ||
782 | |||
763 | case 7: /* mulli */ | 783 | case 7: /* mulli */ |
764 | regs->gpr[rd] = regs->gpr[ra] * (short) instr; | 784 | regs->gpr[rd] = regs->gpr[ra] * (short) instr; |
765 | goto instr_done; | 785 | goto instr_done; |
@@ -908,35 +928,44 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
908 | 928 | ||
909 | case 31: | 929 | case 31: |
910 | switch ((instr >> 1) & 0x3ff) { | 930 | switch ((instr >> 1) & 0x3ff) { |
931 | case 4: /* tw */ | ||
932 | if (rd == 0x1f || | ||
933 | (rd & trap_compare((int)regs->gpr[ra], | ||
934 | (int)regs->gpr[rb]))) | ||
935 | goto trap; | ||
936 | goto instr_done; | ||
937 | #ifdef __powerpc64__ | ||
938 | case 68: /* td */ | ||
939 | if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb])) | ||
940 | goto trap; | ||
941 | goto instr_done; | ||
942 | #endif | ||
911 | case 83: /* mfmsr */ | 943 | case 83: /* mfmsr */ |
912 | if (regs->msr & MSR_PR) | 944 | if (regs->msr & MSR_PR) |
913 | break; | 945 | goto priv; |
914 | regs->gpr[rd] = regs->msr & MSR_MASK; | 946 | op->type = MFMSR; |
915 | goto instr_done; | 947 | op->reg = rd; |
948 | return 0; | ||
916 | case 146: /* mtmsr */ | 949 | case 146: /* mtmsr */ |
917 | if (regs->msr & MSR_PR) | 950 | if (regs->msr & MSR_PR) |
918 | break; | 951 | goto priv; |
919 | imm = regs->gpr[rd]; | 952 | op->type = MTMSR; |
920 | if ((imm & MSR_RI) == 0) | 953 | op->reg = rd; |
921 | /* can't step mtmsr that would clear MSR_RI */ | 954 | op->val = 0xffffffff & ~(MSR_ME | MSR_LE); |
922 | return -1; | 955 | return 0; |
923 | regs->msr = imm; | ||
924 | goto instr_done; | ||
925 | #ifdef CONFIG_PPC64 | 956 | #ifdef CONFIG_PPC64 |
926 | case 178: /* mtmsrd */ | 957 | case 178: /* mtmsrd */ |
927 | /* only MSR_EE and MSR_RI get changed if bit 15 set */ | ||
928 | /* mtmsrd doesn't change MSR_HV and MSR_ME */ | ||
929 | if (regs->msr & MSR_PR) | 958 | if (regs->msr & MSR_PR) |
930 | break; | 959 | goto priv; |
931 | imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL; | 960 | op->type = MTMSR; |
932 | imm = (regs->msr & MSR_MASK & ~imm) | 961 | op->reg = rd; |
933 | | (regs->gpr[rd] & imm); | 962 | /* only MSR_EE and MSR_RI get changed if bit 15 set */ |
934 | if ((imm & MSR_RI) == 0) | 963 | /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */ |
935 | /* can't step mtmsrd that would clear MSR_RI */ | 964 | imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL; |
936 | return -1; | 965 | op->val = imm; |
937 | regs->msr = imm; | 966 | return 0; |
938 | goto instr_done; | ||
939 | #endif | 967 | #endif |
968 | |||
940 | case 19: /* mfcr */ | 969 | case 19: /* mfcr */ |
941 | regs->gpr[rd] = regs->ccr; | 970 | regs->gpr[rd] = regs->ccr; |
942 | regs->gpr[rd] &= 0xffffffffUL; | 971 | regs->gpr[rd] &= 0xffffffffUL; |
@@ -954,33 +983,43 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
954 | goto instr_done; | 983 | goto instr_done; |
955 | 984 | ||
956 | case 339: /* mfspr */ | 985 | case 339: /* mfspr */ |
957 | spr = (instr >> 11) & 0x3ff; | 986 | spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0); |
958 | switch (spr) { | 987 | switch (spr) { |
959 | case 0x20: /* mfxer */ | 988 | case SPRN_XER: /* mfxer */ |
960 | regs->gpr[rd] = regs->xer; | 989 | regs->gpr[rd] = regs->xer; |
961 | regs->gpr[rd] &= 0xffffffffUL; | 990 | regs->gpr[rd] &= 0xffffffffUL; |
962 | goto instr_done; | 991 | goto instr_done; |
963 | case 0x100: /* mflr */ | 992 | case SPRN_LR: /* mflr */ |
964 | regs->gpr[rd] = regs->link; | 993 | regs->gpr[rd] = regs->link; |
965 | goto instr_done; | 994 | goto instr_done; |
966 | case 0x120: /* mfctr */ | 995 | case SPRN_CTR: /* mfctr */ |
967 | regs->gpr[rd] = regs->ctr; | 996 | regs->gpr[rd] = regs->ctr; |
968 | goto instr_done; | 997 | goto instr_done; |
998 | default: | ||
999 | op->type = MFSPR; | ||
1000 | op->reg = rd; | ||
1001 | op->spr = spr; | ||
1002 | return 0; | ||
969 | } | 1003 | } |
970 | break; | 1004 | break; |
971 | 1005 | ||
972 | case 467: /* mtspr */ | 1006 | case 467: /* mtspr */ |
973 | spr = (instr >> 11) & 0x3ff; | 1007 | spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0); |
974 | switch (spr) { | 1008 | switch (spr) { |
975 | case 0x20: /* mtxer */ | 1009 | case SPRN_XER: /* mtxer */ |
976 | regs->xer = (regs->gpr[rd] & 0xffffffffUL); | 1010 | regs->xer = (regs->gpr[rd] & 0xffffffffUL); |
977 | goto instr_done; | 1011 | goto instr_done; |
978 | case 0x100: /* mtlr */ | 1012 | case SPRN_LR: /* mtlr */ |
979 | regs->link = regs->gpr[rd]; | 1013 | regs->link = regs->gpr[rd]; |
980 | goto instr_done; | 1014 | goto instr_done; |
981 | case 0x120: /* mtctr */ | 1015 | case SPRN_CTR: /* mtctr */ |
982 | regs->ctr = regs->gpr[rd]; | 1016 | regs->ctr = regs->gpr[rd]; |
983 | goto instr_done; | 1017 | goto instr_done; |
1018 | default: | ||
1019 | op->type = MTSPR; | ||
1020 | op->val = regs->gpr[rd]; | ||
1021 | op->spr = spr; | ||
1022 | return 0; | ||
984 | } | 1023 | } |
985 | break; | 1024 | break; |
986 | 1025 | ||
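Editor's note: the mfspr/mtspr hunk above changes the SPR decode because the 10-bit SPR field is stored in the instruction with its two 5-bit halves swapped. The old code compared the raw in-instruction field (0x20 for XER, 0x100 for LR, 0x120 for CTR); the new expression un-swaps the halves so the result matches the architectural SPR numbers and the SPRN_* constants. A small sketch of the same decode, shown only for illustration:

/* Illustrative sketch: un-swap the 5-bit halves of the SPR field so the
 * value matches the architectural SPR number (SPRN_XER = 1, SPRN_LR = 8,
 * SPRN_CTR = 9), as the new analyse_instr() code does.
 */
static unsigned int decode_spr(unsigned int instr)
{
	return ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
}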
@@ -1257,294 +1296,242 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1257 | * Cache instructions | 1296 | * Cache instructions |
1258 | */ | 1297 | */ |
1259 | case 54: /* dcbst */ | 1298 | case 54: /* dcbst */ |
1260 | ea = xform_ea(instr, regs, 0); | 1299 | op->type = MKOP(CACHEOP, DCBST, 0); |
1261 | if (!address_ok(regs, ea, 8)) | 1300 | op->ea = xform_ea(instr, regs); |
1262 | return 0; | 1301 | return 0; |
1263 | err = 0; | ||
1264 | __cacheop_user_asmx(ea, err, "dcbst"); | ||
1265 | if (err) | ||
1266 | return 0; | ||
1267 | goto instr_done; | ||
1268 | 1302 | ||
1269 | case 86: /* dcbf */ | 1303 | case 86: /* dcbf */ |
1270 | ea = xform_ea(instr, regs, 0); | 1304 | op->type = MKOP(CACHEOP, DCBF, 0); |
1271 | if (!address_ok(regs, ea, 8)) | 1305 | op->ea = xform_ea(instr, regs); |
1272 | return 0; | 1306 | return 0; |
1273 | err = 0; | ||
1274 | __cacheop_user_asmx(ea, err, "dcbf"); | ||
1275 | if (err) | ||
1276 | return 0; | ||
1277 | goto instr_done; | ||
1278 | 1307 | ||
1279 | case 246: /* dcbtst */ | 1308 | case 246: /* dcbtst */ |
1280 | if (rd == 0) { | 1309 | op->type = MKOP(CACHEOP, DCBTST, 0); |
1281 | ea = xform_ea(instr, regs, 0); | 1310 | op->ea = xform_ea(instr, regs); |
1282 | prefetchw((void *) ea); | 1311 | op->reg = rd; |
1283 | } | 1312 | return 0; |
1284 | goto instr_done; | ||
1285 | 1313 | ||
1286 | case 278: /* dcbt */ | 1314 | case 278: /* dcbt */ |
1287 | if (rd == 0) { | 1315 | op->type = MKOP(CACHEOP, DCBTST, 0); |
1288 | ea = xform_ea(instr, regs, 0); | 1316 | op->ea = xform_ea(instr, regs); |
1289 | prefetch((void *) ea); | 1317 | op->reg = rd; |
1290 | } | 1318 | return 0; |
1291 | goto instr_done; | ||
1292 | 1319 | ||
1320 | case 982: /* icbi */ | ||
1321 | op->type = MKOP(CACHEOP, ICBI, 0); | ||
1322 | op->ea = xform_ea(instr, regs); | ||
1323 | return 0; | ||
1293 | } | 1324 | } |
1294 | break; | 1325 | break; |
1295 | } | 1326 | } |
1296 | 1327 | ||
1297 | /* | 1328 | /* |
1298 | * Following cases are for loads and stores, so bail out | 1329 | * Loads and stores. |
1299 | * if we're in little-endian mode. | ||
1300 | */ | 1330 | */ |
1301 | if (regs->msr & MSR_LE) | 1331 | op->type = UNKNOWN; |
1302 | return 0; | 1332 | op->update_reg = ra; |
1303 | 1333 | op->reg = rd; | |
1304 | /* | 1334 | op->val = regs->gpr[rd]; |
1305 | * Save register RA in case it's an update form load or store | 1335 | u = (instr >> 20) & UPDATE; |
1306 | * and the access faults. | ||
1307 | */ | ||
1308 | old_ra = regs->gpr[ra]; | ||
1309 | 1336 | ||
1310 | switch (opcode) { | 1337 | switch (opcode) { |
1311 | case 31: | 1338 | case 31: |
1312 | u = instr & 0x40; | 1339 | u = instr & UPDATE; |
1340 | op->ea = xform_ea(instr, regs); | ||
1313 | switch ((instr >> 1) & 0x3ff) { | 1341 | switch ((instr >> 1) & 0x3ff) { |
1314 | case 20: /* lwarx */ | 1342 | case 20: /* lwarx */ |
1315 | ea = xform_ea(instr, regs, 0); | 1343 | op->type = MKOP(LARX, 0, 4); |
1316 | if (ea & 3) | 1344 | break; |
1317 | break; /* can't handle misaligned */ | ||
1318 | err = -EFAULT; | ||
1319 | if (!address_ok(regs, ea, 4)) | ||
1320 | goto ldst_done; | ||
1321 | err = 0; | ||
1322 | __get_user_asmx(val, ea, err, "lwarx"); | ||
1323 | if (!err) | ||
1324 | regs->gpr[rd] = val; | ||
1325 | goto ldst_done; | ||
1326 | 1345 | ||
1327 | case 150: /* stwcx. */ | 1346 | case 150: /* stwcx. */ |
1328 | ea = xform_ea(instr, regs, 0); | 1347 | op->type = MKOP(STCX, 0, 4); |
1329 | if (ea & 3) | 1348 | break; |
1330 | break; /* can't handle misaligned */ | ||
1331 | err = -EFAULT; | ||
1332 | if (!address_ok(regs, ea, 4)) | ||
1333 | goto ldst_done; | ||
1334 | err = 0; | ||
1335 | __put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr); | ||
1336 | if (!err) | ||
1337 | regs->ccr = (regs->ccr & 0x0fffffff) | | ||
1338 | (cr & 0xe0000000) | | ||
1339 | ((regs->xer >> 3) & 0x10000000); | ||
1340 | goto ldst_done; | ||
1341 | 1349 | ||
1342 | #ifdef __powerpc64__ | 1350 | #ifdef __powerpc64__ |
1343 | case 84: /* ldarx */ | 1351 | case 84: /* ldarx */ |
1344 | ea = xform_ea(instr, regs, 0); | 1352 | op->type = MKOP(LARX, 0, 8); |
1345 | if (ea & 7) | 1353 | break; |
1346 | break; /* can't handle misaligned */ | ||
1347 | err = -EFAULT; | ||
1348 | if (!address_ok(regs, ea, 8)) | ||
1349 | goto ldst_done; | ||
1350 | err = 0; | ||
1351 | __get_user_asmx(val, ea, err, "ldarx"); | ||
1352 | if (!err) | ||
1353 | regs->gpr[rd] = val; | ||
1354 | goto ldst_done; | ||
1355 | 1354 | ||
1356 | case 214: /* stdcx. */ | 1355 | case 214: /* stdcx. */ |
1357 | ea = xform_ea(instr, regs, 0); | 1356 | op->type = MKOP(STCX, 0, 8); |
1358 | if (ea & 7) | 1357 | break; |
1359 | break; /* can't handle misaligned */ | ||
1360 | err = -EFAULT; | ||
1361 | if (!address_ok(regs, ea, 8)) | ||
1362 | goto ldst_done; | ||
1363 | err = 0; | ||
1364 | __put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr); | ||
1365 | if (!err) | ||
1366 | regs->ccr = (regs->ccr & 0x0fffffff) | | ||
1367 | (cr & 0xe0000000) | | ||
1368 | ((regs->xer >> 3) & 0x10000000); | ||
1369 | goto ldst_done; | ||
1370 | 1358 | ||
1371 | case 21: /* ldx */ | 1359 | case 21: /* ldx */ |
1372 | case 53: /* ldux */ | 1360 | case 53: /* ldux */ |
1373 | err = read_mem(®s->gpr[rd], xform_ea(instr, regs, u), | 1361 | op->type = MKOP(LOAD, u, 8); |
1374 | 8, regs); | 1362 | break; |
1375 | goto ldst_done; | ||
1376 | #endif | 1363 | #endif |
1377 | 1364 | ||
1378 | case 23: /* lwzx */ | 1365 | case 23: /* lwzx */ |
1379 | case 55: /* lwzux */ | 1366 | case 55: /* lwzux */ |
1380 | err = read_mem(®s->gpr[rd], xform_ea(instr, regs, u), | 1367 | op->type = MKOP(LOAD, u, 4); |
1381 | 4, regs); | 1368 | break; |
1382 | goto ldst_done; | ||
1383 | 1369 | ||
1384 | case 87: /* lbzx */ | 1370 | case 87: /* lbzx */ |
1385 | case 119: /* lbzux */ | 1371 | case 119: /* lbzux */ |
1386 | err = read_mem(®s->gpr[rd], xform_ea(instr, regs, u), | 1372 | op->type = MKOP(LOAD, u, 1); |
1387 | 1, regs); | 1373 | break; |
1388 | goto ldst_done; | ||
1389 | 1374 | ||
1390 | #ifdef CONFIG_ALTIVEC | 1375 | #ifdef CONFIG_ALTIVEC |
1391 | case 103: /* lvx */ | 1376 | case 103: /* lvx */ |
1392 | case 359: /* lvxl */ | 1377 | case 359: /* lvxl */ |
1393 | if (!(regs->msr & MSR_VEC)) | 1378 | if (!(regs->msr & MSR_VEC)) |
1394 | break; | 1379 | goto vecunavail; |
1395 | ea = xform_ea(instr, regs, 0); | 1380 | op->type = MKOP(LOAD_VMX, 0, 16); |
1396 | err = do_vec_load(rd, do_lvx, ea, regs); | 1381 | break; |
1397 | goto ldst_done; | ||
1398 | 1382 | ||
1399 | case 231: /* stvx */ | 1383 | case 231: /* stvx */ |
1400 | case 487: /* stvxl */ | 1384 | case 487: /* stvxl */ |
1401 | if (!(regs->msr & MSR_VEC)) | 1385 | if (!(regs->msr & MSR_VEC)) |
1402 | break; | 1386 | goto vecunavail; |
1403 | ea = xform_ea(instr, regs, 0); | 1387 | op->type = MKOP(STORE_VMX, 0, 16); |
1404 | err = do_vec_store(rd, do_stvx, ea, regs); | 1388 | break; |
1405 | goto ldst_done; | ||
1406 | #endif /* CONFIG_ALTIVEC */ | 1389 | #endif /* CONFIG_ALTIVEC */ |
1407 | 1390 | ||
1408 | #ifdef __powerpc64__ | 1391 | #ifdef __powerpc64__ |
1409 | case 149: /* stdx */ | 1392 | case 149: /* stdx */ |
1410 | case 181: /* stdux */ | 1393 | case 181: /* stdux */ |
1411 | val = regs->gpr[rd]; | 1394 | op->type = MKOP(STORE, u, 8); |
1412 | err = write_mem(val, xform_ea(instr, regs, u), 8, regs); | 1395 | break; |
1413 | goto ldst_done; | ||
1414 | #endif | 1396 | #endif |
1415 | 1397 | ||
1416 | case 151: /* stwx */ | 1398 | case 151: /* stwx */ |
1417 | case 183: /* stwux */ | 1399 | case 183: /* stwux */ |
1418 | val = regs->gpr[rd]; | 1400 | op->type = MKOP(STORE, u, 4); |
1419 | err = write_mem(val, xform_ea(instr, regs, u), 4, regs); | 1401 | break; |
1420 | goto ldst_done; | ||
1421 | 1402 | ||
1422 | case 215: /* stbx */ | 1403 | case 215: /* stbx */ |
1423 | case 247: /* stbux */ | 1404 | case 247: /* stbux */ |
1424 | val = regs->gpr[rd]; | 1405 | op->type = MKOP(STORE, u, 1); |
1425 | err = write_mem(val, xform_ea(instr, regs, u), 1, regs); | 1406 | break; |
1426 | goto ldst_done; | ||
1427 | 1407 | ||
1428 | case 279: /* lhzx */ | 1408 | case 279: /* lhzx */ |
1429 | case 311: /* lhzux */ | 1409 | case 311: /* lhzux */ |
1430 | err = read_mem(®s->gpr[rd], xform_ea(instr, regs, u), | 1410 | op->type = MKOP(LOAD, u, 2); |
1431 | 2, regs); | 1411 | break; |
1432 | goto ldst_done; | ||
1433 | 1412 | ||
1434 | #ifdef __powerpc64__ | 1413 | #ifdef __powerpc64__ |
1435 | case 341: /* lwax */ | 1414 | case 341: /* lwax */ |
1436 | case 373: /* lwaux */ | 1415 | case 373: /* lwaux */ |
1437 | err = read_mem(®s->gpr[rd], xform_ea(instr, regs, u), | 1416 | op->type = MKOP(LOAD, SIGNEXT | u, 4); |
1438 | 4, regs); | 1417 | break; |
1439 | if (!err) | ||
1440 | regs->gpr[rd] = (signed int) regs->gpr[rd]; | ||
1441 | goto ldst_done; | ||
1442 | #endif | 1418 | #endif |
1443 | 1419 | ||
1444 | case 343: /* lhax */ | 1420 | case 343: /* lhax */ |
1445 | case 375: /* lhaux */ | 1421 | case 375: /* lhaux */ |
1446 | err = read_mem(®s->gpr[rd], xform_ea(instr, regs, u), | 1422 | op->type = MKOP(LOAD, SIGNEXT | u, 2); |
1447 | 2, regs); | 1423 | break; |
1448 | if (!err) | ||
1449 | regs->gpr[rd] = (signed short) regs->gpr[rd]; | ||
1450 | goto ldst_done; | ||
1451 | 1424 | ||
1452 | case 407: /* sthx */ | 1425 | case 407: /* sthx */ |
1453 | case 439: /* sthux */ | 1426 | case 439: /* sthux */ |
1454 | val = regs->gpr[rd]; | 1427 | op->type = MKOP(STORE, u, 2); |
1455 | err = write_mem(val, xform_ea(instr, regs, u), 2, regs); | 1428 | break; |
1456 | goto ldst_done; | ||
1457 | 1429 | ||
1458 | #ifdef __powerpc64__ | 1430 | #ifdef __powerpc64__ |
1459 | case 532: /* ldbrx */ | 1431 | case 532: /* ldbrx */ |
1460 | err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs); | 1432 | op->type = MKOP(LOAD, BYTEREV, 8); |
1461 | if (!err) | 1433 | break; |
1462 | regs->gpr[rd] = byterev_8(val); | ||
1463 | goto ldst_done; | ||
1464 | 1434 | ||
1465 | #endif | 1435 | #endif |
1436 | case 533: /* lswx */ | ||
1437 | op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f); | ||
1438 | break; | ||
1466 | 1439 | ||
1467 | case 534: /* lwbrx */ | 1440 | case 534: /* lwbrx */ |
1468 | err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs); | 1441 | op->type = MKOP(LOAD, BYTEREV, 4); |
1469 | if (!err) | 1442 | break; |
1470 | regs->gpr[rd] = byterev_4(val); | 1443 | |
1471 | goto ldst_done; | 1444 | case 597: /* lswi */ |
1445 | if (rb == 0) | ||
1446 | rb = 32; /* # bytes to load */ | ||
1447 | op->type = MKOP(LOAD_MULTI, 0, rb); | ||
1448 | op->ea = 0; | ||
1449 | if (ra) | ||
1450 | op->ea = truncate_if_32bit(regs->msr, | ||
1451 | regs->gpr[ra]); | ||
1452 | break; | ||
1472 | 1453 | ||
1473 | #ifdef CONFIG_PPC_FPU | 1454 | #ifdef CONFIG_PPC_FPU |
1474 | case 535: /* lfsx */ | 1455 | case 535: /* lfsx */ |
1475 | case 567: /* lfsux */ | 1456 | case 567: /* lfsux */ |
1476 | if (!(regs->msr & MSR_FP)) | 1457 | if (!(regs->msr & MSR_FP)) |
1477 | break; | 1458 | goto fpunavail; |
1478 | ea = xform_ea(instr, regs, u); | 1459 | op->type = MKOP(LOAD_FP, u, 4); |
1479 | err = do_fp_load(rd, do_lfs, ea, 4, regs); | 1460 | break; |
1480 | goto ldst_done; | ||
1481 | 1461 | ||
1482 | case 599: /* lfdx */ | 1462 | case 599: /* lfdx */ |
1483 | case 631: /* lfdux */ | 1463 | case 631: /* lfdux */ |
1484 | if (!(regs->msr & MSR_FP)) | 1464 | if (!(regs->msr & MSR_FP)) |
1485 | break; | 1465 | goto fpunavail; |
1486 | ea = xform_ea(instr, regs, u); | 1466 | op->type = MKOP(LOAD_FP, u, 8); |
1487 | err = do_fp_load(rd, do_lfd, ea, 8, regs); | 1467 | break; |
1488 | goto ldst_done; | ||
1489 | 1468 | ||
1490 | case 663: /* stfsx */ | 1469 | case 663: /* stfsx */ |
1491 | case 695: /* stfsux */ | 1470 | case 695: /* stfsux */ |
1492 | if (!(regs->msr & MSR_FP)) | 1471 | if (!(regs->msr & MSR_FP)) |
1493 | break; | 1472 | goto fpunavail; |
1494 | ea = xform_ea(instr, regs, u); | 1473 | op->type = MKOP(STORE_FP, u, 4); |
1495 | err = do_fp_store(rd, do_stfs, ea, 4, regs); | 1474 | break; |
1496 | goto ldst_done; | ||
1497 | 1475 | ||
1498 | case 727: /* stfdx */ | 1476 | case 727: /* stfdx */ |
1499 | case 759: /* stfdux */ | 1477 | case 759: /* stfdux */ |
1500 | if (!(regs->msr & MSR_FP)) | 1478 | if (!(regs->msr & MSR_FP)) |
1501 | break; | 1479 | goto fpunavail; |
1502 | ea = xform_ea(instr, regs, u); | 1480 | op->type = MKOP(STORE_FP, u, 8); |
1503 | err = do_fp_store(rd, do_stfd, ea, 8, regs); | 1481 | break; |
1504 | goto ldst_done; | ||
1505 | #endif | 1482 | #endif |
1506 | 1483 | ||
1507 | #ifdef __powerpc64__ | 1484 | #ifdef __powerpc64__ |
1508 | case 660: /* stdbrx */ | 1485 | case 660: /* stdbrx */ |
1509 | val = byterev_8(regs->gpr[rd]); | 1486 | op->type = MKOP(STORE, BYTEREV, 8); |
1510 | err = write_mem(val, xform_ea(instr, regs, 0), 8, regs); | 1487 | op->val = byterev_8(regs->gpr[rd]); |
1511 | goto ldst_done; | 1488 | break; |
1512 | 1489 | ||
1513 | #endif | 1490 | #endif |
1491 | case 661: /* stswx */ | ||
1492 | op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f); | ||
1493 | break; | ||
1494 | |||
1514 | case 662: /* stwbrx */ | 1495 | case 662: /* stwbrx */ |
1515 | val = byterev_4(regs->gpr[rd]); | 1496 | op->type = MKOP(STORE, BYTEREV, 4); |
1516 | err = write_mem(val, xform_ea(instr, regs, 0), 4, regs); | 1497 | op->val = byterev_4(regs->gpr[rd]); |
1517 | goto ldst_done; | 1498 | break; |
1499 | |||
1500 | case 725: /* stswi */ | ||
1501 | if (rb == 0) | ||
1502 | rb = 32; /* # bytes to store */ | ||
1503 | op->type = MKOP(STORE_MULTI, 0, rb); | ||
1504 | op->ea = 0; | ||
1505 | if (ra) | ||
1506 | op->ea = truncate_if_32bit(regs->msr, | ||
1507 | regs->gpr[ra]); | ||
1508 | break; | ||
1518 | 1509 | ||
1519 | case 790: /* lhbrx */ | 1510 | case 790: /* lhbrx */ |
1520 | err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs); | 1511 | op->type = MKOP(LOAD, BYTEREV, 2); |
1521 | if (!err) | 1512 | break; |
1522 | regs->gpr[rd] = byterev_2(val); | ||
1523 | goto ldst_done; | ||
1524 | 1513 | ||
1525 | case 918: /* sthbrx */ | 1514 | case 918: /* sthbrx */ |
1526 | val = byterev_2(regs->gpr[rd]); | 1515 | op->type = MKOP(STORE, BYTEREV, 2); |
1527 | err = write_mem(val, xform_ea(instr, regs, 0), 2, regs); | 1516 | op->val = byterev_2(regs->gpr[rd]); |
1528 | goto ldst_done; | 1517 | break; |
1529 | 1518 | ||
1530 | #ifdef CONFIG_VSX | 1519 | #ifdef CONFIG_VSX |
1531 | case 844: /* lxvd2x */ | 1520 | case 844: /* lxvd2x */ |
1532 | case 876: /* lxvd2ux */ | 1521 | case 876: /* lxvd2ux */ |
1533 | if (!(regs->msr & MSR_VSX)) | 1522 | if (!(regs->msr & MSR_VSX)) |
1534 | break; | 1523 | goto vsxunavail; |
1535 | rd |= (instr & 1) << 5; | 1524 | op->reg = rd | ((instr & 1) << 5); |
1536 | ea = xform_ea(instr, regs, u); | 1525 | op->type = MKOP(LOAD_VSX, u, 16); |
1537 | err = do_vsx_load(rd, do_lxvd2x, ea, regs); | 1526 | break; |
1538 | goto ldst_done; | ||
1539 | 1527 | ||
1540 | case 972: /* stxvd2x */ | 1528 | case 972: /* stxvd2x */ |
1541 | case 1004: /* stxvd2ux */ | 1529 | case 1004: /* stxvd2ux */ |
1542 | if (!(regs->msr & MSR_VSX)) | 1530 | if (!(regs->msr & MSR_VSX)) |
1543 | break; | 1531 | goto vsxunavail; |
1544 | rd |= (instr & 1) << 5; | 1532 | op->reg = rd | ((instr & 1) << 5); |
1545 | ea = xform_ea(instr, regs, u); | 1533 | op->type = MKOP(STORE_VSX, u, 16); |
1546 | err = do_vsx_store(rd, do_stxvd2x, ea, regs); | 1534 | break; |
1547 | goto ldst_done; | ||
1548 | 1535 | ||
1549 | #endif /* CONFIG_VSX */ | 1536 | #endif /* CONFIG_VSX */ |
1550 | } | 1537 | } |
@@ -1552,178 +1539,123 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1552 | 1539 | ||
1553 | case 32: /* lwz */ | 1540 | case 32: /* lwz */ |
1554 | case 33: /* lwzu */ | 1541 | case 33: /* lwzu */ |
1555 | err = read_mem(®s->gpr[rd], dform_ea(instr, regs), 4, regs); | 1542 | op->type = MKOP(LOAD, u, 4); |
1556 | goto ldst_done; | 1543 | op->ea = dform_ea(instr, regs); |
1544 | break; | ||
1557 | 1545 | ||
1558 | case 34: /* lbz */ | 1546 | case 34: /* lbz */ |
1559 | case 35: /* lbzu */ | 1547 | case 35: /* lbzu */ |
1560 | err = read_mem(®s->gpr[rd], dform_ea(instr, regs), 1, regs); | 1548 | op->type = MKOP(LOAD, u, 1); |
1561 | goto ldst_done; | 1549 | op->ea = dform_ea(instr, regs); |
1550 | break; | ||
1562 | 1551 | ||
1563 | case 36: /* stw */ | 1552 | case 36: /* stw */ |
1564 | val = regs->gpr[rd]; | ||
1565 | err = write_mem(val, dform_ea(instr, regs), 4, regs); | ||
1566 | goto ldst_done; | ||
1567 | |||
1568 | case 37: /* stwu */ | 1553 | case 37: /* stwu */ |
1569 | val = regs->gpr[rd]; | 1554 | op->type = MKOP(STORE, u, 4); |
1570 | val3 = dform_ea(instr, regs); | 1555 | op->ea = dform_ea(instr, regs); |
1571 | /* | 1556 | break; |
1572 | * For PPC32 we always use stwu to change the stack pointer with r1. So | ||
1573 | * this emulated store may corrupt the exception frame, now we | ||
1574 | * have to provide the exception frame trampoline, which is pushed | ||
1575 | * below the kprobed function stack. So we only update gpr[1] but | ||
1576 | * don't emulate the real store operation. We will do real store | ||
1577 | * operation safely in exception return code by checking this flag. | ||
1578 | */ | ||
1579 | if ((ra == 1) && !(regs->msr & MSR_PR) \ | ||
1580 | && (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) { | ||
1581 | #ifdef CONFIG_PPC32 | ||
1582 | /* | ||
1583 | * Check if we will touch kernel stack overflow | ||
1584 | */ | ||
1585 | if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) { | ||
1586 | printk(KERN_CRIT "Can't kprobe this since Kernel stack overflow.\n"); | ||
1587 | err = -EINVAL; | ||
1588 | break; | ||
1589 | } | ||
1590 | #endif /* CONFIG_PPC32 */ | ||
1591 | /* | ||
1592 | * Check if the flag is already set, since that means we'd | ||
1593 | * lose the previous value. | ||
1594 | */ | ||
1595 | WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE)); | ||
1596 | set_thread_flag(TIF_EMULATE_STACK_STORE); | ||
1597 | err = 0; | ||
1598 | } else | ||
1599 | err = write_mem(val, val3, 4, regs); | ||
1600 | goto ldst_done; | ||
1601 | 1557 | ||
1602 | case 38: /* stb */ | 1558 | case 38: /* stb */ |
1603 | case 39: /* stbu */ | 1559 | case 39: /* stbu */ |
1604 | val = regs->gpr[rd]; | 1560 | op->type = MKOP(STORE, u, 1); |
1605 | err = write_mem(val, dform_ea(instr, regs), 1, regs); | 1561 | op->ea = dform_ea(instr, regs); |
1606 | goto ldst_done; | 1562 | break; |
1607 | 1563 | ||
1608 | case 40: /* lhz */ | 1564 | case 40: /* lhz */ |
1609 | case 41: /* lhzu */ | 1565 | case 41: /* lhzu */ |
1610 | err = read_mem(®s->gpr[rd], dform_ea(instr, regs), 2, regs); | 1566 | op->type = MKOP(LOAD, u, 2); |
1611 | goto ldst_done; | 1567 | op->ea = dform_ea(instr, regs); |
1568 | break; | ||
1612 | 1569 | ||
1613 | case 42: /* lha */ | 1570 | case 42: /* lha */ |
1614 | case 43: /* lhau */ | 1571 | case 43: /* lhau */ |
1615 | err = read_mem(®s->gpr[rd], dform_ea(instr, regs), 2, regs); | 1572 | op->type = MKOP(LOAD, SIGNEXT | u, 2); |
1616 | if (!err) | 1573 | op->ea = dform_ea(instr, regs); |
1617 | regs->gpr[rd] = (signed short) regs->gpr[rd]; | 1574 | break; |
1618 | goto ldst_done; | ||
1619 | 1575 | ||
1620 | case 44: /* sth */ | 1576 | case 44: /* sth */ |
1621 | case 45: /* sthu */ | 1577 | case 45: /* sthu */ |
1622 | val = regs->gpr[rd]; | 1578 | op->type = MKOP(STORE, u, 2); |
1623 | err = write_mem(val, dform_ea(instr, regs), 2, regs); | 1579 | op->ea = dform_ea(instr, regs); |
1624 | goto ldst_done; | 1580 | break; |
1625 | 1581 | ||
1626 | case 46: /* lmw */ | 1582 | case 46: /* lmw */ |
1627 | ra = (instr >> 16) & 0x1f; | ||
1628 | if (ra >= rd) | 1583 | if (ra >= rd) |
1629 | break; /* invalid form, ra in range to load */ | 1584 | break; /* invalid form, ra in range to load */ |
1630 | ea = dform_ea(instr, regs); | 1585 | op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd)); |
1631 | do { | 1586 | op->ea = dform_ea(instr, regs); |
1632 | err = read_mem(®s->gpr[rd], ea, 4, regs); | 1587 | break; |
1633 | if (err) | ||
1634 | return 0; | ||
1635 | ea += 4; | ||
1636 | } while (++rd < 32); | ||
1637 | goto instr_done; | ||
1638 | 1588 | ||
1639 | case 47: /* stmw */ | 1589 | case 47: /* stmw */ |
1640 | ea = dform_ea(instr, regs); | 1590 | op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd)); |
1641 | do { | 1591 | op->ea = dform_ea(instr, regs); |
1642 | err = write_mem(regs->gpr[rd], ea, 4, regs); | 1592 | break; |
1643 | if (err) | ||
1644 | return 0; | ||
1645 | ea += 4; | ||
1646 | } while (++rd < 32); | ||
1647 | goto instr_done; | ||
1648 | 1593 | ||
1649 | #ifdef CONFIG_PPC_FPU | 1594 | #ifdef CONFIG_PPC_FPU |
1650 | case 48: /* lfs */ | 1595 | case 48: /* lfs */ |
1651 | case 49: /* lfsu */ | 1596 | case 49: /* lfsu */ |
1652 | if (!(regs->msr & MSR_FP)) | 1597 | if (!(regs->msr & MSR_FP)) |
1653 | break; | 1598 | goto fpunavail; |
1654 | ea = dform_ea(instr, regs); | 1599 | op->type = MKOP(LOAD_FP, u, 4); |
1655 | err = do_fp_load(rd, do_lfs, ea, 4, regs); | 1600 | op->ea = dform_ea(instr, regs); |
1656 | goto ldst_done; | 1601 | break; |
1657 | 1602 | ||
1658 | case 50: /* lfd */ | 1603 | case 50: /* lfd */ |
1659 | case 51: /* lfdu */ | 1604 | case 51: /* lfdu */ |
1660 | if (!(regs->msr & MSR_FP)) | 1605 | if (!(regs->msr & MSR_FP)) |
1661 | break; | 1606 | goto fpunavail; |
1662 | ea = dform_ea(instr, regs); | 1607 | op->type = MKOP(LOAD_FP, u, 8); |
1663 | err = do_fp_load(rd, do_lfd, ea, 8, regs); | 1608 | op->ea = dform_ea(instr, regs); |
1664 | goto ldst_done; | 1609 | break; |
1665 | 1610 | ||
1666 | case 52: /* stfs */ | 1611 | case 52: /* stfs */ |
1667 | case 53: /* stfsu */ | 1612 | case 53: /* stfsu */ |
1668 | if (!(regs->msr & MSR_FP)) | 1613 | if (!(regs->msr & MSR_FP)) |
1669 | break; | 1614 | goto fpunavail; |
1670 | ea = dform_ea(instr, regs); | 1615 | op->type = MKOP(STORE_FP, u, 4); |
1671 | err = do_fp_store(rd, do_stfs, ea, 4, regs); | 1616 | op->ea = dform_ea(instr, regs); |
1672 | goto ldst_done; | 1617 | break; |
1673 | 1618 | ||
1674 | case 54: /* stfd */ | 1619 | case 54: /* stfd */ |
1675 | case 55: /* stfdu */ | 1620 | case 55: /* stfdu */ |
1676 | if (!(regs->msr & MSR_FP)) | 1621 | if (!(regs->msr & MSR_FP)) |
1677 | break; | 1622 | goto fpunavail; |
1678 | ea = dform_ea(instr, regs); | 1623 | op->type = MKOP(STORE_FP, u, 8); |
1679 | err = do_fp_store(rd, do_stfd, ea, 8, regs); | 1624 | op->ea = dform_ea(instr, regs); |
1680 | goto ldst_done; | 1625 | break; |
1681 | #endif | 1626 | #endif |
1682 | 1627 | ||
1683 | #ifdef __powerpc64__ | 1628 | #ifdef __powerpc64__ |
1684 | case 58: /* ld[u], lwa */ | 1629 | case 58: /* ld[u], lwa */ |
1630 | op->ea = dsform_ea(instr, regs); | ||
1685 | switch (instr & 3) { | 1631 | switch (instr & 3) { |
1686 | case 0: /* ld */ | 1632 | case 0: /* ld */ |
1687 | err = read_mem(®s->gpr[rd], dsform_ea(instr, regs), | 1633 | op->type = MKOP(LOAD, 0, 8); |
1688 | 8, regs); | 1634 | break; |
1689 | goto ldst_done; | ||
1690 | case 1: /* ldu */ | 1635 | case 1: /* ldu */ |
1691 | err = read_mem(®s->gpr[rd], dsform_ea(instr, regs), | 1636 | op->type = MKOP(LOAD, UPDATE, 8); |
1692 | 8, regs); | 1637 | break; |
1693 | goto ldst_done; | ||
1694 | case 2: /* lwa */ | 1638 | case 2: /* lwa */ |
1695 | err = read_mem(®s->gpr[rd], dsform_ea(instr, regs), | 1639 | op->type = MKOP(LOAD, SIGNEXT, 4); |
1696 | 4, regs); | 1640 | break; |
1697 | if (!err) | ||
1698 | regs->gpr[rd] = (signed int) regs->gpr[rd]; | ||
1699 | goto ldst_done; | ||
1700 | } | 1641 | } |
1701 | break; | 1642 | break; |
1702 | 1643 | ||
1703 | case 62: /* std[u] */ | 1644 | case 62: /* std[u] */ |
1704 | val = regs->gpr[rd]; | 1645 | op->ea = dsform_ea(instr, regs); |
1705 | switch (instr & 3) { | 1646 | switch (instr & 3) { |
1706 | case 0: /* std */ | 1647 | case 0: /* std */ |
1707 | err = write_mem(val, dsform_ea(instr, regs), 8, regs); | 1648 | op->type = MKOP(STORE, 0, 8); |
1708 | goto ldst_done; | 1649 | break; |
1709 | case 1: /* stdu */ | 1650 | case 1: /* stdu */ |
1710 | err = write_mem(val, dsform_ea(instr, regs), 8, regs); | 1651 | op->type = MKOP(STORE, UPDATE, 8); |
1711 | goto ldst_done; | 1652 | break; |
1712 | } | 1653 | } |
1713 | break; | 1654 | break; |
1714 | #endif /* __powerpc64__ */ | 1655 | #endif /* __powerpc64__ */ |
1715 | 1656 | ||
1716 | } | 1657 | } |
1717 | err = -EINVAL; | 1658 | return 0; |
1718 | |||
1719 | ldst_done: | ||
1720 | if (err) { | ||
1721 | regs->gpr[ra] = old_ra; | ||
1722 | return 0; /* invoke DSI if -EFAULT? */ | ||
1723 | } | ||
1724 | instr_done: | ||
1725 | regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); | ||
1726 | return 1; | ||
1727 | 1659 | ||
1728 | logical_done: | 1660 | logical_done: |
1729 | if (instr & 1) | 1661 | if (instr & 1) |
@@ -1733,5 +1665,349 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1733 | arith_done: | 1665 | arith_done: |
1734 | if (instr & 1) | 1666 | if (instr & 1) |
1735 | set_cr0(regs, rd); | 1667 | set_cr0(regs, rd); |
1736 | goto instr_done; | 1668 | |
1669 | instr_done: | ||
1670 | regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); | ||
1671 | return 1; | ||
1672 | |||
1673 | priv: | ||
1674 | op->type = INTERRUPT | 0x700; | ||
1675 | op->val = SRR1_PROGPRIV; | ||
1676 | return 0; | ||
1677 | |||
1678 | trap: | ||
1679 | op->type = INTERRUPT | 0x700; | ||
1680 | op->val = SRR1_PROGTRAP; | ||
1681 | return 0; | ||
1682 | |||
1683 | #ifdef CONFIG_PPC_FPU | ||
1684 | fpunavail: | ||
1685 | op->type = INTERRUPT | 0x800; | ||
1686 | return 0; | ||
1687 | #endif | ||
1688 | |||
1689 | #ifdef CONFIG_ALTIVEC | ||
1690 | vecunavail: | ||
1691 | op->type = INTERRUPT | 0xf20; | ||
1692 | return 0; | ||
1693 | #endif | ||
1694 | |||
1695 | #ifdef CONFIG_VSX | ||
1696 | vsxunavail: | ||
1697 | op->type = INTERRUPT | 0xf40; | ||
1698 | return 0; | ||
1699 | #endif | ||
1700 | } | ||
1701 | EXPORT_SYMBOL_GPL(analyse_instr); | ||
1702 | |||
1703 | /* | ||
1704 | * For PPC32 we always use stwu with r1 to change the stack pointer. | ||
1705 | * This emulated store may therefore corrupt the exception frame, so an | ||
1706 | * exception frame trampoline is provided, pushed below the kprobed | ||
1707 | * function's stack. Here we only update gpr[1] and do not emulate the | ||
1708 | * real store operation; the real store is done safely in the exception | ||
1709 | * return code, which checks this flag. | ||
1710 | */ | ||
1711 | static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs) | ||
1712 | { | ||
1713 | #ifdef CONFIG_PPC32 | ||
1714 | /* | ||
1715 | * Check whether this store would overflow the kernel stack | ||
1716 | */ | ||
1717 | if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) { | ||
1718 | printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n"); | ||
1719 | return -EINVAL; | ||
1720 | } | ||
1721 | #endif /* CONFIG_PPC32 */ | ||
1722 | /* | ||
1723 | * Check if the flag is already set, since that means we'd | ||
1724 | * lose the previous value. | ||
1725 | */ | ||
1726 | WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE)); | ||
1727 | set_thread_flag(TIF_EMULATE_STACK_STORE); | ||
1728 | return 0; | ||
1729 | } | ||
1730 | |||
1731 | static __kprobes void do_signext(unsigned long *valp, int size) | ||
1732 | { | ||
1733 | switch (size) { | ||
1734 | case 2: | ||
1735 | *valp = (signed short) *valp; | ||
1736 | break; | ||
1737 | case 4: | ||
1738 | *valp = (signed int) *valp; | ||
1739 | break; | ||
1740 | } | ||
1741 | } | ||
1742 | |||
1743 | static __kprobes void do_byterev(unsigned long *valp, int size) | ||
1744 | { | ||
1745 | switch (size) { | ||
1746 | case 2: | ||
1747 | *valp = byterev_2(*valp); | ||
1748 | break; | ||
1749 | case 4: | ||
1750 | *valp = byterev_4(*valp); | ||
1751 | break; | ||
1752 | #ifdef __powerpc64__ | ||
1753 | case 8: | ||
1754 | *valp = byterev_8(*valp); | ||
1755 | break; | ||
1756 | #endif | ||
1757 | } | ||
1758 | } | ||
1759 | |||
1760 | /* | ||
1761 | * Emulate instructions that cause a transfer of control, | ||
1762 | * loads and stores, and a few other instructions. | ||
1763 | * Returns 1 if the step was emulated, 0 if not, | ||
1764 | * or -1 if the instruction is one that should not be stepped, | ||
1765 | * such as an rfid, or a mtmsrd that would clear MSR_RI. | ||
1766 | */ | ||
1767 | int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | ||
1768 | { | ||
1769 | struct instruction_op op; | ||
1770 | int r, err, size; | ||
1771 | unsigned long val; | ||
1772 | unsigned int cr; | ||
1773 | int i, rd, nb; | ||
1774 | |||
1775 | r = analyse_instr(&op, regs, instr); | ||
1776 | if (r != 0) | ||
1777 | return r; | ||
1778 | |||
1779 | err = 0; | ||
1780 | size = GETSIZE(op.type); | ||
1781 | switch (op.type & INSTR_TYPE_MASK) { | ||
1782 | case CACHEOP: | ||
1783 | if (!address_ok(regs, op.ea, 8)) | ||
1784 | return 0; | ||
1785 | switch (op.type & CACHEOP_MASK) { | ||
1786 | case DCBST: | ||
1787 | __cacheop_user_asmx(op.ea, err, "dcbst"); | ||
1788 | break; | ||
1789 | case DCBF: | ||
1790 | __cacheop_user_asmx(op.ea, err, "dcbf"); | ||
1791 | break; | ||
1792 | case DCBTST: | ||
1793 | if (op.reg == 0) | ||
1794 | prefetchw((void *) op.ea); | ||
1795 | break; | ||
1796 | case DCBT: | ||
1797 | if (op.reg == 0) | ||
1798 | prefetch((void *) op.ea); | ||
1799 | break; | ||
1800 | case ICBI: | ||
1801 | __cacheop_user_asmx(op.ea, err, "icbi"); | ||
1802 | break; | ||
1803 | } | ||
1804 | if (err) | ||
1805 | return 0; | ||
1806 | goto instr_done; | ||
1807 | |||
1808 | case LARX: | ||
1809 | if (regs->msr & MSR_LE) | ||
1810 | return 0; | ||
1811 | if (op.ea & (size - 1)) | ||
1812 | break; /* can't handle misaligned */ | ||
1813 | err = -EFAULT; | ||
1814 | if (!address_ok(regs, op.ea, size)) | ||
1815 | goto ldst_done; | ||
1816 | err = 0; | ||
1817 | switch (size) { | ||
1818 | case 4: | ||
1819 | __get_user_asmx(val, op.ea, err, "lwarx"); | ||
1820 | break; | ||
1821 | case 8: | ||
1822 | __get_user_asmx(val, op.ea, err, "ldarx"); | ||
1823 | break; | ||
1824 | default: | ||
1825 | return 0; | ||
1826 | } | ||
1827 | if (!err) | ||
1828 | regs->gpr[op.reg] = val; | ||
1829 | goto ldst_done; | ||
1830 | |||
1831 | case STCX: | ||
1832 | if (regs->msr & MSR_LE) | ||
1833 | return 0; | ||
1834 | if (op.ea & (size - 1)) | ||
1835 | break; /* can't handle misaligned */ | ||
1836 | err = -EFAULT; | ||
1837 | if (!address_ok(regs, op.ea, size)) | ||
1838 | goto ldst_done; | ||
1839 | err = 0; | ||
1840 | switch (size) { | ||
1841 | case 4: | ||
1842 | __put_user_asmx(op.val, op.ea, err, "stwcx.", cr); | ||
1843 | break; | ||
1844 | case 8: | ||
1845 | __put_user_asmx(op.val, op.ea, err, "stdcx.", cr); | ||
1846 | break; | ||
1847 | default: | ||
1848 | return 0; | ||
1849 | } | ||
1850 | if (!err) | ||
1851 | regs->ccr = (regs->ccr & 0x0fffffff) | | ||
1852 | (cr & 0xe0000000) | | ||
1853 | ((regs->xer >> 3) & 0x10000000); | ||
1854 | goto ldst_done; | ||
1855 | |||
1856 | case LOAD: | ||
1857 | if (regs->msr & MSR_LE) | ||
1858 | return 0; | ||
1859 | err = read_mem(®s->gpr[op.reg], op.ea, size, regs); | ||
1860 | if (!err) { | ||
1861 | if (op.type & SIGNEXT) | ||
1862 | do_signext(®s->gpr[op.reg], size); | ||
1863 | if (op.type & BYTEREV) | ||
1864 | do_byterev(®s->gpr[op.reg], size); | ||
1865 | } | ||
1866 | goto ldst_done; | ||
1867 | |||
1868 | case LOAD_FP: | ||
1869 | if (regs->msr & MSR_LE) | ||
1870 | return 0; | ||
1871 | if (size == 4) | ||
1872 | err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); | ||
1873 | else | ||
1874 | err = do_fp_load(op.reg, do_lfd, op.ea, size, regs); | ||
1875 | goto ldst_done; | ||
1876 | |||
1877 | #ifdef CONFIG_ALTIVEC | ||
1878 | case LOAD_VMX: | ||
1879 | if (regs->msr & MSR_LE) | ||
1880 | return 0; | ||
1881 | err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); | ||
1882 | goto ldst_done; | ||
1883 | #endif | ||
1884 | #ifdef CONFIG_VSX | ||
1885 | case LOAD_VSX: | ||
1886 | if (regs->msr & MSR_LE) | ||
1887 | return 0; | ||
1888 | err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); | ||
1889 | goto ldst_done; | ||
1890 | #endif | ||
1891 | case LOAD_MULTI: | ||
1892 | if (regs->msr & MSR_LE) | ||
1893 | return 0; | ||
1894 | rd = op.reg; | ||
1895 | for (i = 0; i < size; i += 4) { | ||
1896 | nb = size - i; | ||
1897 | if (nb > 4) | ||
1898 | nb = 4; | ||
1899 | err = read_mem(®s->gpr[rd], op.ea, nb, regs); | ||
1900 | if (err) | ||
1901 | return 0; | ||
1902 | if (nb < 4) /* left-justify last bytes */ | ||
1903 | regs->gpr[rd] <<= 32 - 8 * nb; | ||
1904 | op.ea += 4; | ||
1905 | ++rd; | ||
1906 | } | ||
1907 | goto instr_done; | ||
1908 | |||
1909 | case STORE: | ||
1910 | if (regs->msr & MSR_LE) | ||
1911 | return 0; | ||
1912 | if ((op.type & UPDATE) && size == sizeof(long) && | ||
1913 | op.reg == 1 && op.update_reg == 1 && | ||
1914 | !(regs->msr & MSR_PR) && | ||
1915 | op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { | ||
1916 | err = handle_stack_update(op.ea, regs); | ||
1917 | goto ldst_done; | ||
1918 | } | ||
1919 | err = write_mem(op.val, op.ea, size, regs); | ||
1920 | goto ldst_done; | ||
1921 | |||
1922 | case STORE_FP: | ||
1923 | if (regs->msr & MSR_LE) | ||
1924 | return 0; | ||
1925 | if (size == 4) | ||
1926 | err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); | ||
1927 | else | ||
1928 | err = do_fp_store(op.reg, do_stfd, op.ea, size, regs); | ||
1929 | goto ldst_done; | ||
1930 | |||
1931 | #ifdef CONFIG_ALTIVEC | ||
1932 | case STORE_VMX: | ||
1933 | if (regs->msr & MSR_LE) | ||
1934 | return 0; | ||
1935 | err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); | ||
1936 | goto ldst_done; | ||
1937 | #endif | ||
1938 | #ifdef CONFIG_VSX | ||
1939 | case STORE_VSX: | ||
1940 | if (regs->msr & MSR_LE) | ||
1941 | return 0; | ||
1942 | err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); | ||
1943 | goto ldst_done; | ||
1944 | #endif | ||
1945 | case STORE_MULTI: | ||
1946 | if (regs->msr & MSR_LE) | ||
1947 | return 0; | ||
1948 | rd = op.reg; | ||
1949 | for (i = 0; i < size; i += 4) { | ||
1950 | val = regs->gpr[rd]; | ||
1951 | nb = size - i; | ||
1952 | if (nb > 4) | ||
1953 | nb = 4; | ||
1954 | else | ||
1955 | val >>= 32 - 8 * nb; | ||
1956 | err = write_mem(val, op.ea, nb, regs); | ||
1957 | if (err) | ||
1958 | return 0; | ||
1959 | op.ea += 4; | ||
1960 | ++rd; | ||
1961 | } | ||
1962 | goto instr_done; | ||
1963 | |||
1964 | case MFMSR: | ||
1965 | regs->gpr[op.reg] = regs->msr & MSR_MASK; | ||
1966 | goto instr_done; | ||
1967 | |||
1968 | case MTMSR: | ||
1969 | val = regs->gpr[op.reg]; | ||
1970 | if ((val & MSR_RI) == 0) | ||
1971 | /* can't step mtmsr[d] that would clear MSR_RI */ | ||
1972 | return -1; | ||
1973 | /* here op.val is the mask of bits to change */ | ||
1974 | regs->msr = (regs->msr & ~op.val) | (val & op.val); | ||
1975 | goto instr_done; | ||
1976 | |||
1977 | #ifdef CONFIG_PPC64 | ||
1978 | case SYSCALL: /* sc */ | ||
1979 | /* | ||
1980 | * N.B. this uses knowledge about how the syscall | ||
1981 | * entry code works. If that is changed, this will | ||
1982 | * need to be changed also. | ||
1983 | */ | ||
1984 | if (regs->gpr[0] == 0x1ebe && | ||
1985 | cpu_has_feature(CPU_FTR_REAL_LE)) { | ||
1986 | regs->msr ^= MSR_LE; | ||
1987 | goto instr_done; | ||
1988 | } | ||
1989 | regs->gpr[9] = regs->gpr[13]; | ||
1990 | regs->gpr[10] = MSR_KERNEL; | ||
1991 | regs->gpr[11] = regs->nip + 4; | ||
1992 | regs->gpr[12] = regs->msr & MSR_MASK; | ||
1993 | regs->gpr[13] = (unsigned long) get_paca(); | ||
1994 | regs->nip = (unsigned long) &system_call_common; | ||
1995 | regs->msr = MSR_KERNEL; | ||
1996 | return 1; | ||
1997 | |||
1998 | case RFI: | ||
1999 | return -1; | ||
2000 | #endif | ||
2001 | } | ||
2002 | return 0; | ||
2003 | |||
2004 | ldst_done: | ||
2005 | if (err) | ||
2006 | return 0; | ||
2007 | if (op.type & UPDATE) | ||
2008 | regs->gpr[op.update_reg] = op.ea; | ||
2009 | |||
2010 | instr_done: | ||
2011 | regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); | ||
2012 | return 1; | ||
1737 | } | 2013 | } |
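Editor's note: the net effect of the sstep.c changes above is a split of the old monolithic emulate_step(). analyse_instr() decodes the instruction into a struct instruction_op, fully executes only the register-to-register cases (returning 1 with nip advanced), and returns 0 with a filled-in op for loads, stores, cache ops, mtmsr/mfspr and similar; emulate_step() is now one consumer of that decode. A hypothetical sketch of another consumer follows; handle_kernel_load() is an assumed helper, not anything in this patch:

/* Hypothetical consumer of the new interface.  Names other than
 * analyse_instr(), instruction_op, INSTR_TYPE_MASK, LOAD and GETSIZE()
 * are assumptions for this sketch.
 */
static int try_emulate(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;

	if (analyse_instr(&op, regs, instr))
		return 1;	/* executed by modifying *regs, nip advanced */

	if ((op.type & INSTR_TYPE_MASK) == LOAD)
		return handle_kernel_load(regs, op.reg, op.ea,
					  GETSIZE(op.type));	/* assumed */

	return 0;	/* let the caller take the interrupt instead */
}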
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index d0130fff20e5..325e861616a1 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile | |||
@@ -34,3 +34,4 @@ obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o | |||
34 | obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o | 34 | obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o |
35 | obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o | 35 | obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o |
36 | obj-$(CONFIG_HIGHMEM) += highmem.o | 36 | obj-$(CONFIG_HIGHMEM) += highmem.o |
37 | obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o | ||
diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/mm/copro_fault.c index 641e7273d75a..0f9939e693df 100644 --- a/arch/powerpc/platforms/cell/spu_fault.c +++ b/arch/powerpc/mm/copro_fault.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * SPU mm fault handler | 2 | * CoProcessor (SPU/AFU) mm fault handler |
3 | * | 3 | * |
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2007 | 4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2007 |
5 | * | 5 | * |
@@ -23,16 +23,17 @@ | |||
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
25 | #include <linux/export.h> | 25 | #include <linux/export.h> |
26 | 26 | #include <asm/reg.h> | |
27 | #include <asm/copro.h> | ||
27 | #include <asm/spu.h> | 28 | #include <asm/spu.h> |
28 | #include <asm/spu_csa.h> | 29 | #include <misc/cxl.h> |
29 | 30 | ||
30 | /* | 31 | /* |
31 | * This ought to be kept in sync with the powerpc specific do_page_fault | 32 | * This ought to be kept in sync with the powerpc specific do_page_fault |
32 | * function. Currently, there are a few corner cases that, fortunately, | 33 | * function. Currently, there are a few corner cases that, fortunately, |
33 | * we haven't had to handle. | 34 | * we haven't had to handle. |
34 | */ | 35 | */ |
35 | int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, | 36 | int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, |
36 | unsigned long dsisr, unsigned *flt) | 37 | unsigned long dsisr, unsigned *flt) |
37 | { | 38 | { |
38 | struct vm_area_struct *vma; | 39 | struct vm_area_struct *vma; |
@@ -58,12 +59,12 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, | |||
58 | goto out_unlock; | 59 | goto out_unlock; |
59 | } | 60 | } |
60 | 61 | ||
61 | is_write = dsisr & MFC_DSISR_ACCESS_PUT; | 62 | is_write = dsisr & DSISR_ISSTORE; |
62 | if (is_write) { | 63 | if (is_write) { |
63 | if (!(vma->vm_flags & VM_WRITE)) | 64 | if (!(vma->vm_flags & VM_WRITE)) |
64 | goto out_unlock; | 65 | goto out_unlock; |
65 | } else { | 66 | } else { |
66 | if (dsisr & MFC_DSISR_ACCESS_DENIED) | 67 | if (dsisr & DSISR_PROTFAULT) |
67 | goto out_unlock; | 68 | goto out_unlock; |
68 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) | 69 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) |
69 | goto out_unlock; | 70 | goto out_unlock; |
@@ -91,4 +92,58 @@ out_unlock: | |||
91 | up_read(&mm->mmap_sem); | 92 | up_read(&mm->mmap_sem); |
92 | return ret; | 93 | return ret; |
93 | } | 94 | } |
94 | EXPORT_SYMBOL_GPL(spu_handle_mm_fault); | 95 | EXPORT_SYMBOL_GPL(copro_handle_mm_fault); |
96 | |||
97 | int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) | ||
98 | { | ||
99 | u64 vsid; | ||
100 | int psize, ssize; | ||
101 | |||
102 | slb->esid = (ea & ESID_MASK) | SLB_ESID_V; | ||
103 | |||
104 | switch (REGION_ID(ea)) { | ||
105 | case USER_REGION_ID: | ||
106 | pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); | ||
107 | psize = get_slice_psize(mm, ea); | ||
108 | ssize = user_segment_size(ea); | ||
109 | vsid = get_vsid(mm->context.id, ea, ssize); | ||
110 | break; | ||
111 | case VMALLOC_REGION_ID: | ||
112 | pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea); | ||
113 | if (ea < VMALLOC_END) | ||
114 | psize = mmu_vmalloc_psize; | ||
115 | else | ||
116 | psize = mmu_io_psize; | ||
117 | ssize = mmu_kernel_ssize; | ||
118 | vsid = get_kernel_vsid(ea, mmu_kernel_ssize); | ||
119 | break; | ||
120 | case KERNEL_REGION_ID: | ||
121 | pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea); | ||
122 | psize = mmu_linear_psize; | ||
123 | ssize = mmu_kernel_ssize; | ||
124 | vsid = get_kernel_vsid(ea, mmu_kernel_ssize); | ||
125 | break; | ||
126 | default: | ||
127 | pr_debug("%s: invalid region access at %016llx\n", __func__, ea); | ||
128 | return 1; | ||
129 | } | ||
130 | |||
131 | vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER; | ||
132 | |||
133 | vsid |= mmu_psize_defs[psize].sllp | | ||
134 | ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0); | ||
135 | |||
136 | slb->vsid = vsid; | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | EXPORT_SYMBOL_GPL(copro_calculate_slb); | ||
141 | |||
142 | void copro_flush_all_slbs(struct mm_struct *mm) | ||
143 | { | ||
144 | #ifdef CONFIG_SPU_BASE | ||
145 | spu_flush_all_slbs(mm); | ||
146 | #endif | ||
147 | cxl_slbia(mm); | ||
148 | } | ||
149 | EXPORT_SYMBOL_GPL(copro_flush_all_slbs); | ||
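Editor's note: the exports above make the former SPU-only fault path reusable by other coherent coprocessors such as the cxl driver: copro_handle_mm_fault() resolves a faulting effective address against a task's mm, and copro_calculate_slb() builds the esid/vsid pair a device needs in its SLB-like translation cache. A rough sketch of how a driver fault handler might combine them; cop_write_slb() and the overall flow are assumptions, not code from this series:

/* Rough, hypothetical driver fault path built on the new helpers.
 * cop_write_slb() is an assumed device-specific hook; the split between
 * page faults and segment faults is simplified here.
 */
static int cop_service_fault(struct mm_struct *mm, u64 ea, unsigned long dsisr)
{
	struct copro_slb slb;
	unsigned int flt;
	int rc;

	/* Resolve the faulting address against the task's mm. */
	rc = copro_handle_mm_fault(mm, ea, dsisr, &flt);
	if (rc)
		return rc;

	/* Rebuild the segment mapping and hand it to the device. */
	rc = copro_calculate_slb(mm, ea, &slb);
	if (rc)
		return rc;

	cop_write_slb(slb.esid, slb.vsid);	/* assumed hook */
	return 0;
}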
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 51ab9e7e6c39..24b3f4949df4 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/magic.h> | 33 | #include <linux/magic.h> |
34 | #include <linux/ratelimit.h> | 34 | #include <linux/ratelimit.h> |
35 | #include <linux/context_tracking.h> | 35 | #include <linux/context_tracking.h> |
36 | #include <linux/hugetlb.h> | ||
36 | 37 | ||
37 | #include <asm/firmware.h> | 38 | #include <asm/firmware.h> |
38 | #include <asm/page.h> | 39 | #include <asm/page.h> |
@@ -114,22 +115,37 @@ static int store_updates_sp(struct pt_regs *regs) | |||
114 | #define MM_FAULT_CONTINUE -1 | 115 | #define MM_FAULT_CONTINUE -1 |
115 | #define MM_FAULT_ERR(sig) (sig) | 116 | #define MM_FAULT_ERR(sig) (sig) |
116 | 117 | ||
117 | static int do_sigbus(struct pt_regs *regs, unsigned long address) | 118 | static int do_sigbus(struct pt_regs *regs, unsigned long address, |
119 | unsigned int fault) | ||
118 | { | 120 | { |
119 | siginfo_t info; | 121 | siginfo_t info; |
122 | unsigned int lsb = 0; | ||
120 | 123 | ||
121 | up_read(¤t->mm->mmap_sem); | 124 | up_read(¤t->mm->mmap_sem); |
122 | 125 | ||
123 | if (user_mode(regs)) { | 126 | if (!user_mode(regs)) |
124 | current->thread.trap_nr = BUS_ADRERR; | 127 | return MM_FAULT_ERR(SIGBUS); |
125 | info.si_signo = SIGBUS; | 128 | |
126 | info.si_errno = 0; | 129 | current->thread.trap_nr = BUS_ADRERR; |
127 | info.si_code = BUS_ADRERR; | 130 | info.si_signo = SIGBUS; |
128 | info.si_addr = (void __user *)address; | 131 | info.si_errno = 0; |
129 | force_sig_info(SIGBUS, &info, current); | 132 | info.si_code = BUS_ADRERR; |
130 | return MM_FAULT_RETURN; | 133 | info.si_addr = (void __user *)address; |
134 | #ifdef CONFIG_MEMORY_FAILURE | ||
135 | if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { | ||
136 | pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n", | ||
137 | current->comm, current->pid, address); | ||
138 | info.si_code = BUS_MCEERR_AR; | ||
131 | } | 139 | } |
132 | return MM_FAULT_ERR(SIGBUS); | 140 | |
141 | if (fault & VM_FAULT_HWPOISON_LARGE) | ||
142 | lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); | ||
143 | if (fault & VM_FAULT_HWPOISON) | ||
144 | lsb = PAGE_SHIFT; | ||
145 | #endif | ||
146 | info.si_addr_lsb = lsb; | ||
147 | force_sig_info(SIGBUS, &info, current); | ||
148 | return MM_FAULT_RETURN; | ||
133 | } | 149 | } |
134 | 150 | ||
135 | static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) | 151 | static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) |
@@ -170,11 +186,8 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) | |||
170 | return MM_FAULT_RETURN; | 186 | return MM_FAULT_RETURN; |
171 | } | 187 | } |
172 | 188 | ||
173 | /* Bus error. x86 handles HWPOISON here, we'll add this if/when | 189 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) |
174 | * we support the feature in HW | 190 | return do_sigbus(regs, addr, fault); |
175 | */ | ||
176 | if (fault & VM_FAULT_SIGBUS) | ||
177 | return do_sigbus(regs, addr); | ||
178 | 191 | ||
179 | /* We don't understand the fault code, this is fatal */ | 192 | /* We don't understand the fault code, this is fatal */ |
180 | BUG(); | 193 | BUG(); |
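Editor's note: the reworked do_sigbus() above distinguishes ordinary bus errors from hardware memory failures. For a poisoned page it reports BUS_MCEERR_AR and fills in si_addr_lsb so userspace knows how much of the reported address is significant (PAGE_SHIFT for a base page, the hugepage shift when VM_FAULT_HWPOISON_LARGE is set). For illustration only, a userspace handler consuming those fields might look like the sketch below; none of this is part of the patch:

/* Userspace illustration (not from the patch): consume si_addr_lsb and
 * BUS_MCEERR_AR in a SIGBUS handler.  A real handler would avoid stdio,
 * which is not async-signal-safe.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>

static void sigbus_handler(int sig, siginfo_t *info, void *ctx)
{
	if (info->si_code == BUS_MCEERR_AR)
		fprintf(stderr, "poisoned memory at %p, granularity %zu bytes\n",
			info->si_addr, (size_t)1 << info->si_addr_lsb);
}

static void install_sigbus_handler(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);
}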
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index afc0a8295f84..ae4962a06476 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <asm/kexec.h> | 29 | #include <asm/kexec.h> |
30 | #include <asm/ppc-opcode.h> | 30 | #include <asm/ppc-opcode.h> |
31 | 31 | ||
32 | #include <misc/cxl.h> | ||
33 | |||
32 | #ifdef DEBUG_LOW | 34 | #ifdef DEBUG_LOW |
33 | #define DBG_LOW(fmt...) udbg_printf(fmt) | 35 | #define DBG_LOW(fmt...) udbg_printf(fmt) |
34 | #else | 36 | #else |
@@ -149,9 +151,11 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) | |||
149 | static inline void tlbie(unsigned long vpn, int psize, int apsize, | 151 | static inline void tlbie(unsigned long vpn, int psize, int apsize, |
150 | int ssize, int local) | 152 | int ssize, int local) |
151 | { | 153 | { |
152 | unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); | 154 | unsigned int use_local; |
153 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); | 155 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
154 | 156 | ||
157 | use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use(); | ||
158 | |||
155 | if (use_local) | 159 | if (use_local) |
156 | use_local = mmu_psize_defs[psize].tlbiel; | 160 | use_local = mmu_psize_defs[psize].tlbiel; |
157 | if (lock_tlbie && !use_local) | 161 | if (lock_tlbie && !use_local) |
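The tlbie() change above stops using the local tlbiel form whenever a cxl context is active, since a CAPI accelerator may be caching translations that only a global tlbie will invalidate. A hypothetical sketch of the kind of global counter cxl_ctx_in_use() could be backed by; the real implementation lives in the cxl driver and is not part of this hunk:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t cxl_use_count = ATOMIC_INIT(0);

/* true while any cxl context could have translations cached */
static inline bool cxl_ctx_in_use(void)
{
	return atomic_read(&cxl_use_count) != 0;
}

static inline void cxl_ctx_get(void)
{
	atomic_inc(&cxl_use_count);
}

static inline void cxl_ctx_put(void)
{
	atomic_dec(&cxl_use_count);
}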
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index daee7f4e5a14..d5339a3b9945 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include <asm/cacheflush.h> | 51 | #include <asm/cacheflush.h> |
52 | #include <asm/cputable.h> | 52 | #include <asm/cputable.h> |
53 | #include <asm/sections.h> | 53 | #include <asm/sections.h> |
54 | #include <asm/spu.h> | 54 | #include <asm/copro.h> |
55 | #include <asm/udbg.h> | 55 | #include <asm/udbg.h> |
56 | #include <asm/code-patching.h> | 56 | #include <asm/code-patching.h> |
57 | #include <asm/fadump.h> | 57 | #include <asm/fadump.h> |
@@ -92,12 +92,14 @@ extern unsigned long dart_tablebase; | |||
92 | 92 | ||
93 | static unsigned long _SDR1; | 93 | static unsigned long _SDR1; |
94 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; | 94 | struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; |
95 | EXPORT_SYMBOL_GPL(mmu_psize_defs); | ||
95 | 96 | ||
96 | struct hash_pte *htab_address; | 97 | struct hash_pte *htab_address; |
97 | unsigned long htab_size_bytes; | 98 | unsigned long htab_size_bytes; |
98 | unsigned long htab_hash_mask; | 99 | unsigned long htab_hash_mask; |
99 | EXPORT_SYMBOL_GPL(htab_hash_mask); | 100 | EXPORT_SYMBOL_GPL(htab_hash_mask); |
100 | int mmu_linear_psize = MMU_PAGE_4K; | 101 | int mmu_linear_psize = MMU_PAGE_4K; |
102 | EXPORT_SYMBOL_GPL(mmu_linear_psize); | ||
101 | int mmu_virtual_psize = MMU_PAGE_4K; | 103 | int mmu_virtual_psize = MMU_PAGE_4K; |
102 | int mmu_vmalloc_psize = MMU_PAGE_4K; | 104 | int mmu_vmalloc_psize = MMU_PAGE_4K; |
103 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 105 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
@@ -105,6 +107,7 @@ int mmu_vmemmap_psize = MMU_PAGE_4K; | |||
105 | #endif | 107 | #endif |
106 | int mmu_io_psize = MMU_PAGE_4K; | 108 | int mmu_io_psize = MMU_PAGE_4K; |
107 | int mmu_kernel_ssize = MMU_SEGSIZE_256M; | 109 | int mmu_kernel_ssize = MMU_SEGSIZE_256M; |
110 | EXPORT_SYMBOL_GPL(mmu_kernel_ssize); | ||
108 | int mmu_highuser_ssize = MMU_SEGSIZE_256M; | 111 | int mmu_highuser_ssize = MMU_SEGSIZE_256M; |
109 | u16 mmu_slb_size = 64; | 112 | u16 mmu_slb_size = 64; |
110 | EXPORT_SYMBOL_GPL(mmu_slb_size); | 113 | EXPORT_SYMBOL_GPL(mmu_slb_size); |
@@ -333,70 +336,69 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, | |||
333 | return 0; | 336 | return 0; |
334 | 337 | ||
335 | prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size); | 338 | prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size); |
336 | if (prop != NULL) { | 339 | if (!prop) |
337 | pr_info("Page sizes from device-tree:\n"); | 340 | return 0; |
338 | size /= 4; | 341 | |
339 | cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); | 342 | pr_info("Page sizes from device-tree:\n"); |
340 | while(size > 0) { | 343 | size /= 4; |
341 | unsigned int base_shift = be32_to_cpu(prop[0]); | 344 | cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); |
342 | unsigned int slbenc = be32_to_cpu(prop[1]); | 345 | while(size > 0) { |
343 | unsigned int lpnum = be32_to_cpu(prop[2]); | 346 | unsigned int base_shift = be32_to_cpu(prop[0]); |
344 | struct mmu_psize_def *def; | 347 | unsigned int slbenc = be32_to_cpu(prop[1]); |
345 | int idx, base_idx; | 348 | unsigned int lpnum = be32_to_cpu(prop[2]); |
346 | 349 | struct mmu_psize_def *def; | |
347 | size -= 3; prop += 3; | 350 | int idx, base_idx; |
348 | base_idx = get_idx_from_shift(base_shift); | 351 | |
349 | if (base_idx < 0) { | 352 | size -= 3; prop += 3; |
350 | /* | 353 | base_idx = get_idx_from_shift(base_shift); |
351 | * skip the pte encoding also | 354 | if (base_idx < 0) { |
352 | */ | 355 | /* skip the pte encoding also */ |
353 | prop += lpnum * 2; size -= lpnum * 2; | 356 | prop += lpnum * 2; size -= lpnum * 2; |
357 | continue; | ||
358 | } | ||
359 | def = &mmu_psize_defs[base_idx]; | ||
360 | if (base_idx == MMU_PAGE_16M) | ||
361 | cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE; | ||
362 | |||
363 | def->shift = base_shift; | ||
364 | if (base_shift <= 23) | ||
365 | def->avpnm = 0; | ||
366 | else | ||
367 | def->avpnm = (1 << (base_shift - 23)) - 1; | ||
368 | def->sllp = slbenc; | ||
369 | /* | ||
370 | * We don't know for sure what's up with tlbiel, so | ||
371 | * for now we only set it for 4K and 64K pages | ||
372 | */ | ||
373 | if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K) | ||
374 | def->tlbiel = 1; | ||
375 | else | ||
376 | def->tlbiel = 0; | ||
377 | |||
378 | while (size > 0 && lpnum) { | ||
379 | unsigned int shift = be32_to_cpu(prop[0]); | ||
380 | int penc = be32_to_cpu(prop[1]); | ||
381 | |||
382 | prop += 2; size -= 2; | ||
383 | lpnum--; | ||
384 | |||
385 | idx = get_idx_from_shift(shift); | ||
386 | if (idx < 0) | ||
354 | continue; | 387 | continue; |
355 | } | 388 | |
356 | def = &mmu_psize_defs[base_idx]; | 389 | if (penc == -1) |
357 | if (base_idx == MMU_PAGE_16M) | 390 | pr_err("Invalid penc for base_shift=%d " |
358 | cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE; | 391 | "shift=%d\n", base_shift, shift); |
359 | 392 | ||
360 | def->shift = base_shift; | 393 | def->penc[idx] = penc; |
361 | if (base_shift <= 23) | 394 | pr_info("base_shift=%d: shift=%d, sllp=0x%04lx," |
362 | def->avpnm = 0; | 395 | " avpnm=0x%08lx, tlbiel=%d, penc=%d\n", |
363 | else | 396 | base_shift, shift, def->sllp, |
364 | def->avpnm = (1 << (base_shift - 23)) - 1; | 397 | def->avpnm, def->tlbiel, def->penc[idx]); |
365 | def->sllp = slbenc; | ||
366 | /* | ||
367 | * We don't know for sure what's up with tlbiel, so | ||
368 | * for now we only set it for 4K and 64K pages | ||
369 | */ | ||
370 | if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K) | ||
371 | def->tlbiel = 1; | ||
372 | else | ||
373 | def->tlbiel = 0; | ||
374 | |||
375 | while (size > 0 && lpnum) { | ||
376 | unsigned int shift = be32_to_cpu(prop[0]); | ||
377 | int penc = be32_to_cpu(prop[1]); | ||
378 | |||
379 | prop += 2; size -= 2; | ||
380 | lpnum--; | ||
381 | |||
382 | idx = get_idx_from_shift(shift); | ||
383 | if (idx < 0) | ||
384 | continue; | ||
385 | |||
386 | if (penc == -1) | ||
387 | pr_err("Invalid penc for base_shift=%d " | ||
388 | "shift=%d\n", base_shift, shift); | ||
389 | |||
390 | def->penc[idx] = penc; | ||
391 | pr_info("base_shift=%d: shift=%d, sllp=0x%04lx," | ||
392 | " avpnm=0x%08lx, tlbiel=%d, penc=%d\n", | ||
393 | base_shift, shift, def->sllp, | ||
394 | def->avpnm, def->tlbiel, def->penc[idx]); | ||
395 | } | ||
396 | } | 398 | } |
397 | return 1; | ||
398 | } | 399 | } |
399 | return 0; | 400 | |
401 | return 1; | ||
400 | } | 402 | } |
401 | 403 | ||
402 | #ifdef CONFIG_HUGETLB_PAGE | 404 | #ifdef CONFIG_HUGETLB_PAGE |
@@ -867,7 +869,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap) | |||
867 | } | 869 | } |
868 | 870 | ||
869 | #ifdef CONFIG_PPC_MM_SLICES | 871 | #ifdef CONFIG_PPC_MM_SLICES |
870 | unsigned int get_paca_psize(unsigned long addr) | 872 | static unsigned int get_paca_psize(unsigned long addr) |
871 | { | 873 | { |
872 | u64 lpsizes; | 874 | u64 lpsizes; |
873 | unsigned char *hpsizes; | 875 | unsigned char *hpsizes; |
@@ -901,10 +903,8 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr) | |||
901 | if (get_slice_psize(mm, addr) == MMU_PAGE_4K) | 903 | if (get_slice_psize(mm, addr) == MMU_PAGE_4K) |
902 | return; | 904 | return; |
903 | slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); | 905 | slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K); |
904 | #ifdef CONFIG_SPU_BASE | 906 | copro_flush_all_slbs(mm); |
905 | spu_flush_all_slbs(mm); | 907 | if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) { |
906 | #endif | ||
907 | if (get_paca_psize(addr) != MMU_PAGE_4K) { | ||
908 | get_paca()->context = mm->context; | 908 | get_paca()->context = mm->context; |
909 | slb_flush_and_rebolt(); | 909 | slb_flush_and_rebolt(); |
910 | } | 910 | } |
@@ -989,12 +989,11 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm, | |||
989 | * -1 - critical hash insertion error | 989 | * -1 - critical hash insertion error |
990 | * -2 - access not permitted by subpage protection mechanism | 990 | * -2 - access not permitted by subpage protection mechanism |
991 | */ | 991 | */ |
992 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | 992 | int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap) |
993 | { | 993 | { |
994 | enum ctx_state prev_state = exception_enter(); | 994 | enum ctx_state prev_state = exception_enter(); |
995 | pgd_t *pgdir; | 995 | pgd_t *pgdir; |
996 | unsigned long vsid; | 996 | unsigned long vsid; |
997 | struct mm_struct *mm; | ||
998 | pte_t *ptep; | 997 | pte_t *ptep; |
999 | unsigned hugeshift; | 998 | unsigned hugeshift; |
1000 | const struct cpumask *tmp; | 999 | const struct cpumask *tmp; |
@@ -1008,7 +1007,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
1008 | switch (REGION_ID(ea)) { | 1007 | switch (REGION_ID(ea)) { |
1009 | case USER_REGION_ID: | 1008 | case USER_REGION_ID: |
1010 | user_region = 1; | 1009 | user_region = 1; |
1011 | mm = current->mm; | ||
1012 | if (! mm) { | 1010 | if (! mm) { |
1013 | DBG_LOW(" user region with no mm !\n"); | 1011 | DBG_LOW(" user region with no mm !\n"); |
1014 | rc = 1; | 1012 | rc = 1; |
@@ -1019,7 +1017,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
1019 | vsid = get_vsid(mm->context.id, ea, ssize); | 1017 | vsid = get_vsid(mm->context.id, ea, ssize); |
1020 | break; | 1018 | break; |
1021 | case VMALLOC_REGION_ID: | 1019 | case VMALLOC_REGION_ID: |
1022 | mm = &init_mm; | ||
1023 | vsid = get_kernel_vsid(ea, mmu_kernel_ssize); | 1020 | vsid = get_kernel_vsid(ea, mmu_kernel_ssize); |
1024 | if (ea < VMALLOC_END) | 1021 | if (ea < VMALLOC_END) |
1025 | psize = mmu_vmalloc_psize; | 1022 | psize = mmu_vmalloc_psize; |
@@ -1104,7 +1101,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
1104 | WARN_ON(1); | 1101 | WARN_ON(1); |
1105 | } | 1102 | } |
1106 | #endif | 1103 | #endif |
1107 | check_paca_psize(ea, mm, psize, user_region); | 1104 | if (current->mm == mm) |
1105 | check_paca_psize(ea, mm, psize, user_region); | ||
1108 | 1106 | ||
1109 | goto bail; | 1107 | goto bail; |
1110 | } | 1108 | } |
@@ -1141,13 +1139,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
1141 | "to 4kB pages because of " | 1139 | "to 4kB pages because of " |
1142 | "non-cacheable mapping\n"); | 1140 | "non-cacheable mapping\n"); |
1143 | psize = mmu_vmalloc_psize = MMU_PAGE_4K; | 1141 | psize = mmu_vmalloc_psize = MMU_PAGE_4K; |
1144 | #ifdef CONFIG_SPU_BASE | 1142 | copro_flush_all_slbs(mm); |
1145 | spu_flush_all_slbs(mm); | ||
1146 | #endif | ||
1147 | } | 1143 | } |
1148 | } | 1144 | } |
1149 | 1145 | ||
1150 | check_paca_psize(ea, mm, psize, user_region); | 1146 | if (current->mm == mm) |
1147 | check_paca_psize(ea, mm, psize, user_region); | ||
1151 | #endif /* CONFIG_PPC_64K_PAGES */ | 1148 | #endif /* CONFIG_PPC_64K_PAGES */ |
1152 | 1149 | ||
1153 | #ifdef CONFIG_PPC_HAS_HASH_64K | 1150 | #ifdef CONFIG_PPC_HAS_HASH_64K |
@@ -1182,6 +1179,17 @@ bail: | |||
1182 | exception_exit(prev_state); | 1179 | exception_exit(prev_state); |
1183 | return rc; | 1180 | return rc; |
1184 | } | 1181 | } |
1182 | EXPORT_SYMBOL_GPL(hash_page_mm); | ||
1183 | |||
1184 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | ||
1185 | { | ||
1186 | struct mm_struct *mm = current->mm; | ||
1187 | |||
1188 | if (REGION_ID(ea) == VMALLOC_REGION_ID) | ||
1189 | mm = &init_mm; | ||
1190 | |||
1191 | return hash_page_mm(mm, ea, access, trap); | ||
1192 | } | ||
1185 | EXPORT_SYMBOL_GPL(hash_page); | 1193 | EXPORT_SYMBOL_GPL(hash_page); |
1186 | 1194 | ||
1187 | void hash_preload(struct mm_struct *mm, unsigned long ea, | 1195 | void hash_preload(struct mm_struct *mm, unsigned long ea, |
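hash_page_mm() lets a caller hash in a page on behalf of an arbitrary mm rather than current->mm, which is what a coprocessor context needs, while hash_page() keeps its old behaviour as a thin wrapper. A sketch of how a copro-style driver might call it; the helper name, access flags and the 0x300 trap value are illustrative assumptions, not lifted from the cxl code:

#include <linux/mm_types.h>
#include <asm/mmu.h>      /* hash_page_mm() prototype assumed in scope */
#include <asm/pgtable.h>  /* _PAGE_PRESENT, _PAGE_RW */

static int copro_fault_in(struct mm_struct *ctx_mm, unsigned long ea,
			  bool is_write)
{
	unsigned long access = _PAGE_PRESENT;

	if (is_write)
		access |= _PAGE_RW;

	/* 0x300 is the data storage interrupt vector, passed as "trap" */
	return hash_page_mm(ctx_mm, ea, access, 0x300);
}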
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index cff59f1bec23..cad68ff8eca5 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c | |||
@@ -106,11 +106,11 @@ unsigned long __max_low_memory = MAX_LOW_MEM; | |||
106 | void MMU_setup(void) | 106 | void MMU_setup(void) |
107 | { | 107 | { |
108 | /* Check for nobats option (used in mapin_ram). */ | 108 | /* Check for nobats option (used in mapin_ram). */ |
109 | if (strstr(cmd_line, "nobats")) { | 109 | if (strstr(boot_command_line, "nobats")) { |
110 | __map_without_bats = 1; | 110 | __map_without_bats = 1; |
111 | } | 111 | } |
112 | 112 | ||
113 | if (strstr(cmd_line, "noltlbs")) { | 113 | if (strstr(boot_command_line, "noltlbs")) { |
114 | __map_without_ltlbs = 1; | 114 | __map_without_ltlbs = 1; |
115 | } | 115 | } |
116 | #ifdef CONFIG_DEBUG_PAGEALLOC | 116 | #ifdef CONFIG_DEBUG_PAGEALLOC |
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 253b4b971c8a..3481556a1880 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
@@ -233,9 +233,6 @@ static void __meminit vmemmap_create_mapping(unsigned long start, | |||
233 | } | 233 | } |
234 | 234 | ||
235 | #ifdef CONFIG_MEMORY_HOTPLUG | 235 | #ifdef CONFIG_MEMORY_HOTPLUG |
236 | extern int htab_remove_mapping(unsigned long vstart, unsigned long vend, | ||
237 | int psize, int ssize); | ||
238 | |||
239 | static void vmemmap_remove_mapping(unsigned long start, | 236 | static void vmemmap_remove_mapping(unsigned long start, |
240 | unsigned long page_size) | 237 | unsigned long page_size) |
241 | { | 238 | { |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index e0f7a189c48e..8ebaac75c940 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -260,6 +260,60 @@ static int __init mark_nonram_nosave(void) | |||
260 | } | 260 | } |
261 | return 0; | 261 | return 0; |
262 | } | 262 | } |
263 | #else /* CONFIG_NEED_MULTIPLE_NODES */ | ||
264 | static int __init mark_nonram_nosave(void) | ||
265 | { | ||
266 | return 0; | ||
267 | } | ||
268 | #endif | ||
269 | |||
270 | static bool zone_limits_final; | ||
271 | |||
272 | static unsigned long max_zone_pfns[MAX_NR_ZONES] = { | ||
273 | [0 ... MAX_NR_ZONES - 1] = ~0UL | ||
274 | }; | ||
275 | |||
276 | /* | ||
277 | * Restrict the specified zone and all more restrictive zones | ||
278 | * to be below the specified pfn. May not be called after | ||
279 | * paging_init(). | ||
280 | */ | ||
281 | void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit) | ||
282 | { | ||
283 | int i; | ||
284 | |||
285 | if (WARN_ON(zone_limits_final)) | ||
286 | return; | ||
287 | |||
288 | for (i = zone; i >= 0; i--) { | ||
289 | if (max_zone_pfns[i] > pfn_limit) | ||
290 | max_zone_pfns[i] = pfn_limit; | ||
291 | } | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * Find the least restrictive zone that is entirely below the | ||
296 | * specified pfn limit. Returns < 0 if no suitable zone is found. | ||
297 | * | ||
298 | * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit | ||
299 | * systems -- the DMA limit can be higher than any possible real pfn. | ||
300 | */ | ||
301 | int dma_pfn_limit_to_zone(u64 pfn_limit) | ||
302 | { | ||
303 | enum zone_type top_zone = ZONE_NORMAL; | ||
304 | int i; | ||
305 | |||
306 | #ifdef CONFIG_HIGHMEM | ||
307 | top_zone = ZONE_HIGHMEM; | ||
308 | #endif | ||
309 | |||
310 | for (i = top_zone; i >= 0; i--) { | ||
311 | if (max_zone_pfns[i] <= pfn_limit) | ||
312 | return i; | ||
313 | } | ||
314 | |||
315 | return -EPERM; | ||
316 | } | ||
263 | 317 | ||
264 | /* | 318 | /* |
265 | * paging_init() sets up the page tables - in fact we've already done this. | 319 | * paging_init() sets up the page tables - in fact we've already done this. |
@@ -268,7 +322,7 @@ void __init paging_init(void) | |||
268 | { | 322 | { |
269 | unsigned long long total_ram = memblock_phys_mem_size(); | 323 | unsigned long long total_ram = memblock_phys_mem_size(); |
270 | phys_addr_t top_of_ram = memblock_end_of_DRAM(); | 324 | phys_addr_t top_of_ram = memblock_end_of_DRAM(); |
271 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 325 | enum zone_type top_zone; |
272 | 326 | ||
273 | #ifdef CONFIG_PPC32 | 327 | #ifdef CONFIG_PPC32 |
274 | unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1); | 328 | unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1); |
@@ -290,18 +344,20 @@ void __init paging_init(void) | |||
290 | (unsigned long long)top_of_ram, total_ram); | 344 | (unsigned long long)top_of_ram, total_ram); |
291 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", | 345 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", |
292 | (long int)((top_of_ram - total_ram) >> 20)); | 346 | (long int)((top_of_ram - total_ram) >> 20)); |
293 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 347 | |
294 | #ifdef CONFIG_HIGHMEM | 348 | #ifdef CONFIG_HIGHMEM |
295 | max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT; | 349 | top_zone = ZONE_HIGHMEM; |
296 | max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT; | 350 | limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT); |
297 | #else | 351 | #else |
298 | max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; | 352 | top_zone = ZONE_NORMAL; |
299 | #endif | 353 | #endif |
354 | |||
355 | limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT); | ||
356 | zone_limits_final = true; | ||
300 | free_area_init_nodes(max_zone_pfns); | 357 | free_area_init_nodes(max_zone_pfns); |
301 | 358 | ||
302 | mark_nonram_nosave(); | 359 | mark_nonram_nosave(); |
303 | } | 360 | } |
304 | #endif /* ! CONFIG_NEED_MULTIPLE_NODES */ | ||
305 | 361 | ||
306 | static void __init register_page_bootmem_info(void) | 362 | static void __init register_page_bootmem_info(void) |
307 | { | 363 | { |
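The new limit_zone_pfn() / dma_pfn_limit_to_zone() pair replaces the per-caller max_zone_pfns arrays: platform code clamps zones before paging_init(), and DMA setup code can later map a device's addressing limit back to a zone index. A usage sketch, assuming a config with ZONE_DMA; the calling functions are hypothetical, not from this patch:

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mmzone.h>
#include <asm/page.h>
/* limit_zone_pfn() / dma_pfn_limit_to_zone() prototypes assumed in scope */

/* called from platform setup, before paging_init() */
void __init example_limit_dma_zone(void)
{
	/* keep ZONE_DMA, and anything more restrictive, below 2 GiB */
	limit_zone_pfn(ZONE_DMA, 0x80000000UL >> PAGE_SHIFT);
}

/* pick a zone for a device that can only address dma_mask bytes */
int example_zone_for_device(u64 dma_mask)
{
	/* first pfn the device cannot reach */
	u64 pfn_limit = (dma_mask >> PAGE_SHIFT) + 1;

	return dma_pfn_limit_to_zone(pfn_limit);
}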
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index d7737a542fd7..649666d5d1c2 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -538,7 +538,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem, | |||
538 | */ | 538 | */ |
539 | static int numa_setup_cpu(unsigned long lcpu) | 539 | static int numa_setup_cpu(unsigned long lcpu) |
540 | { | 540 | { |
541 | int nid; | 541 | int nid = -1; |
542 | struct device_node *cpu; | 542 | struct device_node *cpu; |
543 | 543 | ||
544 | /* | 544 | /* |
@@ -555,19 +555,21 @@ static int numa_setup_cpu(unsigned long lcpu) | |||
555 | 555 | ||
556 | if (!cpu) { | 556 | if (!cpu) { |
557 | WARN_ON(1); | 557 | WARN_ON(1); |
558 | nid = 0; | 558 | if (cpu_present(lcpu)) |
559 | goto out; | 559 | goto out_present; |
560 | else | ||
561 | goto out; | ||
560 | } | 562 | } |
561 | 563 | ||
562 | nid = of_node_to_nid_single(cpu); | 564 | nid = of_node_to_nid_single(cpu); |
563 | 565 | ||
566 | out_present: | ||
564 | if (nid < 0 || !node_online(nid)) | 567 | if (nid < 0 || !node_online(nid)) |
565 | nid = first_online_node; | 568 | nid = first_online_node; |
566 | out: | ||
567 | map_cpu_to_node(lcpu, nid); | ||
568 | 569 | ||
570 | map_cpu_to_node(lcpu, nid); | ||
569 | of_node_put(cpu); | 571 | of_node_put(cpu); |
570 | 572 | out: | |
571 | return nid; | 573 | return nid; |
572 | } | 574 | } |
573 | 575 | ||
@@ -1127,20 +1129,11 @@ void __init do_init_bootmem(void) | |||
1127 | * even before we online them, so that we can use cpu_to_{node,mem} | 1129 | * even before we online them, so that we can use cpu_to_{node,mem} |
1128 | * early in boot, cf. smp_prepare_cpus(). | 1130 | * early in boot, cf. smp_prepare_cpus(). |
1129 | */ | 1131 | */ |
1130 | for_each_possible_cpu(cpu) { | 1132 | for_each_present_cpu(cpu) { |
1131 | cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE, | 1133 | numa_setup_cpu((unsigned long)cpu); |
1132 | (void *)(unsigned long)cpu); | ||
1133 | } | 1134 | } |
1134 | } | 1135 | } |
1135 | 1136 | ||
1136 | void __init paging_init(void) | ||
1137 | { | ||
1138 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
1139 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
1140 | max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT; | ||
1141 | free_area_init_nodes(max_zone_pfns); | ||
1142 | } | ||
1143 | |||
1144 | static int __init early_numa(char *p) | 1137 | static int __init early_numa(char *p) |
1145 | { | 1138 | { |
1146 | if (!p) | 1139 | if (!p) |
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index c695943a513c..c90e602677c9 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c | |||
@@ -48,7 +48,7 @@ static inline int pte_looks_normal(pte_t pte) | |||
48 | (_PAGE_PRESENT | _PAGE_USER); | 48 | (_PAGE_PRESENT | _PAGE_USER); |
49 | } | 49 | } |
50 | 50 | ||
51 | struct page * maybe_pte_to_page(pte_t pte) | 51 | static struct page *maybe_pte_to_page(pte_t pte) |
52 | { | 52 | { |
53 | unsigned long pfn = pte_pfn(pte); | 53 | unsigned long pfn = pte_pfn(pte); |
54 | struct page *page; | 54 | struct page *page; |
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 0399a6702958..6e450ca66526 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize, | |||
46 | return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot; | 46 | return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot; |
47 | } | 47 | } |
48 | 48 | ||
49 | #define slb_vsid_shift(ssize) \ | ||
50 | ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T) | ||
51 | |||
52 | static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, | 49 | static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, |
53 | unsigned long flags) | 50 | unsigned long flags) |
54 | { | 51 | { |
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index b0c75cc15efc..8d7bda94d196 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c | |||
@@ -30,9 +30,11 @@ | |||
30 | #include <linux/err.h> | 30 | #include <linux/err.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/export.h> | 32 | #include <linux/export.h> |
33 | #include <linux/hugetlb.h> | ||
33 | #include <asm/mman.h> | 34 | #include <asm/mman.h> |
34 | #include <asm/mmu.h> | 35 | #include <asm/mmu.h> |
35 | #include <asm/spu.h> | 36 | #include <asm/copro.h> |
37 | #include <asm/hugetlb.h> | ||
36 | 38 | ||
37 | /* some sanity checks */ | 39 | /* some sanity checks */ |
38 | #if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE | 40 | #if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE |
@@ -232,9 +234,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz | |||
232 | 234 | ||
233 | spin_unlock_irqrestore(&slice_convert_lock, flags); | 235 | spin_unlock_irqrestore(&slice_convert_lock, flags); |
234 | 236 | ||
235 | #ifdef CONFIG_SPU_BASE | 237 | copro_flush_all_slbs(mm); |
236 | spu_flush_all_slbs(mm); | ||
237 | #endif | ||
238 | } | 238 | } |
239 | 239 | ||
240 | /* | 240 | /* |
@@ -671,9 +671,7 @@ void slice_set_psize(struct mm_struct *mm, unsigned long address, | |||
671 | 671 | ||
672 | spin_unlock_irqrestore(&slice_convert_lock, flags); | 672 | spin_unlock_irqrestore(&slice_convert_lock, flags); |
673 | 673 | ||
674 | #ifdef CONFIG_SPU_BASE | 674 | copro_flush_all_slbs(mm); |
675 | spu_flush_all_slbs(mm); | ||
676 | #endif | ||
677 | } | 675 | } |
678 | 676 | ||
679 | void slice_set_range_psize(struct mm_struct *mm, unsigned long start, | 677 | void slice_set_range_psize(struct mm_struct *mm, unsigned long start, |
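Several hunks above (demote_segment_4k(), hash_page_mm() and the two slice.c sites) replace the #ifdef CONFIG_SPU_BASE / spu_flush_all_slbs() pattern with a single unconditional copro_flush_all_slbs() call. One plausible shape for that wrapper is sketched below; the real helper is added elsewhere in this series and is not shown in this excerpt, and cxl_slbia() stands in for whatever SLB-invalidation hook the cxl driver exports, so treat the names as assumptions:

#include <linux/mm_types.h>
#include <asm/spu.h>
#include <misc/cxl.h>

void copro_flush_all_slbs(struct mm_struct *mm)
{
	/* old behaviour, now hidden behind one call site */
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	/* new: also invalidate SLBs cached by CAPI accelerators */
	cxl_slbia(mm);
}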
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c index f75301f2c85f..6adf55fa5d88 100644 --- a/arch/powerpc/oprofile/backtrace.c +++ b/arch/powerpc/oprofile/backtrace.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
14 | #include <asm/compat.h> | 14 | #include <asm/compat.h> |
15 | #include <asm/oprofile_impl.h> | ||
15 | 16 | ||
16 | #define STACK_SP(STACK) *(STACK) | 17 | #define STACK_SP(STACK) *(STACK) |
17 | 18 | ||
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index b7cd00b0171e..a6995d4e93d4 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -59,9 +59,9 @@ struct cpu_hw_events { | |||
59 | struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES]; | 59 | struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES]; |
60 | }; | 60 | }; |
61 | 61 | ||
62 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | 62 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); |
63 | 63 | ||
64 | struct power_pmu *ppmu; | 64 | static struct power_pmu *ppmu; |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Normally, to ignore kernel events we set the FCS (freeze counters | 67 | * Normally, to ignore kernel events we set the FCS (freeze counters |
@@ -124,7 +124,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) | |||
124 | 124 | ||
125 | static inline void power_pmu_bhrb_enable(struct perf_event *event) {} | 125 | static inline void power_pmu_bhrb_enable(struct perf_event *event) {} |
126 | static inline void power_pmu_bhrb_disable(struct perf_event *event) {} | 126 | static inline void power_pmu_bhrb_disable(struct perf_event *event) {} |
127 | void power_pmu_flush_branch_stack(void) {} | 127 | static void power_pmu_flush_branch_stack(void) {} |
128 | static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} | 128 | static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} |
129 | static void pmao_restore_workaround(bool ebb) { } | 129 | static void pmao_restore_workaround(bool ebb) { } |
130 | #endif /* CONFIG_PPC32 */ | 130 | #endif /* CONFIG_PPC32 */ |
@@ -375,7 +375,7 @@ static void power_pmu_bhrb_disable(struct perf_event *event) | |||
375 | /* Called from ctxsw to prevent one process's branch entries to | 375 | /* Called from ctxsw to prevent one process's branch entries to |
376 | * mingle with the other process's entries during context switch. | 376 | * mingle with the other process's entries during context switch. |
377 | */ | 377 | */ |
378 | void power_pmu_flush_branch_stack(void) | 378 | static void power_pmu_flush_branch_stack(void) |
379 | { | 379 | { |
380 | if (ppmu->bhrb_nr) | 380 | if (ppmu->bhrb_nr) |
381 | power_pmu_bhrb_reset(); | 381 | power_pmu_bhrb_reset(); |
@@ -408,7 +408,7 @@ static __u64 power_pmu_bhrb_to(u64 addr) | |||
408 | } | 408 | } |
409 | 409 | ||
410 | /* Processing BHRB entries */ | 410 | /* Processing BHRB entries */ |
411 | void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) | 411 | static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) |
412 | { | 412 | { |
413 | u64 val; | 413 | u64 val; |
414 | u64 addr; | 414 | u64 addr; |
@@ -1573,7 +1573,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags) | |||
1573 | * Set the flag to make pmu::enable() not perform the | 1573 | * Set the flag to make pmu::enable() not perform the |
1574 | * schedulability test, it will be performed at commit time | 1574 | * schedulability test, it will be performed at commit time |
1575 | */ | 1575 | */ |
1576 | void power_pmu_start_txn(struct pmu *pmu) | 1576 | static void power_pmu_start_txn(struct pmu *pmu) |
1577 | { | 1577 | { |
1578 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1578 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
1579 | 1579 | ||
@@ -1587,7 +1587,7 @@ void power_pmu_start_txn(struct pmu *pmu) | |||
1587 | * Clear the flag and pmu::enable() will perform the | 1587 | * Clear the flag and pmu::enable() will perform the |
1588 | * schedulability test. | 1588 | * schedulability test. |
1589 | */ | 1589 | */ |
1590 | void power_pmu_cancel_txn(struct pmu *pmu) | 1590 | static void power_pmu_cancel_txn(struct pmu *pmu) |
1591 | { | 1591 | { |
1592 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 1592 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
1593 | 1593 | ||
@@ -1600,7 +1600,7 @@ void power_pmu_cancel_txn(struct pmu *pmu) | |||
1600 | * Perform the group schedulability test as a whole | 1600 | * Perform the group schedulability test as a whole |
1601 | * Return 0 if success | 1601 | * Return 0 if success |
1602 | */ | 1602 | */ |
1603 | int power_pmu_commit_txn(struct pmu *pmu) | 1603 | static int power_pmu_commit_txn(struct pmu *pmu) |
1604 | { | 1604 | { |
1605 | struct cpu_hw_events *cpuhw; | 1605 | struct cpu_hw_events *cpuhw; |
1606 | long i, n; | 1606 | long i, n; |
@@ -1888,7 +1888,7 @@ ssize_t power_events_sysfs_show(struct device *dev, | |||
1888 | return sprintf(page, "event=0x%02llx\n", pmu_attr->id); | 1888 | return sprintf(page, "event=0x%02llx\n", pmu_attr->id); |
1889 | } | 1889 | } |
1890 | 1890 | ||
1891 | struct pmu power_pmu = { | 1891 | static struct pmu power_pmu = { |
1892 | .pmu_enable = power_pmu_enable, | 1892 | .pmu_enable = power_pmu_enable, |
1893 | .pmu_disable = power_pmu_disable, | 1893 | .pmu_disable = power_pmu_disable, |
1894 | .event_init = power_pmu_event_init, | 1894 | .event_init = power_pmu_event_init, |
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 70d4f748b54b..6c8710dd90c9 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c | |||
@@ -75,86 +75,6 @@ static struct attribute_group format_group = { | |||
75 | 75 | ||
76 | static struct kmem_cache *hv_page_cache; | 76 | static struct kmem_cache *hv_page_cache; |
77 | 77 | ||
78 | /* | ||
79 | * read_offset_data - copy data from one buffer to another while treating the | ||
80 | * source buffer as a small view on the total avaliable | ||
81 | * source data. | ||
82 | * | ||
83 | * @dest: buffer to copy into | ||
84 | * @dest_len: length of @dest in bytes | ||
85 | * @requested_offset: the offset within the source data we want. Must be > 0 | ||
86 | * @src: buffer to copy data from | ||
87 | * @src_len: length of @src in bytes | ||
88 | * @source_offset: the offset in the sorce data that (src,src_len) refers to. | ||
89 | * Must be > 0 | ||
90 | * | ||
91 | * returns the number of bytes copied. | ||
92 | * | ||
93 | * The following ascii art shows the various buffer possitioning we need to | ||
94 | * handle, assigns some arbitrary varibles to points on the buffer, and then | ||
95 | * shows how we fiddle with those values to get things we care about (copy | ||
96 | * start in src and copy len) | ||
97 | * | ||
98 | * s = @src buffer | ||
99 | * d = @dest buffer | ||
100 | * '.' areas in d are written to. | ||
101 | * | ||
102 | * u | ||
103 | * x w v z | ||
104 | * d |.........| | ||
105 | * s |----------------------| | ||
106 | * | ||
107 | * u | ||
108 | * x w z v | ||
109 | * d |........------| | ||
110 | * s |------------------| | ||
111 | * | ||
112 | * x w u,z,v | ||
113 | * d |........| | ||
114 | * s |------------------| | ||
115 | * | ||
116 | * x,w u,v,z | ||
117 | * d |..................| | ||
118 | * s |------------------| | ||
119 | * | ||
120 | * x u | ||
121 | * w v z | ||
122 | * d |........| | ||
123 | * s |------------------| | ||
124 | * | ||
125 | * x z w v | ||
126 | * d |------| | ||
127 | * s |------| | ||
128 | * | ||
129 | * x = source_offset | ||
130 | * w = requested_offset | ||
131 | * z = source_offset + src_len | ||
132 | * v = requested_offset + dest_len | ||
133 | * | ||
134 | * w_offset_in_s = w - x = requested_offset - source_offset | ||
135 | * z_offset_in_s = z - x = src_len | ||
136 | * v_offset_in_s = v - x = request_offset + dest_len - src_len | ||
137 | */ | ||
138 | static ssize_t read_offset_data(void *dest, size_t dest_len, | ||
139 | loff_t requested_offset, void *src, | ||
140 | size_t src_len, loff_t source_offset) | ||
141 | { | ||
142 | size_t w_offset_in_s = requested_offset - source_offset; | ||
143 | size_t z_offset_in_s = src_len; | ||
144 | size_t v_offset_in_s = requested_offset + dest_len - src_len; | ||
145 | size_t u_offset_in_s = min(z_offset_in_s, v_offset_in_s); | ||
146 | size_t copy_len = u_offset_in_s - w_offset_in_s; | ||
147 | |||
148 | if (requested_offset < 0 || source_offset < 0) | ||
149 | return -EINVAL; | ||
150 | |||
151 | if (z_offset_in_s <= w_offset_in_s) | ||
152 | return 0; | ||
153 | |||
154 | memcpy(dest, src + w_offset_in_s, copy_len); | ||
155 | return copy_len; | ||
156 | } | ||
157 | |||
158 | static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096, | 78 | static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096, |
159 | unsigned long version, | 79 | unsigned long version, |
160 | unsigned long index) | 80 | unsigned long index) |
@@ -183,8 +103,10 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj, | |||
183 | { | 103 | { |
184 | unsigned long hret; | 104 | unsigned long hret; |
185 | ssize_t ret = 0; | 105 | ssize_t ret = 0; |
186 | size_t catalog_len = 0, catalog_page_len = 0, page_count = 0; | 106 | size_t catalog_len = 0, catalog_page_len = 0; |
187 | loff_t page_offset = 0; | 107 | loff_t page_offset = 0; |
108 | loff_t offset_in_page; | ||
109 | size_t copy_len; | ||
188 | uint64_t catalog_version_num = 0; | 110 | uint64_t catalog_version_num = 0; |
189 | void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); | 111 | void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); |
190 | struct hv_24x7_catalog_page_0 *page_0 = page; | 112 | struct hv_24x7_catalog_page_0 *page_0 = page; |
@@ -202,7 +124,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj, | |||
202 | catalog_len = catalog_page_len * 4096; | 124 | catalog_len = catalog_page_len * 4096; |
203 | 125 | ||
204 | page_offset = offset / 4096; | 126 | page_offset = offset / 4096; |
205 | page_count = count / 4096; | 127 | offset_in_page = offset % 4096; |
206 | 128 | ||
207 | if (page_offset >= catalog_page_len) | 129 | if (page_offset >= catalog_page_len) |
208 | goto e_free; | 130 | goto e_free; |
@@ -216,8 +138,13 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj, | |||
216 | } | 138 | } |
217 | } | 139 | } |
218 | 140 | ||
219 | ret = read_offset_data(buf, count, offset, | 141 | copy_len = 4096 - offset_in_page; |
220 | page, 4096, page_offset * 4096); | 142 | if (copy_len > count) |
143 | copy_len = count; | ||
144 | |||
145 | memcpy(buf, page+offset_in_page, copy_len); | ||
146 | ret = copy_len; | ||
147 | |||
221 | e_free: | 148 | e_free: |
222 | if (hret) | 149 | if (hret) |
223 | pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:" | 150 | pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:" |
@@ -225,9 +152,9 @@ e_free: | |||
225 | catalog_version_num, page_offset, hret); | 152 | catalog_version_num, page_offset, hret); |
226 | kmem_cache_free(hv_page_cache, page); | 153 | kmem_cache_free(hv_page_cache, page); |
227 | 154 | ||
228 | pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n", | 155 | pr_devel("catalog_read: offset=%lld(%lld) count=%zu " |
229 | offset, page_offset, count, page_count, catalog_len, | 156 | "catalog_len=%zu(%zu) => %zd\n", offset, page_offset, |
230 | catalog_page_len, ret); | 157 | count, catalog_len, catalog_page_len, ret); |
231 | 158 | ||
232 | return ret; | 159 | return ret; |
233 | } | 160 | } |
@@ -294,7 +221,7 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix, | |||
294 | u16 lpar, u64 *res, | 221 | u16 lpar, u64 *res, |
295 | bool success_expected) | 222 | bool success_expected) |
296 | { | 223 | { |
297 | unsigned long ret; | 224 | unsigned long ret = -ENOMEM; |
298 | 225 | ||
299 | /* | 226 | /* |
300 | * request_buffer and result_buffer are not required to be 4k aligned, | 227 | * request_buffer and result_buffer are not required to be 4k aligned, |
@@ -304,7 +231,27 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix, | |||
304 | struct reqb { | 231 | struct reqb { |
305 | struct hv_24x7_request_buffer buf; | 232 | struct hv_24x7_request_buffer buf; |
306 | struct hv_24x7_request req; | 233 | struct hv_24x7_request req; |
307 | } __packed __aligned(4096) request_buffer = { | 234 | } __packed *request_buffer; |
235 | |||
236 | struct { | ||
237 | struct hv_24x7_data_result_buffer buf; | ||
238 | struct hv_24x7_result res; | ||
239 | struct hv_24x7_result_element elem; | ||
240 | __be64 result; | ||
241 | } __packed *result_buffer; | ||
242 | |||
243 | BUILD_BUG_ON(sizeof(*request_buffer) > 4096); | ||
244 | BUILD_BUG_ON(sizeof(*result_buffer) > 4096); | ||
245 | |||
246 | request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER); | ||
247 | if (!request_buffer) | ||
248 | goto out; | ||
249 | |||
250 | result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER); | ||
251 | if (!result_buffer) | ||
252 | goto out_free_request_buffer; | ||
253 | |||
254 | *request_buffer = (struct reqb) { | ||
308 | .buf = { | 255 | .buf = { |
309 | .interface_version = HV_24X7_IF_VERSION_CURRENT, | 256 | .interface_version = HV_24X7_IF_VERSION_CURRENT, |
310 | .num_requests = 1, | 257 | .num_requests = 1, |
@@ -320,28 +267,27 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix, | |||
320 | } | 267 | } |
321 | }; | 268 | }; |
322 | 269 | ||
323 | struct resb { | ||
324 | struct hv_24x7_data_result_buffer buf; | ||
325 | struct hv_24x7_result res; | ||
326 | struct hv_24x7_result_element elem; | ||
327 | __be64 result; | ||
328 | } __packed __aligned(4096) result_buffer = {}; | ||
329 | |||
330 | ret = plpar_hcall_norets(H_GET_24X7_DATA, | 270 | ret = plpar_hcall_norets(H_GET_24X7_DATA, |
331 | virt_to_phys(&request_buffer), sizeof(request_buffer), | 271 | virt_to_phys(request_buffer), sizeof(*request_buffer), |
332 | virt_to_phys(&result_buffer), sizeof(result_buffer)); | 272 | virt_to_phys(result_buffer), sizeof(*result_buffer)); |
333 | 273 | ||
334 | if (ret) { | 274 | if (ret) { |
335 | if (success_expected) | 275 | if (success_expected) |
336 | pr_err_ratelimited("hcall failed: %d %#x %#x %d => 0x%lx (%ld) detail=0x%x failing ix=%x\n", | 276 | pr_err_ratelimited("hcall failed: %d %#x %#x %d => " |
337 | domain, offset, ix, lpar, | 277 | "0x%lx (%ld) detail=0x%x failing ix=%x\n", |
338 | ret, ret, | 278 | domain, offset, ix, lpar, ret, ret, |
339 | result_buffer.buf.detailed_rc, | 279 | result_buffer->buf.detailed_rc, |
340 | result_buffer.buf.failing_request_ix); | 280 | result_buffer->buf.failing_request_ix); |
341 | return ret; | 281 | goto out_free_result_buffer; |
342 | } | 282 | } |
343 | 283 | ||
344 | *res = be64_to_cpu(result_buffer.result); | 284 | *res = be64_to_cpu(result_buffer->result); |
285 | |||
286 | out_free_result_buffer: | ||
287 | kfree(result_buffer); | ||
288 | out_free_request_buffer: | ||
289 | kfree(request_buffer); | ||
290 | out: | ||
345 | return ret; | 291 | return ret; |
346 | } | 292 | } |
347 | 293 | ||
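The reworked catalog_read() above copies at most the remainder of one 4096-byte catalog page per call, computed from offset / 4096 and offset % 4096 instead of the removed read_offset_data() helper. A stand-alone user-space illustration of that arithmetic, for demonstration only:

#include <stdio.h>

/* mirror of the per-call copy bound: at most the rest of the current
 * 4096-byte catalog page is returned in one read */
static size_t catalog_copy_len(long long offset, size_t count)
{
	long long offset_in_page = offset % 4096;
	size_t copy_len = 4096 - offset_in_page;

	if (copy_len > count)
		copy_len = count;
	return copy_len;
}

int main(void)
{
	/* a 64 KiB read starting 100 bytes in gets only the 3996 bytes
	 * left in the first page; the caller is expected to read again */
	printf("%zu\n", catalog_copy_len(100, 65536));
	return 0;
}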
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c index b0389bbe4f94..ddc12a1926ef 100644 --- a/arch/powerpc/platforms/40x/ep405.c +++ b/arch/powerpc/platforms/40x/ep405.c | |||
@@ -49,7 +49,7 @@ static void __iomem *bcsr_regs; | |||
49 | /* there's more, can't be bothered typing them tho */ | 49 | /* there's more, can't be bothered typing them tho */ |
50 | 50 | ||
51 | 51 | ||
52 | static __initdata struct of_device_id ep405_of_bus[] = { | 52 | static const struct of_device_id ep405_of_bus[] __initconst = { |
53 | { .compatible = "ibm,plb3", }, | 53 | { .compatible = "ibm,plb3", }, |
54 | { .compatible = "ibm,opb", }, | 54 | { .compatible = "ibm,opb", }, |
55 | { .compatible = "ibm,ebc", }, | 55 | { .compatible = "ibm,ebc", }, |
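This hunk and the long run of platform hunks that follow apply the same mechanical change: bus-probe match tables become const and move from __initdata to __initconst. The pattern, shown on a made-up table name:

#include <linux/init.h>
#include <linux/of.h>

/* before: writable table placed in the init data section */
static __initdata struct of_device_id example_of_bus_old[] = {
	{ .compatible = "simple-bus", },
	{},
};

/* after: read-only table placed in the const init section */
static const struct of_device_id example_of_bus[] __initconst = {
	{ .compatible = "simple-bus", },
	{},
};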
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c index 8f3920e5a046..b0c46375dd95 100644 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/of_platform.h> | 25 | #include <linux/of_platform.h> |
26 | 26 | ||
27 | static __initdata struct of_device_id ppc40x_of_bus[] = { | 27 | static const struct of_device_id ppc40x_of_bus[] __initconst = { |
28 | { .compatible = "ibm,plb3", }, | 28 | { .compatible = "ibm,plb3", }, |
29 | { .compatible = "ibm,plb4", }, | 29 | { .compatible = "ibm,plb4", }, |
30 | { .compatible = "ibm,opb", }, | 30 | { .compatible = "ibm,opb", }, |
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c index d0fc6866b00c..9aa7ae2f4164 100644 --- a/arch/powerpc/platforms/40x/virtex.c +++ b/arch/powerpc/platforms/40x/virtex.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <asm/xilinx_pci.h> | 17 | #include <asm/xilinx_pci.h> |
18 | #include <asm/ppc4xx.h> | 18 | #include <asm/ppc4xx.h> |
19 | 19 | ||
20 | static struct of_device_id xilinx_of_bus_ids[] __initdata = { | 20 | static const struct of_device_id xilinx_of_bus_ids[] __initconst = { |
21 | { .compatible = "xlnx,plb-v46-1.00.a", }, | 21 | { .compatible = "xlnx,plb-v46-1.00.a", }, |
22 | { .compatible = "xlnx,plb-v34-1.01.a", }, | 22 | { .compatible = "xlnx,plb-v34-1.01.a", }, |
23 | { .compatible = "xlnx,plb-v34-1.02.a", }, | 23 | { .compatible = "xlnx,plb-v34-1.02.a", }, |
diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c index 8b691df72f74..f7ac2d0fcb44 100644 --- a/arch/powerpc/platforms/40x/walnut.c +++ b/arch/powerpc/platforms/40x/walnut.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <asm/pci-bridge.h> | 28 | #include <asm/pci-bridge.h> |
29 | #include <asm/ppc4xx.h> | 29 | #include <asm/ppc4xx.h> |
30 | 30 | ||
31 | static __initdata struct of_device_id walnut_of_bus[] = { | 31 | static const struct of_device_id walnut_of_bus[] __initconst = { |
32 | { .compatible = "ibm,plb3", }, | 32 | { .compatible = "ibm,plb3", }, |
33 | { .compatible = "ibm,opb", }, | 33 | { .compatible = "ibm,opb", }, |
34 | { .compatible = "ibm,ebc", }, | 34 | { .compatible = "ibm,ebc", }, |
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 4d88f6a19058..82f2da28cd27 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig | |||
@@ -215,9 +215,9 @@ config AKEBONO | |||
215 | select NET_VENDOR_IBM | 215 | select NET_VENDOR_IBM |
216 | select IBM_EMAC_EMAC4 | 216 | select IBM_EMAC_EMAC4 |
217 | select IBM_EMAC_RGMII_WOL | 217 | select IBM_EMAC_RGMII_WOL |
218 | select USB | 218 | select USB if USB_SUPPORT |
219 | select USB_OHCI_HCD_PLATFORM | 219 | select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD |
220 | select USB_EHCI_HCD_PLATFORM | 220 | select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD |
221 | select MMC_SDHCI | 221 | select MMC_SDHCI |
222 | select MMC_SDHCI_PLTFM | 222 | select MMC_SDHCI_PLTFM |
223 | select MMC_SDHCI_OF_476GTR | 223 | select MMC_SDHCI_OF_476GTR |
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c index e300dd4c89bf..22ca5430c9cb 100644 --- a/arch/powerpc/platforms/44x/canyonlands.c +++ b/arch/powerpc/platforms/44x/canyonlands.c | |||
@@ -33,7 +33,7 @@ | |||
33 | 33 | ||
34 | #define BCSR_USB_EN 0x11 | 34 | #define BCSR_USB_EN 0x11 |
35 | 35 | ||
36 | static __initdata struct of_device_id ppc460ex_of_bus[] = { | 36 | static const struct of_device_id ppc460ex_of_bus[] __initconst = { |
37 | { .compatible = "ibm,plb4", }, | 37 | { .compatible = "ibm,plb4", }, |
38 | { .compatible = "ibm,opb", }, | 38 | { .compatible = "ibm,opb", }, |
39 | { .compatible = "ibm,ebc", }, | 39 | { .compatible = "ibm,ebc", }, |
diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c index 6a4232bbdf88..ae893226392d 100644 --- a/arch/powerpc/platforms/44x/ebony.c +++ b/arch/powerpc/platforms/44x/ebony.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <asm/pci-bridge.h> | 28 | #include <asm/pci-bridge.h> |
29 | #include <asm/ppc4xx.h> | 29 | #include <asm/ppc4xx.h> |
30 | 30 | ||
31 | static __initdata struct of_device_id ebony_of_bus[] = { | 31 | static const struct of_device_id ebony_of_bus[] __initconst = { |
32 | { .compatible = "ibm,plb4", }, | 32 | { .compatible = "ibm,plb4", }, |
33 | { .compatible = "ibm,opb", }, | 33 | { .compatible = "ibm,opb", }, |
34 | { .compatible = "ibm,ebc", }, | 34 | { .compatible = "ibm,ebc", }, |
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c index 4241bc825800..c7c6758b3cfe 100644 --- a/arch/powerpc/platforms/44x/iss4xx.c +++ b/arch/powerpc/platforms/44x/iss4xx.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <asm/mpic.h> | 32 | #include <asm/mpic.h> |
33 | #include <asm/mmu.h> | 33 | #include <asm/mmu.h> |
34 | 34 | ||
35 | static __initdata struct of_device_id iss4xx_of_bus[] = { | 35 | static const struct of_device_id iss4xx_of_bus[] __initconst = { |
36 | { .compatible = "ibm,plb4", }, | 36 | { .compatible = "ibm,plb4", }, |
37 | { .compatible = "ibm,plb6", }, | 37 | { .compatible = "ibm,plb6", }, |
38 | { .compatible = "ibm,opb", }, | 38 | { .compatible = "ibm,opb", }, |
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c index 3ffb915446e3..573c3d2689c6 100644 --- a/arch/powerpc/platforms/44x/ppc44x_simple.c +++ b/arch/powerpc/platforms/44x/ppc44x_simple.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/of_platform.h> | 25 | #include <linux/of_platform.h> |
26 | 26 | ||
27 | static __initdata struct of_device_id ppc44x_of_bus[] = { | 27 | static const struct of_device_id ppc44x_of_bus[] __initconst = { |
28 | { .compatible = "ibm,plb4", }, | 28 | { .compatible = "ibm,plb4", }, |
29 | { .compatible = "ibm,opb", }, | 29 | { .compatible = "ibm,opb", }, |
30 | { .compatible = "ibm,ebc", }, | 30 | { .compatible = "ibm,ebc", }, |
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index 33986c1a05da..58db9d083969 100644 --- a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c | |||
@@ -38,7 +38,7 @@ | |||
38 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
39 | #include <linux/i2c.h> | 39 | #include <linux/i2c.h> |
40 | 40 | ||
41 | static struct of_device_id ppc47x_of_bus[] __initdata = { | 41 | static const struct of_device_id ppc47x_of_bus[] __initconst = { |
42 | { .compatible = "ibm,plb4", }, | 42 | { .compatible = "ibm,plb4", }, |
43 | { .compatible = "ibm,plb6", }, | 43 | { .compatible = "ibm,plb6", }, |
44 | { .compatible = "ibm,opb", }, | 44 | { .compatible = "ibm,opb", }, |
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c index 9e09b835758b..3ee4a03c1496 100644 --- a/arch/powerpc/platforms/44x/sam440ep.c +++ b/arch/powerpc/platforms/44x/sam440ep.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <asm/ppc4xx.h> | 29 | #include <asm/ppc4xx.h> |
30 | #include <linux/i2c.h> | 30 | #include <linux/i2c.h> |
31 | 31 | ||
32 | static __initdata struct of_device_id sam440ep_of_bus[] = { | 32 | static const struct of_device_id sam440ep_of_bus[] __initconst = { |
33 | { .compatible = "ibm,plb4", }, | 33 | { .compatible = "ibm,plb4", }, |
34 | { .compatible = "ibm,opb", }, | 34 | { .compatible = "ibm,opb", }, |
35 | { .compatible = "ibm,ebc", }, | 35 | { .compatible = "ibm,ebc", }, |
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c index cf96ccaa760c..ad272c17c640 100644 --- a/arch/powerpc/platforms/44x/virtex.c +++ b/arch/powerpc/platforms/44x/virtex.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <asm/ppc4xx.h> | 21 | #include <asm/ppc4xx.h> |
22 | #include "44x.h" | 22 | #include "44x.h" |
23 | 23 | ||
24 | static struct of_device_id xilinx_of_bus_ids[] __initdata = { | 24 | static const struct of_device_id xilinx_of_bus_ids[] __initconst = { |
25 | { .compatible = "simple-bus", }, | 25 | { .compatible = "simple-bus", }, |
26 | { .compatible = "xlnx,plb-v46-1.00.a", }, | 26 | { .compatible = "xlnx,plb-v46-1.00.a", }, |
27 | { .compatible = "xlnx,plb-v46-1.02.a", }, | 27 | { .compatible = "xlnx,plb-v46-1.02.a", }, |
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c index 3a104284b338..501333cf42cf 100644 --- a/arch/powerpc/platforms/44x/warp.c +++ b/arch/powerpc/platforms/44x/warp.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <asm/dma.h> | 28 | #include <asm/dma.h> |
29 | 29 | ||
30 | 30 | ||
31 | static __initdata struct of_device_id warp_of_bus[] = { | 31 | static const struct of_device_id warp_of_bus[] __initconst = { |
32 | { .compatible = "ibm,plb4", }, | 32 | { .compatible = "ibm,plb4", }, |
33 | { .compatible = "ibm,opb", }, | 33 | { .compatible = "ibm,opb", }, |
34 | { .compatible = "ibm,ebc", }, | 34 | { .compatible = "ibm,ebc", }, |
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index adb95f03d4d4..e996e007bc44 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c | |||
@@ -337,7 +337,7 @@ void __init mpc512x_init_IRQ(void) | |||
337 | /* | 337 | /* |
338 | * Nodes to do bus probe on, soc and localbus | 338 | * Nodes to do bus probe on, soc and localbus |
339 | */ | 339 | */ |
340 | static struct of_device_id __initdata of_bus_ids[] = { | 340 | static const struct of_device_id of_bus_ids[] __initconst = { |
341 | { .compatible = "fsl,mpc5121-immr", }, | 341 | { .compatible = "fsl,mpc5121-immr", }, |
342 | { .compatible = "fsl,mpc5121-localbus", }, | 342 | { .compatible = "fsl,mpc5121-localbus", }, |
343 | { .compatible = "fsl,mpc5121-mbx", }, | 343 | { .compatible = "fsl,mpc5121-mbx", }, |
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c index 1843bc932011..7492de3cf6d0 100644 --- a/arch/powerpc/platforms/52xx/lite5200.c +++ b/arch/powerpc/platforms/52xx/lite5200.c | |||
@@ -34,13 +34,13 @@ | |||
34 | */ | 34 | */ |
35 | 35 | ||
36 | /* mpc5200 device tree match tables */ | 36 | /* mpc5200 device tree match tables */ |
37 | static struct of_device_id mpc5200_cdm_ids[] __initdata = { | 37 | static const struct of_device_id mpc5200_cdm_ids[] __initconst = { |
38 | { .compatible = "fsl,mpc5200-cdm", }, | 38 | { .compatible = "fsl,mpc5200-cdm", }, |
39 | { .compatible = "mpc5200-cdm", }, | 39 | { .compatible = "mpc5200-cdm", }, |
40 | {} | 40 | {} |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct of_device_id mpc5200_gpio_ids[] __initdata = { | 43 | static const struct of_device_id mpc5200_gpio_ids[] __initconst = { |
44 | { .compatible = "fsl,mpc5200-gpio", }, | 44 | { .compatible = "fsl,mpc5200-gpio", }, |
45 | { .compatible = "mpc5200-gpio", }, | 45 | { .compatible = "mpc5200-gpio", }, |
46 | {} | 46 | {} |
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 070d315dd6cd..32cae33c4266 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <asm/machdep.h> | 30 | #include <asm/machdep.h> |
31 | #include <asm/mpc52xx.h> | 31 | #include <asm/mpc52xx.h> |
32 | 32 | ||
33 | static struct of_device_id mpc5200_gpio_ids[] __initdata = { | 33 | static const struct of_device_id mpc5200_gpio_ids[] __initconst = { |
34 | { .compatible = "fsl,mpc5200-gpio", }, | 34 | { .compatible = "fsl,mpc5200-gpio", }, |
35 | { .compatible = "mpc5200-gpio", }, | 35 | { .compatible = "mpc5200-gpio", }, |
36 | {} | 36 | {} |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c index d7e94f49532a..26993826a797 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_common.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c | |||
@@ -23,12 +23,12 @@ | |||
23 | #include <asm/mpc52xx.h> | 23 | #include <asm/mpc52xx.h> |
24 | 24 | ||
25 | /* MPC5200 device tree match tables */ | 25 | /* MPC5200 device tree match tables */ |
26 | static struct of_device_id mpc52xx_xlb_ids[] __initdata = { | 26 | static const struct of_device_id mpc52xx_xlb_ids[] __initconst = { |
27 | { .compatible = "fsl,mpc5200-xlb", }, | 27 | { .compatible = "fsl,mpc5200-xlb", }, |
28 | { .compatible = "mpc5200-xlb", }, | 28 | { .compatible = "mpc5200-xlb", }, |
29 | {} | 29 | {} |
30 | }; | 30 | }; |
31 | static struct of_device_id mpc52xx_bus_ids[] __initdata = { | 31 | static const struct of_device_id mpc52xx_bus_ids[] __initconst = { |
32 | { .compatible = "fsl,mpc5200-immr", }, | 32 | { .compatible = "fsl,mpc5200-immr", }, |
33 | { .compatible = "fsl,mpc5200b-immr", }, | 33 | { .compatible = "fsl,mpc5200b-immr", }, |
34 | { .compatible = "simple-bus", }, | 34 | { .compatible = "simple-bus", }, |
@@ -108,21 +108,21 @@ void __init mpc52xx_declare_of_platform_devices(void) | |||
108 | /* | 108 | /* |
109 | * match tables used by mpc52xx_map_common_devices() | 109 | * match tables used by mpc52xx_map_common_devices() |
110 | */ | 110 | */ |
111 | static struct of_device_id mpc52xx_gpt_ids[] __initdata = { | 111 | static const struct of_device_id mpc52xx_gpt_ids[] __initconst = { |
112 | { .compatible = "fsl,mpc5200-gpt", }, | 112 | { .compatible = "fsl,mpc5200-gpt", }, |
113 | { .compatible = "mpc5200-gpt", }, /* old */ | 113 | { .compatible = "mpc5200-gpt", }, /* old */ |
114 | {} | 114 | {} |
115 | }; | 115 | }; |
116 | static struct of_device_id mpc52xx_cdm_ids[] __initdata = { | 116 | static const struct of_device_id mpc52xx_cdm_ids[] __initconst = { |
117 | { .compatible = "fsl,mpc5200-cdm", }, | 117 | { .compatible = "fsl,mpc5200-cdm", }, |
118 | { .compatible = "mpc5200-cdm", }, /* old */ | 118 | { .compatible = "mpc5200-cdm", }, /* old */ |
119 | {} | 119 | {} |
120 | }; | 120 | }; |
121 | static const struct of_device_id mpc52xx_gpio_simple[] = { | 121 | static const struct of_device_id mpc52xx_gpio_simple[] __initconst = { |
122 | { .compatible = "fsl,mpc5200-gpio", }, | 122 | { .compatible = "fsl,mpc5200-gpio", }, |
123 | {} | 123 | {} |
124 | }; | 124 | }; |
125 | static const struct of_device_id mpc52xx_gpio_wkup[] = { | 125 | static const struct of_device_id mpc52xx_gpio_wkup[] __initconst = { |
126 | { .compatible = "fsl,mpc5200-gpio-wkup", }, | 126 | { .compatible = "fsl,mpc5200-gpio-wkup", }, |
127 | {} | 127 | {} |
128 | }; | 128 | }; |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 37f7a89c10f2..f8f0081759fb 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | |||
@@ -564,7 +564,7 @@ static int mpc52xx_lpbfifo_remove(struct platform_device *op) | |||
564 | return 0; | 564 | return 0; |
565 | } | 565 | } |
566 | 566 | ||
567 | static struct of_device_id mpc52xx_lpbfifo_match[] = { | 567 | static const struct of_device_id mpc52xx_lpbfifo_match[] = { |
568 | { .compatible = "fsl,mpc5200-lpbfifo", }, | 568 | { .compatible = "fsl,mpc5200-lpbfifo", }, |
569 | {}, | 569 | {}, |
570 | }; | 570 | }; |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 2898b737deb7..2944bc84b9d6 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c | |||
@@ -119,12 +119,12 @@ | |||
119 | 119 | ||
120 | 120 | ||
121 | /* MPC5200 device tree match tables */ | 121 | /* MPC5200 device tree match tables */ |
122 | static struct of_device_id mpc52xx_pic_ids[] __initdata = { | 122 | static const struct of_device_id mpc52xx_pic_ids[] __initconst = { |
123 | { .compatible = "fsl,mpc5200-pic", }, | 123 | { .compatible = "fsl,mpc5200-pic", }, |
124 | { .compatible = "mpc5200-pic", }, | 124 | { .compatible = "mpc5200-pic", }, |
125 | {} | 125 | {} |
126 | }; | 126 | }; |
127 | static struct of_device_id mpc52xx_sdma_ids[] __initdata = { | 127 | static const struct of_device_id mpc52xx_sdma_ids[] __initconst = { |
128 | { .compatible = "fsl,mpc5200-bestcomm", }, | 128 | { .compatible = "fsl,mpc5200-bestcomm", }, |
129 | { .compatible = "mpc5200-bestcomm", }, | 129 | { .compatible = "mpc5200-bestcomm", }, |
130 | {} | 130 | {} |
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 79799b29ffe2..3d0c3a01143d 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c | |||
@@ -298,7 +298,7 @@ static void __init ep8248e_setup_arch(void) | |||
298 | ppc_md.progress("ep8248e_setup_arch(), finish", 0); | 298 | ppc_md.progress("ep8248e_setup_arch(), finish", 0); |
299 | } | 299 | } |
300 | 300 | ||
301 | static __initdata struct of_device_id of_bus_ids[] = { | 301 | static const struct of_device_id of_bus_ids[] __initconst = { |
302 | { .compatible = "simple-bus", }, | 302 | { .compatible = "simple-bus", }, |
303 | { .compatible = "fsl,ep8248e-bcsr", }, | 303 | { .compatible = "fsl,ep8248e-bcsr", }, |
304 | {}, | 304 | {}, |
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c index 058cc1895c88..387b446f4161 100644 --- a/arch/powerpc/platforms/82xx/km82xx.c +++ b/arch/powerpc/platforms/82xx/km82xx.c | |||
@@ -180,7 +180,7 @@ static void __init km82xx_setup_arch(void) | |||
180 | ppc_md.progress("km82xx_setup_arch(), finish", 0); | 180 | ppc_md.progress("km82xx_setup_arch(), finish", 0); |
181 | } | 181 | } |
182 | 182 | ||
183 | static __initdata struct of_device_id of_bus_ids[] = { | 183 | static const struct of_device_id of_bus_ids[] __initconst = { |
184 | { .compatible = "simple-bus", }, | 184 | { .compatible = "simple-bus", }, |
185 | {}, | 185 | {}, |
186 | }; | 186 | }; |
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c index 6a14cf50f4a2..d24deacf07d0 100644 --- a/arch/powerpc/platforms/82xx/mpc8272_ads.c +++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c | |||
@@ -181,7 +181,7 @@ static void __init mpc8272_ads_setup_arch(void) | |||
181 | ppc_md.progress("mpc8272_ads_setup_arch(), finish", 0); | 181 | ppc_md.progress("mpc8272_ads_setup_arch(), finish", 0); |
182 | } | 182 | } |
183 | 183 | ||
184 | static struct of_device_id __initdata of_bus_ids[] = { | 184 | static const struct of_device_id of_bus_ids[] __initconst = { |
185 | { .name = "soc", }, | 185 | { .name = "soc", }, |
186 | { .name = "cpm", }, | 186 | { .name = "cpm", }, |
187 | { .name = "localbus", }, | 187 | { .name = "localbus", }, |
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c index e5f82ec8df17..3a5164ad10ad 100644 --- a/arch/powerpc/platforms/82xx/pq2fads.c +++ b/arch/powerpc/platforms/82xx/pq2fads.c | |||
@@ -168,7 +168,7 @@ static int __init pq2fads_probe(void) | |||
168 | return of_flat_dt_is_compatible(root, "fsl,pq2fads"); | 168 | return of_flat_dt_is_compatible(root, "fsl,pq2fads"); |
169 | } | 169 | } |
170 | 170 | ||
171 | static struct of_device_id __initdata of_bus_ids[] = { | 171 | static const struct of_device_id of_bus_ids[] __initconst = { |
172 | { .name = "soc", }, | 172 | { .name = "soc", }, |
173 | { .name = "cpm", }, | 173 | { .name = "cpm", }, |
174 | { .name = "localbus", }, | 174 | { .name = "localbus", }, |
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 73997027b085..463fa91ee5b6 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c | |||
@@ -214,7 +214,7 @@ static const struct i2c_device_id mcu_ids[] = { | |||
214 | }; | 214 | }; |
215 | MODULE_DEVICE_TABLE(i2c, mcu_ids); | 215 | MODULE_DEVICE_TABLE(i2c, mcu_ids); |
216 | 216 | ||
217 | static struct of_device_id mcu_of_match_table[] = { | 217 | static const struct of_device_id mcu_of_match_table[] = { |
218 | { .compatible = "fsl,mcu-mpc8349emitx", }, | 218 | { .compatible = "fsl,mcu-mpc8349emitx", }, |
219 | { }, | 219 | { }, |
220 | }; | 220 | }; |
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c index 125336f750c6..ef9d01a049c1 100644 --- a/arch/powerpc/platforms/83xx/misc.c +++ b/arch/powerpc/platforms/83xx/misc.c | |||
@@ -114,7 +114,7 @@ void __init mpc83xx_ipic_and_qe_init_IRQ(void) | |||
114 | } | 114 | } |
115 | #endif /* CONFIG_QUICC_ENGINE */ | 115 | #endif /* CONFIG_QUICC_ENGINE */ |
116 | 116 | ||
117 | static struct of_device_id __initdata of_bus_ids[] = { | 117 | static const struct of_device_id of_bus_ids[] __initconst = { |
118 | { .type = "soc", }, | 118 | { .type = "soc", }, |
119 | { .compatible = "soc", }, | 119 | { .compatible = "soc", }, |
120 | { .compatible = "simple-bus" }, | 120 | { .compatible = "simple-bus" }, |
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c index a494fa57bdf9..80aea8c4b5a3 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_itx.c +++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c | |||
@@ -38,7 +38,7 @@ | |||
38 | 38 | ||
39 | #include "mpc83xx.h" | 39 | #include "mpc83xx.h" |
40 | 40 | ||
41 | static struct of_device_id __initdata mpc834x_itx_ids[] = { | 41 | static const struct of_device_id mpc834x_itx_ids[] __initconst = { |
42 | { .compatible = "fsl,pq2pro-localbus", }, | 42 | { .compatible = "fsl,pq2pro-localbus", }, |
43 | {}, | 43 | {}, |
44 | }; | 44 | }; |
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 4b4c081df94d..eeb80e25214d 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c | |||
@@ -321,7 +321,7 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = { | |||
321 | .end = mpc83xx_suspend_end, | 321 | .end = mpc83xx_suspend_end, |
322 | }; | 322 | }; |
323 | 323 | ||
324 | static struct of_device_id pmc_match[]; | 324 | static const struct of_device_id pmc_match[]; |
325 | static int pmc_probe(struct platform_device *ofdev) | 325 | static int pmc_probe(struct platform_device *ofdev) |
326 | { | 326 | { |
327 | const struct of_device_id *match; | 327 | const struct of_device_id *match; |
@@ -420,7 +420,7 @@ static struct pmc_type pmc_types[] = { | |||
420 | } | 420 | } |
421 | }; | 421 | }; |
422 | 422 | ||
423 | static struct of_device_id pmc_match[] = { | 423 | static const struct of_device_id pmc_match[] = { |
424 | { | 424 | { |
425 | .compatible = "fsl,mpc8313-pmc", | 425 | .compatible = "fsl,mpc8313-pmc", |
426 | .data = &pmc_types[0], | 426 | .data = &pmc_types[0], |
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 0c1e6903597e..f22635a71d01 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig | |||
@@ -276,7 +276,7 @@ config CORENET_GENERIC | |||
276 | For 64bit kernel, the following boards are supported: | 276 | For 64bit kernel, the following boards are supported: |
277 | T208x QDS/RDB, T4240 QDS/RDB and B4 QDS | 277 | T208x QDS/RDB, T4240 QDS/RDB and B4 QDS |
278 | The following boards are supported for both 32bit and 64bit kernel: | 278 | The following boards are supported for both 32bit and 64bit kernel: |
279 | P5020 DS, P5040 DS and T104xQDS | 279 | P5020 DS, P5040 DS and T104xQDS/RDB |
280 | 280 | ||
281 | endif # FSL_SOC_BOOKE | 281 | endif # FSL_SOC_BOOKE |
282 | 282 | ||
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index b564b5e23f7c..4a9ad871a168 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | #include "mpc85xx.h" | 15 | #include "mpc85xx.h" |
16 | 16 | ||
17 | static struct of_device_id __initdata mpc85xx_common_ids[] = { | 17 | static const struct of_device_id mpc85xx_common_ids[] __initconst = { |
18 | { .type = "soc", }, | 18 | { .type = "soc", }, |
19 | { .compatible = "soc", }, | 19 | { .compatible = "soc", }, |
20 | { .compatible = "simple-bus", }, | 20 | { .compatible = "simple-bus", }, |
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index d22dd85e50bf..e56b89a792ed 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/time.h> | 20 | #include <asm/time.h> |
21 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
22 | #include <asm/pci-bridge.h> | 22 | #include <asm/pci-bridge.h> |
23 | #include <asm/pgtable.h> | ||
23 | #include <asm/ppc-pci.h> | 24 | #include <asm/ppc-pci.h> |
24 | #include <mm/mmu_decl.h> | 25 | #include <mm/mmu_decl.h> |
25 | #include <asm/prom.h> | 26 | #include <asm/prom.h> |
@@ -67,6 +68,16 @@ void __init corenet_gen_setup_arch(void) | |||
67 | 68 | ||
68 | swiotlb_detect_4g(); | 69 | swiotlb_detect_4g(); |
69 | 70 | ||
71 | #if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32) | ||
72 | /* | ||
73 | * Inbound windows don't cover the full lower 4 GiB | ||
74 | * due to conflicts with PCICSRBAR and outbound windows, | ||
75 | * so limit the DMA32 zone to 2 GiB, to allow consistent | ||
76 | * allocations to succeed. | ||
77 | */ | ||
78 | limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT)); | ||
79 | #endif | ||
80 | |||
70 | pr_info("%s board\n", ppc_md.name); | 81 | pr_info("%s board\n", ppc_md.name); |
71 | 82 | ||
72 | mpc85xx_qe_init(); | 83 | mpc85xx_qe_init(); |
@@ -129,6 +140,9 @@ static const char * const boards[] __initconst = { | |||
129 | "fsl,B4220QDS", | 140 | "fsl,B4220QDS", |
130 | "fsl,T1040QDS", | 141 | "fsl,T1040QDS", |
131 | "fsl,T1042QDS", | 142 | "fsl,T1042QDS", |
143 | "fsl,T1040RDB", | ||
144 | "fsl,T1042RDB", | ||
145 | "fsl,T1042RDB_PI", | ||
132 | "keymile,kmcoge4", | 146 | "keymile,kmcoge4", |
133 | NULL | 147 | NULL |
134 | }; | 148 | }; |
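The corenet_gen_setup_arch() hunk above (and the qemu_e500 hunk further down) caps ZONE_DMA32 at 2 GiB because the PCIe inbound windows cannot map the whole lower 4 GiB once PCICSRBAR and the outbound windows are carved out, and consistent (coherent) DMA allocations must stay within what the windows cover. The value passed to limit_zone_pfn() is simply 2 GiB expressed as a page frame number; a standalone check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12):

    #include <stdio.h>

    int main(void)
    {
            const unsigned long page_shift = 12;                /* 4 KiB pages */
            unsigned long limit_pfn = 1UL << (31 - page_shift); /* as in the hunk */

            /* 2^19 pages * 4 KiB = 2 GiB */
            printf("DMA32 limit: %lu pages = %lu MiB\n",
                   limit_pfn, (limit_pfn << page_shift) >> 20);
            return 0;
    }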
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c index 3daff7c63569..12019f17f297 100644 --- a/arch/powerpc/platforms/85xx/ppa8548.c +++ b/arch/powerpc/platforms/85xx/ppa8548.c | |||
@@ -59,7 +59,7 @@ static void ppa8548_show_cpuinfo(struct seq_file *m) | |||
59 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); | 59 | seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); |
60 | } | 60 | } |
61 | 61 | ||
62 | static struct of_device_id __initdata of_bus_ids[] = { | 62 | static const struct of_device_id of_bus_ids[] __initconst = { |
63 | { .name = "soc", }, | 63 | { .name = "soc", }, |
64 | { .type = "soc", }, | 64 | { .type = "soc", }, |
65 | { .compatible = "simple-bus", }, | 65 | { .compatible = "simple-bus", }, |
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index 7f2673293549..8ad2fe6f200a 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ b/arch/powerpc/platforms/85xx/qemu_e500.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/of_fdt.h> | 19 | #include <linux/of_fdt.h> |
20 | #include <asm/machdep.h> | 20 | #include <asm/machdep.h> |
21 | #include <asm/pgtable.h> | ||
21 | #include <asm/time.h> | 22 | #include <asm/time.h> |
22 | #include <asm/udbg.h> | 23 | #include <asm/udbg.h> |
23 | #include <asm/mpic.h> | 24 | #include <asm/mpic.h> |
@@ -44,6 +45,15 @@ static void __init qemu_e500_setup_arch(void) | |||
44 | 45 | ||
45 | fsl_pci_assign_primary(); | 46 | fsl_pci_assign_primary(); |
46 | swiotlb_detect_4g(); | 47 | swiotlb_detect_4g(); |
48 | #if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32) | ||
49 | /* | ||
50 | * Inbound windows don't cover the full lower 4 GiB | ||
51 | * due to conflicts with PCICSRBAR and outbound windows, | ||
52 | * so limit the DMA32 zone to 2 GiB, to allow consistent | ||
53 | * allocations to succeed. | ||
54 | */ | ||
55 | limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT)); | ||
56 | #endif | ||
47 | mpc85xx_smp_init(); | 57 | mpc85xx_smp_init(); |
48 | } | 58 | } |
49 | 59 | ||
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index bb75add67084..8162b0412117 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | static struct device_node *halt_node; | 25 | static struct device_node *halt_node; |
26 | 26 | ||
27 | static struct of_device_id child_match[] = { | 27 | static const struct of_device_id child_match[] = { |
28 | { | 28 | { |
29 | .compatible = "sgy,gpio-halt", | 29 | .compatible = "sgy,gpio-halt", |
30 | }, | 30 | }, |
@@ -147,7 +147,7 @@ static int gpio_halt_remove(struct platform_device *pdev) | |||
147 | return 0; | 147 | return 0; |
148 | } | 148 | } |
149 | 149 | ||
150 | static struct of_device_id gpio_halt_match[] = { | 150 | static const struct of_device_id gpio_halt_match[] = { |
151 | /* We match on the gpio bus itself and scan the children since they | 151 | /* We match on the gpio bus itself and scan the children since they |
152 | * won't be matched against us. We know the bus won't match until it | 152 | * won't be matched against us. We know the bus won't match until it |
153 | * has been registered too. */ | 153 | * has been registered too. */ |
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c index c23f3443880a..bf17933b20f3 100644 --- a/arch/powerpc/platforms/86xx/gef_ppc9a.c +++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c | |||
@@ -213,7 +213,7 @@ static long __init mpc86xx_time_init(void) | |||
213 | return 0; | 213 | return 0; |
214 | } | 214 | } |
215 | 215 | ||
216 | static __initdata struct of_device_id of_bus_ids[] = { | 216 | static const struct of_device_id of_bus_ids[] __initconst = { |
217 | { .compatible = "simple-bus", }, | 217 | { .compatible = "simple-bus", }, |
218 | { .compatible = "gianfar", }, | 218 | { .compatible = "gianfar", }, |
219 | { .compatible = "fsl,mpc8641-pcie", }, | 219 | { .compatible = "fsl,mpc8641-pcie", }, |
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c index 8a6ac20686ea..8facf5873866 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc310.c +++ b/arch/powerpc/platforms/86xx/gef_sbc310.c | |||
@@ -200,7 +200,7 @@ static long __init mpc86xx_time_init(void) | |||
200 | return 0; | 200 | return 0; |
201 | } | 201 | } |
202 | 202 | ||
203 | static __initdata struct of_device_id of_bus_ids[] = { | 203 | static const struct of_device_id of_bus_ids[] __initconst = { |
204 | { .compatible = "simple-bus", }, | 204 | { .compatible = "simple-bus", }, |
205 | { .compatible = "gianfar", }, | 205 | { .compatible = "gianfar", }, |
206 | { .compatible = "fsl,mpc8641-pcie", }, | 206 | { .compatible = "fsl,mpc8641-pcie", }, |
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c index 06c72636f299..8c9058df5642 100644 --- a/arch/powerpc/platforms/86xx/gef_sbc610.c +++ b/arch/powerpc/platforms/86xx/gef_sbc610.c | |||
@@ -190,7 +190,7 @@ static long __init mpc86xx_time_init(void) | |||
190 | return 0; | 190 | return 0; |
191 | } | 191 | } |
192 | 192 | ||
193 | static __initdata struct of_device_id of_bus_ids[] = { | 193 | static const struct of_device_id of_bus_ids[] __initconst = { |
194 | { .compatible = "simple-bus", }, | 194 | { .compatible = "simple-bus", }, |
195 | { .compatible = "gianfar", }, | 195 | { .compatible = "gianfar", }, |
196 | { .compatible = "fsl,mpc8641-pcie", }, | 196 | { .compatible = "fsl,mpc8641-pcie", }, |
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c index d479d68fbb2b..55413a547ea8 100644 --- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c | |||
@@ -85,7 +85,7 @@ static void __init mpc8610_suspend_init(void) | |||
85 | static inline void mpc8610_suspend_init(void) { } | 85 | static inline void mpc8610_suspend_init(void) { } |
86 | #endif /* CONFIG_SUSPEND */ | 86 | #endif /* CONFIG_SUSPEND */ |
87 | 87 | ||
88 | static struct of_device_id __initdata mpc8610_ids[] = { | 88 | static const struct of_device_id mpc8610_ids[] __initconst = { |
89 | { .compatible = "fsl,mpc8610-immr", }, | 89 | { .compatible = "fsl,mpc8610-immr", }, |
90 | { .compatible = "fsl,mpc8610-guts", }, | 90 | { .compatible = "fsl,mpc8610-guts", }, |
91 | { .compatible = "simple-bus", }, | 91 | { .compatible = "simple-bus", }, |
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index e8bf3fae5606..07ccb1b0cc7d 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c | |||
@@ -127,7 +127,7 @@ mpc86xx_time_init(void) | |||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static __initdata struct of_device_id of_bus_ids[] = { | 130 | static const struct of_device_id of_bus_ids[] __initconst = { |
131 | { .compatible = "simple-bus", }, | 131 | { .compatible = "simple-bus", }, |
132 | { .compatible = "fsl,srio", }, | 132 | { .compatible = "fsl,srio", }, |
133 | { .compatible = "gianfar", }, | 133 | { .compatible = "gianfar", }, |
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c index b47a8fd0f3d3..6810b71d54a7 100644 --- a/arch/powerpc/platforms/86xx/sbc8641d.c +++ b/arch/powerpc/platforms/86xx/sbc8641d.c | |||
@@ -92,7 +92,7 @@ mpc86xx_time_init(void) | |||
92 | return 0; | 92 | return 0; |
93 | } | 93 | } |
94 | 94 | ||
95 | static __initdata struct of_device_id of_bus_ids[] = { | 95 | static const struct of_device_id of_bus_ids[] __initconst = { |
96 | { .compatible = "simple-bus", }, | 96 | { .compatible = "simple-bus", }, |
97 | { .compatible = "gianfar", }, | 97 | { .compatible = "gianfar", }, |
98 | { .compatible = "fsl,mpc8641-pcie", }, | 98 | { .compatible = "fsl,mpc8641-pcie", }, |
diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c index 82363e98f50e..61cae4c1edb8 100644 --- a/arch/powerpc/platforms/8xx/adder875.c +++ b/arch/powerpc/platforms/8xx/adder875.c | |||
@@ -92,7 +92,7 @@ static int __init adder875_probe(void) | |||
92 | return of_flat_dt_is_compatible(root, "analogue-and-micro,adder875"); | 92 | return of_flat_dt_is_compatible(root, "analogue-and-micro,adder875"); |
93 | } | 93 | } |
94 | 94 | ||
95 | static __initdata struct of_device_id of_bus_ids[] = { | 95 | static const struct of_device_id of_bus_ids[] __initconst = { |
96 | { .compatible = "simple-bus", }, | 96 | { .compatible = "simple-bus", }, |
97 | {}, | 97 | {}, |
98 | }; | 98 | }; |
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c index e62166681d08..2bedeb7d5f8f 100644 --- a/arch/powerpc/platforms/8xx/ep88xc.c +++ b/arch/powerpc/platforms/8xx/ep88xc.c | |||
@@ -147,7 +147,7 @@ static int __init ep88xc_probe(void) | |||
147 | return of_flat_dt_is_compatible(root, "fsl,ep88xc"); | 147 | return of_flat_dt_is_compatible(root, "fsl,ep88xc"); |
148 | } | 148 | } |
149 | 149 | ||
150 | static struct of_device_id __initdata of_bus_ids[] = { | 150 | static const struct of_device_id of_bus_ids[] __initconst = { |
151 | { .name = "soc", }, | 151 | { .name = "soc", }, |
152 | { .name = "cpm", }, | 152 | { .name = "cpm", }, |
153 | { .name = "localbus", }, | 153 | { .name = "localbus", }, |
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c index 63084640c5c5..78180c5e73ff 100644 --- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c | |||
@@ -122,7 +122,7 @@ static int __init mpc86xads_probe(void) | |||
122 | return of_flat_dt_is_compatible(root, "fsl,mpc866ads"); | 122 | return of_flat_dt_is_compatible(root, "fsl,mpc866ads"); |
123 | } | 123 | } |
124 | 124 | ||
125 | static struct of_device_id __initdata of_bus_ids[] = { | 125 | static const struct of_device_id of_bus_ids[] __initconst = { |
126 | { .name = "soc", }, | 126 | { .name = "soc", }, |
127 | { .name = "cpm", }, | 127 | { .name = "cpm", }, |
128 | { .name = "localbus", }, | 128 | { .name = "localbus", }, |
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c index 5921dcb498fd..4d62bf9dc789 100644 --- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c | |||
@@ -197,7 +197,7 @@ static int __init mpc885ads_probe(void) | |||
197 | return of_flat_dt_is_compatible(root, "fsl,mpc885ads"); | 197 | return of_flat_dt_is_compatible(root, "fsl,mpc885ads"); |
198 | } | 198 | } |
199 | 199 | ||
200 | static struct of_device_id __initdata of_bus_ids[] = { | 200 | static const struct of_device_id of_bus_ids[] __initconst = { |
201 | { .name = "soc", }, | 201 | { .name = "soc", }, |
202 | { .name = "cpm", }, | 202 | { .name = "cpm", }, |
203 | { .name = "localbus", }, | 203 | { .name = "localbus", }, |
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c index dda607807def..bee47a2b23e6 100644 --- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c +++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c | |||
@@ -124,7 +124,7 @@ static int __init tqm8xx_probe(void) | |||
124 | return of_flat_dt_is_compatible(node, "tqc,tqm8xx"); | 124 | return of_flat_dt_is_compatible(node, "tqc,tqm8xx"); |
125 | } | 125 | } |
126 | 126 | ||
127 | static struct of_device_id __initdata of_bus_ids[] = { | 127 | static const struct of_device_id of_bus_ids[] __initconst = { |
128 | { .name = "soc", }, | 128 | { .name = "soc", }, |
129 | { .name = "cpm", }, | 129 | { .name = "cpm", }, |
130 | { .name = "localbus", }, | 130 | { .name = "localbus", }, |
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 7d9ee3d8c618..76483e3acd60 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -116,6 +116,12 @@ config POWER6_CPU | |||
116 | config POWER7_CPU | 116 | config POWER7_CPU |
117 | bool "POWER7" | 117 | bool "POWER7" |
118 | depends on PPC_BOOK3S_64 | 118 | depends on PPC_BOOK3S_64 |
119 | select ARCH_HAS_FAST_MULTIPLIER | ||
120 | |||
121 | config POWER8_CPU | ||
122 | bool "POWER8" | ||
123 | depends on PPC_BOOK3S_64 | ||
124 | select ARCH_HAS_FAST_MULTIPLIER | ||
119 | 125 | ||
120 | config E5500_CPU | 126 | config E5500_CPU |
121 | bool "Freescale e5500" | 127 | bool "Freescale e5500" |
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 9978f594cac0..870b6dbd4d18 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig | |||
@@ -86,6 +86,7 @@ config SPU_FS_64K_LS | |||
86 | config SPU_BASE | 86 | config SPU_BASE |
87 | bool | 87 | bool |
88 | default n | 88 | default n |
89 | select PPC_COPRO_BASE | ||
89 | 90 | ||
90 | config CBE_RAS | 91 | config CBE_RAS |
91 | bool "RAS features for bare metal Cell BE" | 92 | bool "RAS features for bare metal Cell BE" |
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index fe053e7c73ee..2d16884f67b9 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile | |||
@@ -20,7 +20,7 @@ spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o | |||
20 | 20 | ||
21 | obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ | 21 | obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \ |
22 | spu_notify.o \ | 22 | spu_notify.o \ |
23 | spu_syscalls.o spu_fault.o \ | 23 | spu_syscalls.o \ |
24 | $(spu-priv1-y) \ | 24 | $(spu-priv1-y) \ |
25 | $(spu-manage-y) \ | 25 | $(spu-manage-y) \ |
26 | spufs/ | 26 | spufs/ |
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c index 173568140a32..2b98a36ef8fb 100644 --- a/arch/powerpc/platforms/cell/celleb_pci.c +++ b/arch/powerpc/platforms/cell/celleb_pci.c | |||
@@ -454,7 +454,7 @@ static struct celleb_phb_spec celleb_fake_pci_spec __initdata = { | |||
454 | .setup = celleb_setup_fake_pci, | 454 | .setup = celleb_setup_fake_pci, |
455 | }; | 455 | }; |
456 | 456 | ||
457 | static struct of_device_id celleb_phb_match[] __initdata = { | 457 | static const struct of_device_id celleb_phb_match[] __initconst = { |
458 | { | 458 | { |
459 | .name = "pci-pseudo", | 459 | .name = "pci-pseudo", |
460 | .data = &celleb_fake_pci_spec, | 460 | .data = &celleb_fake_pci_spec, |
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c index 1d5a4d8ddad9..34e8ce2976aa 100644 --- a/arch/powerpc/platforms/cell/celleb_setup.c +++ b/arch/powerpc/platforms/cell/celleb_setup.c | |||
@@ -102,7 +102,7 @@ static void __init celleb_setup_arch_common(void) | |||
102 | #endif | 102 | #endif |
103 | } | 103 | } |
104 | 104 | ||
105 | static struct of_device_id celleb_bus_ids[] __initdata = { | 105 | static const struct of_device_id celleb_bus_ids[] __initconst = { |
106 | { .type = "scc", }, | 106 | { .type = "scc", }, |
107 | { .type = "ioif", }, /* old style */ | 107 | { .type = "ioif", }, /* old style */ |
108 | {}, | 108 | {}, |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 2930d1e81a05..ffcbd242e669 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -76,10 +76,6 @@ static LIST_HEAD(spu_full_list); | |||
76 | static DEFINE_SPINLOCK(spu_full_list_lock); | 76 | static DEFINE_SPINLOCK(spu_full_list_lock); |
77 | static DEFINE_MUTEX(spu_full_list_mutex); | 77 | static DEFINE_MUTEX(spu_full_list_mutex); |
78 | 78 | ||
79 | struct spu_slb { | ||
80 | u64 esid, vsid; | ||
81 | }; | ||
82 | |||
83 | void spu_invalidate_slbs(struct spu *spu) | 79 | void spu_invalidate_slbs(struct spu *spu) |
84 | { | 80 | { |
85 | struct spu_priv2 __iomem *priv2 = spu->priv2; | 81 | struct spu_priv2 __iomem *priv2 = spu->priv2; |
@@ -149,7 +145,7 @@ static void spu_restart_dma(struct spu *spu) | |||
149 | } | 145 | } |
150 | } | 146 | } |
151 | 147 | ||
152 | static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) | 148 | static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb) |
153 | { | 149 | { |
154 | struct spu_priv2 __iomem *priv2 = spu->priv2; | 150 | struct spu_priv2 __iomem *priv2 = spu->priv2; |
155 | 151 | ||
@@ -167,45 +163,12 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) | |||
167 | 163 | ||
168 | static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) | 164 | static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) |
169 | { | 165 | { |
170 | struct mm_struct *mm = spu->mm; | 166 | struct copro_slb slb; |
171 | struct spu_slb slb; | 167 | int ret; |
172 | int psize; | ||
173 | |||
174 | pr_debug("%s\n", __func__); | ||
175 | |||
176 | slb.esid = (ea & ESID_MASK) | SLB_ESID_V; | ||
177 | 168 | ||
178 | switch(REGION_ID(ea)) { | 169 | ret = copro_calculate_slb(spu->mm, ea, &slb); |
179 | case USER_REGION_ID: | 170 | if (ret) |
180 | #ifdef CONFIG_PPC_MM_SLICES | 171 | return ret; |
181 | psize = get_slice_psize(mm, ea); | ||
182 | #else | ||
183 | psize = mm->context.user_psize; | ||
184 | #endif | ||
185 | slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) | ||
186 | << SLB_VSID_SHIFT) | SLB_VSID_USER; | ||
187 | break; | ||
188 | case VMALLOC_REGION_ID: | ||
189 | if (ea < VMALLOC_END) | ||
190 | psize = mmu_vmalloc_psize; | ||
191 | else | ||
192 | psize = mmu_io_psize; | ||
193 | slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) | ||
194 | << SLB_VSID_SHIFT) | SLB_VSID_KERNEL; | ||
195 | break; | ||
196 | case KERNEL_REGION_ID: | ||
197 | psize = mmu_linear_psize; | ||
198 | slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) | ||
199 | << SLB_VSID_SHIFT) | SLB_VSID_KERNEL; | ||
200 | break; | ||
201 | default: | ||
202 | /* Future: support kernel segments so that drivers | ||
203 | * can use SPUs. | ||
204 | */ | ||
205 | pr_debug("invalid region access at %016lx\n", ea); | ||
206 | return 1; | ||
207 | } | ||
208 | slb.vsid |= mmu_psize_defs[psize].sllp; | ||
209 | 172 | ||
210 | spu_load_slb(spu, spu->slb_replace, &slb); | 173 | spu_load_slb(spu, spu->slb_replace, &slb); |
211 | 174 | ||
@@ -253,7 +216,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) | |||
253 | return 0; | 216 | return 0; |
254 | } | 217 | } |
255 | 218 | ||
256 | static void __spu_kernel_slb(void *addr, struct spu_slb *slb) | 219 | static void __spu_kernel_slb(void *addr, struct copro_slb *slb) |
257 | { | 220 | { |
258 | unsigned long ea = (unsigned long)addr; | 221 | unsigned long ea = (unsigned long)addr; |
259 | u64 llp; | 222 | u64 llp; |
@@ -272,7 +235,7 @@ static void __spu_kernel_slb(void *addr, struct spu_slb *slb) | |||
272 | * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the | 235 | * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the |
273 | * address @new_addr is present. | 236 | * address @new_addr is present. |
274 | */ | 237 | */ |
275 | static inline int __slb_present(struct spu_slb *slbs, int nr_slbs, | 238 | static inline int __slb_present(struct copro_slb *slbs, int nr_slbs, |
276 | void *new_addr) | 239 | void *new_addr) |
277 | { | 240 | { |
278 | unsigned long ea = (unsigned long)new_addr; | 241 | unsigned long ea = (unsigned long)new_addr; |
@@ -297,7 +260,7 @@ static inline int __slb_present(struct spu_slb *slbs, int nr_slbs, | |||
297 | void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, | 260 | void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, |
298 | void *code, int code_size) | 261 | void *code, int code_size) |
299 | { | 262 | { |
300 | struct spu_slb slbs[4]; | 263 | struct copro_slb slbs[4]; |
301 | int i, nr_slbs = 0; | 264 | int i, nr_slbs = 0; |
302 | /* start and end addresses of both mappings */ | 265 | /* start and end addresses of both mappings */ |
303 | void *addrs[] = { | 266 | void *addrs[] = { |
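The __spu_trap_data_seg() hunk above drops the SPU-private SLB computation (the removed REGION_ID switch) in favour of the shared copro_calculate_slb() helper, so the same code can serve both SPUs and the new cxl driver. Reconstructed from the removed lines, the helper is expected to do roughly the following; the names come from the powerpc MMU headers, and this is an outline rather than the exact mm/copro_fault.c implementation (the non-slice user_psize fallback is omitted):

    #include <asm/copro.h>
    #include <asm/mmu.h>

    static int copro_calculate_slb_sketch(struct mm_struct *mm, u64 ea,
                                          struct copro_slb *slb)
    {
            u64 vsid;
            int psize;

            switch (REGION_ID(ea)) {
            case USER_REGION_ID:
                    psize = get_slice_psize(mm, ea);
                    vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
                            << SLB_VSID_SHIFT) | SLB_VSID_USER;
                    break;
            case VMALLOC_REGION_ID:
                    psize = (ea < VMALLOC_END) ? mmu_vmalloc_psize : mmu_io_psize;
                    vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                            << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                    break;
            case KERNEL_REGION_ID:
                    psize = mmu_linear_psize;
                    vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
                            << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
                    break;
            default:
                    return 1;       /* unsupported region, as in the old code */
            }

            slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
            slb->vsid = vsid | mmu_psize_defs[psize].sllp;
            return 0;
    }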
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c index 8cb6260cc80f..e45894a08118 100644 --- a/arch/powerpc/platforms/cell/spufs/fault.c +++ b/arch/powerpc/platforms/cell/spufs/fault.c | |||
@@ -138,7 +138,7 @@ int spufs_handle_class1(struct spu_context *ctx) | |||
138 | if (ctx->state == SPU_STATE_RUNNABLE) | 138 | if (ctx->state == SPU_STATE_RUNNABLE) |
139 | ctx->spu->stats.hash_flt++; | 139 | ctx->spu->stats.hash_flt++; |
140 | 140 | ||
141 | /* we must not hold the lock when entering spu_handle_mm_fault */ | 141 | /* we must not hold the lock when entering copro_handle_mm_fault */ |
142 | spu_release(ctx); | 142 | spu_release(ctx); |
143 | 143 | ||
144 | access = (_PAGE_PRESENT | _PAGE_USER); | 144 | access = (_PAGE_PRESENT | _PAGE_USER); |
@@ -149,7 +149,7 @@ int spufs_handle_class1(struct spu_context *ctx) | |||
149 | 149 | ||
150 | /* hashing failed, so try the actual fault handler */ | 150 | /* hashing failed, so try the actual fault handler */ |
151 | if (ret) | 151 | if (ret) |
152 | ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt); | 152 | ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt); |
153 | 153 | ||
154 | /* | 154 | /* |
155 | * This is nasty: we need the state_mutex for all the bookkeeping even | 155 | * This is nasty: we need the state_mutex for all the bookkeeping even |
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 7044fd36197b..5b77b1919fd2 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -258,7 +258,7 @@ static void chrp_init_early(void) | |||
258 | struct device_node *node; | 258 | struct device_node *node; |
259 | const char *property; | 259 | const char *property; |
260 | 260 | ||
261 | if (strstr(cmd_line, "console=")) | 261 | if (strstr(boot_command_line, "console=")) |
262 | return; | 262 | return; |
263 | /* find the boot console from /chosen/stdout */ | 263 | /* find the boot console from /chosen/stdout */ |
264 | if (!of_chosen) | 264 | if (!of_chosen) |
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c index a138e14bad2e..bd4ba5d7d568 100644 --- a/arch/powerpc/platforms/embedded6xx/gamecube.c +++ b/arch/powerpc/platforms/embedded6xx/gamecube.c | |||
@@ -90,7 +90,7 @@ define_machine(gamecube) { | |||
90 | }; | 90 | }; |
91 | 91 | ||
92 | 92 | ||
93 | static struct of_device_id gamecube_of_bus[] = { | 93 | static const struct of_device_id gamecube_of_bus[] = { |
94 | { .compatible = "nintendo,flipper", }, | 94 | { .compatible = "nintendo,flipper", }, |
95 | { }, | 95 | { }, |
96 | }; | 96 | }; |
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c index 455e7c087422..168e1d80b2e5 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | #include "mpc10x.h" | 22 | #include "mpc10x.h" |
23 | 23 | ||
24 | static __initdata struct of_device_id of_bus_ids[] = { | 24 | static const struct of_device_id of_bus_ids[] __initconst = { |
25 | { .type = "soc", }, | 25 | { .type = "soc", }, |
26 | { .compatible = "simple-bus", }, | 26 | { .compatible = "simple-bus", }, |
27 | {}, | 27 | {}, |
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c index 25e3bfb64efb..1613303177e6 100644 --- a/arch/powerpc/platforms/embedded6xx/mvme5100.c +++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c | |||
@@ -149,7 +149,7 @@ static int __init mvme5100_add_bridge(struct device_node *dev) | |||
149 | return 0; | 149 | return 0; |
150 | } | 150 | } |
151 | 151 | ||
152 | static struct of_device_id mvme5100_of_bus_ids[] __initdata = { | 152 | static const struct of_device_id mvme5100_of_bus_ids[] __initconst = { |
153 | { .compatible = "hawk-bridge", }, | 153 | { .compatible = "hawk-bridge", }, |
154 | {}, | 154 | {}, |
155 | }; | 155 | }; |
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c index c458b60d14c4..d572833ebd00 100644 --- a/arch/powerpc/platforms/embedded6xx/storcenter.c +++ b/arch/powerpc/platforms/embedded6xx/storcenter.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include "mpc10x.h" | 24 | #include "mpc10x.h" |
25 | 25 | ||
26 | 26 | ||
27 | static __initdata struct of_device_id storcenter_of_bus[] = { | 27 | static const struct of_device_id storcenter_of_bus[] __initconst = { |
28 | { .name = "soc", }, | 28 | { .name = "soc", }, |
29 | {}, | 29 | {}, |
30 | }; | 30 | }; |
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 6d8dadf19f0b..388e29bab8f6 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c | |||
@@ -235,7 +235,7 @@ define_machine(wii) { | |||
235 | .machine_shutdown = wii_shutdown, | 235 | .machine_shutdown = wii_shutdown, |
236 | }; | 236 | }; |
237 | 237 | ||
238 | static struct of_device_id wii_of_bus[] = { | 238 | static const struct of_device_id wii_of_bus[] = { |
239 | { .compatible = "nintendo,hollywood", }, | 239 | { .compatible = "nintendo,hollywood", }, |
240 | { }, | 240 | { }, |
241 | }; | 241 | }; |
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index 15adee544638..ada33358950d 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c | |||
@@ -290,7 +290,7 @@ static int gpio_mdio_remove(struct platform_device *dev) | |||
290 | return 0; | 290 | return 0; |
291 | } | 291 | } |
292 | 292 | ||
293 | static struct of_device_id gpio_mdio_match[] = | 293 | static const struct of_device_id gpio_mdio_match[] = |
294 | { | 294 | { |
295 | { | 295 | { |
296 | .compatible = "gpio-mdio", | 296 | .compatible = "gpio-mdio", |
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 8c54de6d8ec4..d71b2c7e8403 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c | |||
@@ -393,7 +393,7 @@ static inline void pasemi_pcmcia_init(void) | |||
393 | #endif | 393 | #endif |
394 | 394 | ||
395 | 395 | ||
396 | static struct of_device_id pasemi_bus_ids[] = { | 396 | static const struct of_device_id pasemi_bus_ids[] = { |
397 | /* Unfortunately needed for legacy firmwares */ | 397 | /* Unfortunately needed for legacy firmwares */ |
398 | { .type = "localbus", }, | 398 | { .type = "localbus", }, |
399 | { .type = "sdc", }, | 399 | { .type = "sdc", }, |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 141f8899a633..b127a29ac526 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -336,7 +336,7 @@ static void __init pmac_setup_arch(void) | |||
336 | #endif | 336 | #endif |
337 | 337 | ||
338 | #ifdef CONFIG_ADB | 338 | #ifdef CONFIG_ADB |
339 | if (strstr(cmd_line, "adb_sync")) { | 339 | if (strstr(boot_command_line, "adb_sync")) { |
340 | extern int __adb_probe_sync; | 340 | extern int __adb_probe_sync; |
341 | __adb_probe_sync = 1; | 341 | __adb_probe_sync = 1; |
342 | } | 342 | } |
@@ -460,7 +460,7 @@ pmac_halt(void) | |||
460 | static void __init pmac_init_early(void) | 460 | static void __init pmac_init_early(void) |
461 | { | 461 | { |
462 | /* Enable early btext debug if requested */ | 462 | /* Enable early btext debug if requested */ |
463 | if (strstr(cmd_line, "btextdbg")) { | 463 | if (strstr(boot_command_line, "btextdbg")) { |
464 | udbg_adb_init_early(); | 464 | udbg_adb_init_early(); |
465 | register_early_udbg_console(); | 465 | register_early_udbg_console(); |
466 | } | 466 | } |
@@ -469,8 +469,8 @@ static void __init pmac_init_early(void) | |||
469 | pmac_feature_init(); | 469 | pmac_feature_init(); |
470 | 470 | ||
471 | /* Initialize debug stuff */ | 471 | /* Initialize debug stuff */ |
472 | udbg_scc_init(!!strstr(cmd_line, "sccdbg")); | 472 | udbg_scc_init(!!strstr(boot_command_line, "sccdbg")); |
473 | udbg_adb_init(!!strstr(cmd_line, "btextdbg")); | 473 | udbg_adb_init(!!strstr(boot_command_line, "btextdbg")); |
474 | 474 | ||
475 | #ifdef CONFIG_PPC64 | 475 | #ifdef CONFIG_PPC64 |
476 | iommu_init_early_dart(); | 476 | iommu_init_early_dart(); |
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index c945bed4dc9e..426814a2ede3 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c | |||
@@ -66,6 +66,54 @@ static struct notifier_block ioda_eeh_nb = { | |||
66 | }; | 66 | }; |
67 | 67 | ||
68 | #ifdef CONFIG_DEBUG_FS | 68 | #ifdef CONFIG_DEBUG_FS |
69 | static ssize_t ioda_eeh_ei_write(struct file *filp, | ||
70 | const char __user *user_buf, | ||
71 | size_t count, loff_t *ppos) | ||
72 | { | ||
73 | struct pci_controller *hose = filp->private_data; | ||
74 | struct pnv_phb *phb = hose->private_data; | ||
75 | struct eeh_dev *edev; | ||
76 | struct eeh_pe *pe; | ||
77 | int pe_no, type, func; | ||
78 | unsigned long addr, mask; | ||
79 | char buf[50]; | ||
80 | int ret; | ||
81 | |||
82 | if (!phb->eeh_ops || !phb->eeh_ops->err_inject) | ||
83 | return -ENXIO; | ||
84 | |||
85 | ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); | ||
86 | if (!ret) | ||
87 | return -EFAULT; | ||
88 | |||
89 | /* Retrieve parameters */ | ||
90 | ret = sscanf(buf, "%x:%x:%x:%lx:%lx", | ||
91 | &pe_no, &type, &func, &addr, &mask); | ||
92 | if (ret != 5) | ||
93 | return -EINVAL; | ||
94 | |||
95 | /* Retrieve PE */ | ||
96 | edev = kzalloc(sizeof(*edev), GFP_KERNEL); | ||
97 | if (!edev) | ||
98 | return -ENOMEM; | ||
99 | edev->phb = hose; | ||
100 | edev->pe_config_addr = pe_no; | ||
101 | pe = eeh_pe_get(edev); | ||
102 | kfree(edev); | ||
103 | if (!pe) | ||
104 | return -ENODEV; | ||
105 | |||
106 | /* Do error injection */ | ||
107 | ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask); | ||
108 | return ret < 0 ? ret : count; | ||
109 | } | ||
110 | |||
111 | static const struct file_operations ioda_eeh_ei_fops = { | ||
112 | .open = simple_open, | ||
113 | .llseek = no_llseek, | ||
114 | .write = ioda_eeh_ei_write, | ||
115 | }; | ||
116 | |||
69 | static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val) | 117 | static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val) |
70 | { | 118 | { |
71 | struct pci_controller *hose = data; | 119 | struct pci_controller *hose = data; |
@@ -152,6 +200,10 @@ static int ioda_eeh_post_init(struct pci_controller *hose) | |||
152 | if (!phb->has_dbgfs && phb->dbgfs) { | 200 | if (!phb->has_dbgfs && phb->dbgfs) { |
153 | phb->has_dbgfs = 1; | 201 | phb->has_dbgfs = 1; |
154 | 202 | ||
203 | debugfs_create_file("err_injct", 0200, | ||
204 | phb->dbgfs, hose, | ||
205 | &ioda_eeh_ei_fops); | ||
206 | |||
155 | debugfs_create_file("err_injct_outbound", 0600, | 207 | debugfs_create_file("err_injct_outbound", 0600, |
156 | phb->dbgfs, hose, | 208 | phb->dbgfs, hose, |
157 | &ioda_eeh_outb_dbgfs_ops); | 209 | &ioda_eeh_outb_dbgfs_ops); |
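The new err_injct debugfs file accepts one line of five colon-separated hex fields, "pe_no:type:func:addr:mask", exactly as the sscanf() format in ioda_eeh_ei_write() above parses it. A small userspace sketch of driving it; the path under /sys/kernel/debug is an assumption, since the PHB directory name differs from system to system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumed path; substitute the real PHB debugfs directory. */
            const char *path = "/sys/kernel/debug/powerpc/PCI0000/err_injct";
            /* pe_no:type:func:addr:mask, all hexadecimal. */
            const char *cmd = "2:0:0:0:0\n";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
                    perror("write");
            close(fd);
            return 0;
    }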
@@ -189,6 +241,7 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option) | |||
189 | { | 241 | { |
190 | struct pci_controller *hose = pe->phb; | 242 | struct pci_controller *hose = pe->phb; |
191 | struct pnv_phb *phb = hose->private_data; | 243 | struct pnv_phb *phb = hose->private_data; |
244 | bool freeze_pe = false; | ||
192 | int enable, ret = 0; | 245 | int enable, ret = 0; |
193 | s64 rc; | 246 | s64 rc; |
194 | 247 | ||
@@ -212,6 +265,10 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option) | |||
212 | case EEH_OPT_THAW_DMA: | 265 | case EEH_OPT_THAW_DMA: |
213 | enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA; | 266 | enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA; |
214 | break; | 267 | break; |
268 | case EEH_OPT_FREEZE_PE: | ||
269 | freeze_pe = true; | ||
270 | enable = OPAL_EEH_ACTION_SET_FREEZE_ALL; | ||
271 | break; | ||
215 | default: | 272 | default: |
216 | pr_warn("%s: Invalid option %d\n", | 273 | pr_warn("%s: Invalid option %d\n", |
217 | __func__, option); | 274 | __func__, option); |
@@ -219,17 +276,35 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option) | |||
219 | } | 276 | } |
220 | 277 | ||
221 | /* If PHB supports compound PE, to handle it */ | 278 | /* If PHB supports compound PE, to handle it */ |
222 | if (phb->unfreeze_pe) { | 279 | if (freeze_pe) { |
223 | ret = phb->unfreeze_pe(phb, pe->addr, enable); | 280 | if (phb->freeze_pe) { |
281 | phb->freeze_pe(phb, pe->addr); | ||
282 | } else { | ||
283 | rc = opal_pci_eeh_freeze_set(phb->opal_id, | ||
284 | pe->addr, | ||
285 | enable); | ||
286 | if (rc != OPAL_SUCCESS) { | ||
287 | pr_warn("%s: Failure %lld freezing " | ||
288 | "PHB#%x-PE#%x\n", | ||
289 | __func__, rc, | ||
290 | phb->hose->global_number, pe->addr); | ||
291 | ret = -EIO; | ||
292 | } | ||
293 | } | ||
224 | } else { | 294 | } else { |
225 | rc = opal_pci_eeh_freeze_clear(phb->opal_id, | 295 | if (phb->unfreeze_pe) { |
226 | pe->addr, | 296 | ret = phb->unfreeze_pe(phb, pe->addr, enable); |
227 | enable); | 297 | } else { |
228 | if (rc != OPAL_SUCCESS) { | 298 | rc = opal_pci_eeh_freeze_clear(phb->opal_id, |
229 | pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n", | 299 | pe->addr, |
230 | __func__, rc, option, phb->hose->global_number, | 300 | enable); |
231 | pe->addr); | 301 | if (rc != OPAL_SUCCESS) { |
232 | ret = -EIO; | 302 | pr_warn("%s: Failure %lld enable %d " |
303 | "for PHB#%x-PE#%x\n", | ||
304 | __func__, rc, option, | ||
305 | phb->hose->global_number, pe->addr); | ||
306 | ret = -EIO; | ||
307 | } | ||
233 | } | 308 | } |
234 | } | 309 | } |
235 | 310 | ||
@@ -439,11 +514,11 @@ int ioda_eeh_phb_reset(struct pci_controller *hose, int option) | |||
439 | if (option == EEH_RESET_FUNDAMENTAL || | 514 | if (option == EEH_RESET_FUNDAMENTAL || |
440 | option == EEH_RESET_HOT) | 515 | option == EEH_RESET_HOT) |
441 | rc = opal_pci_reset(phb->opal_id, | 516 | rc = opal_pci_reset(phb->opal_id, |
442 | OPAL_PHB_COMPLETE, | 517 | OPAL_RESET_PHB_COMPLETE, |
443 | OPAL_ASSERT_RESET); | 518 | OPAL_ASSERT_RESET); |
444 | else if (option == EEH_RESET_DEACTIVATE) | 519 | else if (option == EEH_RESET_DEACTIVATE) |
445 | rc = opal_pci_reset(phb->opal_id, | 520 | rc = opal_pci_reset(phb->opal_id, |
446 | OPAL_PHB_COMPLETE, | 521 | OPAL_RESET_PHB_COMPLETE, |
447 | OPAL_DEASSERT_RESET); | 522 | OPAL_DEASSERT_RESET); |
448 | if (rc < 0) | 523 | if (rc < 0) |
449 | goto out; | 524 | goto out; |
@@ -483,15 +558,15 @@ static int ioda_eeh_root_reset(struct pci_controller *hose, int option) | |||
483 | */ | 558 | */ |
484 | if (option == EEH_RESET_FUNDAMENTAL) | 559 | if (option == EEH_RESET_FUNDAMENTAL) |
485 | rc = opal_pci_reset(phb->opal_id, | 560 | rc = opal_pci_reset(phb->opal_id, |
486 | OPAL_PCI_FUNDAMENTAL_RESET, | 561 | OPAL_RESET_PCI_FUNDAMENTAL, |
487 | OPAL_ASSERT_RESET); | 562 | OPAL_ASSERT_RESET); |
488 | else if (option == EEH_RESET_HOT) | 563 | else if (option == EEH_RESET_HOT) |
489 | rc = opal_pci_reset(phb->opal_id, | 564 | rc = opal_pci_reset(phb->opal_id, |
490 | OPAL_PCI_HOT_RESET, | 565 | OPAL_RESET_PCI_HOT, |
491 | OPAL_ASSERT_RESET); | 566 | OPAL_ASSERT_RESET); |
492 | else if (option == EEH_RESET_DEACTIVATE) | 567 | else if (option == EEH_RESET_DEACTIVATE) |
493 | rc = opal_pci_reset(phb->opal_id, | 568 | rc = opal_pci_reset(phb->opal_id, |
494 | OPAL_PCI_HOT_RESET, | 569 | OPAL_RESET_PCI_HOT, |
495 | OPAL_DEASSERT_RESET); | 570 | OPAL_DEASSERT_RESET); |
496 | if (rc < 0) | 571 | if (rc < 0) |
497 | goto out; | 572 | goto out; |
@@ -607,6 +682,31 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) | |||
607 | if (pe->type & EEH_PE_PHB) { | 682 | if (pe->type & EEH_PE_PHB) { |
608 | ret = ioda_eeh_phb_reset(hose, option); | 683 | ret = ioda_eeh_phb_reset(hose, option); |
609 | } else { | 684 | } else { |
685 | struct pnv_phb *phb; | ||
686 | s64 rc; | ||
687 | |||
688 | /* | ||
689 | * The frozen PE might be caused by PAPR error injection | ||
690 | * registers, which are expected to be cleared after hitting | ||
691 | * frozen PE as stated in the hardware spec. Unfortunately, | ||
692 | * that's not true on P7IOC. So we have to clear it manually | ||
693 | * to avoid recursive EEH errors during recovery. | ||
694 | */ | ||
695 | phb = hose->private_data; | ||
696 | if (phb->model == PNV_PHB_MODEL_P7IOC && | ||
697 | (option == EEH_RESET_HOT || | ||
698 | option == EEH_RESET_FUNDAMENTAL)) { | ||
699 | rc = opal_pci_reset(phb->opal_id, | ||
700 | OPAL_RESET_PHB_ERROR, | ||
701 | OPAL_ASSERT_RESET); | ||
702 | if (rc != OPAL_SUCCESS) { | ||
703 | pr_warn("%s: Failure %lld clearing " | ||
704 | "error injection registers\n", | ||
705 | __func__, rc); | ||
706 | return -EIO; | ||
707 | } | ||
708 | } | ||
709 | |||
610 | bus = eeh_pe_bus_get(pe); | 710 | bus = eeh_pe_bus_get(pe); |
611 | if (pci_is_root_bus(bus) || | 711 | if (pci_is_root_bus(bus) || |
612 | pci_is_root_bus(bus->parent)) | 712 | pci_is_root_bus(bus->parent)) |
@@ -628,8 +728,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) | |||
628 | * Retrieve error log, which contains log from device driver | 728 | * Retrieve error log, which contains log from device driver |
629 | * and firmware. | 729 | * and firmware. |
630 | */ | 730 | */ |
631 | int ioda_eeh_get_log(struct eeh_pe *pe, int severity, | 731 | static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, |
632 | char *drv_log, unsigned long len) | 732 | char *drv_log, unsigned long len) |
633 | { | 733 | { |
634 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); | 734 | pnv_pci_dump_phb_diag_data(pe->phb, pe->data); |
635 | 735 | ||
@@ -650,6 +750,49 @@ static int ioda_eeh_configure_bridge(struct eeh_pe *pe) | |||
650 | return 0; | 750 | return 0; |
651 | } | 751 | } |
652 | 752 | ||
753 | static int ioda_eeh_err_inject(struct eeh_pe *pe, int type, int func, | ||
754 | unsigned long addr, unsigned long mask) | ||
755 | { | ||
756 | struct pci_controller *hose = pe->phb; | ||
757 | struct pnv_phb *phb = hose->private_data; | ||
758 | s64 ret; | ||
759 | |||
760 | /* Sanity check on error type */ | ||
761 | if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR && | ||
762 | type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) { | ||
763 | pr_warn("%s: Invalid error type %d\n", | ||
764 | __func__, type); | ||
765 | return -ERANGE; | ||
766 | } | ||
767 | |||
768 | if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR || | ||
769 | func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) { | ||
770 | pr_warn("%s: Invalid error function %d\n", | ||
771 | __func__, func); | ||
772 | return -ERANGE; | ||
773 | } | ||
774 | |||
775 | /* Firmware supports error injection ? */ | ||
776 | if (!opal_check_token(OPAL_PCI_ERR_INJECT)) { | ||
777 | pr_warn("%s: Firmware doesn't support error injection\n", | ||
778 | __func__); | ||
779 | return -ENXIO; | ||
780 | } | ||
781 | |||
782 | /* Do error injection */ | ||
783 | ret = opal_pci_err_inject(phb->opal_id, pe->addr, | ||
784 | type, func, addr, mask); | ||
785 | if (ret != OPAL_SUCCESS) { | ||
786 | pr_warn("%s: Failure %lld injecting error " | ||
787 | "%d-%d to PHB#%x-PE#%x\n", | ||
788 | __func__, ret, type, func, | ||
789 | hose->global_number, pe->addr); | ||
790 | return -EIO; | ||
791 | } | ||
792 | |||
793 | return 0; | ||
794 | } | ||
795 | |||
653 | static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) | 796 | static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) |
654 | { | 797 | { |
655 | /* GEM */ | 798 | /* GEM */ |
@@ -743,14 +886,12 @@ static int ioda_eeh_get_pe(struct pci_controller *hose, | |||
743 | * the master PE because slave PE is invisible | 886 | * the master PE because slave PE is invisible |
744 | * to EEH core. | 887 | * to EEH core. |
745 | */ | 888 | */ |
746 | if (phb->get_pe_state) { | 889 | pnv_pe = &phb->ioda.pe_array[pe_no]; |
747 | pnv_pe = &phb->ioda.pe_array[pe_no]; | 890 | if (pnv_pe->flags & PNV_IODA_PE_SLAVE) { |
748 | if (pnv_pe->flags & PNV_IODA_PE_SLAVE) { | 891 | pnv_pe = pnv_pe->master; |
749 | pnv_pe = pnv_pe->master; | 892 | WARN_ON(!pnv_pe || |
750 | WARN_ON(!pnv_pe || | 893 | !(pnv_pe->flags & PNV_IODA_PE_MASTER)); |
751 | !(pnv_pe->flags & PNV_IODA_PE_MASTER)); | 894 | pe_no = pnv_pe->pe_number; |
752 | pe_no = pnv_pe->pe_number; | ||
753 | } | ||
754 | } | 895 | } |
755 | 896 | ||
756 | /* Find the PE according to PE# */ | 897 | /* Find the PE according to PE# */ |
@@ -761,15 +902,37 @@ static int ioda_eeh_get_pe(struct pci_controller *hose, | |||
761 | if (!dev_pe) | 902 | if (!dev_pe) |
762 | return -EEXIST; | 903 | return -EEXIST; |
763 | 904 | ||
764 | /* | 905 | /* Freeze the (compound) PE */ |
765 | * At this point, we're sure the compound PE should | ||
766 | * be put into frozen state. | ||
767 | */ | ||
768 | *pe = dev_pe; | 906 | *pe = dev_pe; |
769 | if (phb->freeze_pe && | 907 | if (!(dev_pe->state & EEH_PE_ISOLATED)) |
770 | !(dev_pe->state & EEH_PE_ISOLATED)) | ||
771 | phb->freeze_pe(phb, pe_no); | 908 | phb->freeze_pe(phb, pe_no); |
772 | 909 | ||
910 | /* | ||
911 | * At this point, we're sure the (compound) PE should | ||
912 | * have been frozen. However, we still need to poke upwards | ||
913 | * until we hit a frozen PE at the top level. | ||
914 | */ | ||
915 | dev_pe = dev_pe->parent; | ||
916 | while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) { | ||
917 | int ret; | ||
918 | int active_flags = (EEH_STATE_MMIO_ACTIVE | | ||
919 | EEH_STATE_DMA_ACTIVE); | ||
920 | |||
921 | ret = eeh_ops->get_state(dev_pe, NULL); | ||
922 | if (ret <= 0 || (ret & active_flags) == active_flags) { | ||
923 | dev_pe = dev_pe->parent; | ||
924 | continue; | ||
925 | } | ||
926 | |||
927 | /* Frozen parent PE */ | ||
928 | *pe = dev_pe; | ||
929 | if (!(dev_pe->state & EEH_PE_ISOLATED)) | ||
930 | phb->freeze_pe(phb, dev_pe->addr); | ||
931 | |||
932 | /* Next one */ | ||
933 | dev_pe = dev_pe->parent; | ||
934 | } | ||
935 | |||
773 | return 0; | 936 | return 0; |
774 | } | 937 | } |
775 | 938 | ||
@@ -971,5 +1134,6 @@ struct pnv_eeh_ops ioda_eeh_ops = { | |||
971 | .reset = ioda_eeh_reset, | 1134 | .reset = ioda_eeh_reset, |
972 | .get_log = ioda_eeh_get_log, | 1135 | .get_log = ioda_eeh_get_log, |
973 | .configure_bridge = ioda_eeh_configure_bridge, | 1136 | .configure_bridge = ioda_eeh_configure_bridge, |
1137 | .err_inject = ioda_eeh_err_inject, | ||
974 | .next_error = ioda_eeh_next_error | 1138 | .next_error = ioda_eeh_next_error |
975 | }; | 1139 | }; |
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index fd7a16f855ed..3e89cbf55885 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c | |||
@@ -359,6 +359,31 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe) | |||
359 | } | 359 | } |
360 | 360 | ||
361 | /** | 361 | /** |
362 | * powernv_pe_err_inject - Inject specified error to the indicated PE | ||
363 | * @pe: the indicated PE | ||
364 | * @type: error type | ||
365 | * @func: specific error type | ||
366 | * @addr: address | ||
367 | * @mask: address mask | ||
368 | * | ||
369 | * The routine is called to inject specified error, which is | ||
370 | * determined by @type and @func, to the indicated PE for | ||
371 | * testing purpose. | ||
372 | */ | ||
373 | static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func, | ||
374 | unsigned long addr, unsigned long mask) | ||
375 | { | ||
376 | struct pci_controller *hose = pe->phb; | ||
377 | struct pnv_phb *phb = hose->private_data; | ||
378 | int ret = -EEXIST; | ||
379 | |||
380 | if (phb->eeh_ops && phb->eeh_ops->err_inject) | ||
381 | ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask); | ||
382 | |||
383 | return ret; | ||
384 | } | ||
385 | |||
386 | /** | ||
362 | * powernv_eeh_next_error - Retrieve next EEH error to handle | 387 | * powernv_eeh_next_error - Retrieve next EEH error to handle |
363 | * @pe: Affected PE | 388 | * @pe: Affected PE |
364 | * | 389 | * |
@@ -414,6 +439,7 @@ static struct eeh_ops powernv_eeh_ops = { | |||
414 | .wait_state = powernv_eeh_wait_state, | 439 | .wait_state = powernv_eeh_wait_state, |
415 | .get_log = powernv_eeh_get_log, | 440 | .get_log = powernv_eeh_get_log, |
416 | .configure_bridge = powernv_eeh_configure_bridge, | 441 | .configure_bridge = powernv_eeh_configure_bridge, |
442 | .err_inject = powernv_eeh_err_inject, | ||
417 | .read_config = pnv_pci_cfg_read, | 443 | .read_config = pnv_pci_cfg_read, |
418 | .write_config = pnv_pci_cfg_write, | 444 | .write_config = pnv_pci_cfg_write, |
419 | .next_error = powernv_eeh_next_error, | 445 | .next_error = powernv_eeh_next_error, |
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c index 85bb8fff7947..23260f7dfa7a 100644 --- a/arch/powerpc/platforms/powernv/opal-dump.c +++ b/arch/powerpc/platforms/powernv/opal-dump.c | |||
@@ -112,7 +112,7 @@ static ssize_t init_dump_show(struct dump_obj *dump_obj, | |||
112 | struct dump_attribute *attr, | 112 | struct dump_attribute *attr, |
113 | char *buf) | 113 | char *buf) |
114 | { | 114 | { |
115 | return sprintf(buf, "1 - initiate dump\n"); | 115 | return sprintf(buf, "1 - initiate Service Processor(FSP) dump\n"); |
116 | } | 116 | } |
117 | 117 | ||
118 | static int64_t dump_fips_init(uint8_t type) | 118 | static int64_t dump_fips_init(uint8_t type) |
@@ -121,7 +121,7 @@ static int64_t dump_fips_init(uint8_t type) | |||
121 | 121 | ||
122 | rc = opal_dump_init(type); | 122 | rc = opal_dump_init(type); |
123 | if (rc) | 123 | if (rc) |
124 | pr_warn("%s: Failed to initiate FipS dump (%d)\n", | 124 | pr_warn("%s: Failed to initiate FSP dump (%d)\n", |
125 | __func__, rc); | 125 | __func__, rc); |
126 | return rc; | 126 | return rc; |
127 | } | 127 | } |
@@ -131,8 +131,12 @@ static ssize_t init_dump_store(struct dump_obj *dump_obj, | |||
131 | const char *buf, | 131 | const char *buf, |
132 | size_t count) | 132 | size_t count) |
133 | { | 133 | { |
134 | dump_fips_init(DUMP_TYPE_FSP); | 134 | int rc; |
135 | pr_info("%s: Initiated FSP dump\n", __func__); | 135 | |
136 | rc = dump_fips_init(DUMP_TYPE_FSP); | ||
137 | if (rc == OPAL_SUCCESS) | ||
138 | pr_info("%s: Initiated FSP dump\n", __func__); | ||
139 | |||
136 | return count; | 140 | return count; |
137 | } | 141 | } |
138 | 142 | ||
@@ -297,7 +301,7 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj, | |||
297 | * and rely on userspace to ask us to try | 301 | * and rely on userspace to ask us to try |
298 | * again. | 302 | * again. |
299 | */ | 303 | */ |
300 | pr_info("%s: Platform dump partially read.ID = 0x%x\n", | 304 | pr_info("%s: Platform dump partially read. ID = 0x%x\n", |
301 | __func__, dump->id); | 305 | __func__, dump->id); |
302 | return -EIO; | 306 | return -EIO; |
303 | } | 307 | } |
@@ -423,6 +427,10 @@ void __init opal_platform_dump_init(void) | |||
423 | { | 427 | { |
424 | int rc; | 428 | int rc; |
425 | 429 | ||
430 | /* Dump not supported by firmware */ | ||
431 | if (!opal_check_token(OPAL_DUMP_READ)) | ||
432 | return; | ||
433 | |||
426 | dump_kset = kset_create_and_add("dump", NULL, opal_kobj); | 434 | dump_kset = kset_create_and_add("dump", NULL, opal_kobj); |
427 | if (!dump_kset) { | 435 | if (!dump_kset) { |
428 | pr_warn("%s: Failed to create dump kset\n", __func__); | 436 | pr_warn("%s: Failed to create dump kset\n", __func__); |
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index bbdb3ffaab98..518fe95dbf24 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c | |||
@@ -295,6 +295,10 @@ int __init opal_elog_init(void) | |||
295 | { | 295 | { |
296 | int rc = 0; | 296 | int rc = 0; |
297 | 297 | ||
298 | /* ELOG not supported by firmware */ | ||
299 | if (!opal_check_token(OPAL_ELOG_READ)) | ||
300 | return -1; | ||
301 | |||
298 | elog_kset = kset_create_and_add("elog", NULL, opal_kobj); | 302 | elog_kset = kset_create_and_add("elog", NULL, opal_kobj); |
299 | if (!elog_kset) { | 303 | if (!elog_kset) { |
300 | pr_warn("%s: failed to create elog kset\n", __func__); | 304 | pr_warn("%s: failed to create elog kset\n", __func__); |
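The opal-dump.c and opal-elog.c hunks above (and the opal-rtc.c one below) share a single idiom: probe the firmware with opal_check_token() before registering any sysfs objects or callbacks, so kernels running on older OPAL builds skip the feature instead of tripping over an unimplemented call. A minimal sketch of that probe-then-register shape, with hypothetical firmware_has_call() and register_dump_sysfs() standing in for the OPAL token check and kset_create_and_add():

    #include <stdbool.h>
    #include <stdio.h>

    /* Made-up stand-ins for opal_check_token() and kset_create_and_add(). */
    static bool firmware_has_call(int token) { return token == 42; }
    static int  register_dump_sysfs(void)    { return 0; }

    static int dump_subsystem_init(int read_token)
    {
        /* Feature absent in this firmware build: skip quietly. */
        if (!firmware_has_call(read_token))
            return 0;

        if (register_dump_sysfs()) {
            fprintf(stderr, "dump: failed to create sysfs kset\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("init on old firmware: %d\n", dump_subsystem_init(7));
        printf("init on new firmware: %d\n", dump_subsystem_init(42));
        return 0;
    }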
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index ad4b31df779a..dd2c285ad170 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c | |||
@@ -191,6 +191,7 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, | |||
191 | { | 191 | { |
192 | struct lpc_debugfs_entry *lpc = filp->private_data; | 192 | struct lpc_debugfs_entry *lpc = filp->private_data; |
193 | u32 data, pos, len, todo; | 193 | u32 data, pos, len, todo; |
194 | __be32 bedata; | ||
194 | int rc; | 195 | int rc; |
195 | 196 | ||
196 | if (!access_ok(VERIFY_WRITE, ubuf, count)) | 197 | if (!access_ok(VERIFY_WRITE, ubuf, count)) |
@@ -213,9 +214,10 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, | |||
213 | len = 2; | 214 | len = 2; |
214 | } | 215 | } |
215 | rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos, | 216 | rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos, |
216 | &data, len); | 217 | &bedata, len); |
217 | if (rc) | 218 | if (rc) |
218 | return -ENXIO; | 219 | return -ENXIO; |
220 | data = be32_to_cpu(bedata); | ||
219 | switch(len) { | 221 | switch(len) { |
220 | case 4: | 222 | case 4: |
221 | rc = __put_user((u32)data, (u32 __user *)ubuf); | 223 | rc = __put_user((u32)data, (u32 __user *)ubuf); |
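The opal-lpc.c fix reads the firmware reply into a __be32 and converts it with be32_to_cpu() before handing it to userspace, rather than treating the returned buffer as host-endian. A userspace analogue of that convert-once-at-the-boundary pattern, using be32toh()/htobe32() from <endian.h>; read_reg() is a made-up stand-in for opal_lpc_read():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for opal_lpc_read(): the "firmware" hands back big-endian data. */
    static int read_reg(uint32_t pos, uint32_t *be_out)
    {
        *be_out = htobe32(pos * 2 + 1);   /* pretend device value */
        return 0;
    }

    int main(void)
    {
        uint32_t bedata, data;

        if (read_reg(8, &bedata))
            return 1;
        data = be32toh(bedata);           /* convert once, then use host-endian */
        printf("reg value: %u\n", data);
        return 0;
    }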
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c index acd9f7e96678..f9896fd5d04a 100644 --- a/arch/powerpc/platforms/powernv/opal-nvram.c +++ b/arch/powerpc/platforms/powernv/opal-nvram.c | |||
@@ -78,7 +78,7 @@ void __init opal_nvram_init(void) | |||
78 | } | 78 | } |
79 | nvram_size = be32_to_cpup(nbytes_p); | 79 | nvram_size = be32_to_cpup(nbytes_p); |
80 | 80 | ||
81 | printk(KERN_INFO "OPAL nvram setup, %u bytes\n", nvram_size); | 81 | pr_info("OPAL nvram setup, %u bytes\n", nvram_size); |
82 | of_node_put(np); | 82 | of_node_put(np); |
83 | 83 | ||
84 | ppc_md.nvram_read = opal_nvram_read; | 84 | ppc_md.nvram_read = opal_nvram_read; |
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index b1885db8fdf3..499707ddaa9c 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c | |||
@@ -42,6 +42,9 @@ unsigned long __init opal_get_boot_time(void) | |||
42 | __be64 __h_m_s_ms; | 42 | __be64 __h_m_s_ms; |
43 | long rc = OPAL_BUSY; | 43 | long rc = OPAL_BUSY; |
44 | 44 | ||
45 | if (!opal_check_token(OPAL_RTC_READ)) | ||
46 | goto out; | ||
47 | |||
45 | while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { | 48 | while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { |
46 | rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); | 49 | rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); |
47 | if (rc == OPAL_BUSY_EVENT) | 50 | if (rc == OPAL_BUSY_EVENT) |
@@ -49,16 +52,18 @@ unsigned long __init opal_get_boot_time(void) | |||
49 | else | 52 | else |
50 | mdelay(10); | 53 | mdelay(10); |
51 | } | 54 | } |
52 | if (rc != OPAL_SUCCESS) { | 55 | if (rc != OPAL_SUCCESS) |
53 | ppc_md.get_rtc_time = NULL; | 56 | goto out; |
54 | ppc_md.set_rtc_time = NULL; | 57 | |
55 | return 0; | ||
56 | } | ||
57 | y_m_d = be32_to_cpu(__y_m_d); | 58 | y_m_d = be32_to_cpu(__y_m_d); |
58 | h_m_s_ms = be64_to_cpu(__h_m_s_ms); | 59 | h_m_s_ms = be64_to_cpu(__h_m_s_ms); |
59 | opal_to_tm(y_m_d, h_m_s_ms, &tm); | 60 | opal_to_tm(y_m_d, h_m_s_ms, &tm); |
60 | return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, | 61 | return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, |
61 | tm.tm_hour, tm.tm_min, tm.tm_sec); | 62 | tm.tm_hour, tm.tm_min, tm.tm_sec); |
63 | out: | ||
64 | ppc_md.get_rtc_time = NULL; | ||
65 | ppc_md.set_rtc_time = NULL; | ||
66 | return 0; | ||
62 | } | 67 | } |
63 | 68 | ||
64 | void opal_get_rtc_time(struct rtc_time *tm) | 69 | void opal_get_rtc_time(struct rtc_time *tm) |
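The opal-rtc.c change folds the two failure cases (token missing, OPAL call failing) into one out: label that clears the ppc_md RTC hooks, instead of duplicating that cleanup. A generic sketch of the single-exit error path, assuming made-up have_rtc()/read_rtc() helpers and plain integers in place of the ppc_md function pointers:

    #include <stdbool.h>
    #include <stdio.h>

    static int get_time_hook = 1, set_time_hook = 1;  /* stand-ins for ppc_md hooks */

    static bool have_rtc(void)             { return false; }
    static int  read_rtc(unsigned long *t) { *t = 0; return -1; }

    static unsigned long boot_time(void)
    {
        unsigned long t;

        if (!have_rtc())
            goto out;
        if (read_rtc(&t))
            goto out;
        return t;
    out:
        /* one place that disables the RTC hooks on any failure */
        get_time_hook = 0;
        set_time_hook = 0;
        return 0;
    }

    int main(void)
    {
        printf("boot time: %lu, hooks %s\n", boot_time(),
               (get_time_hook && set_time_hook) ? "kept" : "cleared");
        return 0;
    }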
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c index d8a000a9988b..ae14c40b4b1c 100644 --- a/arch/powerpc/platforms/powernv/opal-tracepoints.c +++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <linux/jump_label.h> | 2 | #include <linux/jump_label.h> |
3 | #include <asm/trace.h> | 3 | #include <asm/trace.h> |
4 | 4 | ||
5 | #ifdef CONFIG_JUMP_LABEL | 5 | #ifdef HAVE_JUMP_LABEL |
6 | struct static_key opal_tracepoint_key = STATIC_KEY_INIT; | 6 | struct static_key opal_tracepoint_key = STATIC_KEY_INIT; |
7 | 7 | ||
8 | void opal_tracepoint_regfunc(void) | 8 | void opal_tracepoint_regfunc(void) |
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 2e6ce1b8dc8f..e9e2450c1fdd 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S | |||
@@ -184,6 +184,7 @@ OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER) | |||
184 | OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS); | 184 | OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS); |
185 | OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR); | 185 | OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR); |
186 | OPAL_CALL(opal_pci_eeh_freeze_set, OPAL_PCI_EEH_FREEZE_SET); | 186 | OPAL_CALL(opal_pci_eeh_freeze_set, OPAL_PCI_EEH_FREEZE_SET); |
187 | OPAL_CALL(opal_pci_err_inject, OPAL_PCI_ERR_INJECT); | ||
187 | OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC); | 188 | OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC); |
188 | OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE); | 189 | OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE); |
189 | OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW); | 190 | OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW); |
@@ -232,6 +233,7 @@ OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); | |||
232 | OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); | 233 | OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); |
233 | OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); | 234 | OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); |
234 | OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE); | 235 | OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE); |
236 | OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN); | ||
235 | OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT); | 237 | OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT); |
236 | OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO); | 238 | OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO); |
237 | OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2); | 239 | OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2); |
@@ -247,3 +249,4 @@ OPAL_CALL(opal_set_param, OPAL_SET_PARAM); | |||
247 | OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI); | 249 | OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI); |
248 | OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION); | 250 | OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION); |
249 | OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION); | 251 | OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION); |
252 | OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE); | ||
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 4b005ae5dc4b..b642b0562f5a 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c | |||
@@ -105,12 +105,12 @@ int __init early_init_dt_scan_opal(unsigned long node, | |||
105 | if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { | 105 | if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { |
106 | powerpc_firmware_features |= FW_FEATURE_OPALv2; | 106 | powerpc_firmware_features |= FW_FEATURE_OPALv2; |
107 | powerpc_firmware_features |= FW_FEATURE_OPALv3; | 107 | powerpc_firmware_features |= FW_FEATURE_OPALv3; |
108 | printk("OPAL V3 detected !\n"); | 108 | pr_info("OPAL V3 detected !\n"); |
109 | } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { | 109 | } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { |
110 | powerpc_firmware_features |= FW_FEATURE_OPALv2; | 110 | powerpc_firmware_features |= FW_FEATURE_OPALv2; |
111 | printk("OPAL V2 detected !\n"); | 111 | pr_info("OPAL V2 detected !\n"); |
112 | } else { | 112 | } else { |
113 | printk("OPAL V1 detected !\n"); | 113 | pr_info("OPAL V1 detected !\n"); |
114 | } | 114 | } |
115 | 115 | ||
116 | /* Reinit all cores with the right endian */ | 116 | /* Reinit all cores with the right endian */ |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index df241b11d4f7..468a0f23c7f2 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -37,41 +37,43 @@ | |||
37 | #include <asm/xics.h> | 37 | #include <asm/xics.h> |
38 | #include <asm/debug.h> | 38 | #include <asm/debug.h> |
39 | #include <asm/firmware.h> | 39 | #include <asm/firmware.h> |
40 | #include <asm/pnv-pci.h> | ||
41 | |||
42 | #include <misc/cxl.h> | ||
40 | 43 | ||
41 | #include "powernv.h" | 44 | #include "powernv.h" |
42 | #include "pci.h" | 45 | #include "pci.h" |
43 | 46 | ||
44 | #define define_pe_printk_level(func, kern_level) \ | 47 | static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, |
45 | static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \ | 48 | const char *fmt, ...) |
46 | { \ | 49 | { |
47 | struct va_format vaf; \ | 50 | struct va_format vaf; |
48 | va_list args; \ | 51 | va_list args; |
49 | char pfix[32]; \ | 52 | char pfix[32]; |
50 | int r; \ | 53 | |
51 | \ | 54 | va_start(args, fmt); |
52 | va_start(args, fmt); \ | 55 | |
53 | \ | 56 | vaf.fmt = fmt; |
54 | vaf.fmt = fmt; \ | 57 | vaf.va = &args; |
55 | vaf.va = &args; \ | 58 | |
56 | \ | 59 | if (pe->pdev) |
57 | if (pe->pdev) \ | 60 | strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); |
58 | strlcpy(pfix, dev_name(&pe->pdev->dev), \ | 61 | else |
59 | sizeof(pfix)); \ | 62 | sprintf(pfix, "%04x:%02x ", |
60 | else \ | 63 | pci_domain_nr(pe->pbus), pe->pbus->number); |
61 | sprintf(pfix, "%04x:%02x ", \ | 64 | |
62 | pci_domain_nr(pe->pbus), \ | 65 | printk("%spci %s: [PE# %.3d] %pV", |
63 | pe->pbus->number); \ | 66 | level, pfix, pe->pe_number, &vaf); |
64 | r = printk(kern_level "pci %s: [PE# %.3d] %pV", \ | 67 | |
65 | pfix, pe->pe_number, &vaf); \ | 68 | va_end(args); |
66 | \ | 69 | } |
67 | va_end(args); \ | 70 | |
68 | \ | 71 | #define pe_err(pe, fmt, ...) \ |
69 | return r; \ | 72 | pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__) |
70 | } \ | 73 | #define pe_warn(pe, fmt, ...) \ |
71 | 74 | pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__) | |
72 | define_pe_printk_level(pe_err, KERN_ERR); | 75 | #define pe_info(pe, fmt, ...) \ |
73 | define_pe_printk_level(pe_warn, KERN_WARNING); | 76 | pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__) |
74 | define_pe_printk_level(pe_info, KERN_INFO); | ||
75 | 77 | ||
76 | /* | 78 | /* |
77 | * stdcix is only supposed to be used in hypervisor real mode as per | 79 | * stdcix is only supposed to be used in hypervisor real mode as per |
@@ -385,7 +387,7 @@ static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no) | |||
385 | } | 387 | } |
386 | } | 388 | } |
387 | 389 | ||
388 | int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt) | 390 | static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt) |
389 | { | 391 | { |
390 | struct pnv_ioda_pe *pe, *slave; | 392 | struct pnv_ioda_pe *pe, *slave; |
391 | s64 rc; | 393 | s64 rc; |
@@ -890,6 +892,28 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, | |||
890 | return 0; | 892 | return 0; |
891 | } | 893 | } |
892 | 894 | ||
895 | static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb, | ||
896 | struct pci_dev *pdev) | ||
897 | { | ||
898 | struct pci_dn *pdn = pci_get_pdn(pdev); | ||
899 | struct pnv_ioda_pe *pe; | ||
900 | u64 end, mask; | ||
901 | |||
902 | if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) | ||
903 | return 0; | ||
904 | |||
905 | pe = &phb->ioda.pe_array[pdn->pe_number]; | ||
906 | if (!pe->tce_bypass_enabled) | ||
907 | return __dma_get_required_mask(&pdev->dev); | ||
908 | |||
909 | |||
910 | end = pe->tce_bypass_base + memblock_end_of_DRAM(); | ||
911 | mask = 1ULL << (fls64(end) - 1); | ||
912 | mask += mask - 1; | ||
913 | |||
914 | return mask; | ||
915 | } | ||
916 | |||
893 | static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, | 917 | static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, |
894 | struct pci_bus *bus, | 918 | struct pci_bus *bus, |
895 | bool add_to_iommu_group) | 919 | bool add_to_iommu_group) |
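The new pnv_pci_ioda_dma_get_required_mask() above computes the mask as the highest power of two not exceeding the top bypass address, then doubles it and subtracts one, i.e. every bit up to and including the top set bit of end. The same arithmetic as a standalone helper, with fls64() open-coded via __builtin_clzll and a made-up end value standing in for tce_bypass_base + memblock_end_of_DRAM() (assumes end is non-zero, as the kernel code does):

    #include <stdint.h>
    #include <stdio.h>

    /* Kernel-style fls64(): index (1..64) of the highest set bit, 0 for 0. */
    static int fls64(uint64_t x)
    {
        return x ? 64 - __builtin_clzll(x) : 0;
    }

    /* Mask covering every bit up to and including the top bit of 'end'. */
    static uint64_t required_mask(uint64_t end)
    {
        uint64_t mask = 1ULL << (fls64(end) - 1);

        mask += mask - 1;
        return mask;
    }

    int main(void)
    {
        uint64_t end = 0x200000000ULL;   /* e.g. top of a window at 8 GiB */

        printf("end=%#llx mask=%#llx\n", (unsigned long long)end,
               (unsigned long long)required_mask(end));  /* -> 0x3ffffffff */
        return 0;
    }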
@@ -1306,14 +1330,186 @@ static void pnv_ioda2_msi_eoi(struct irq_data *d) | |||
1306 | icp_native_eoi(d); | 1330 | icp_native_eoi(d); |
1307 | } | 1331 | } |
1308 | 1332 | ||
1333 | |||
1334 | static void set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq) | ||
1335 | { | ||
1336 | struct irq_data *idata; | ||
1337 | struct irq_chip *ichip; | ||
1338 | |||
1339 | if (phb->type != PNV_PHB_IODA2) | ||
1340 | return; | ||
1341 | |||
1342 | if (!phb->ioda.irq_chip_init) { | ||
1343 | /* | ||
1344 | * First time we setup an MSI IRQ, we need to setup the | ||
1345 | * corresponding IRQ chip to route correctly. | ||
1346 | */ | ||
1347 | idata = irq_get_irq_data(virq); | ||
1348 | ichip = irq_data_get_irq_chip(idata); | ||
1349 | phb->ioda.irq_chip_init = 1; | ||
1350 | phb->ioda.irq_chip = *ichip; | ||
1351 | phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; | ||
1352 | } | ||
1353 | irq_set_chip(virq, &phb->ioda.irq_chip); | ||
1354 | } | ||
1355 | |||
1356 | #ifdef CONFIG_CXL_BASE | ||
1357 | |||
1358 | struct device_node *pnv_pci_to_phb_node(struct pci_dev *dev) | ||
1359 | { | ||
1360 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1361 | |||
1362 | return hose->dn; | ||
1363 | } | ||
1364 | EXPORT_SYMBOL(pnv_pci_to_phb_node); | ||
1365 | |||
1366 | int pnv_phb_to_cxl(struct pci_dev *dev) | ||
1367 | { | ||
1368 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1369 | struct pnv_phb *phb = hose->private_data; | ||
1370 | struct pnv_ioda_pe *pe; | ||
1371 | int rc; | ||
1372 | |||
1373 | pe = pnv_ioda_get_pe(dev); | ||
1374 | if (!pe) | ||
1375 | return -ENODEV; | ||
1376 | |||
1377 | pe_info(pe, "Switching PHB to CXL\n"); | ||
1378 | |||
1379 | rc = opal_pci_set_phb_cxl_mode(phb->opal_id, 1, pe->pe_number); | ||
1380 | if (rc) | ||
1381 | dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc); | ||
1382 | |||
1383 | return rc; | ||
1384 | } | ||
1385 | EXPORT_SYMBOL(pnv_phb_to_cxl); | ||
1386 | |||
1387 | /* Find PHB for cxl dev and allocate MSI hwirqs? | ||
1388 | * Returns the absolute hardware IRQ number | ||
1389 | */ | ||
1390 | int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num) | ||
1391 | { | ||
1392 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1393 | struct pnv_phb *phb = hose->private_data; | ||
1394 | int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num); | ||
1395 | |||
1396 | if (hwirq < 0) { | ||
1397 | dev_warn(&dev->dev, "Failed to find a free MSI\n"); | ||
1398 | return -ENOSPC; | ||
1399 | } | ||
1400 | |||
1401 | return phb->msi_base + hwirq; | ||
1402 | } | ||
1403 | EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs); | ||
1404 | |||
1405 | void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num) | ||
1406 | { | ||
1407 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1408 | struct pnv_phb *phb = hose->private_data; | ||
1409 | |||
1410 | msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num); | ||
1411 | } | ||
1412 | EXPORT_SYMBOL(pnv_cxl_release_hwirqs); | ||
1413 | |||
1414 | void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs, | ||
1415 | struct pci_dev *dev) | ||
1416 | { | ||
1417 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1418 | struct pnv_phb *phb = hose->private_data; | ||
1419 | int i, hwirq; | ||
1420 | |||
1421 | for (i = 1; i < CXL_IRQ_RANGES; i++) { | ||
1422 | if (!irqs->range[i]) | ||
1423 | continue; | ||
1424 | pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n", | ||
1425 | i, irqs->offset[i], | ||
1426 | irqs->range[i]); | ||
1427 | hwirq = irqs->offset[i] - phb->msi_base; | ||
1428 | msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, | ||
1429 | irqs->range[i]); | ||
1430 | } | ||
1431 | } | ||
1432 | EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges); | ||
1433 | |||
1434 | int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs, | ||
1435 | struct pci_dev *dev, int num) | ||
1436 | { | ||
1437 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1438 | struct pnv_phb *phb = hose->private_data; | ||
1439 | int i, hwirq, try; | ||
1440 | |||
1441 | memset(irqs, 0, sizeof(struct cxl_irq_ranges)); | ||
1442 | |||
1443 | /* 0 is reserved for the multiplexed PSL DSI interrupt */ | ||
1444 | for (i = 1; i < CXL_IRQ_RANGES && num; i++) { | ||
1445 | try = num; | ||
1446 | while (try) { | ||
1447 | hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try); | ||
1448 | if (hwirq >= 0) | ||
1449 | break; | ||
1450 | try /= 2; | ||
1451 | } | ||
1452 | if (!try) | ||
1453 | goto fail; | ||
1454 | |||
1455 | irqs->offset[i] = phb->msi_base + hwirq; | ||
1456 | irqs->range[i] = try; | ||
1457 | pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n", | ||
1458 | i, irqs->offset[i], irqs->range[i]); | ||
1459 | num -= try; | ||
1460 | } | ||
1461 | if (num) | ||
1462 | goto fail; | ||
1463 | |||
1464 | return 0; | ||
1465 | fail: | ||
1466 | pnv_cxl_release_hwirq_ranges(irqs, dev); | ||
1467 | return -ENOSPC; | ||
1468 | } | ||
1469 | EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges); | ||
1470 | |||
1471 | int pnv_cxl_get_irq_count(struct pci_dev *dev) | ||
1472 | { | ||
1473 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1474 | struct pnv_phb *phb = hose->private_data; | ||
1475 | |||
1476 | return phb->msi_bmp.irq_count; | ||
1477 | } | ||
1478 | EXPORT_SYMBOL(pnv_cxl_get_irq_count); | ||
1479 | |||
1480 | int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, | ||
1481 | unsigned int virq) | ||
1482 | { | ||
1483 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
1484 | struct pnv_phb *phb = hose->private_data; | ||
1485 | unsigned int xive_num = hwirq - phb->msi_base; | ||
1486 | struct pnv_ioda_pe *pe; | ||
1487 | int rc; | ||
1488 | |||
1489 | if (!(pe = pnv_ioda_get_pe(dev))) | ||
1490 | return -ENODEV; | ||
1491 | |||
1492 | /* Assign XIVE to PE */ | ||
1493 | rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); | ||
1494 | if (rc) { | ||
1495 | pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x " | ||
1496 | "hwirq 0x%x XIVE 0x%x PE\n", | ||
1497 | pci_name(dev), rc, phb->msi_base, hwirq, xive_num); | ||
1498 | return -EIO; | ||
1499 | } | ||
1500 | set_msi_irq_chip(phb, virq); | ||
1501 | |||
1502 | return 0; | ||
1503 | } | ||
1504 | EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup); | ||
1505 | #endif | ||
1506 | |||
1309 | static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, | 1507 | static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, |
1310 | unsigned int hwirq, unsigned int virq, | 1508 | unsigned int hwirq, unsigned int virq, |
1311 | unsigned int is_64, struct msi_msg *msg) | 1509 | unsigned int is_64, struct msi_msg *msg) |
1312 | { | 1510 | { |
1313 | struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); | 1511 | struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); |
1314 | struct pci_dn *pdn = pci_get_pdn(dev); | 1512 | struct pci_dn *pdn = pci_get_pdn(dev); |
1315 | struct irq_data *idata; | ||
1316 | struct irq_chip *ichip; | ||
1317 | unsigned int xive_num = hwirq - phb->msi_base; | 1513 | unsigned int xive_num = hwirq - phb->msi_base; |
1318 | __be32 data; | 1514 | __be32 data; |
1319 | int rc; | 1515 | int rc; |
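pnv_cxl_alloc_hwirq_ranges() above asks the MSI bitmap for num contiguous interrupts and, whenever the request cannot be satisfied, halves it until something fits, spreading the total across the available ranges. A generic sketch of that back-off loop over a hypothetical alloc_contig() allocator; the kernel version additionally reserves range 0 for the multiplexed PSL interrupt and releases partial allocations on failure, both omitted here:

    #include <stdio.h>

    #define MAX_RANGES 4

    /* Hypothetical allocator: returns a start index, or -1 if 'n' won't fit. */
    static int alloc_contig(int n)
    {
        return (n <= 5) ? 100 : -1;     /* pretend only blocks of <= 5 are free */
    }

    /* Fill offset[]/range[] with up to MAX_RANGES chunks covering 'num' items. */
    static int alloc_ranges(int num, int offset[MAX_RANGES], int range[MAX_RANGES])
    {
        int i, start, try;

        for (i = 0; i < MAX_RANGES && num; i++) {
            try = num;
            while (try) {
                start = alloc_contig(try);
                if (start >= 0)
                    break;
                try /= 2;               /* back off: ask for half as many */
            }
            if (!try)
                return -1;              /* a real caller would free what it got */
            offset[i] = start;
            range[i]  = try;
            num -= try;
        }
        return num ? -1 : 0;
    }

    int main(void)
    {
        int off[MAX_RANGES] = {0}, rng[MAX_RANGES] = {0};

        if (!alloc_ranges(12, off, rng))
            for (int i = 0; i < MAX_RANGES && rng[i]; i++)
                printf("range %d: start %d len %d\n", i, off[i], rng[i]);
        return 0;
    }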
@@ -1365,22 +1561,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, | |||
1365 | } | 1561 | } |
1366 | msg->data = be32_to_cpu(data); | 1562 | msg->data = be32_to_cpu(data); |
1367 | 1563 | ||
1368 | /* | 1564 | set_msi_irq_chip(phb, virq); |
1369 | * Change the IRQ chip for the MSI interrupts on PHB3. | ||
1370 | * The corresponding IRQ chip should be populated for | ||
1371 | * the first time. | ||
1372 | */ | ||
1373 | if (phb->type == PNV_PHB_IODA2) { | ||
1374 | if (!phb->ioda.irq_chip_init) { | ||
1375 | idata = irq_get_irq_data(virq); | ||
1376 | ichip = irq_data_get_irq_chip(idata); | ||
1377 | phb->ioda.irq_chip_init = 1; | ||
1378 | phb->ioda.irq_chip = *ichip; | ||
1379 | phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; | ||
1380 | } | ||
1381 | |||
1382 | irq_set_chip(virq, &phb->ioda.irq_chip); | ||
1383 | } | ||
1384 | 1565 | ||
1385 | pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d)," | 1566 | pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d)," |
1386 | " address=%x_%08x data=%x PE# %d\n", | 1567 | " address=%x_%08x data=%x PE# %d\n", |
@@ -1627,12 +1808,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus, | |||
1627 | 1808 | ||
1628 | static void pnv_pci_ioda_shutdown(struct pnv_phb *phb) | 1809 | static void pnv_pci_ioda_shutdown(struct pnv_phb *phb) |
1629 | { | 1810 | { |
1630 | opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET, | 1811 | opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE, |
1631 | OPAL_ASSERT_RESET); | 1812 | OPAL_ASSERT_RESET); |
1632 | } | 1813 | } |
1633 | 1814 | ||
1634 | void __init pnv_pci_init_ioda_phb(struct device_node *np, | 1815 | static void __init pnv_pci_init_ioda_phb(struct device_node *np, |
1635 | u64 hub_id, int ioda_type) | 1816 | u64 hub_id, int ioda_type) |
1636 | { | 1817 | { |
1637 | struct pci_controller *hose; | 1818 | struct pci_controller *hose; |
1638 | struct pnv_phb *phb; | 1819 | struct pnv_phb *phb; |
@@ -1782,6 +1963,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
1782 | /* Setup TCEs */ | 1963 | /* Setup TCEs */ |
1783 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; | 1964 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; |
1784 | phb->dma_set_mask = pnv_pci_ioda_dma_set_mask; | 1965 | phb->dma_set_mask = pnv_pci_ioda_dma_set_mask; |
1966 | phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask; | ||
1785 | 1967 | ||
1786 | /* Setup shutdown function for kexec */ | 1968 | /* Setup shutdown function for kexec */ |
1787 | phb->shutdown = pnv_pci_ioda_shutdown; | 1969 | phb->shutdown = pnv_pci_ioda_shutdown; |
@@ -1803,7 +1985,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
1803 | pci_add_flags(PCI_REASSIGN_ALL_RSRC); | 1985 | pci_add_flags(PCI_REASSIGN_ALL_RSRC); |
1804 | 1986 | ||
1805 | /* Reset IODA tables to a clean state */ | 1987 | /* Reset IODA tables to a clean state */ |
1806 | rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); | 1988 | rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET); |
1807 | if (rc) | 1989 | if (rc) |
1808 | pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc); | 1990 | pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc); |
1809 | 1991 | ||
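Earlier in this pci-ioda.c diff, the define_pe_printk_level() macro that stamped out three near-identical functions is replaced by a single pe_level_printk() worker plus thin pe_err/pe_warn/pe_info wrappers. A standalone sketch of that shape, assuming ordinary vfprintf in place of the kernel's printk/%pV and a plain string prefix in place of the PE lookup:

    #include <stdarg.h>
    #include <stdio.h>

    static void level_printk(const char *level, const char *prefix,
                             const char *fmt, ...)
    {
        va_list args;

        va_start(args, fmt);
        fprintf(stderr, "%s%s: ", level, prefix);
        vfprintf(stderr, fmt, args);
        va_end(args);
    }

    /* Thin per-level wrappers, mirroring pe_err/pe_warn/pe_info. */
    #define log_err(pfx, fmt, ...)  level_printk("<err> ",  pfx, fmt, ##__VA_ARGS__)
    #define log_warn(pfx, fmt, ...) level_printk("<warn> ", pfx, fmt, ##__VA_ARGS__)
    #define log_info(pfx, fmt, ...) level_printk("<info> ", pfx, fmt, ##__VA_ARGS__)

    int main(void)
    {
        log_info("0000:01", "PE# %d configured\n", 3);
        log_err("0000:01", "PE# %d frozen\n", 3);
        return 0;
    }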
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index b45c49249a5d..b3ca77ddf36d 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -753,6 +753,17 @@ int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | |||
753 | return __dma_set_mask(&pdev->dev, dma_mask); | 753 | return __dma_set_mask(&pdev->dev, dma_mask); |
754 | } | 754 | } |
755 | 755 | ||
756 | u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev) | ||
757 | { | ||
758 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); | ||
759 | struct pnv_phb *phb = hose->private_data; | ||
760 | |||
761 | if (phb && phb->dma_get_required_mask) | ||
762 | return phb->dma_get_required_mask(phb, pdev); | ||
763 | |||
764 | return __dma_get_required_mask(&pdev->dev); | ||
765 | } | ||
766 | |||
756 | void pnv_pci_shutdown(void) | 767 | void pnv_pci_shutdown(void) |
757 | { | 768 | { |
758 | struct pci_controller *hose; | 769 | struct pci_controller *hose; |
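pnv_pci_dma_get_required_mask() above calls the PHB's dma_get_required_mask hook when the controller supplies one and otherwise falls back to the generic __dma_get_required_mask(). A compact sketch of that optional-hook-with-fallback pattern, with a hypothetical struct controller standing in for pnv_phb and made-up mask values:

    #include <stdint.h>
    #include <stdio.h>

    struct controller {
        /* optional hook; NULL means "use the generic path" */
        uint64_t (*get_required_mask)(struct controller *ctrl);
    };

    static uint64_t generic_required_mask(void)
    {
        return (1ULL << 32) - 1;               /* generic 32-bit fallback */
    }

    static uint64_t bypass_required_mask(struct controller *ctrl)
    {
        (void)ctrl;
        return (1ULL << 40) - 1;               /* pretend 40-bit bypass window */
    }

    static uint64_t get_required_mask(struct controller *ctrl)
    {
        if (ctrl && ctrl->get_required_mask)
            return ctrl->get_required_mask(ctrl);
        return generic_required_mask();
    }

    int main(void)
    {
        struct controller phb = { .get_required_mask = bypass_required_mask };

        printf("phb mask %#llx, generic %#llx\n",
               (unsigned long long)get_required_mask(&phb),
               (unsigned long long)get_required_mask(NULL));
        return 0;
    }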
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 48494d4b6058..34d29eb2a4de 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -85,6 +85,8 @@ struct pnv_eeh_ops { | |||
85 | int (*get_log)(struct eeh_pe *pe, int severity, | 85 | int (*get_log)(struct eeh_pe *pe, int severity, |
86 | char *drv_log, unsigned long len); | 86 | char *drv_log, unsigned long len); |
87 | int (*configure_bridge)(struct eeh_pe *pe); | 87 | int (*configure_bridge)(struct eeh_pe *pe); |
88 | int (*err_inject)(struct eeh_pe *pe, int type, int func, | ||
89 | unsigned long addr, unsigned long mask); | ||
88 | int (*next_error)(struct eeh_pe **pe); | 90 | int (*next_error)(struct eeh_pe **pe); |
89 | }; | 91 | }; |
90 | #endif /* CONFIG_EEH */ | 92 | #endif /* CONFIG_EEH */ |
@@ -122,6 +124,8 @@ struct pnv_phb { | |||
122 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); | 124 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); |
123 | int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev, | 125 | int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev, |
124 | u64 dma_mask); | 126 | u64 dma_mask); |
127 | u64 (*dma_get_required_mask)(struct pnv_phb *phb, | ||
128 | struct pci_dev *pdev); | ||
125 | void (*fixup_phb)(struct pci_controller *hose); | 129 | void (*fixup_phb)(struct pci_controller *hose); |
126 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); | 130 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); |
127 | void (*shutdown)(struct pnv_phb *phb); | 131 | void (*shutdown)(struct pnv_phb *phb); |
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index 75501bfede7f..6c8e2d188cd0 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h | |||
@@ -13,6 +13,7 @@ struct pci_dev; | |||
13 | extern void pnv_pci_init(void); | 13 | extern void pnv_pci_init(void); |
14 | extern void pnv_pci_shutdown(void); | 14 | extern void pnv_pci_shutdown(void); |
15 | extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask); | 15 | extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask); |
16 | extern u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev); | ||
16 | #else | 17 | #else |
17 | static inline void pnv_pci_init(void) { } | 18 | static inline void pnv_pci_init(void) { } |
18 | static inline void pnv_pci_shutdown(void) { } | 19 | static inline void pnv_pci_shutdown(void) { } |
@@ -21,6 +22,11 @@ static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) | |||
21 | { | 22 | { |
22 | return -ENODEV; | 23 | return -ENODEV; |
23 | } | 24 | } |
25 | |||
26 | static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev) | ||
27 | { | ||
28 | return 0; | ||
29 | } | ||
24 | #endif | 30 | #endif |
25 | 31 | ||
26 | extern void pnv_lpc_init(void); | 32 | extern void pnv_lpc_init(void); |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 5a0e2dc6de5f..3f9546d8a51f 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -173,6 +173,14 @@ static int pnv_dma_set_mask(struct device *dev, u64 dma_mask) | |||
173 | return __dma_set_mask(dev, dma_mask); | 173 | return __dma_set_mask(dev, dma_mask); |
174 | } | 174 | } |
175 | 175 | ||
176 | static u64 pnv_dma_get_required_mask(struct device *dev) | ||
177 | { | ||
178 | if (dev_is_pci(dev)) | ||
179 | return pnv_pci_dma_get_required_mask(to_pci_dev(dev)); | ||
180 | |||
181 | return __dma_get_required_mask(dev); | ||
182 | } | ||
183 | |||
176 | static void pnv_shutdown(void) | 184 | static void pnv_shutdown(void) |
177 | { | 185 | { |
178 | /* Let the PCI code clear up IODA tables */ | 186 | /* Let the PCI code clear up IODA tables */ |
@@ -307,7 +315,7 @@ static int __init pnv_probe(void) | |||
307 | * Returns the cpu frequency for 'cpu' in Hz. This is used by | 315 | * Returns the cpu frequency for 'cpu' in Hz. This is used by |
308 | * /proc/cpuinfo | 316 | * /proc/cpuinfo |
309 | */ | 317 | */ |
310 | unsigned long pnv_get_proc_freq(unsigned int cpu) | 318 | static unsigned long pnv_get_proc_freq(unsigned int cpu) |
311 | { | 319 | { |
312 | unsigned long ret_freq; | 320 | unsigned long ret_freq; |
313 | 321 | ||
@@ -335,6 +343,7 @@ define_machine(powernv) { | |||
335 | .power_save = power7_idle, | 343 | .power_save = power7_idle, |
336 | .calibrate_decr = generic_calibrate_decr, | 344 | .calibrate_decr = generic_calibrate_decr, |
337 | .dma_set_mask = pnv_dma_set_mask, | 345 | .dma_set_mask = pnv_dma_set_mask, |
346 | .dma_get_required_mask = pnv_dma_get_required_mask, | ||
338 | #ifdef CONFIG_KEXEC | 347 | #ifdef CONFIG_KEXEC |
339 | .kexec_cpu_down = pnv_kexec_cpu_down, | 348 | .kexec_cpu_down = pnv_kexec_cpu_down, |
340 | #endif | 349 | #endif |
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 5fcfcf44e3a9..4753958cd509 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
@@ -54,7 +54,7 @@ static void pnv_smp_setup_cpu(int cpu) | |||
54 | #endif | 54 | #endif |
55 | } | 55 | } |
56 | 56 | ||
57 | int pnv_smp_kick_cpu(int nr) | 57 | static int pnv_smp_kick_cpu(int nr) |
58 | { | 58 | { |
59 | unsigned int pcpu = get_hard_smp_processor_id(nr); | 59 | unsigned int pcpu = get_hard_smp_processor_id(nr); |
60 | unsigned long start_here = | 60 | unsigned long start_here = |
@@ -168,9 +168,9 @@ static void pnv_smp_cpu_kill_self(void) | |||
168 | power7_nap(1); | 168 | power7_nap(1); |
169 | ppc64_runlatch_on(); | 169 | ppc64_runlatch_on(); |
170 | 170 | ||
171 | /* Reenable IRQs briefly to clear the IPI that woke us */ | 171 | /* Clear the IPI that woke us up */ |
172 | local_irq_enable(); | 172 | icp_native_flush_interrupt(); |
173 | local_irq_disable(); | 173 | local_paca->irq_happened &= PACA_IRQ_HARD_DIS; |
174 | mb(); | 174 | mb(); |
175 | 175 | ||
176 | if (cpu_core_split_required()) | 176 | if (cpu_core_split_required()) |
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c index 894ecb3eb596..c87f96b79d1a 100644 --- a/arch/powerpc/platforms/powernv/subcore.c +++ b/arch/powerpc/platforms/powernv/subcore.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/smp.h> | 24 | #include <asm/smp.h> |
25 | 25 | ||
26 | #include "subcore.h" | 26 | #include "subcore.h" |
27 | #include "powernv.h" | ||
27 | 28 | ||
28 | 29 | ||
29 | /* | 30 | /* |
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index 2d8bf15879fd..fc44ad0475f8 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c | |||
@@ -555,7 +555,6 @@ static int cmm_mem_going_offline(void *arg) | |||
555 | pa_last = pa_last->next; | 555 | pa_last = pa_last->next; |
556 | free_page((unsigned long)cmm_page_list); | 556 | free_page((unsigned long)cmm_page_list); |
557 | cmm_page_list = pa_last; | 557 | cmm_page_list = pa_last; |
558 | continue; | ||
559 | } | 558 | } |
560 | } | 559 | } |
561 | pa_curr = pa_curr->next; | 560 | pa_curr = pa_curr->next; |
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index a2450b8a50a5..fdf01b660d59 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/of.h> | 18 | #include <linux/of.h> |
19 | #include "offline_states.h" | 19 | #include "offline_states.h" |
20 | #include "pseries.h" | ||
20 | 21 | ||
21 | #include <asm/prom.h> | 22 | #include <asm/prom.h> |
22 | #include <asm/machdep.h> | 23 | #include <asm/machdep.h> |
@@ -363,7 +364,8 @@ static int dlpar_online_cpu(struct device_node *dn) | |||
363 | int rc = 0; | 364 | int rc = 0; |
364 | unsigned int cpu; | 365 | unsigned int cpu; |
365 | int len, nthreads, i; | 366 | int len, nthreads, i; |
366 | const u32 *intserv; | 367 | const __be32 *intserv; |
368 | u32 thread; | ||
367 | 369 | ||
368 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); | 370 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); |
369 | if (!intserv) | 371 | if (!intserv) |
@@ -373,8 +375,9 @@ static int dlpar_online_cpu(struct device_node *dn) | |||
373 | 375 | ||
374 | cpu_maps_update_begin(); | 376 | cpu_maps_update_begin(); |
375 | for (i = 0; i < nthreads; i++) { | 377 | for (i = 0; i < nthreads; i++) { |
378 | thread = be32_to_cpu(intserv[i]); | ||
376 | for_each_present_cpu(cpu) { | 379 | for_each_present_cpu(cpu) { |
377 | if (get_hard_smp_processor_id(cpu) != intserv[i]) | 380 | if (get_hard_smp_processor_id(cpu) != thread) |
378 | continue; | 381 | continue; |
379 | BUG_ON(get_cpu_current_state(cpu) | 382 | BUG_ON(get_cpu_current_state(cpu) |
380 | != CPU_STATE_OFFLINE); | 383 | != CPU_STATE_OFFLINE); |
@@ -388,7 +391,7 @@ static int dlpar_online_cpu(struct device_node *dn) | |||
388 | } | 391 | } |
389 | if (cpu == num_possible_cpus()) | 392 | if (cpu == num_possible_cpus()) |
390 | printk(KERN_WARNING "Could not find cpu to online " | 393 | printk(KERN_WARNING "Could not find cpu to online " |
391 | "with physical id 0x%x\n", intserv[i]); | 394 | "with physical id 0x%x\n", thread); |
392 | } | 395 | } |
393 | cpu_maps_update_done(); | 396 | cpu_maps_update_done(); |
394 | 397 | ||
@@ -442,7 +445,8 @@ static int dlpar_offline_cpu(struct device_node *dn) | |||
442 | int rc = 0; | 445 | int rc = 0; |
443 | unsigned int cpu; | 446 | unsigned int cpu; |
444 | int len, nthreads, i; | 447 | int len, nthreads, i; |
445 | const u32 *intserv; | 448 | const __be32 *intserv; |
449 | u32 thread; | ||
446 | 450 | ||
447 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); | 451 | intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len); |
448 | if (!intserv) | 452 | if (!intserv) |
@@ -452,8 +456,9 @@ static int dlpar_offline_cpu(struct device_node *dn) | |||
452 | 456 | ||
453 | cpu_maps_update_begin(); | 457 | cpu_maps_update_begin(); |
454 | for (i = 0; i < nthreads; i++) { | 458 | for (i = 0; i < nthreads; i++) { |
459 | thread = be32_to_cpu(intserv[i]); | ||
455 | for_each_present_cpu(cpu) { | 460 | for_each_present_cpu(cpu) { |
456 | if (get_hard_smp_processor_id(cpu) != intserv[i]) | 461 | if (get_hard_smp_processor_id(cpu) != thread) |
457 | continue; | 462 | continue; |
458 | 463 | ||
459 | if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) | 464 | if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE) |
@@ -475,14 +480,14 @@ static int dlpar_offline_cpu(struct device_node *dn) | |||
475 | * Upgrade it's state to CPU_STATE_OFFLINE. | 480 | * Upgrade it's state to CPU_STATE_OFFLINE. |
476 | */ | 481 | */ |
477 | set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); | 482 | set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); |
478 | BUG_ON(plpar_hcall_norets(H_PROD, intserv[i]) | 483 | BUG_ON(plpar_hcall_norets(H_PROD, thread) |
479 | != H_SUCCESS); | 484 | != H_SUCCESS); |
480 | __cpu_die(cpu); | 485 | __cpu_die(cpu); |
481 | break; | 486 | break; |
482 | } | 487 | } |
483 | if (cpu == num_possible_cpus()) | 488 | if (cpu == num_possible_cpus()) |
484 | printk(KERN_WARNING "Could not find cpu to offline " | 489 | printk(KERN_WARNING "Could not find cpu to offline " |
485 | "with physical id 0x%x\n", intserv[i]); | 490 | "with physical id 0x%x\n", thread); |
486 | } | 491 | } |
487 | cpu_maps_update_done(); | 492 | cpu_maps_update_done(); |
488 | 493 | ||
@@ -494,15 +499,15 @@ out: | |||
494 | static ssize_t dlpar_cpu_release(const char *buf, size_t count) | 499 | static ssize_t dlpar_cpu_release(const char *buf, size_t count) |
495 | { | 500 | { |
496 | struct device_node *dn; | 501 | struct device_node *dn; |
497 | const u32 *drc_index; | 502 | u32 drc_index; |
498 | int rc; | 503 | int rc; |
499 | 504 | ||
500 | dn = of_find_node_by_path(buf); | 505 | dn = of_find_node_by_path(buf); |
501 | if (!dn) | 506 | if (!dn) |
502 | return -EINVAL; | 507 | return -EINVAL; |
503 | 508 | ||
504 | drc_index = of_get_property(dn, "ibm,my-drc-index", NULL); | 509 | rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); |
505 | if (!drc_index) { | 510 | if (rc) { |
506 | of_node_put(dn); | 511 | of_node_put(dn); |
507 | return -EINVAL; | 512 | return -EINVAL; |
508 | } | 513 | } |
@@ -513,7 +518,7 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count) | |||
513 | return -EINVAL; | 518 | return -EINVAL; |
514 | } | 519 | } |
515 | 520 | ||
516 | rc = dlpar_release_drc(*drc_index); | 521 | rc = dlpar_release_drc(drc_index); |
517 | if (rc) { | 522 | if (rc) { |
518 | of_node_put(dn); | 523 | of_node_put(dn); |
519 | return rc; | 524 | return rc; |
@@ -521,7 +526,7 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count) | |||
521 | 526 | ||
522 | rc = dlpar_detach_node(dn); | 527 | rc = dlpar_detach_node(dn); |
523 | if (rc) { | 528 | if (rc) { |
524 | dlpar_acquire_drc(*drc_index); | 529 | dlpar_acquire_drc(drc_index); |
525 | return rc; | 530 | return rc; |
526 | } | 531 | } |
527 | 532 | ||
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index b08053819d99..a6c7e19f5eb3 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c | |||
@@ -88,29 +88,14 @@ static int pseries_eeh_init(void) | |||
88 | * and its variant since the old firmware probably support address | 88 | * and its variant since the old firmware probably support address |
89 | * of domain/bus/slot/function for EEH RTAS operations. | 89 | * of domain/bus/slot/function for EEH RTAS operations. |
90 | */ | 90 | */ |
91 | if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) { | 91 | if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE || |
92 | pr_warn("%s: RTAS service <ibm,set-eeh-option> invalid\n", | 92 | ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE || |
93 | __func__); | 93 | (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && |
94 | return -EINVAL; | 94 | ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || |
95 | } else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) { | 95 | ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || |
96 | pr_warn("%s: RTAS service <ibm,set-slot-reset> invalid\n", | 96 | (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && |
97 | __func__); | 97 | ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) { |
98 | return -EINVAL; | 98 | pr_info("EEH functionality not supported\n"); |
99 | } else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && | ||
100 | ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) { | ||
101 | pr_warn("%s: RTAS service <ibm,read-slot-reset-state2> and " | ||
102 | "<ibm,read-slot-reset-state> invalid\n", | ||
103 | __func__); | ||
104 | return -EINVAL; | ||
105 | } else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) { | ||
106 | pr_warn("%s: RTAS service <ibm,slot-error-detail> invalid\n", | ||
107 | __func__); | ||
108 | return -EINVAL; | ||
109 | } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && | ||
110 | ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) { | ||
111 | pr_warn("%s: RTAS service <ibm,configure-pe> and " | ||
112 | "<ibm,configure-bridge> invalid\n", | ||
113 | __func__); | ||
114 | return -EINVAL; | 99 | return -EINVAL; |
115 | } | 100 | } |
116 | 101 | ||
@@ -118,11 +103,11 @@ static int pseries_eeh_init(void) | |||
118 | spin_lock_init(&slot_errbuf_lock); | 103 | spin_lock_init(&slot_errbuf_lock); |
119 | eeh_error_buf_size = rtas_token("rtas-error-log-max"); | 104 | eeh_error_buf_size = rtas_token("rtas-error-log-max"); |
120 | if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { | 105 | if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) { |
121 | pr_warn("%s: unknown EEH error log size\n", | 106 | pr_info("%s: unknown EEH error log size\n", |
122 | __func__); | 107 | __func__); |
123 | eeh_error_buf_size = 1024; | 108 | eeh_error_buf_size = 1024; |
124 | } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { | 109 | } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) { |
125 | pr_warn("%s: EEH error log size %d exceeds the maximal %d\n", | 110 | pr_info("%s: EEH error log size %d exceeds the maximal %d\n", |
126 | __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); | 111 | __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX); |
127 | eeh_error_buf_size = RTAS_ERROR_LOG_MAX; | 112 | eeh_error_buf_size = RTAS_ERROR_LOG_MAX; |
128 | } | 113 | } |
@@ -349,7 +334,9 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option) | |||
349 | if (pe->addr) | 334 | if (pe->addr) |
350 | config_addr = pe->addr; | 335 | config_addr = pe->addr; |
351 | break; | 336 | break; |
352 | 337 | case EEH_OPT_FREEZE_PE: | |
338 | /* Not support */ | ||
339 | return 0; | ||
353 | default: | 340 | default: |
354 | pr_err("%s: Invalid option %d\n", | 341 | pr_err("%s: Invalid option %d\n", |
355 | __func__, option); | 342 | __func__, option); |
@@ -729,6 +716,7 @@ static struct eeh_ops pseries_eeh_ops = { | |||
729 | .wait_state = pseries_eeh_wait_state, | 716 | .wait_state = pseries_eeh_wait_state, |
730 | .get_log = pseries_eeh_get_log, | 717 | .get_log = pseries_eeh_get_log, |
731 | .configure_bridge = pseries_eeh_configure_bridge, | 718 | .configure_bridge = pseries_eeh_configure_bridge, |
719 | .err_inject = NULL, | ||
732 | .read_config = pseries_eeh_read_config, | 720 | .read_config = pseries_eeh_read_config, |
733 | .write_config = pseries_eeh_write_config, | 721 | .write_config = pseries_eeh_write_config, |
734 | .next_error = NULL, | 722 | .next_error = NULL, |
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 20d62975856f..b174fa751d26 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
@@ -90,7 +90,7 @@ static void rtas_stop_self(void) | |||
90 | { | 90 | { |
91 | static struct rtas_args args = { | 91 | static struct rtas_args args = { |
92 | .nargs = 0, | 92 | .nargs = 0, |
93 | .nret = 1, | 93 | .nret = cpu_to_be32(1), |
94 | .rets = &args.args[0], | 94 | .rets = &args.args[0], |
95 | }; | 95 | }; |
96 | 96 | ||
@@ -312,7 +312,8 @@ static void pseries_remove_processor(struct device_node *np) | |||
312 | { | 312 | { |
313 | unsigned int cpu; | 313 | unsigned int cpu; |
314 | int len, nthreads, i; | 314 | int len, nthreads, i; |
315 | const u32 *intserv; | 315 | const __be32 *intserv; |
316 | u32 thread; | ||
316 | 317 | ||
317 | intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len); | 318 | intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len); |
318 | if (!intserv) | 319 | if (!intserv) |
@@ -322,8 +323,9 @@ static void pseries_remove_processor(struct device_node *np) | |||
322 | 323 | ||
323 | cpu_maps_update_begin(); | 324 | cpu_maps_update_begin(); |
324 | for (i = 0; i < nthreads; i++) { | 325 | for (i = 0; i < nthreads; i++) { |
326 | thread = be32_to_cpu(intserv[i]); | ||
325 | for_each_present_cpu(cpu) { | 327 | for_each_present_cpu(cpu) { |
326 | if (get_hard_smp_processor_id(cpu) != intserv[i]) | 328 | if (get_hard_smp_processor_id(cpu) != thread) |
327 | continue; | 329 | continue; |
328 | BUG_ON(cpu_online(cpu)); | 330 | BUG_ON(cpu_online(cpu)); |
329 | set_cpu_present(cpu, false); | 331 | set_cpu_present(cpu, false); |
@@ -332,7 +334,7 @@ static void pseries_remove_processor(struct device_node *np) | |||
332 | } | 334 | } |
333 | if (cpu >= nr_cpu_ids) | 335 | if (cpu >= nr_cpu_ids) |
334 | printk(KERN_WARNING "Could not find cpu to remove " | 336 | printk(KERN_WARNING "Could not find cpu to remove " |
335 | "with physical id 0x%x\n", intserv[i]); | 337 | "with physical id 0x%x\n", thread); |
336 | } | 338 | } |
337 | cpu_maps_update_done(); | 339 | cpu_maps_update_done(); |
338 | } | 340 | } |
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 34064f50945e..3c4c0dcd90d3 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/machdep.h> | 20 | #include <asm/machdep.h> |
21 | #include <asm/prom.h> | 21 | #include <asm/prom.h> |
22 | #include <asm/sparsemem.h> | 22 | #include <asm/sparsemem.h> |
23 | #include "pseries.h" | ||
23 | 24 | ||
24 | unsigned long pseries_memory_block_size(void) | 25 | unsigned long pseries_memory_block_size(void) |
25 | { | 26 | { |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 4642d6a4d356..de1ec54a2a57 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -329,16 +329,16 @@ struct direct_window { | |||
329 | 329 | ||
330 | /* Dynamic DMA Window support */ | 330 | /* Dynamic DMA Window support */ |
331 | struct ddw_query_response { | 331 | struct ddw_query_response { |
332 | __be32 windows_available; | 332 | u32 windows_available; |
333 | __be32 largest_available_block; | 333 | u32 largest_available_block; |
334 | __be32 page_size; | 334 | u32 page_size; |
335 | __be32 migration_capable; | 335 | u32 migration_capable; |
336 | }; | 336 | }; |
337 | 337 | ||
338 | struct ddw_create_response { | 338 | struct ddw_create_response { |
339 | __be32 liobn; | 339 | u32 liobn; |
340 | __be32 addr_hi; | 340 | u32 addr_hi; |
341 | __be32 addr_lo; | 341 | u32 addr_lo; |
342 | }; | 342 | }; |
343 | 343 | ||
344 | static LIST_HEAD(direct_window_list); | 344 | static LIST_HEAD(direct_window_list); |
@@ -725,16 +725,18 @@ static void remove_ddw(struct device_node *np, bool remove_prop) | |||
725 | { | 725 | { |
726 | struct dynamic_dma_window_prop *dwp; | 726 | struct dynamic_dma_window_prop *dwp; |
727 | struct property *win64; | 727 | struct property *win64; |
728 | const u32 *ddw_avail; | 728 | u32 ddw_avail[3]; |
729 | u64 liobn; | 729 | u64 liobn; |
730 | int len, ret = 0; | 730 | int ret = 0; |
731 | |||
732 | ret = of_property_read_u32_array(np, "ibm,ddw-applicable", | ||
733 | &ddw_avail[0], 3); | ||
731 | 734 | ||
732 | ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len); | ||
733 | win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); | 735 | win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); |
734 | if (!win64) | 736 | if (!win64) |
735 | return; | 737 | return; |
736 | 738 | ||
737 | if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp)) | 739 | if (ret || win64->length < sizeof(*dwp)) |
738 | goto delprop; | 740 | goto delprop; |
739 | 741 | ||
740 | dwp = win64->value; | 742 | dwp = win64->value; |
@@ -872,8 +874,9 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, | |||
872 | 874 | ||
873 | do { | 875 | do { |
874 | /* extra outputs are LIOBN and dma-addr (hi, lo) */ | 876 | /* extra outputs are LIOBN and dma-addr (hi, lo) */ |
875 | ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr, | 877 | ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, |
876 | BUID_HI(buid), BUID_LO(buid), page_shift, window_shift); | 878 | cfg_addr, BUID_HI(buid), BUID_LO(buid), |
879 | page_shift, window_shift); | ||
877 | } while (rtas_busy_delay(ret)); | 880 | } while (rtas_busy_delay(ret)); |
878 | dev_info(&dev->dev, | 881 | dev_info(&dev->dev, |
879 | "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " | 882 | "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " |
@@ -910,7 +913,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) | |||
910 | int page_shift; | 913 | int page_shift; |
911 | u64 dma_addr, max_addr; | 914 | u64 dma_addr, max_addr; |
912 | struct device_node *dn; | 915 | struct device_node *dn; |
913 | const u32 *uninitialized_var(ddw_avail); | 916 | u32 ddw_avail[3]; |
914 | struct direct_window *window; | 917 | struct direct_window *window; |
915 | struct property *win64; | 918 | struct property *win64; |
916 | struct dynamic_dma_window_prop *ddwprop; | 919 | struct dynamic_dma_window_prop *ddwprop; |
@@ -942,8 +945,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) | |||
942 | * for the given node in that order. | 945 | * for the given node in that order. |
943 | * the property is actually in the parent, not the PE | 946 | * the property is actually in the parent, not the PE |
944 | */ | 947 | */ |
945 | ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); | 948 | ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable", |
946 | if (!ddw_avail || len < 3 * sizeof(u32)) | 949 | &ddw_avail[0], 3); |
950 | if (ret) | ||
947 | goto out_failed; | 951 | goto out_failed; |
948 | 952 | ||
949 | /* | 953 | /* |
@@ -966,11 +970,11 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) | |||
966 | dev_dbg(&dev->dev, "no free dynamic windows"); | 970 | dev_dbg(&dev->dev, "no free dynamic windows"); |
967 | goto out_failed; | 971 | goto out_failed; |
968 | } | 972 | } |
969 | if (be32_to_cpu(query.page_size) & 4) { | 973 | if (query.page_size & 4) { |
970 | page_shift = 24; /* 16MB */ | 974 | page_shift = 24; /* 16MB */ |
971 | } else if (be32_to_cpu(query.page_size) & 2) { | 975 | } else if (query.page_size & 2) { |
972 | page_shift = 16; /* 64kB */ | 976 | page_shift = 16; /* 64kB */ |
973 | } else if (be32_to_cpu(query.page_size) & 1) { | 977 | } else if (query.page_size & 1) { |
974 | page_shift = 12; /* 4kB */ | 978 | page_shift = 12; /* 4kB */ |
975 | } else { | 979 | } else { |
976 | dev_dbg(&dev->dev, "no supported direct page size in mask %x", | 980 | dev_dbg(&dev->dev, "no supported direct page size in mask %x", |
@@ -980,7 +984,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) | |||
980 | /* verify the window * number of ptes will map the partition */ | 984 | /* verify the window * number of ptes will map the partition */ |
981 | /* check largest block * page size > max memory hotplug addr */ | 985 | /* check largest block * page size > max memory hotplug addr */ |
982 | max_addr = memory_hotplug_max(); | 986 | max_addr = memory_hotplug_max(); |
983 | if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) { | 987 | if (query.largest_available_block < (max_addr >> page_shift)) { |
984 | dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " | 988 | dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " |
985 | "%llu-sized pages\n", max_addr, query.largest_available_block, | 989 | "%llu-sized pages\n", max_addr, query.largest_available_block, |
986 | 1ULL << page_shift); | 990 | 1ULL << page_shift); |
@@ -1006,8 +1010,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) | |||
1006 | if (ret != 0) | 1010 | if (ret != 0) |
1007 | goto out_free_prop; | 1011 | goto out_free_prop; |
1008 | 1012 | ||
1009 | ddwprop->liobn = create.liobn; | 1013 | ddwprop->liobn = cpu_to_be32(create.liobn); |
1010 | ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2)); | 1014 | ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) | |
1015 | create.addr_lo); | ||
1011 | ddwprop->tce_shift = cpu_to_be32(page_shift); | 1016 | ddwprop->tce_shift = cpu_to_be32(page_shift); |
1012 | ddwprop->window_shift = cpu_to_be32(len); | 1017 | ddwprop->window_shift = cpu_to_be32(len); |
1013 | 1018 | ||
@@ -1039,7 +1044,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) | |||
1039 | list_add(&window->list, &direct_window_list); | 1044 | list_add(&window->list, &direct_window_list); |
1040 | spin_unlock(&direct_window_list_lock); | 1045 | spin_unlock(&direct_window_list_lock); |
1041 | 1046 | ||
1042 | dma_addr = of_read_number(&create.addr_hi, 2); | 1047 | dma_addr = be64_to_cpu(ddwprop->dma_base); |
1043 | goto out_unlock; | 1048 | goto out_unlock; |
1044 | 1049 | ||
1045 | out_free_window: | 1050 | out_free_window: |
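In enable_ddw() above, the ddw query's page-size capability bits are mapped to a TCE page shift, preferring the largest size the bridge supports (bit 2: 16MB, bit 1: 64kB, bit 0: 4kB). The same selection as a small standalone helper; the bit layout follows the hunk, the helper name is made up:

    #include <stdio.h>

    /* Capability bits as used in the ddw query above. */
    #define PGSIZE_4K   0x1
    #define PGSIZE_64K  0x2
    #define PGSIZE_16M  0x4

    static int pick_page_shift(unsigned int page_size_mask)
    {
        if (page_size_mask & PGSIZE_16M)
            return 24;          /* 16 MB */
        if (page_size_mask & PGSIZE_64K)
            return 16;          /* 64 kB */
        if (page_size_mask & PGSIZE_4K)
            return 12;          /* 4 kB */
        return -1;              /* no supported direct-map page size */
    }

    int main(void)
    {
        printf("mask 0x7 -> shift %d\n", pick_page_shift(0x7));
        printf("mask 0x3 -> shift %d\n", pick_page_shift(0x3));
        return 0;
    }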
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 34e64237fff9..8c509d5397c6 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -59,8 +59,6 @@ EXPORT_SYMBOL(plpar_hcall); | |||
59 | EXPORT_SYMBOL(plpar_hcall9); | 59 | EXPORT_SYMBOL(plpar_hcall9); |
60 | EXPORT_SYMBOL(plpar_hcall_norets); | 60 | EXPORT_SYMBOL(plpar_hcall_norets); |
61 | 61 | ||
62 | extern void pSeries_find_serial_port(void); | ||
63 | |||
64 | void vpa_init(int cpu) | 62 | void vpa_init(int cpu) |
65 | { | 63 | { |
66 | int hwcpu = get_hard_smp_processor_id(cpu); | 64 | int hwcpu = get_hard_smp_processor_id(cpu); |
@@ -642,7 +640,7 @@ EXPORT_SYMBOL(arch_free_page); | |||
642 | #endif | 640 | #endif |
643 | 641 | ||
644 | #ifdef CONFIG_TRACEPOINTS | 642 | #ifdef CONFIG_TRACEPOINTS |
645 | #ifdef CONFIG_JUMP_LABEL | 643 | #ifdef HAVE_JUMP_LABEL |
646 | struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; | 644 | struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; |
647 | 645 | ||
648 | void hcall_tracepoint_regfunc(void) | 646 | void hcall_tracepoint_regfunc(void) |
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 0cc240b7f694..11a3b617ef5d 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -276,8 +276,10 @@ static ssize_t pSeries_nvram_get_size(void) | |||
276 | * sequence #: The unique sequence # for each event. (until it wraps) | 276 | * sequence #: The unique sequence # for each event. (until it wraps) |
277 | * error log: The error log from event_scan | 277 | * error log: The error log from event_scan |
278 | */ | 278 | */ |
279 | int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, | 279 | static int nvram_write_os_partition(struct nvram_os_partition *part, |
280 | int length, unsigned int err_type, unsigned int error_log_cnt) | 280 | char *buff, int length, |
281 | unsigned int err_type, | ||
282 | unsigned int error_log_cnt) | ||
281 | { | 283 | { |
282 | int rc; | 284 | int rc; |
283 | loff_t tmp_index; | 285 | loff_t tmp_index; |
@@ -330,9 +332,9 @@ int nvram_write_error_log(char * buff, int length, | |||
330 | * | 332 | * |
331 | * Reads nvram partition for at most 'length' | 333 | * Reads nvram partition for at most 'length' |
332 | */ | 334 | */ |
333 | int nvram_read_partition(struct nvram_os_partition *part, char *buff, | 335 | static int nvram_read_partition(struct nvram_os_partition *part, char *buff, |
334 | int length, unsigned int *err_type, | 336 | int length, unsigned int *err_type, |
335 | unsigned int *error_log_cnt) | 337 | unsigned int *error_log_cnt) |
336 | { | 338 | { |
337 | int rc; | 339 | int rc; |
338 | loff_t tmp_index; | 340 | loff_t tmp_index; |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index c413ec158ff5..67e48594040c 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/pci-bridge.h> | 29 | #include <asm/pci-bridge.h> |
30 | #include <asm/prom.h> | 30 | #include <asm/prom.h> |
31 | #include <asm/ppc-pci.h> | 31 | #include <asm/ppc-pci.h> |
32 | #include "pseries.h" | ||
32 | 33 | ||
33 | #if 0 | 34 | #if 0 |
34 | void pcibios_name_device(struct pci_dev *dev) | 35 | void pcibios_name_device(struct pci_dev *dev) |
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index dff05b9eb946..5a4d0fc03b03 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -126,7 +126,7 @@ struct epow_errorlog { | |||
126 | #define EPOW_MAIN_ENCLOSURE 5 | 126 | #define EPOW_MAIN_ENCLOSURE 5 |
127 | #define EPOW_POWER_OFF 7 | 127 | #define EPOW_POWER_OFF 7 |
128 | 128 | ||
129 | void rtas_parse_epow_errlog(struct rtas_error_log *log) | 129 | static void rtas_parse_epow_errlog(struct rtas_error_log *log) |
130 | { | 130 | { |
131 | struct pseries_errorlog *pseries_log; | 131 | struct pseries_errorlog *pseries_log; |
132 | struct epow_errorlog *epow_log; | 132 | struct epow_errorlog *epow_log; |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index e724d3186e73..125c589eeef5 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -561,7 +561,7 @@ void pSeries_coalesce_init(void) | |||
561 | * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, | 561 | * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, |
562 | * handle that here. (Stolen from parse_system_parameter_string) | 562 | * handle that here. (Stolen from parse_system_parameter_string) |
563 | */ | 563 | */ |
564 | void pSeries_cmo_feature_init(void) | 564 | static void pSeries_cmo_feature_init(void) |
565 | { | 565 | { |
566 | char *ptr, *key, *value, *end; | 566 | char *ptr, *key, *value, *end; |
567 | int call_status; | 567 | int call_status; |
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 47b6b9f81d43..ad56edc39919 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c | |||
@@ -314,7 +314,7 @@ axon_ram_remove(struct platform_device *device) | |||
314 | return 0; | 314 | return 0; |
315 | } | 315 | } |
316 | 316 | ||
317 | static struct of_device_id axon_ram_device_id[] = { | 317 | static const struct of_device_id axon_ram_device_id[] = { |
318 | { | 318 | { |
319 | .type = "dma-memory" | 319 | .type = "dma-memory" |
320 | }, | 320 | }, |
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c index e9056e438575..2d8a101b6b9e 100644 --- a/arch/powerpc/sysdev/dcr.c +++ b/arch/powerpc/sysdev/dcr.c | |||
@@ -230,5 +230,6 @@ EXPORT_SYMBOL_GPL(dcr_unmap_mmio); | |||
230 | 230 | ||
231 | #ifdef CONFIG_PPC_DCR_NATIVE | 231 | #ifdef CONFIG_PPC_DCR_NATIVE |
232 | DEFINE_SPINLOCK(dcr_ind_lock); | 232 | DEFINE_SPINLOCK(dcr_ind_lock); |
233 | EXPORT_SYMBOL_GPL(dcr_ind_lock); | ||
233 | #endif /* defined(CONFIG_PPC_DCR_NATIVE) */ | 234 | #endif /* defined(CONFIG_PPC_DCR_NATIVE) */ |
234 | 235 | ||
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c index afc2dbf37011..90545ad1626e 100644 --- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c +++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c | |||
@@ -171,7 +171,7 @@ static int mpc85xx_l2ctlr_of_remove(struct platform_device *dev) | |||
171 | return 0; | 171 | return 0; |
172 | } | 172 | } |
173 | 173 | ||
174 | static struct of_device_id mpc85xx_l2ctlr_of_match[] = { | 174 | static const struct of_device_id mpc85xx_l2ctlr_of_match[] = { |
175 | { | 175 | { |
176 | .compatible = "fsl,p2020-l2-cache-controller", | 176 | .compatible = "fsl,p2020-l2-cache-controller", |
177 | }, | 177 | }, |
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index b32e79dbef4f..de40b48b460e 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/of_platform.h> | 20 | #include <linux/of_platform.h> |
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/seq_file.h> | ||
21 | #include <sysdev/fsl_soc.h> | 23 | #include <sysdev/fsl_soc.h> |
22 | #include <asm/prom.h> | 24 | #include <asm/prom.h> |
23 | #include <asm/hw_irq.h> | 25 | #include <asm/hw_irq.h> |
@@ -50,6 +52,7 @@ struct fsl_msi_feature { | |||
50 | struct fsl_msi_cascade_data { | 52 | struct fsl_msi_cascade_data { |
51 | struct fsl_msi *msi_data; | 53 | struct fsl_msi *msi_data; |
52 | int index; | 54 | int index; |
55 | int virq; | ||
53 | }; | 56 | }; |
54 | 57 | ||
55 | static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg) | 58 | static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg) |
@@ -65,11 +68,24 @@ static void fsl_msi_end_irq(struct irq_data *d) | |||
65 | { | 68 | { |
66 | } | 69 | } |
67 | 70 | ||
71 | static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p) | ||
72 | { | ||
73 | struct fsl_msi *msi_data = irqd->domain->host_data; | ||
74 | irq_hw_number_t hwirq = irqd_to_hwirq(irqd); | ||
75 | int cascade_virq, srs; | ||
76 | |||
77 | srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK; | ||
78 | cascade_virq = msi_data->cascade_array[srs]->virq; | ||
79 | |||
80 | seq_printf(p, " fsl-msi-%d", cascade_virq); | ||
81 | } | ||
82 | |||
83 | |||
68 | static struct irq_chip fsl_msi_chip = { | 84 | static struct irq_chip fsl_msi_chip = { |
69 | .irq_mask = mask_msi_irq, | 85 | .irq_mask = mask_msi_irq, |
70 | .irq_unmask = unmask_msi_irq, | 86 | .irq_unmask = unmask_msi_irq, |
71 | .irq_ack = fsl_msi_end_irq, | 87 | .irq_ack = fsl_msi_end_irq, |
72 | .name = "FSL-MSI", | 88 | .irq_print_chip = fsl_msi_print_chip, |
73 | }; | 89 | }; |
74 | 90 | ||
75 | static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq, | 91 | static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq, |
@@ -175,7 +191,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
175 | np = of_parse_phandle(hose->dn, "fsl,msi", 0); | 191 | np = of_parse_phandle(hose->dn, "fsl,msi", 0); |
176 | if (np) { | 192 | if (np) { |
177 | if (of_device_is_compatible(np, "fsl,mpic-msi") || | 193 | if (of_device_is_compatible(np, "fsl,mpic-msi") || |
178 | of_device_is_compatible(np, "fsl,vmpic-msi")) | 194 | of_device_is_compatible(np, "fsl,vmpic-msi") || |
195 | of_device_is_compatible(np, "fsl,vmpic-msi-v4.3")) | ||
179 | phandle = np->phandle; | 196 | phandle = np->phandle; |
180 | else { | 197 | else { |
181 | dev_err(&pdev->dev, | 198 | dev_err(&pdev->dev, |
@@ -234,40 +251,24 @@ out_free: | |||
234 | return rc; | 251 | return rc; |
235 | } | 252 | } |
236 | 253 | ||
237 | static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | 254 | static irqreturn_t fsl_msi_cascade(int irq, void *data) |
238 | { | 255 | { |
239 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
240 | struct irq_data *idata = irq_desc_get_irq_data(desc); | ||
241 | unsigned int cascade_irq; | 256 | unsigned int cascade_irq; |
242 | struct fsl_msi *msi_data; | 257 | struct fsl_msi *msi_data; |
243 | int msir_index = -1; | 258 | int msir_index = -1; |
244 | u32 msir_value = 0; | 259 | u32 msir_value = 0; |
245 | u32 intr_index; | 260 | u32 intr_index; |
246 | u32 have_shift = 0; | 261 | u32 have_shift = 0; |
247 | struct fsl_msi_cascade_data *cascade_data; | 262 | struct fsl_msi_cascade_data *cascade_data = data; |
263 | irqreturn_t ret = IRQ_NONE; | ||
248 | 264 | ||
249 | cascade_data = irq_get_handler_data(irq); | ||
250 | msi_data = cascade_data->msi_data; | 265 | msi_data = cascade_data->msi_data; |
251 | 266 | ||
252 | raw_spin_lock(&desc->lock); | ||
253 | if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { | ||
254 | if (chip->irq_mask_ack) | ||
255 | chip->irq_mask_ack(idata); | ||
256 | else { | ||
257 | chip->irq_mask(idata); | ||
258 | chip->irq_ack(idata); | ||
259 | } | ||
260 | } | ||
261 | |||
262 | if (unlikely(irqd_irq_inprogress(idata))) | ||
263 | goto unlock; | ||
264 | |||
265 | msir_index = cascade_data->index; | 267 | msir_index = cascade_data->index; |
266 | 268 | ||
267 | if (msir_index >= NR_MSI_REG_MAX) | 269 | if (msir_index >= NR_MSI_REG_MAX) |
268 | cascade_irq = NO_IRQ; | 270 | cascade_irq = NO_IRQ; |
269 | 271 | ||
270 | irqd_set_chained_irq_inprogress(idata); | ||
271 | switch (msi_data->feature & FSL_PIC_IP_MASK) { | 272 | switch (msi_data->feature & FSL_PIC_IP_MASK) { |
272 | case FSL_PIC_IP_MPIC: | 273 | case FSL_PIC_IP_MPIC: |
273 | msir_value = fsl_msi_read(msi_data->msi_regs, | 274 | msir_value = fsl_msi_read(msi_data->msi_regs, |
@@ -296,40 +297,32 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
296 | cascade_irq = irq_linear_revmap(msi_data->irqhost, | 297 | cascade_irq = irq_linear_revmap(msi_data->irqhost, |
297 | msi_hwirq(msi_data, msir_index, | 298 | msi_hwirq(msi_data, msir_index, |
298 | intr_index + have_shift)); | 299 | intr_index + have_shift)); |
299 | if (cascade_irq != NO_IRQ) | 300 | if (cascade_irq != NO_IRQ) { |
300 | generic_handle_irq(cascade_irq); | 301 | generic_handle_irq(cascade_irq); |
302 | ret = IRQ_HANDLED; | ||
303 | } | ||
301 | have_shift += intr_index + 1; | 304 | have_shift += intr_index + 1; |
302 | msir_value = msir_value >> (intr_index + 1); | 305 | msir_value = msir_value >> (intr_index + 1); |
303 | } | 306 | } |
304 | irqd_clr_chained_irq_inprogress(idata); | ||
305 | 307 | ||
306 | switch (msi_data->feature & FSL_PIC_IP_MASK) { | 308 | return ret; |
307 | case FSL_PIC_IP_MPIC: | ||
308 | case FSL_PIC_IP_VMPIC: | ||
309 | chip->irq_eoi(idata); | ||
310 | break; | ||
311 | case FSL_PIC_IP_IPIC: | ||
312 | if (!irqd_irq_disabled(idata) && chip->irq_unmask) | ||
313 | chip->irq_unmask(idata); | ||
314 | break; | ||
315 | } | ||
316 | unlock: | ||
317 | raw_spin_unlock(&desc->lock); | ||
318 | } | 309 | } |
319 | 310 | ||
320 | static int fsl_of_msi_remove(struct platform_device *ofdev) | 311 | static int fsl_of_msi_remove(struct platform_device *ofdev) |
321 | { | 312 | { |
322 | struct fsl_msi *msi = platform_get_drvdata(ofdev); | 313 | struct fsl_msi *msi = platform_get_drvdata(ofdev); |
323 | int virq, i; | 314 | int virq, i; |
324 | struct fsl_msi_cascade_data *cascade_data; | ||
325 | 315 | ||
326 | if (msi->list.prev != NULL) | 316 | if (msi->list.prev != NULL) |
327 | list_del(&msi->list); | 317 | list_del(&msi->list); |
328 | for (i = 0; i < NR_MSI_REG_MAX; i++) { | 318 | for (i = 0; i < NR_MSI_REG_MAX; i++) { |
329 | virq = msi->msi_virqs[i]; | 319 | if (msi->cascade_array[i]) { |
330 | if (virq != NO_IRQ) { | 320 | virq = msi->cascade_array[i]->virq; |
331 | cascade_data = irq_get_handler_data(virq); | 321 | |
332 | kfree(cascade_data); | 322 | BUG_ON(virq == NO_IRQ); |
323 | |||
324 | free_irq(virq, msi->cascade_array[i]); | ||
325 | kfree(msi->cascade_array[i]); | ||
333 | irq_dispose_mapping(virq); | 326 | irq_dispose_mapping(virq); |
334 | } | 327 | } |
335 | } | 328 | } |
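The rewritten fsl_msi_cascade() above walks the MSIR status word one set bit at a time, accumulating have_shift so that each pending bit maps to a unique hardware IRQ number before being dispatched. A minimal userspace sketch of that bit-scanning pattern, with a hypothetical handle_msi() standing in for the irq_linear_revmap()/generic_handle_irq() dispatch:

#include <stdio.h>

static void handle_msi(unsigned int hwirq)
{
        printf("dispatch hwirq %u\n", hwirq);
}

int main(void)
{
        unsigned int msir_value = 0x29; /* bits 0, 3 and 5 pending */
        unsigned int have_shift = 0;

        while (msir_value) {
                /* index of the lowest set bit (GCC/Clang builtin) */
                unsigned int intr_index = __builtin_ctz(msir_value);

                handle_msi(have_shift + intr_index);
                have_shift += intr_index + 1;
                msir_value >>= intr_index + 1;
        }
        return 0;
}

Run on the value above, this dispatches hardware IRQs 0, 3 and 5, mirroring how the cascade handler drains every pending source in one pass.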
@@ -348,7 +341,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, | |||
348 | int offset, int irq_index) | 341 | int offset, int irq_index) |
349 | { | 342 | { |
350 | struct fsl_msi_cascade_data *cascade_data = NULL; | 343 | struct fsl_msi_cascade_data *cascade_data = NULL; |
351 | int virt_msir, i; | 344 | int virt_msir, i, ret; |
352 | 345 | ||
353 | virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index); | 346 | virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index); |
354 | if (virt_msir == NO_IRQ) { | 347 | if (virt_msir == NO_IRQ) { |
@@ -363,11 +356,18 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, | |||
363 | return -ENOMEM; | 356 | return -ENOMEM; |
364 | } | 357 | } |
365 | irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class); | 358 | irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class); |
366 | msi->msi_virqs[irq_index] = virt_msir; | ||
367 | cascade_data->index = offset; | 359 | cascade_data->index = offset; |
368 | cascade_data->msi_data = msi; | 360 | cascade_data->msi_data = msi; |
369 | irq_set_handler_data(virt_msir, cascade_data); | 361 | cascade_data->virq = virt_msir; |
370 | irq_set_chained_handler(virt_msir, fsl_msi_cascade); | 362 | msi->cascade_array[irq_index] = cascade_data; |
363 | |||
364 | ret = request_irq(virt_msir, fsl_msi_cascade, 0, | ||
365 | "fsl-msi-cascade", cascade_data); | ||
366 | if (ret) { | ||
367 | dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n", | ||
368 | virt_msir, ret); | ||
369 | return ret; | ||
370 | } | ||
371 | 371 | ||
372 | /* Release the hwirqs corresponding to this MSI register */ | 372 | /* Release the hwirqs corresponding to this MSI register */ |
373 | for (i = 0; i < IRQS_PER_MSI_REG; i++) | 373 | for (i = 0; i < IRQS_PER_MSI_REG; i++) |
@@ -461,7 +461,8 @@ static int fsl_of_msi_probe(struct platform_device *dev) | |||
461 | 461 | ||
462 | p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); | 462 | p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); |
463 | 463 | ||
464 | if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3")) { | 464 | if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") || |
465 | of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) { | ||
465 | msi->srs_shift = MSIIR1_SRS_SHIFT; | 466 | msi->srs_shift = MSIIR1_SRS_SHIFT; |
466 | msi->ibs_shift = MSIIR1_IBS_SHIFT; | 467 | msi->ibs_shift = MSIIR1_IBS_SHIFT; |
467 | if (p) | 468 | if (p) |
@@ -566,6 +567,10 @@ static const struct of_device_id fsl_of_msi_ids[] = { | |||
566 | .compatible = "fsl,vmpic-msi", | 567 | .compatible = "fsl,vmpic-msi", |
567 | .data = &vmpic_msi_feature, | 568 | .data = &vmpic_msi_feature, |
568 | }, | 569 | }, |
570 | { | ||
571 | .compatible = "fsl,vmpic-msi-v4.3", | ||
572 | .data = &vmpic_msi_feature, | ||
573 | }, | ||
569 | #endif | 574 | #endif |
570 | {} | 575 | {} |
571 | }; | 576 | }; |
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h index df9aa9fe0933..420cfcbdac01 100644 --- a/arch/powerpc/sysdev/fsl_msi.h +++ b/arch/powerpc/sysdev/fsl_msi.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #define FSL_PIC_IP_IPIC 0x00000002 | 27 | #define FSL_PIC_IP_IPIC 0x00000002 |
28 | #define FSL_PIC_IP_VMPIC 0x00000003 | 28 | #define FSL_PIC_IP_VMPIC 0x00000003 |
29 | 29 | ||
30 | struct fsl_msi_cascade_data; | ||
31 | |||
30 | struct fsl_msi { | 32 | struct fsl_msi { |
31 | struct irq_domain *irqhost; | 33 | struct irq_domain *irqhost; |
32 | 34 | ||
@@ -37,7 +39,7 @@ struct fsl_msi { | |||
37 | u32 srs_shift; /* Shift of the shared interrupt register select */ | 39 | u32 srs_shift; /* Shift of the shared interrupt register select */ |
38 | void __iomem *msi_regs; | 40 | void __iomem *msi_regs; |
39 | u32 feature; | 41 | u32 feature; |
40 | int msi_virqs[NR_MSI_REG_MAX]; | 42 | struct fsl_msi_cascade_data *cascade_array[NR_MSI_REG_MAX]; |
41 | 43 | ||
42 | struct msi_bitmap bitmap; | 44 | struct msi_bitmap bitmap; |
43 | 45 | ||
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index c5077673bd94..65d2ed4549e6 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c | |||
@@ -522,7 +522,8 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) | |||
522 | } else { | 522 | } else { |
523 | /* For PCI read PROG to identify controller mode */ | 523 | /* For PCI read PROG to identify controller mode */ |
524 | early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif); | 524 | early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif); |
525 | if ((progif & 1) == 1) | 525 | if ((progif & 1) && |
526 | !of_property_read_bool(dev, "fsl,pci-agent-force-enum")) | ||
526 | goto no_bridge; | 527 | goto no_bridge; |
527 | } | 528 | } |
528 | 529 | ||
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index be33c9768ea1..89cec0ed6a58 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -960,7 +960,7 @@ void mpic_set_vector(unsigned int virq, unsigned int vector) | |||
960 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); | 960 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); |
961 | } | 961 | } |
962 | 962 | ||
963 | void mpic_set_destination(unsigned int virq, unsigned int cpuid) | 963 | static void mpic_set_destination(unsigned int virq, unsigned int cpuid) |
964 | { | 964 | { |
965 | struct mpic *mpic = mpic_from_irq(virq); | 965 | struct mpic *mpic = mpic_from_irq(virq); |
966 | unsigned int src = virq_to_hw(virq); | 966 | unsigned int src = virq_to_hw(virq); |
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 2ff630267e9e..0c75214b6f92 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c | |||
@@ -20,32 +20,37 @@ int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num) | |||
20 | int offset, order = get_count_order(num); | 20 | int offset, order = get_count_order(num); |
21 | 21 | ||
22 | spin_lock_irqsave(&bmp->lock, flags); | 22 | spin_lock_irqsave(&bmp->lock, flags); |
23 | /* | 23 | |
24 | * This is fast, but stricter than we need. We might want to add | 24 | offset = bitmap_find_next_zero_area(bmp->bitmap, bmp->irq_count, 0, |
25 | * a fallback routine which does a linear search with no alignment. | 25 | num, (1 << order) - 1); |
26 | */ | 26 | if (offset > bmp->irq_count) |
27 | offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order); | 27 | goto err; |
28 | |||
29 | bitmap_set(bmp->bitmap, offset, num); | ||
28 | spin_unlock_irqrestore(&bmp->lock, flags); | 30 | spin_unlock_irqrestore(&bmp->lock, flags); |
29 | 31 | ||
30 | pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n", | 32 | pr_debug("msi_bitmap: allocated 0x%x at offset 0x%x\n", num, offset); |
31 | num, order, offset); | ||
32 | 33 | ||
33 | return offset; | 34 | return offset; |
35 | err: | ||
36 | spin_unlock_irqrestore(&bmp->lock, flags); | ||
37 | return -ENOMEM; | ||
34 | } | 38 | } |
39 | EXPORT_SYMBOL(msi_bitmap_alloc_hwirqs); | ||
35 | 40 | ||
36 | void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset, | 41 | void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset, |
37 | unsigned int num) | 42 | unsigned int num) |
38 | { | 43 | { |
39 | unsigned long flags; | 44 | unsigned long flags; |
40 | int order = get_count_order(num); | ||
41 | 45 | ||
42 | pr_debug("msi_bitmap: freeing 0x%x (2^%d) at offset 0x%x\n", | 46 | pr_debug("msi_bitmap: freeing 0x%x at offset 0x%x\n", |
43 | num, order, offset); | 47 | num, offset); |
44 | 48 | ||
45 | spin_lock_irqsave(&bmp->lock, flags); | 49 | spin_lock_irqsave(&bmp->lock, flags); |
46 | bitmap_release_region(bmp->bitmap, offset, order); | 50 | bitmap_clear(bmp->bitmap, offset, num); |
47 | spin_unlock_irqrestore(&bmp->lock, flags); | 51 | spin_unlock_irqrestore(&bmp->lock, flags); |
48 | } | 52 | } |
53 | EXPORT_SYMBOL(msi_bitmap_free_hwirqs); | ||
49 | 54 | ||
50 | void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq) | 55 | void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq) |
51 | { | 56 | { |
@@ -143,7 +148,7 @@ void msi_bitmap_free(struct msi_bitmap *bmp) | |||
143 | #define check(x) \ | 148 | #define check(x) \ |
144 | if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__); | 149 | if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__); |
145 | 150 | ||
146 | void __init test_basics(void) | 151 | static void __init test_basics(void) |
147 | { | 152 | { |
148 | struct msi_bitmap bmp; | 153 | struct msi_bitmap bmp; |
149 | int i, size = 512; | 154 | int i, size = 512; |
@@ -180,6 +185,15 @@ void __init test_basics(void) | |||
180 | msi_bitmap_free_hwirqs(&bmp, size / 2, 1); | 185 | msi_bitmap_free_hwirqs(&bmp, size / 2, 1); |
181 | check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2); | 186 | check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2); |
182 | 187 | ||
188 | /* Check we get a naturally aligned offset */ | ||
189 | check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0); | ||
190 | check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0); | ||
191 | check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0); | ||
192 | check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0); | ||
193 | check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0); | ||
194 | check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0); | ||
195 | check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0); | ||
196 | |||
183 | msi_bitmap_free(&bmp); | 197 | msi_bitmap_free(&bmp); |
184 | 198 | ||
185 | /* Clients may check bitmap == NULL for "not-allocated" */ | 199 | /* Clients may check bitmap == NULL for "not-allocated" */ |
@@ -188,7 +202,7 @@ void __init test_basics(void) | |||
188 | kfree(bmp.bitmap); | 202 | kfree(bmp.bitmap); |
189 | } | 203 | } |
190 | 204 | ||
191 | void __init test_of_node(void) | 205 | static void __init test_of_node(void) |
192 | { | 206 | { |
193 | u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 }; | 207 | u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 }; |
194 | const char *expected_str = "0-9,20-24,28-39,41-99,220-255"; | 208 | const char *expected_str = "0-9,20-24,28-39,41-99,220-255"; |
@@ -236,7 +250,7 @@ void __init test_of_node(void) | |||
236 | kfree(bmp.bitmap); | 250 | kfree(bmp.bitmap); |
237 | } | 251 | } |
238 | 252 | ||
239 | int __init msi_bitmap_selftest(void) | 253 | static int __init msi_bitmap_selftest(void) |
240 | { | 254 | { |
241 | printk(KERN_DEBUG "Running MSI bitmap self-tests ...\n"); | 255 | printk(KERN_DEBUG "Running MSI bitmap self-tests ...\n"); |
242 | 256 | ||
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c index c2dba7db71ad..026bbc3b2c47 100644 --- a/arch/powerpc/sysdev/mv64x60_dev.c +++ b/arch/powerpc/sysdev/mv64x60_dev.c | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | /* These functions provide the necessary setup for the mv64x60 drivers. */ | 24 | /* These functions provide the necessary setup for the mv64x60 drivers. */ |
25 | 25 | ||
26 | static struct of_device_id __initdata of_mv64x60_devices[] = { | 26 | static const struct of_device_id of_mv64x60_devices[] __initconst = { |
27 | { .compatible = "marvell,mv64306-devctrl", }, | 27 | { .compatible = "marvell,mv64306-devctrl", }, |
28 | {} | 28 | {} |
29 | }; | 29 | }; |
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c index 5aaf86c03893..13e67d93a7c1 100644 --- a/arch/powerpc/sysdev/pmi.c +++ b/arch/powerpc/sysdev/pmi.c | |||
@@ -101,7 +101,7 @@ out: | |||
101 | } | 101 | } |
102 | 102 | ||
103 | 103 | ||
104 | static struct of_device_id pmi_match[] = { | 104 | static const struct of_device_id pmi_match[] = { |
105 | { .type = "ibm,pmi", .name = "ibm,pmi" }, | 105 | { .type = "ibm,pmi", .name = "ibm,pmi" }, |
106 | { .type = "ibm,pmi" }, | 106 | { .type = "ibm,pmi" }, |
107 | {}, | 107 | {}, |
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index de8d9483bbe8..2fc4cf1b7557 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c | |||
@@ -155,6 +155,31 @@ static void icp_native_cause_ipi(int cpu, unsigned long data) | |||
155 | icp_native_set_qirr(cpu, IPI_PRIORITY); | 155 | icp_native_set_qirr(cpu, IPI_PRIORITY); |
156 | } | 156 | } |
157 | 157 | ||
158 | /* | ||
159 | * Called when an interrupt is received on an off-line CPU to | ||
160 | * clear the interrupt, so that the CPU can go back to nap mode. | ||
161 | */ | ||
162 | void icp_native_flush_interrupt(void) | ||
163 | { | ||
164 | unsigned int xirr = icp_native_get_xirr(); | ||
165 | unsigned int vec = xirr & 0x00ffffff; | ||
166 | |||
167 | if (vec == XICS_IRQ_SPURIOUS) | ||
168 | return; | ||
169 | if (vec == XICS_IPI) { | ||
170 | /* Clear pending IPI */ | ||
171 | int cpu = smp_processor_id(); | ||
172 | kvmppc_set_host_ipi(cpu, 0); | ||
173 | icp_native_set_qirr(cpu, 0xff); | ||
174 | } else { | ||
175 | pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n", | ||
176 | vec); | ||
177 | xics_mask_unknown_vec(vec); | ||
178 | } | ||
179 | /* EOI the interrupt */ | ||
180 | icp_native_set_xirr(xirr); | ||
181 | } | ||
182 | |||
158 | void xics_wake_cpu(int cpu) | 183 | void xics_wake_cpu(int cpu) |
159 | { | 184 | { |
160 | icp_native_set_qirr(cpu, IPI_PRIORITY); | 185 | icp_native_set_qirr(cpu, IPI_PRIORITY); |
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 83f943a8e0db..56f0524e47a6 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c | |||
@@ -265,7 +265,7 @@ static void __init xilinx_i8259_setup_cascade(void) | |||
265 | static inline void xilinx_i8259_setup_cascade(void) { return; } | 265 | static inline void xilinx_i8259_setup_cascade(void) { return; } |
266 | #endif /* defined(CONFIG_PPC_I8259) */ | 266 | #endif /* defined(CONFIG_PPC_I8259) */ |
267 | 267 | ||
268 | static struct of_device_id xilinx_intc_match[] __initconst = { | 268 | static const struct of_device_id xilinx_intc_match[] __initconst = { |
269 | { .compatible = "xlnx,opb-intc-1.00.c", }, | 269 | { .compatible = "xlnx,opb-intc-1.00.c", }, |
270 | { .compatible = "xlnx,xps-intc-1.00.a", }, | 270 | { .compatible = "xlnx,xps-intc-1.00.a", }, |
271 | {} | 271 | {} |
diff --git a/arch/powerpc/sysdev/xilinx_pci.c b/arch/powerpc/sysdev/xilinx_pci.c index 1453b0eed220..fea5667699ed 100644 --- a/arch/powerpc/sysdev/xilinx_pci.c +++ b/arch/powerpc/sysdev/xilinx_pci.c | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | #define PCI_HOST_ENABLE_CMD PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | 28 | #define PCI_HOST_ENABLE_CMD PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
29 | 29 | ||
30 | static struct of_device_id xilinx_pci_match[] = { | 30 | static const struct of_device_id xilinx_pci_match[] = { |
31 | { .compatible = "xlnx,plbv46-pci-1.03.a", }, | 31 | { .compatible = "xlnx,plbv46-pci-1.03.a", }, |
32 | {} | 32 | {} |
33 | }; | 33 | }; |
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index 7615180d7ee3..1f49d97a70ea 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c | |||
@@ -611,7 +611,7 @@ static int __init pmac_cpufreq_setup(void) | |||
611 | struct device_node *cpunode; | 611 | struct device_node *cpunode; |
612 | const u32 *value; | 612 | const u32 *value; |
613 | 613 | ||
614 | if (strstr(cmd_line, "nocpufreq")) | 614 | if (strstr(boot_command_line, "nocpufreq")) |
615 | return 0; | 615 | return 0; |
616 | 616 | ||
617 | /* Get first CPU node */ | 617 | /* Get first CPU node */ |
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 9e9c56758a08..226179b975a0 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c | |||
@@ -411,6 +411,7 @@ adb_poll(void) | |||
411 | return; | 411 | return; |
412 | adb_controller->poll(); | 412 | adb_controller->poll(); |
413 | } | 413 | } |
414 | EXPORT_SYMBOL(adb_poll); | ||
414 | 415 | ||
415 | static void adb_sync_req_done(struct adb_request *req) | 416 | static void adb_sync_req_done(struct adb_request *req) |
416 | { | 417 | { |
@@ -460,6 +461,7 @@ adb_request(struct adb_request *req, void (*done)(struct adb_request *), | |||
460 | 461 | ||
461 | return rc; | 462 | return rc; |
462 | } | 463 | } |
464 | EXPORT_SYMBOL(adb_request); | ||
463 | 465 | ||
464 | /* Ultimately this should return the number of devices with | 466 | /* Ultimately this should return the number of devices with |
465 | the given default id. | 467 | the given default id. |
@@ -495,6 +497,7 @@ adb_register(int default_id, int handler_id, struct adb_ids *ids, | |||
495 | mutex_unlock(&adb_handler_mutex); | 497 | mutex_unlock(&adb_handler_mutex); |
496 | return ids->nids; | 498 | return ids->nids; |
497 | } | 499 | } |
500 | EXPORT_SYMBOL(adb_register); | ||
498 | 501 | ||
499 | int | 502 | int |
500 | adb_unregister(int index) | 503 | adb_unregister(int index) |
@@ -516,6 +519,7 @@ adb_unregister(int index) | |||
516 | mutex_unlock(&adb_handler_mutex); | 519 | mutex_unlock(&adb_handler_mutex); |
517 | return ret; | 520 | return ret; |
518 | } | 521 | } |
522 | EXPORT_SYMBOL(adb_unregister); | ||
519 | 523 | ||
520 | void | 524 | void |
521 | adb_input(unsigned char *buf, int nb, int autopoll) | 525 | adb_input(unsigned char *buf, int nb, int autopoll) |
@@ -582,6 +586,7 @@ adb_try_handler_change(int address, int new_id) | |||
582 | mutex_unlock(&adb_handler_mutex); | 586 | mutex_unlock(&adb_handler_mutex); |
583 | return ret; | 587 | return ret; |
584 | } | 588 | } |
589 | EXPORT_SYMBOL(adb_try_handler_change); | ||
585 | 590 | ||
586 | int | 591 | int |
587 | adb_get_infos(int address, int *original_address, int *handler_id) | 592 | adb_get_infos(int address, int *original_address, int *handler_id) |
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c index d61f271d2207..bad18130f125 100644 --- a/drivers/macintosh/via-cuda.c +++ b/drivers/macintosh/via-cuda.c | |||
@@ -379,6 +379,7 @@ cuda_request(struct adb_request *req, void (*done)(struct adb_request *), | |||
379 | req->reply_expected = 1; | 379 | req->reply_expected = 1; |
380 | return cuda_write(req); | 380 | return cuda_write(req); |
381 | } | 381 | } |
382 | EXPORT_SYMBOL(cuda_request); | ||
382 | 383 | ||
383 | static int | 384 | static int |
384 | cuda_write(struct adb_request *req) | 385 | cuda_write(struct adb_request *req) |
@@ -441,6 +442,7 @@ cuda_poll(void) | |||
441 | if (cuda_irq) | 442 | if (cuda_irq) |
442 | enable_irq(cuda_irq); | 443 | enable_irq(cuda_irq); |
443 | } | 444 | } |
445 | EXPORT_SYMBOL(cuda_poll); | ||
444 | 446 | ||
445 | static irqreturn_t | 447 | static irqreturn_t |
446 | cuda_interrupt(int irq, void *arg) | 448 | cuda_interrupt(int irq, void *arg) |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index b841180c7c74..bbeb4516facf 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -527,4 +527,5 @@ source "drivers/misc/vmw_vmci/Kconfig" | |||
527 | source "drivers/misc/mic/Kconfig" | 527 | source "drivers/misc/mic/Kconfig" |
528 | source "drivers/misc/genwqe/Kconfig" | 528 | source "drivers/misc/genwqe/Kconfig" |
529 | source "drivers/misc/echo/Kconfig" | 529 | source "drivers/misc/echo/Kconfig" |
530 | source "drivers/misc/cxl/Kconfig" | ||
530 | endmenu | 531 | endmenu |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 5497d026e651..7d5c4cd118c4 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -55,3 +55,4 @@ obj-y += mic/ | |||
55 | obj-$(CONFIG_GENWQE) += genwqe/ | 55 | obj-$(CONFIG_GENWQE) += genwqe/ |
56 | obj-$(CONFIG_ECHO) += echo/ | 56 | obj-$(CONFIG_ECHO) += echo/ |
57 | obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o | 57 | obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o |
58 | obj-$(CONFIG_CXL_BASE) += cxl/ | ||
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig new file mode 100644 index 000000000000..a990b39b4dfb --- /dev/null +++ b/drivers/misc/cxl/Kconfig | |||
@@ -0,0 +1,25 @@ | |||
1 | # | ||
2 | # IBM Coherent Accelerator (CXL) compatible devices | ||
3 | # | ||
4 | |||
5 | config CXL_BASE | ||
6 | bool | ||
7 | default n | ||
8 | select PPC_COPRO_BASE | ||
9 | |||
10 | config CXL | ||
11 | tristate "Support for IBM Coherent Accelerators (CXL)" | ||
12 | depends on PPC_POWERNV && PCI_MSI | ||
13 | select CXL_BASE | ||
14 | default m | ||
15 | help | ||
16 | Select this option to enable driver support for IBM Coherent | ||
17 | Accelerators (CXL). CXL is otherwise known as Coherent Accelerator | ||
18 | Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be | ||
19 | coherently attached to a CPU via an MMU. This driver enables | ||
20 | userspace programs to access these accelerators via /dev/cxl/afuM.N | ||
21 | devices. | ||
22 | |||
23 | CAPI adapters are found in POWER8 based systems. | ||
24 | |||
25 | If unsure, say N. | ||
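Per the help text, userspace reaches an accelerator through a /dev/cxl/afuM.N character device. A minimal sketch of opening such a node, assuming a hypothetical /dev/cxl/afu0.0 path; the actual node names and the ioctl interface are defined by the driver's userspace header, not shown here:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/dev/cxl/afu0.0";   /* hypothetical afuM.N node */
        int fd = open(path, O_RDWR);

        if (fd < 0) {
                perror(path);
                return 1;
        }
        /* ioctls from the cxl userspace API would be issued here */
        close(fd);
        return 0;
}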
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile new file mode 100644 index 000000000000..165e98fef2c2 --- /dev/null +++ b/drivers/misc/cxl/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | cxl-y += main.o file.o irq.o fault.o native.o context.o sysfs.o debugfs.o pci.o | ||
2 | obj-$(CONFIG_CXL) += cxl.o | ||
3 | obj-$(CONFIG_CXL_BASE) += base.o | ||
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c new file mode 100644 index 000000000000..0654ad83675e --- /dev/null +++ b/drivers/misc/cxl/base.c | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/rcupdate.h> | ||
12 | #include <asm/errno.h> | ||
13 | #include <misc/cxl.h> | ||
14 | #include "cxl.h" | ||
15 | |||
16 | /* protected by rcu */ | ||
17 | static struct cxl_calls *cxl_calls; | ||
18 | |||
19 | atomic_t cxl_use_count = ATOMIC_INIT(0); | ||
20 | EXPORT_SYMBOL(cxl_use_count); | ||
21 | |||
22 | #ifdef CONFIG_CXL_MODULE | ||
23 | |||
24 | static inline struct cxl_calls *cxl_calls_get(void) | ||
25 | { | ||
26 | struct cxl_calls *calls = NULL; | ||
27 | |||
28 | rcu_read_lock(); | ||
29 | calls = rcu_dereference(cxl_calls); | ||
30 | if (calls && !try_module_get(calls->owner)) | ||
31 | calls = NULL; | ||
32 | rcu_read_unlock(); | ||
33 | |||
34 | return calls; | ||
35 | } | ||
36 | |||
37 | static inline void cxl_calls_put(struct cxl_calls *calls) | ||
38 | { | ||
39 | BUG_ON(calls != cxl_calls); | ||
40 | |||
41 | /* we don't need to rcu this, as we hold a reference to the module */ | ||
42 | module_put(cxl_calls->owner); | ||
43 | } | ||
44 | |||
45 | #else /* !defined CONFIG_CXL_MODULE */ | ||
46 | |||
47 | static inline struct cxl_calls *cxl_calls_get(void) | ||
48 | { | ||
49 | return cxl_calls; | ||
50 | } | ||
51 | |||
52 | static inline void cxl_calls_put(struct cxl_calls *calls) { } | ||
53 | |||
54 | #endif /* CONFIG_CXL_MODULE */ | ||
55 | |||
56 | void cxl_slbia(struct mm_struct *mm) | ||
57 | { | ||
58 | struct cxl_calls *calls; | ||
59 | |||
60 | calls = cxl_calls_get(); | ||
61 | if (!calls) | ||
62 | return; | ||
63 | |||
64 | if (cxl_ctx_in_use()) | ||
65 | calls->cxl_slbia(mm); | ||
66 | |||
67 | cxl_calls_put(calls); | ||
68 | } | ||
69 | |||
70 | int register_cxl_calls(struct cxl_calls *calls) | ||
71 | { | ||
72 | if (cxl_calls) | ||
73 | return -EBUSY; | ||
74 | |||
75 | rcu_assign_pointer(cxl_calls, calls); | ||
76 | return 0; | ||
77 | } | ||
78 | EXPORT_SYMBOL_GPL(register_cxl_calls); | ||
79 | |||
80 | void unregister_cxl_calls(struct cxl_calls *calls) | ||
81 | { | ||
82 | BUG_ON(cxl_calls->owner != calls->owner); | ||
83 | RCU_INIT_POINTER(cxl_calls, NULL); | ||
84 | synchronize_rcu(); | ||
85 | } | ||
86 | EXPORT_SYMBOL_GPL(unregister_cxl_calls); | ||
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c new file mode 100644 index 000000000000..cca472109135 --- /dev/null +++ b/drivers/misc/cxl/context.c | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/bitmap.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/pid.h> | ||
15 | #include <linux/fs.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/debugfs.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/idr.h> | ||
20 | #include <asm/cputable.h> | ||
21 | #include <asm/current.h> | ||
22 | #include <asm/copro.h> | ||
23 | |||
24 | #include "cxl.h" | ||
25 | |||
26 | /* | ||
27 | * Allocates space for a CXL context. | ||
28 | */ | ||
29 | struct cxl_context *cxl_context_alloc(void) | ||
30 | { | ||
31 | return kzalloc(sizeof(struct cxl_context), GFP_KERNEL); | ||
32 | } | ||
33 | |||
34 | /* | ||
35 | * Initialises a CXL context. | ||
36 | */ | ||
37 | int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master) | ||
38 | { | ||
39 | int i; | ||
40 | |||
41 | spin_lock_init(&ctx->sste_lock); | ||
42 | ctx->afu = afu; | ||
43 | ctx->master = master; | ||
44 | ctx->pid = NULL; /* Set in start work ioctl */ | ||
45 | |||
46 | /* | ||
47 | * Allocate the segment table before we put it in the IDR so that we | ||
48 | * can always access it when dereferenced from IDR. For the same | ||
49 | * reason, the segment table is only destroyed after the context is | ||
50 | * removed from the IDR. Access to this in the IOCTL is protected by | ||
51 | * Linux filesystem semantics (can't IOCTL until open is complete). | ||
52 | */ | ||
53 | i = cxl_alloc_sst(ctx); | ||
54 | if (i) | ||
55 | return i; | ||
56 | |||
57 | INIT_WORK(&ctx->fault_work, cxl_handle_fault); | ||
58 | |||
59 | init_waitqueue_head(&ctx->wq); | ||
60 | spin_lock_init(&ctx->lock); | ||
61 | |||
62 | ctx->irq_bitmap = NULL; | ||
63 | ctx->pending_irq = false; | ||
64 | ctx->pending_fault = false; | ||
65 | ctx->pending_afu_err = false; | ||
66 | |||
67 | /* | ||
68 | * When we have to destroy all contexts in cxl_context_detach_all() we | ||
69 | * end up with afu_release_irqs() called from inside a | ||
70 | * idr_for_each_entry(). Hence we need to make sure that anything | ||
71 | * dereferenced from this IDR is ok before we allocate the IDR here. | ||
72 | * This clears out the IRQ ranges to ensure this. | ||
73 | */ | ||
74 | for (i = 0; i < CXL_IRQ_RANGES; i++) | ||
75 | ctx->irqs.range[i] = 0; | ||
76 | |||
77 | mutex_init(&ctx->status_mutex); | ||
78 | |||
79 | ctx->status = OPENED; | ||
80 | |||
81 | /* | ||
82 | * Allocating the IDR! We had better make sure that everything which | ||
83 | * dereferences from it is set up. | ||
84 | */ | ||
85 | idr_preload(GFP_KERNEL); | ||
86 | spin_lock(&afu->contexts_lock); | ||
87 | i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, | ||
88 | ctx->afu->num_procs, GFP_NOWAIT); | ||
89 | spin_unlock(&afu->contexts_lock); | ||
90 | idr_preload_end(); | ||
91 | if (i < 0) | ||
92 | return i; | ||
93 | |||
94 | ctx->pe = i; | ||
95 | ctx->elem = &ctx->afu->spa[i]; | ||
96 | ctx->pe_inserted = false; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
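cxl_context_init() above is careful to finish every field an IDR iterator might dereference before the context is inserted into the IDR. A minimal userspace sketch of that publish-only-when-complete ordering, using a hypothetical fixed-size registry array in place of the kernel IDR:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctx {
        char name[16];
        int irq_ranges;
};

#define MAX_CTX 8
static struct ctx *registry[MAX_CTX];

/* Make the object visible to lookups only as the very last step. */
static int publish(struct ctx *c)
{
        for (int i = 0; i < MAX_CTX; i++) {
                if (!registry[i]) {
                        registry[i] = c;
                        return i;
                }
        }
        return -1;
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof(*c));

        if (!c)
                return 1;
        /* initialise everything a lookup might dereference... */
        snprintf(c->name, sizeof(c->name), "ctx0");
        c->irq_ranges = 0;
        /* ...then publish it */
        printf("published %s as id %d\n", c->name, publish(c));
        free(c);
        return 0;
}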
100 | /* | ||
101 | * Map a per-context mmio space into the given vma. | ||
102 | */ | ||
103 | int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) | ||
104 | { | ||
105 | u64 len = vma->vm_end - vma->vm_start; | ||
106 | len = min(len, ctx->psn_size); | ||
107 | |||
108 | if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { | ||
109 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
110 | return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size); | ||
111 | } | ||
112 | |||
113 | /* make sure there is a valid per process space for this AFU */ | ||
114 | if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { | ||
115 | pr_devel("AFU doesn't support mmio space\n"); | ||
116 | return -EINVAL; | ||
117 | } | ||
118 | |||
119 | /* Can't mmap until the AFU is enabled */ | ||
120 | if (!ctx->afu->enabled) | ||
121 | return -EBUSY; | ||
122 | |||
123 | pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, | ||
124 | ctx->psn_phys, ctx->pe , ctx->master); | ||
125 | |||
126 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
127 | return vm_iomap_memory(vma, ctx->psn_phys, len); | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * Detach a context from the hardware. This disables interrupts and doesn't | ||
132 | * return until all outstanding interrupts for this context have completed. The | ||
133 | * hardware should no longer access *ctx after this has returned. | ||
134 | */ | ||
135 | static void __detach_context(struct cxl_context *ctx) | ||
136 | { | ||
137 | enum cxl_context_status status; | ||
138 | |||
139 | mutex_lock(&ctx->status_mutex); | ||
140 | status = ctx->status; | ||
141 | ctx->status = CLOSED; | ||
142 | mutex_unlock(&ctx->status_mutex); | ||
143 | if (status != STARTED) | ||
144 | return; | ||
145 | |||
146 | WARN_ON(cxl_detach_process(ctx)); | ||
147 | afu_release_irqs(ctx); | ||
148 | flush_work(&ctx->fault_work); /* Only needed for dedicated process */ | ||
149 | wake_up_all(&ctx->wq); | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * Detach the given context from the AFU. This doesn't actually | ||
154 | * free the context but it should stop the context running in hardware | ||
155 | * (i.e. prevent this context from generating any further interrupts | ||
156 | * so that it can be freed). | ||
157 | */ | ||
158 | void cxl_context_detach(struct cxl_context *ctx) | ||
159 | { | ||
160 | __detach_context(ctx); | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * Detach all contexts on the given AFU. | ||
165 | */ | ||
166 | void cxl_context_detach_all(struct cxl_afu *afu) | ||
167 | { | ||
168 | struct cxl_context *ctx; | ||
169 | int tmp; | ||
170 | |||
171 | rcu_read_lock(); | ||
172 | idr_for_each_entry(&afu->contexts_idr, ctx, tmp) | ||
173 | /* | ||
174 | * Anything done in here needs to be setup before the IDR is | ||
175 | * created and torn down after the IDR removed | ||
176 | */ | ||
177 | __detach_context(ctx); | ||
178 | rcu_read_unlock(); | ||
179 | } | ||
180 | |||
181 | void cxl_context_free(struct cxl_context *ctx) | ||
182 | { | ||
183 | spin_lock(&ctx->afu->contexts_lock); | ||
184 | idr_remove(&ctx->afu->contexts_idr, ctx->pe); | ||
185 | spin_unlock(&ctx->afu->contexts_lock); | ||
186 | synchronize_rcu(); | ||
187 | |||
188 | free_page((u64)ctx->sstp); | ||
189 | ctx->sstp = NULL; | ||
190 | |||
191 | put_pid(ctx->pid); | ||
192 | kfree(ctx); | ||
193 | } | ||
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h new file mode 100644 index 000000000000..3d2b8677ec8a --- /dev/null +++ b/drivers/misc/cxl/cxl.h | |||
@@ -0,0 +1,629 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _CXL_H_ | ||
11 | #define _CXL_H_ | ||
12 | |||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/semaphore.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/cdev.h> | ||
18 | #include <linux/pid.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <asm/cputable.h> | ||
22 | #include <asm/mmu.h> | ||
23 | #include <asm/reg.h> | ||
24 | #include <misc/cxl.h> | ||
25 | |||
26 | #include <uapi/misc/cxl.h> | ||
27 | |||
28 | extern uint cxl_verbose; | ||
29 | |||
30 | #define CXL_TIMEOUT 5 | ||
31 | |||
32 | /* | ||
33 | * Bump version each time a user API change is made, whether it is | ||
34 | * backwards compatible or not. | ||
35 | */ | ||
36 | #define CXL_API_VERSION 1 | ||
37 | #define CXL_API_VERSION_COMPATIBLE 1 | ||
38 | |||
39 | /* | ||
40 | * Opaque types to avoid accidentally passing registers for the wrong MMIO | ||
41 | * | ||
42 | * At the end of the day, I'm not married to using typedef here, but it might | ||
43 | * (and has!) help avoid bugs like mixing up CXL_PSL_CtxTime and | ||
44 | * CXL_PSL_CtxTime_An, or calling cxl_p1n_write instead of cxl_p1_write. | ||
45 | * | ||
46 | * I'm quite happy if these are changed back to #defines before upstreaming, it | ||
47 | * should be little more than a regexp search+replace operation in this file. | ||
48 | */ | ||
49 | typedef struct { | ||
50 | const int x; | ||
51 | } cxl_p1_reg_t; | ||
52 | typedef struct { | ||
53 | const int x; | ||
54 | } cxl_p1n_reg_t; | ||
55 | typedef struct { | ||
56 | const int x; | ||
57 | } cxl_p2n_reg_t; | ||
58 | #define cxl_reg_off(reg) \ | ||
59 | (reg.x) | ||
60 | |||
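The comment above explains why register offsets are wrapped in single-member structs: distinct wrapper types let the compiler reject, say, a privilege-1 offset passed to a privilege-2 accessor. A minimal standalone sketch of that pattern, using hypothetical demo_* names rather than the cxl definitions:

#include <stdio.h>

typedef struct { const int x; } demo_p1_reg_t;
typedef struct { const int x; } demo_p2_reg_t;

static const demo_p1_reg_t DEMO_P1_CTRL   = {0x20};
static const demo_p2_reg_t DEMO_P2_STATUS = {0x60};

static void p1_write(demo_p1_reg_t reg, unsigned long val)
{
        printf("p1 space: write 0x%lx at offset 0x%x\n", val, (unsigned)reg.x);
}

int main(void)
{
        p1_write(DEMO_P1_CTRL, 0x1);
        /* p1_write(DEMO_P2_STATUS, 0x1);  -- rejected at compile time */
        (void)DEMO_P2_STATUS;
        return 0;
}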
61 | /* Memory maps. Ref CXL Appendix A */ | ||
62 | |||
63 | /* PSL Privilege 1 Memory Map */ | ||
64 | /* Configuration and Control area */ | ||
65 | static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000}; | ||
66 | static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008}; | ||
67 | static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010}; | ||
68 | static const cxl_p1_reg_t CXL_PSL_KEY2 = {0x0018}; | ||
69 | static const cxl_p1_reg_t CXL_PSL_Control = {0x0020}; | ||
70 | /* Downloading */ | ||
71 | static const cxl_p1_reg_t CXL_PSL_DLCNTL = {0x0060}; | ||
72 | static const cxl_p1_reg_t CXL_PSL_DLADDR = {0x0068}; | ||
73 | |||
74 | /* PSL Lookaside Buffer Management Area */ | ||
75 | static const cxl_p1_reg_t CXL_PSL_LBISEL = {0x0080}; | ||
76 | static const cxl_p1_reg_t CXL_PSL_SLBIE = {0x0088}; | ||
77 | static const cxl_p1_reg_t CXL_PSL_SLBIA = {0x0090}; | ||
78 | static const cxl_p1_reg_t CXL_PSL_TLBIE = {0x00A0}; | ||
79 | static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8}; | ||
80 | static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0}; | ||
81 | |||
82 | /* 0x00C0:7EFF Implementation dependent area */ | ||
83 | static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100}; | ||
84 | static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108}; | ||
85 | static const cxl_p1_reg_t CXL_PSL_VERSION = {0x0118}; | ||
86 | static const cxl_p1_reg_t CXL_PSL_RESLCKTO = {0x0128}; | ||
87 | static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148}; | ||
88 | static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150}; | ||
89 | static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158}; | ||
90 | static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170}; | ||
91 | /* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */ | ||
92 | /* 0x8000:FFFF Reserved PCIe MSI-X Table Area */ | ||
93 | |||
94 | /* PSL Slice Privilege 1 Memory Map */ | ||
95 | /* Configuration Area */ | ||
96 | static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00}; | ||
97 | static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08}; | ||
98 | static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10}; | ||
99 | static const cxl_p1n_reg_t CXL_PSL_SPOffset_An = {0x18}; | ||
100 | static const cxl_p1n_reg_t CXL_PSL_ID_An = {0x20}; | ||
101 | static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28}; | ||
102 | /* Memory Management and Lookaside Buffer Management */ | ||
103 | static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30}; | ||
104 | static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38}; | ||
105 | /* Pointer Area */ | ||
106 | static const cxl_p1n_reg_t CXL_HAURP_An = {0x80}; | ||
107 | static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88}; | ||
108 | static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90}; | ||
109 | /* Control Area */ | ||
110 | static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0}; | ||
111 | static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8}; | ||
112 | static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0}; | ||
113 | static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8}; | ||
114 | /* 0xC0:FF Implementation Dependent Area */ | ||
115 | static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0}; | ||
116 | static const cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8}; | ||
117 | static const cxl_p1n_reg_t CXL_PSL_APCALLOC_A = {0xD0}; | ||
118 | static const cxl_p1n_reg_t CXL_PSL_COALLOC_A = {0xD8}; | ||
119 | static const cxl_p1n_reg_t CXL_PSL_RXCTL_A = {0xE0}; | ||
120 | static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8}; | ||
121 | |||
122 | /* PSL Slice Privilege 2 Memory Map */ | ||
123 | /* Configuration and Control Area */ | ||
124 | static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000}; | ||
125 | static const cxl_p2n_reg_t CXL_CSRP_An = {0x008}; | ||
126 | static const cxl_p2n_reg_t CXL_AURP0_An = {0x010}; | ||
127 | static const cxl_p2n_reg_t CXL_AURP1_An = {0x018}; | ||
128 | static const cxl_p2n_reg_t CXL_SSTP0_An = {0x020}; | ||
129 | static const cxl_p2n_reg_t CXL_SSTP1_An = {0x028}; | ||
130 | static const cxl_p2n_reg_t CXL_PSL_AMR_An = {0x030}; | ||
131 | /* Segment Lookaside Buffer Management */ | ||
132 | static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040}; | ||
133 | static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048}; | ||
134 | static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050}; | ||
135 | /* Interrupt Registers */ | ||
136 | static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060}; | ||
137 | static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068}; | ||
138 | static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070}; | ||
139 | static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078}; | ||
140 | static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080}; | ||
141 | static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088}; | ||
142 | /* AFU Registers */ | ||
143 | static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090}; | ||
144 | static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098}; | ||
145 | /* Work Element Descriptor */ | ||
146 | static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; | ||
147 | /* 0x0C0:FFF Implementation Dependent Area */ | ||
148 | |||
149 | #define CXL_PSL_SPAP_Addr 0x0ffffffffffff000ULL | ||
150 | #define CXL_PSL_SPAP_Size 0x0000000000000ff0ULL | ||
151 | #define CXL_PSL_SPAP_Size_Shift 4 | ||
152 | #define CXL_PSL_SPAP_V 0x0000000000000001ULL | ||
153 | |||
154 | /****** CXL_PSL_DLCNTL *****************************************************/ | ||
155 | #define CXL_PSL_DLCNTL_D (0x1ull << (63-28)) | ||
156 | #define CXL_PSL_DLCNTL_C (0x1ull << (63-29)) | ||
157 | #define CXL_PSL_DLCNTL_E (0x1ull << (63-30)) | ||
158 | #define CXL_PSL_DLCNTL_S (0x1ull << (63-31)) | ||
159 | #define CXL_PSL_DLCNTL_CE (CXL_PSL_DLCNTL_C | CXL_PSL_DLCNTL_E) | ||
160 | #define CXL_PSL_DLCNTL_DCES (CXL_PSL_DLCNTL_D | CXL_PSL_DLCNTL_CE | CXL_PSL_DLCNTL_S) | ||
161 | |||
162 | /****** CXL_PSL_SR_An ******************************************************/ | ||
163 | #define CXL_PSL_SR_An_SF MSR_SF /* 64bit */ | ||
164 | #define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */ | ||
165 | #define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */ | ||
166 | #define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */ | ||
167 | #define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */ | ||
168 | #define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */ | ||
169 | #define CXL_PSL_SR_An_US (1ull << (63-56)) /* User state, GA1: X */ | ||
170 | #define CXL_PSL_SR_An_SC (1ull << (63-58)) /* Segment Table secondary hash */ | ||
171 | #define CXL_PSL_SR_An_R MSR_DR /* Relocate, GA1: 1 */ | ||
172 | #define CXL_PSL_SR_An_MP (1ull << (63-62)) /* Master Process */ | ||
173 | #define CXL_PSL_SR_An_LE (1ull << (63-63)) /* Little Endian */ | ||
174 | |||
175 | /****** CXL_PSL_LLCMD_An ****************************************************/ | ||
176 | #define CXL_LLCMD_TERMINATE 0x0001000000000000ULL | ||
177 | #define CXL_LLCMD_REMOVE 0x0002000000000000ULL | ||
178 | #define CXL_LLCMD_SUSPEND 0x0003000000000000ULL | ||
179 | #define CXL_LLCMD_RESUME 0x0004000000000000ULL | ||
180 | #define CXL_LLCMD_ADD 0x0005000000000000ULL | ||
181 | #define CXL_LLCMD_UPDATE 0x0006000000000000ULL | ||
182 | #define CXL_LLCMD_HANDLE_MASK 0x000000000000ffffULL | ||
183 | |||
184 | /****** CXL_PSL_ID_An ****************************************************/ | ||
185 | #define CXL_PSL_ID_An_F (1ull << (63-31)) | ||
186 | #define CXL_PSL_ID_An_L (1ull << (63-30)) | ||
187 | |||
188 | /****** CXL_PSL_SCNTL_An ****************************************************/ | ||
189 | #define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15)) | ||
190 | /* Programming Modes: */ | ||
191 | #define CXL_PSL_SCNTL_An_PM_MASK (0xffffull << (63-31)) | ||
192 | #define CXL_PSL_SCNTL_An_PM_Shared (0x0000ull << (63-31)) | ||
193 | #define CXL_PSL_SCNTL_An_PM_OS (0x0001ull << (63-31)) | ||
194 | #define CXL_PSL_SCNTL_An_PM_Process (0x0002ull << (63-31)) | ||
195 | #define CXL_PSL_SCNTL_An_PM_AFU (0x0004ull << (63-31)) | ||
196 | #define CXL_PSL_SCNTL_An_PM_AFU_PBT (0x0104ull << (63-31)) | ||
197 | /* Purge Status (ro) */ | ||
198 | #define CXL_PSL_SCNTL_An_Ps_MASK (0x3ull << (63-39)) | ||
199 | #define CXL_PSL_SCNTL_An_Ps_Pending (0x1ull << (63-39)) | ||
200 | #define CXL_PSL_SCNTL_An_Ps_Complete (0x3ull << (63-39)) | ||
201 | /* Purge */ | ||
202 | #define CXL_PSL_SCNTL_An_Pc (0x1ull << (63-48)) | ||
203 | /* Suspend Status (ro) */ | ||
204 | #define CXL_PSL_SCNTL_An_Ss_MASK (0x3ull << (63-55)) | ||
205 | #define CXL_PSL_SCNTL_An_Ss_Pending (0x1ull << (63-55)) | ||
206 | #define CXL_PSL_SCNTL_An_Ss_Complete (0x3ull << (63-55)) | ||
207 | /* Suspend Control */ | ||
208 | #define CXL_PSL_SCNTL_An_Sc (0x1ull << (63-63)) | ||
209 | |||
210 | /* AFU Slice Enable Status (ro) */ | ||
211 | #define CXL_AFU_Cntl_An_ES_MASK (0x7ull << (63-2)) | ||
212 | #define CXL_AFU_Cntl_An_ES_Disabled (0x0ull << (63-2)) | ||
213 | #define CXL_AFU_Cntl_An_ES_Enabled (0x4ull << (63-2)) | ||
214 | /* AFU Slice Enable */ | ||
215 | #define CXL_AFU_Cntl_An_E (0x1ull << (63-3)) | ||
216 | /* AFU Slice Reset status (ro) */ | ||
217 | #define CXL_AFU_Cntl_An_RS_MASK (0x3ull << (63-5)) | ||
218 | #define CXL_AFU_Cntl_An_RS_Pending (0x1ull << (63-5)) | ||
219 | #define CXL_AFU_Cntl_An_RS_Complete (0x2ull << (63-5)) | ||
220 | /* AFU Slice Reset */ | ||
221 | #define CXL_AFU_Cntl_An_RA (0x1ull << (63-7)) | ||
222 | |||
223 | /****** CXL_SSTP0/1_An ******************************************************/ | ||
224 | /* These top bits are for the segment that CONTAINS the segment table */ | ||
225 | #define CXL_SSTP0_An_B_SHIFT SLB_VSID_SSIZE_SHIFT | ||
226 | #define CXL_SSTP0_An_KS (1ull << (63-2)) | ||
227 | #define CXL_SSTP0_An_KP (1ull << (63-3)) | ||
228 | #define CXL_SSTP0_An_N (1ull << (63-4)) | ||
229 | #define CXL_SSTP0_An_L (1ull << (63-5)) | ||
230 | #define CXL_SSTP0_An_C (1ull << (63-6)) | ||
231 | #define CXL_SSTP0_An_TA (1ull << (63-7)) | ||
232 | #define CXL_SSTP0_An_LP_SHIFT (63-9) /* 2 Bits */ | ||
233 | /* And finally, the virtual address & size of the segment table: */ | ||
234 | #define CXL_SSTP0_An_SegTableSize_SHIFT (63-31) /* 12 Bits */ | ||
235 | #define CXL_SSTP0_An_SegTableSize_MASK \ | ||
236 | (((1ull << 12) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT) | ||
237 | #define CXL_SSTP0_An_STVA_U_MASK ((1ull << (63-49))-1) | ||
238 | #define CXL_SSTP1_An_STVA_L_MASK (~((1ull << (63-55))-1)) | ||
239 | #define CXL_SSTP1_An_V (1ull << (63-63)) | ||
240 | |||
241 | /****** CXL_PSL_SLBIE_[An] **************************************************/ | ||
242 | /* write: */ | ||
243 | #define CXL_SLBIE_C PPC_BIT(36) /* Class */ | ||
244 | #define CXL_SLBIE_SS PPC_BITMASK(37, 38) /* Segment Size */ | ||
245 | #define CXL_SLBIE_SS_SHIFT PPC_BITLSHIFT(38) | ||
246 | #define CXL_SLBIE_TA PPC_BIT(38) /* Tags Active */ | ||
247 | /* read: */ | ||
248 | #define CXL_SLBIE_MAX PPC_BITMASK(24, 31) | ||
249 | #define CXL_SLBIE_PENDING PPC_BITMASK(56, 63) | ||
250 | |||
251 | /****** Common to all CXL_TLBIA/SLBIA_[An] **********************************/ | ||
252 | #define CXL_TLB_SLB_P (1ull) /* Pending (read) */ | ||
253 | |||
254 | /****** Common to all CXL_TLB/SLB_IA/IE_[An] registers **********************/ | ||
255 | #define CXL_TLB_SLB_IQ_ALL (0ull) /* Inv qualifier */ | ||
256 | #define CXL_TLB_SLB_IQ_LPID (1ull) /* Inv qualifier */ | ||
257 | #define CXL_TLB_SLB_IQ_LPIDPID (3ull) /* Inv qualifier */ | ||
258 | |||
259 | /****** CXL_PSL_AFUSEL ******************************************************/ | ||
260 | #define CXL_PSL_AFUSEL_A (1ull << (63-55)) /* Adapter wide invalidates affect all AFUs */ | ||
261 | |||
262 | /****** CXL_PSL_DSISR_An ****************************************************/ | ||
263 | #define CXL_PSL_DSISR_An_DS (1ull << (63-0)) /* Segment not found */ | ||
264 | #define CXL_PSL_DSISR_An_DM (1ull << (63-1)) /* PTE not found (See also: M) or protection fault */ | ||
265 | #define CXL_PSL_DSISR_An_ST (1ull << (63-2)) /* Segment Table PTE not found */ | ||
266 | #define CXL_PSL_DSISR_An_UR (1ull << (63-3)) /* AURP PTE not found */ | ||
267 | #define CXL_PSL_DSISR_TRANS (CXL_PSL_DSISR_An_DS | CXL_PSL_DSISR_An_DM | CXL_PSL_DSISR_An_ST | CXL_PSL_DSISR_An_UR) | ||
268 | #define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ | ||
269 | #define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ | ||
270 | #define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ | ||
271 | /* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ | ||
272 | #define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ | ||
273 | #define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ | ||
274 | #define CXL_PSL_DSISR_An_A (1ull << (63-37)) /* AFU lock access to write through or cache inhibited storage */ | ||
275 | #define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */ | ||
276 | #define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */ | ||
277 | |||
278 | /****** CXL_PSL_TFC_An ******************************************************/ | ||
279 | #define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ | ||
280 | #define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */ | ||
281 | #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */ | ||
282 | #define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */ | ||
283 | |||
284 | /* cxl_process_element->software_status */ | ||
285 | #define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */ | ||
286 | #define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */ | ||
287 | #define CXL_PE_SOFTWARE_STATE_S (1ul << (31 - 30)) /* Suspend */ | ||
288 | #define CXL_PE_SOFTWARE_STATE_T (1ul << (31 - 31)) /* Terminate */ | ||
289 | |||
290 | /* SPA->sw_command_status */ | ||
291 | #define CXL_SPA_SW_CMD_MASK 0xffff000000000000ULL | ||
292 | #define CXL_SPA_SW_CMD_TERMINATE 0x0001000000000000ULL | ||
293 | #define CXL_SPA_SW_CMD_REMOVE 0x0002000000000000ULL | ||
294 | #define CXL_SPA_SW_CMD_SUSPEND 0x0003000000000000ULL | ||
295 | #define CXL_SPA_SW_CMD_RESUME 0x0004000000000000ULL | ||
296 | #define CXL_SPA_SW_CMD_ADD 0x0005000000000000ULL | ||
297 | #define CXL_SPA_SW_CMD_UPDATE 0x0006000000000000ULL | ||
298 | #define CXL_SPA_SW_STATE_MASK 0x0000ffff00000000ULL | ||
299 | #define CXL_SPA_SW_STATE_TERMINATED 0x0000000100000000ULL | ||
300 | #define CXL_SPA_SW_STATE_REMOVED 0x0000000200000000ULL | ||
301 | #define CXL_SPA_SW_STATE_SUSPENDED 0x0000000300000000ULL | ||
302 | #define CXL_SPA_SW_STATE_RESUMED 0x0000000400000000ULL | ||
303 | #define CXL_SPA_SW_STATE_ADDED 0x0000000500000000ULL | ||
304 | #define CXL_SPA_SW_STATE_UPDATED 0x0000000600000000ULL | ||
305 | #define CXL_SPA_SW_PSL_ID_MASK 0x00000000ffff0000ULL | ||
306 | #define CXL_SPA_SW_LINK_MASK 0x000000000000ffffULL | ||
307 | |||
308 | #define CXL_MAX_SLICES 4 | ||
309 | #define MAX_AFU_MMIO_REGS 3 | ||
310 | |||
311 | #define CXL_MODE_DEDICATED 0x1 | ||
312 | #define CXL_MODE_DIRECTED 0x2 | ||
313 | #define CXL_MODE_TIME_SLICED 0x4 | ||
314 | #define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED) | ||
315 | |||
316 | enum cxl_context_status { | ||
317 | CLOSED, | ||
318 | OPENED, | ||
319 | STARTED | ||
320 | }; | ||
321 | |||
322 | enum prefault_modes { | ||
323 | CXL_PREFAULT_NONE, | ||
324 | CXL_PREFAULT_WED, | ||
325 | CXL_PREFAULT_ALL, | ||
326 | }; | ||
327 | |||
328 | struct cxl_sste { | ||
329 | __be64 esid_data; | ||
330 | __be64 vsid_data; | ||
331 | }; | ||
332 | |||
333 | #define to_cxl_adapter(d) container_of(d, struct cxl, dev) | ||
334 | #define to_cxl_afu(d) container_of(d, struct cxl_afu, dev) | ||
335 | |||
336 | struct cxl_afu { | ||
337 | irq_hw_number_t psl_hwirq; | ||
338 | irq_hw_number_t serr_hwirq; | ||
339 | unsigned int serr_virq; | ||
340 | void __iomem *p1n_mmio; | ||
341 | void __iomem *p2n_mmio; | ||
342 | phys_addr_t psn_phys; | ||
343 | u64 pp_offset; | ||
344 | u64 pp_size; | ||
345 | void __iomem *afu_desc_mmio; | ||
346 | struct cxl *adapter; | ||
347 | struct device dev; | ||
348 | struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d; | ||
349 | struct device *chardev_s, *chardev_m, *chardev_d; | ||
350 | struct idr contexts_idr; | ||
351 | struct dentry *debugfs; | ||
352 | spinlock_t contexts_lock; | ||
353 | struct mutex spa_mutex; | ||
354 | spinlock_t afu_cntl_lock; | ||
355 | |||
356 | /* | ||
357 | * Only the first part of the SPA is used for the process element | ||
358 | * linked list. The only other part that software needs to worry about | ||
359 | * is sw_command_status, which we store a separate pointer to. | ||
360 | * Everything else in the SPA is only used by hardware | ||
361 | */ | ||
362 | struct cxl_process_element *spa; | ||
363 | __be64 *sw_command_status; | ||
364 | unsigned int spa_size; | ||
365 | int spa_order; | ||
366 | int spa_max_procs; | ||
367 | unsigned int psl_virq; | ||
368 | |||
369 | int pp_irqs; | ||
370 | int irqs_max; | ||
371 | int num_procs; | ||
372 | int max_procs_virtualised; | ||
373 | int slice; | ||
374 | int modes_supported; | ||
375 | int current_mode; | ||
376 | enum prefault_modes prefault_mode; | ||
377 | bool psa; | ||
378 | bool pp_psa; | ||
379 | bool enabled; | ||
380 | }; | ||
381 | |||
382 | /* | ||
383 | * This is a cxl context. If the PSL is in dedicated mode, there will be one | ||
384 | * of these per AFU. If in AFU directed there can be lots of these. | ||
385 | */ | ||
386 | struct cxl_context { | ||
387 | struct cxl_afu *afu; | ||
388 | |||
389 | /* Problem state MMIO */ | ||
390 | phys_addr_t psn_phys; | ||
391 | u64 psn_size; | ||
392 | |||
393 | spinlock_t sste_lock; /* Protects segment table entries */ | ||
394 | struct cxl_sste *sstp; | ||
395 | u64 sstp0, sstp1; | ||
396 | unsigned int sst_size, sst_lru; | ||
397 | |||
398 | wait_queue_head_t wq; | ||
399 | struct pid *pid; | ||
400 | spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */ | ||
401 | /* Only used in PR mode */ | ||
402 | u64 process_token; | ||
403 | |||
404 | unsigned long *irq_bitmap; /* Accessed from IRQ context */ | ||
405 | struct cxl_irq_ranges irqs; | ||
406 | u64 fault_addr; | ||
407 | u64 fault_dsisr; | ||
408 | u64 afu_err; | ||
409 | |||
410 | /* | ||
411 | * This status and its lock protect start and detach context | ||
412 | * from racing. They also prevent detach from racing with | ||
413 | * itself. | ||
414 | */ | ||
415 | enum cxl_context_status status; | ||
416 | struct mutex status_mutex; | ||
417 | |||
418 | |||
419 | /* XXX: Is it possible to need multiple work items at once? */ | ||
420 | struct work_struct fault_work; | ||
421 | u64 dsisr; | ||
422 | u64 dar; | ||
423 | |||
424 | struct cxl_process_element *elem; | ||
425 | |||
426 | int pe; /* process element handle */ | ||
427 | u32 irq_count; | ||
428 | bool pe_inserted; | ||
429 | bool master; | ||
430 | bool kernel; | ||
431 | bool pending_irq; | ||
432 | bool pending_fault; | ||
433 | bool pending_afu_err; | ||
434 | }; | ||
435 | |||
436 | struct cxl { | ||
437 | void __iomem *p1_mmio; | ||
438 | void __iomem *p2_mmio; | ||
439 | irq_hw_number_t err_hwirq; | ||
440 | unsigned int err_virq; | ||
441 | spinlock_t afu_list_lock; | ||
442 | struct cxl_afu *afu[CXL_MAX_SLICES]; | ||
443 | struct device dev; | ||
444 | struct dentry *trace; | ||
445 | struct dentry *psl_err_chk; | ||
446 | struct dentry *debugfs; | ||
447 | struct bin_attribute cxl_attr; | ||
448 | int adapter_num; | ||
449 | int user_irqs; | ||
450 | u64 afu_desc_off; | ||
451 | u64 afu_desc_size; | ||
452 | u64 ps_off; | ||
453 | u64 ps_size; | ||
454 | u16 psl_rev; | ||
455 | u16 base_image; | ||
456 | u8 vsec_status; | ||
457 | u8 caia_major; | ||
458 | u8 caia_minor; | ||
459 | u8 slices; | ||
460 | bool user_image_loaded; | ||
461 | bool perst_loads_image; | ||
462 | bool perst_select_user; | ||
463 | }; | ||
464 | |||
465 | int cxl_alloc_one_irq(struct cxl *adapter); | ||
466 | void cxl_release_one_irq(struct cxl *adapter, int hwirq); | ||
467 | int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num); | ||
468 | void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter); | ||
469 | int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq); | ||
470 | |||
471 | /* common == phyp + powernv */ | ||
472 | struct cxl_process_element_common { | ||
473 | __be32 tid; | ||
474 | __be32 pid; | ||
475 | __be64 csrp; | ||
476 | __be64 aurp0; | ||
477 | __be64 aurp1; | ||
478 | __be64 sstp0; | ||
479 | __be64 sstp1; | ||
480 | __be64 amr; | ||
481 | u8 reserved3[4]; | ||
482 | __be64 wed; | ||
483 | } __packed; | ||
484 | |||
485 | /* just powernv */ | ||
486 | struct cxl_process_element { | ||
487 | __be64 sr; | ||
488 | __be64 SPOffset; | ||
489 | __be64 sdr; | ||
490 | __be64 haurp; | ||
491 | __be32 ctxtime; | ||
492 | __be16 ivte_offsets[4]; | ||
493 | __be16 ivte_ranges[4]; | ||
494 | __be32 lpid; | ||
495 | struct cxl_process_element_common common; | ||
496 | __be32 software_state; | ||
497 | } __packed; | ||
498 | |||
499 | static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg) | ||
500 | { | ||
501 | WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE)); | ||
502 | return cxl->p1_mmio + cxl_reg_off(reg); | ||
503 | } | ||
504 | |||
505 | #define cxl_p1_write(cxl, reg, val) \ | ||
506 | out_be64(_cxl_p1_addr(cxl, reg), val) | ||
507 | #define cxl_p1_read(cxl, reg) \ | ||
508 | in_be64(_cxl_p1_addr(cxl, reg)) | ||
509 | |||
510 | static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg) | ||
511 | { | ||
512 | WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE)); | ||
513 | return afu->p1n_mmio + cxl_reg_off(reg); | ||
514 | } | ||
515 | |||
516 | #define cxl_p1n_write(afu, reg, val) \ | ||
517 | out_be64(_cxl_p1n_addr(afu, reg), val) | ||
518 | #define cxl_p1n_read(afu, reg) \ | ||
519 | in_be64(_cxl_p1n_addr(afu, reg)) | ||
520 | |||
521 | static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg) | ||
522 | { | ||
523 | return afu->p2n_mmio + cxl_reg_off(reg); | ||
524 | } | ||
525 | |||
526 | #define cxl_p2n_write(afu, reg, val) \ | ||
527 | out_be64(_cxl_p2n_addr(afu, reg), val) | ||
528 | #define cxl_p2n_read(afu, reg) \ | ||
529 | in_be64(_cxl_p2n_addr(afu, reg)) | ||
530 | |||
531 | struct cxl_calls { | ||
532 | void (*cxl_slbia)(struct mm_struct *mm); | ||
533 | struct module *owner; | ||
534 | }; | ||
535 | int register_cxl_calls(struct cxl_calls *calls); | ||
536 | void unregister_cxl_calls(struct cxl_calls *calls); | ||
537 | |||
538 | int cxl_alloc_adapter_nr(struct cxl *adapter); | ||
539 | void cxl_remove_adapter_nr(struct cxl *adapter); | ||
540 | |||
541 | int cxl_file_init(void); | ||
542 | void cxl_file_exit(void); | ||
543 | int cxl_register_adapter(struct cxl *adapter); | ||
544 | int cxl_register_afu(struct cxl_afu *afu); | ||
545 | int cxl_chardev_d_afu_add(struct cxl_afu *afu); | ||
546 | int cxl_chardev_m_afu_add(struct cxl_afu *afu); | ||
547 | int cxl_chardev_s_afu_add(struct cxl_afu *afu); | ||
548 | void cxl_chardev_afu_remove(struct cxl_afu *afu); | ||
549 | |||
550 | void cxl_context_detach_all(struct cxl_afu *afu); | ||
551 | void cxl_context_free(struct cxl_context *ctx); | ||
552 | void cxl_context_detach(struct cxl_context *ctx); | ||
553 | |||
554 | int cxl_sysfs_adapter_add(struct cxl *adapter); | ||
555 | void cxl_sysfs_adapter_remove(struct cxl *adapter); | ||
556 | int cxl_sysfs_afu_add(struct cxl_afu *afu); | ||
557 | void cxl_sysfs_afu_remove(struct cxl_afu *afu); | ||
558 | int cxl_sysfs_afu_m_add(struct cxl_afu *afu); | ||
559 | void cxl_sysfs_afu_m_remove(struct cxl_afu *afu); | ||
560 | |||
561 | int cxl_afu_activate_mode(struct cxl_afu *afu, int mode); | ||
562 | int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode); | ||
563 | int cxl_afu_deactivate_mode(struct cxl_afu *afu); | ||
564 | int cxl_afu_select_best_mode(struct cxl_afu *afu); | ||
565 | |||
566 | unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, | ||
567 | irq_handler_t handler, void *cookie); | ||
568 | void cxl_unmap_irq(unsigned int virq, void *cookie); | ||
569 | int cxl_register_psl_irq(struct cxl_afu *afu); | ||
570 | void cxl_release_psl_irq(struct cxl_afu *afu); | ||
571 | int cxl_register_psl_err_irq(struct cxl *adapter); | ||
572 | void cxl_release_psl_err_irq(struct cxl *adapter); | ||
573 | int cxl_register_serr_irq(struct cxl_afu *afu); | ||
574 | void cxl_release_serr_irq(struct cxl_afu *afu); | ||
575 | int afu_register_irqs(struct cxl_context *ctx, u32 count); | ||
576 | void afu_release_irqs(struct cxl_context *ctx); | ||
577 | irqreturn_t cxl_slice_irq_err(int irq, void *data); | ||
578 | |||
579 | int cxl_debugfs_init(void); | ||
580 | void cxl_debugfs_exit(void); | ||
581 | int cxl_debugfs_adapter_add(struct cxl *adapter); | ||
582 | void cxl_debugfs_adapter_remove(struct cxl *adapter); | ||
583 | int cxl_debugfs_afu_add(struct cxl_afu *afu); | ||
584 | void cxl_debugfs_afu_remove(struct cxl_afu *afu); | ||
585 | |||
586 | void cxl_handle_fault(struct work_struct *work); | ||
587 | void cxl_prefault(struct cxl_context *ctx, u64 wed); | ||
588 | |||
589 | struct cxl *get_cxl_adapter(int num); | ||
590 | int cxl_alloc_sst(struct cxl_context *ctx); | ||
591 | |||
592 | void init_cxl_native(void); | ||
593 | |||
594 | struct cxl_context *cxl_context_alloc(void); | ||
595 | int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master); | ||
596 | void cxl_context_free(struct cxl_context *ctx); | ||
597 | int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma); | ||
598 | |||
599 | /* This matches the layout of the H_COLLECT_CA_INT_INFO retbuf */ | ||
600 | struct cxl_irq_info { | ||
601 | u64 dsisr; | ||
602 | u64 dar; | ||
603 | u64 dsr; | ||
604 | u32 pid; | ||
605 | u32 tid; | ||
606 | u64 afu_err; | ||
607 | u64 errstat; | ||
608 | u64 padding[3]; /* to match the expected retbuf size for plpar_hcall9 */ | ||
609 | }; | ||
610 | |||
611 | int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, | ||
612 | u64 amr); | ||
613 | int cxl_detach_process(struct cxl_context *ctx); | ||
614 | |||
615 | int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info); | ||
616 | int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); | ||
617 | |||
618 | int cxl_check_error(struct cxl_afu *afu); | ||
619 | int cxl_afu_slbia(struct cxl_afu *afu); | ||
620 | int cxl_tlb_slb_invalidate(struct cxl *adapter); | ||
621 | int cxl_afu_disable(struct cxl_afu *afu); | ||
622 | int cxl_afu_reset(struct cxl_afu *afu); | ||
623 | int cxl_psl_purge(struct cxl_afu *afu); | ||
624 | |||
625 | void cxl_stop_trace(struct cxl *cxl); | ||
626 | |||
627 | extern struct pci_driver cxl_pci_driver; | ||
628 | |||
629 | #endif | ||
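
The register definitions above use IBM bit numbering, where bit 0 is the most significant bit of a 64-bit register; that is why single-bit masks are written as (1ull << (63-n)) or PPC_BIT(n). Together with the cxl_p1/p1n/p2n accessors, decoding a register is a read plus a mask. A minimal sketch, assuming "cxl.h" and the usual kernel headers are in scope; the helper name is hypothetical and not part of this patch:

/* Illustrative only: decode the per-context DSISR with the masks above. */
static void cxl_decode_dsisr_example(struct cxl_afu *afu)
{
	u64 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);

	if (dsisr & CXL_PSL_DSISR_TRANS)	/* DS, DM, ST or UR set */
		pr_devel("translation fault, DAR: %#llx\n",
			 cxl_p2n_read(afu, CXL_PSL_DAR_An));
	if (dsisr & CXL_PSL_DSISR_An_AE)	/* AFU reported an error */
		pr_devel("AFU error\n");
}
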
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c new file mode 100644 index 000000000000..825c412580bc --- /dev/null +++ b/drivers/misc/cxl/debugfs.c | |||
@@ -0,0 +1,132 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/slab.h> | ||
13 | |||
14 | #include "cxl.h" | ||
15 | |||
16 | static struct dentry *cxl_debugfs; | ||
17 | |||
18 | void cxl_stop_trace(struct cxl *adapter) | ||
19 | { | ||
20 | int slice; | ||
21 | |||
22 | /* Stop the trace */ | ||
23 | cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL); | ||
24 | |||
25 | /* Stop the slice traces */ | ||
26 | spin_lock(&adapter->afu_list_lock); | ||
27 | for (slice = 0; slice < adapter->slices; slice++) { | ||
28 | if (adapter->afu[slice]) | ||
29 | cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, 0x8000000000000000LL); | ||
30 | } | ||
31 | spin_unlock(&adapter->afu_list_lock); | ||
32 | } | ||
33 | |||
34 | /* Helpers to export CXL mmaped IO registers via debugfs */ | ||
35 | static int debugfs_io_u64_get(void *data, u64 *val) | ||
36 | { | ||
37 | *val = in_be64((u64 __iomem *)data); | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static int debugfs_io_u64_set(void *data, u64 val) | ||
42 | { | ||
43 | out_be64((u64 __iomem *)data, val); | ||
44 | return 0; | ||
45 | } | ||
46 | DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n"); | ||
47 | |||
48 | static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode, | ||
49 | struct dentry *parent, u64 __iomem *value) | ||
50 | { | ||
51 | return debugfs_create_file(name, mode, parent, (void *)value, &fops_io_x64); | ||
52 | } | ||
53 | |||
54 | int cxl_debugfs_adapter_add(struct cxl *adapter) | ||
55 | { | ||
56 | struct dentry *dir; | ||
57 | char buf[32]; | ||
58 | |||
59 | if (!cxl_debugfs) | ||
60 | return -ENODEV; | ||
61 | |||
62 | snprintf(buf, 32, "card%i", adapter->adapter_num); | ||
63 | dir = debugfs_create_dir(buf, cxl_debugfs); | ||
64 | if (IS_ERR(dir)) | ||
65 | return PTR_ERR(dir); | ||
66 | adapter->debugfs = dir; | ||
67 | |||
68 | debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); | ||
69 | debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2)); | ||
70 | debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL)); | ||
71 | debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE)); | ||
72 | |||
73 | debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE)); | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | void cxl_debugfs_adapter_remove(struct cxl *adapter) | ||
79 | { | ||
80 | debugfs_remove_recursive(adapter->debugfs); | ||
81 | } | ||
82 | |||
83 | int cxl_debugfs_afu_add(struct cxl_afu *afu) | ||
84 | { | ||
85 | struct dentry *dir; | ||
86 | char buf[32]; | ||
87 | |||
88 | if (!afu->adapter->debugfs) | ||
89 | return -ENODEV; | ||
90 | |||
91 | snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice); | ||
92 | dir = debugfs_create_dir(buf, afu->adapter->debugfs); | ||
93 | if (IS_ERR(dir)) | ||
94 | return PTR_ERR(dir); | ||
95 | afu->debugfs = dir; | ||
96 | |||
97 | debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An)); | ||
98 | debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An)); | ||
99 | debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An)); | ||
100 | debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An)); | ||
101 | |||
102 | debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An)); | ||
103 | debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An)); | ||
104 | debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); | ||
105 | debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An)); | ||
106 | debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An)); | ||
107 | |||
108 | debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE)); | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | void cxl_debugfs_afu_remove(struct cxl_afu *afu) | ||
114 | { | ||
115 | debugfs_remove_recursive(afu->debugfs); | ||
116 | } | ||
117 | |||
118 | int __init cxl_debugfs_init(void) | ||
119 | { | ||
120 | struct dentry *ent; | ||
121 | ent = debugfs_create_dir("cxl", NULL); | ||
122 | if (IS_ERR(ent)) | ||
123 | return PTR_ERR(ent); | ||
124 | cxl_debugfs = ent; | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | void cxl_debugfs_exit(void) | ||
130 | { | ||
131 | debugfs_remove_recursive(cxl_debugfs); | ||
132 | } | ||
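
cxl_debugfs_init() creates a top-level "cxl" debugfs directory; cxl_debugfs_adapter_add() then adds a "card%i" directory with fir1, fir2, fir_cntl, err_ivte and trace entries, and cxl_debugfs_afu_add() adds a "psl%i.%i" directory per slice. Each entry is backed directly by the MMIO register through fops_io_x64 and rendered as 0x%016llx. A rough userspace sketch; the /sys/kernel/debug mount point and the presence of card0 are assumptions:

/* Hypothetical snippet: dump the adapter's PSL_FIR1 via the debugfs file above. */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/kernel/debug/cxl/card0/fir1", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("PSL_FIR1: %s", buf);
	fclose(f);
	return 0;
}
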
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c new file mode 100644 index 000000000000..69506ebd4d07 --- /dev/null +++ b/drivers/misc/cxl/fault.c | |||
@@ -0,0 +1,291 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/workqueue.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/pid.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | |||
16 | #undef MODULE_PARAM_PREFIX | ||
17 | #define MODULE_PARAM_PREFIX "cxl" "." | ||
18 | #include <asm/current.h> | ||
19 | #include <asm/copro.h> | ||
20 | #include <asm/mmu.h> | ||
21 | |||
22 | #include "cxl.h" | ||
23 | |||
24 | static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group, | ||
25 | bool sec_hash, | ||
26 | struct cxl_sste *secondary_group, | ||
27 | unsigned int *lru) | ||
28 | { | ||
29 | unsigned int i, entry; | ||
30 | struct cxl_sste *sste, *group = primary_group; | ||
31 | |||
32 | for (i = 0; i < 2; i++) { | ||
33 | for (entry = 0; entry < 8; entry++) { | ||
34 | sste = group + entry; | ||
35 | if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) | ||
36 | return sste; | ||
37 | } | ||
38 | if (!sec_hash) | ||
39 | break; | ||
40 | group = secondary_group; | ||
41 | } | ||
42 | /* Nothing free, select an entry to cast out */ | ||
43 | if (sec_hash && (*lru & 0x8)) | ||
44 | sste = secondary_group + (*lru & 0x7); | ||
45 | else | ||
46 | sste = primary_group + (*lru & 0x7); | ||
47 | *lru = (*lru + 1) & 0xf; | ||
48 | |||
49 | return sste; | ||
50 | } | ||
51 | |||
52 | static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) | ||
53 | { | ||
54 | /* mask is the group index; we search both primary and secondary here. */ | ||
55 | unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */ | ||
56 | bool sec_hash = 1; | ||
57 | struct cxl_sste *sste; | ||
58 | unsigned int hash; | ||
59 | unsigned long flags; | ||
60 | |||
61 | |||
62 | sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC); | ||
63 | |||
64 | if (slb->vsid & SLB_VSID_B_1T) | ||
65 | hash = (slb->esid >> SID_SHIFT_1T) & mask; | ||
66 | else /* 256M */ | ||
67 | hash = (slb->esid >> SID_SHIFT) & mask; | ||
68 | |||
69 | spin_lock_irqsave(&ctx->sste_lock, flags); | ||
70 | sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash, | ||
71 | ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru); | ||
72 | |||
73 | pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", | ||
74 | sste - ctx->sstp, slb->vsid, slb->esid); | ||
75 | |||
76 | sste->vsid_data = cpu_to_be64(slb->vsid); | ||
77 | sste->esid_data = cpu_to_be64(slb->esid); | ||
78 | spin_unlock_irqrestore(&ctx->sste_lock, flags); | ||
79 | } | ||
80 | |||
81 | static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, | ||
82 | u64 ea) | ||
83 | { | ||
84 | struct copro_slb slb = {0,0}; | ||
85 | int rc; | ||
86 | |||
87 | if (!(rc = copro_calculate_slb(mm, ea, &slb))) { | ||
88 | cxl_load_segment(ctx, &slb); | ||
89 | } | ||
90 | |||
91 | return rc; | ||
92 | } | ||
93 | |||
94 | static void cxl_ack_ae(struct cxl_context *ctx) | ||
95 | { | ||
96 | unsigned long flags; | ||
97 | |||
98 | cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0); | ||
99 | |||
100 | spin_lock_irqsave(&ctx->lock, flags); | ||
101 | ctx->pending_fault = true; | ||
102 | ctx->fault_addr = ctx->dar; | ||
103 | ctx->fault_dsisr = ctx->dsisr; | ||
104 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
105 | |||
106 | wake_up_all(&ctx->wq); | ||
107 | } | ||
108 | |||
109 | static int cxl_handle_segment_miss(struct cxl_context *ctx, | ||
110 | struct mm_struct *mm, u64 ea) | ||
111 | { | ||
112 | int rc; | ||
113 | |||
114 | pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea); | ||
115 | |||
116 | if ((rc = cxl_fault_segment(ctx, mm, ea))) | ||
117 | cxl_ack_ae(ctx); | ||
118 | else { | ||
119 | |||
120 | mb(); /* Order seg table write to TFC MMIO write */ | ||
121 | cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); | ||
122 | } | ||
123 | |||
124 | return IRQ_HANDLED; | ||
125 | } | ||
126 | |||
127 | static void cxl_handle_page_fault(struct cxl_context *ctx, | ||
128 | struct mm_struct *mm, u64 dsisr, u64 dar) | ||
129 | { | ||
130 | unsigned flt = 0; | ||
131 | int result; | ||
132 | unsigned long access, flags; | ||
133 | |||
134 | if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { | ||
135 | pr_devel("copro_handle_mm_fault failed: %#x\n", result); | ||
136 | return cxl_ack_ae(ctx); | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * update_mmu_cache() will not have loaded the hash since current->trap | ||
141 | * is not a 0x400 or 0x300, so just call hash_page_mm() here. | ||
142 | */ | ||
143 | access = _PAGE_PRESENT; | ||
144 | if (dsisr & CXL_PSL_DSISR_An_S) | ||
145 | access |= _PAGE_RW; | ||
146 | if ((!ctx->kernel) || !(dar & (1ULL << 63))) | ||
147 | access |= _PAGE_USER; | ||
148 | local_irq_save(flags); | ||
149 | hash_page_mm(mm, dar, access, 0x300); | ||
150 | local_irq_restore(flags); | ||
151 | |||
152 | pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); | ||
153 | cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); | ||
154 | } | ||
155 | |||
156 | void cxl_handle_fault(struct work_struct *fault_work) | ||
157 | { | ||
158 | struct cxl_context *ctx = | ||
159 | container_of(fault_work, struct cxl_context, fault_work); | ||
160 | u64 dsisr = ctx->dsisr; | ||
161 | u64 dar = ctx->dar; | ||
162 | struct task_struct *task; | ||
163 | struct mm_struct *mm; | ||
164 | |||
165 | if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || | ||
166 | cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || | ||
167 | cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { | ||
168 | /* Most likely explanation is harmless - a dedicated process | ||
169 | * has detached and these were cleared by the PSL purge, but | ||
170 | * warn about it just in case */ | ||
171 | dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); | ||
172 | return; | ||
173 | } | ||
174 | |||
175 | pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. " | ||
176 | "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar); | ||
177 | |||
178 | if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { | ||
179 | pr_devel("cxl_handle_fault unable to get task %i\n", | ||
180 | pid_nr(ctx->pid)); | ||
181 | cxl_ack_ae(ctx); | ||
182 | return; | ||
183 | } | ||
184 | if (!(mm = get_task_mm(task))) { | ||
185 | pr_devel("cxl_handle_fault unable to get mm %i\n", | ||
186 | pid_nr(ctx->pid)); | ||
187 | cxl_ack_ae(ctx); | ||
188 | goto out; | ||
189 | } | ||
190 | |||
191 | if (dsisr & CXL_PSL_DSISR_An_DS) | ||
192 | cxl_handle_segment_miss(ctx, mm, dar); | ||
193 | else if (dsisr & CXL_PSL_DSISR_An_DM) | ||
194 | cxl_handle_page_fault(ctx, mm, dsisr, dar); | ||
195 | else | ||
196 | WARN(1, "cxl_handle_fault has nothing to handle\n"); | ||
197 | |||
198 | mmput(mm); | ||
199 | out: | ||
200 | put_task_struct(task); | ||
201 | } | ||
202 | |||
203 | static void cxl_prefault_one(struct cxl_context *ctx, u64 ea) | ||
204 | { | ||
205 | int rc; | ||
206 | struct task_struct *task; | ||
207 | struct mm_struct *mm; | ||
208 | |||
209 | if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { | ||
210 | pr_devel("cxl_prefault_one unable to get task %i\n", | ||
211 | pid_nr(ctx->pid)); | ||
212 | return; | ||
213 | } | ||
214 | if (!(mm = get_task_mm(task))) { | ||
215 | pr_devel("cxl_prefault_one unable to get mm %i\n", | ||
216 | pid_nr(ctx->pid)); | ||
217 | put_task_struct(task); | ||
218 | return; | ||
219 | } | ||
220 | |||
221 | rc = cxl_fault_segment(ctx, mm, ea); | ||
222 | |||
223 | mmput(mm); | ||
224 | put_task_struct(task); | ||
225 | } | ||
226 | |||
227 | static u64 next_segment(u64 ea, u64 vsid) | ||
228 | { | ||
229 | if (vsid & SLB_VSID_B_1T) | ||
230 | ea |= (1ULL << 40) - 1; | ||
231 | else | ||
232 | ea |= (1ULL << 28) - 1; | ||
233 | |||
234 | return ea + 1; | ||
235 | } | ||
236 | |||
237 | static void cxl_prefault_vma(struct cxl_context *ctx) | ||
238 | { | ||
239 | u64 ea, last_esid = 0; | ||
240 | struct copro_slb slb; | ||
241 | struct vm_area_struct *vma; | ||
242 | int rc; | ||
243 | struct task_struct *task; | ||
244 | struct mm_struct *mm; | ||
245 | |||
246 | if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { | ||
247 | pr_devel("cxl_prefault_vma unable to get task %i\n", | ||
248 | pid_nr(ctx->pid)); | ||
249 | return; | ||
250 | } | ||
251 | if (!(mm = get_task_mm(task))) { | ||
252 | pr_devel("cxl_prefault_vma unable to get mm %i\n", | ||
253 | pid_nr(ctx->pid)); | ||
254 | goto out1; | ||
255 | } | ||
256 | |||
257 | down_read(&mm->mmap_sem); | ||
258 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | ||
259 | for (ea = vma->vm_start; ea < vma->vm_end; | ||
260 | ea = next_segment(ea, slb.vsid)) { | ||
261 | rc = copro_calculate_slb(mm, ea, &slb); | ||
262 | if (rc) | ||
263 | continue; | ||
264 | |||
265 | if (last_esid == slb.esid) | ||
266 | continue; | ||
267 | |||
268 | cxl_load_segment(ctx, &slb); | ||
269 | last_esid = slb.esid; | ||
270 | } | ||
271 | } | ||
272 | up_read(&mm->mmap_sem); | ||
273 | |||
274 | mmput(mm); | ||
275 | out1: | ||
276 | put_task_struct(task); | ||
277 | } | ||
278 | |||
279 | void cxl_prefault(struct cxl_context *ctx, u64 wed) | ||
280 | { | ||
281 | switch (ctx->afu->prefault_mode) { | ||
282 | case CXL_PREFAULT_WED: | ||
283 | cxl_prefault_one(ctx, wed); | ||
284 | break; | ||
285 | case CXL_PREFAULT_ALL: | ||
286 | cxl_prefault_vma(ctx); | ||
287 | break; | ||
288 | default: | ||
289 | break; | ||
290 | } | ||
291 | } | ||
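
cxl_prefault_vma() above walks every VMA of the task's mm and loads at most one segment table entry per segment it touches; next_segment() does the stepping by rounding the effective address up to the next 256MB (1ULL << 28) or 1TB (1ULL << 40) boundary, depending on the B field of the SLB value just computed. A standalone sketch of that arithmetic with made-up addresses:

/* Mirrors the 256MB case of next_segment(), with hypothetical values. */
#include <stdio.h>
#include <stdint.h>

static uint64_t next_segment_256m(uint64_t ea)
{
	return (ea | ((1ULL << 28) - 1)) + 1;	/* fill the low 28 bits, then +1 */
}

int main(void)
{
	/* 0x12345678 lies in [0x10000000, 0x20000000), so the walk resumes
	 * at 0x20000000. */
	printf("%#llx -> %#llx\n", 0x12345678ULL,
	       (unsigned long long)next_segment_256m(0x12345678ULL));
	return 0;
}
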
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c new file mode 100644 index 000000000000..378b099e7c0b --- /dev/null +++ b/drivers/misc/cxl/file.c | |||
@@ -0,0 +1,518 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/export.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/bitmap.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/poll.h> | ||
17 | #include <linux/pid.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <asm/cputable.h> | ||
22 | #include <asm/current.h> | ||
23 | #include <asm/copro.h> | ||
24 | |||
25 | #include "cxl.h" | ||
26 | |||
27 | #define CXL_NUM_MINORS 256 /* Total to reserve */ | ||
28 | #define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */ | ||
29 | |||
30 | #define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS) | ||
31 | #define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice)) | ||
32 | #define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1) | ||
33 | #define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2) | ||
34 | #define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu)) | ||
35 | #define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu)) | ||
36 | #define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu)) | ||
37 | |||
38 | #define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS) | ||
39 | #define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3) | ||
40 | |||
41 | #define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0) | ||
42 | |||
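/*
 * Worked example with hypothetical numbers: for adapter_num = 1 and slice = 2,
 * CXL_CARD_MINOR = 1 * 13 = 13 and the dedicated/master/shared minors are
 * 13 + 1 + 3*2 = 20, 21 and 22. In reverse, minor 21 decodes to
 * CXL_DEVT_ADAPTER = 21 / 13 = 1 and CXL_DEVT_AFU = (21 % 13 - 1) / 3 = 2,
 * while only the card minor itself (13) satisfies CXL_DEVT_IS_CARD.
 */
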
43 | static dev_t cxl_dev; | ||
44 | |||
45 | static struct class *cxl_class; | ||
46 | |||
47 | static int __afu_open(struct inode *inode, struct file *file, bool master) | ||
48 | { | ||
49 | struct cxl *adapter; | ||
50 | struct cxl_afu *afu; | ||
51 | struct cxl_context *ctx; | ||
52 | int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev); | ||
53 | int slice = CXL_DEVT_AFU(inode->i_rdev); | ||
54 | int rc = -ENODEV; | ||
55 | |||
56 | pr_devel("afu_open afu%i.%i\n", adapter_num, slice); | ||
57 | |||
58 | if (!(adapter = get_cxl_adapter(adapter_num))) | ||
59 | return -ENODEV; | ||
60 | |||
61 | if (slice > adapter->slices) | ||
62 | goto err_put_adapter; | ||
63 | |||
64 | spin_lock(&adapter->afu_list_lock); | ||
65 | if (!(afu = adapter->afu[slice])) { | ||
66 | spin_unlock(&adapter->afu_list_lock); | ||
67 | goto err_put_adapter; | ||
68 | } | ||
69 | get_device(&afu->dev); | ||
70 | spin_unlock(&adapter->afu_list_lock); | ||
71 | |||
72 | if (!afu->current_mode) | ||
73 | goto err_put_afu; | ||
74 | |||
75 | if (!(ctx = cxl_context_alloc())) { | ||
76 | rc = -ENOMEM; | ||
77 | goto err_put_afu; | ||
78 | } | ||
79 | |||
80 | if ((rc = cxl_context_init(ctx, afu, master))) | ||
81 | goto err_put_afu; | ||
82 | |||
83 | pr_devel("afu_open pe: %i\n", ctx->pe); | ||
84 | file->private_data = ctx; | ||
85 | cxl_ctx_get(); | ||
86 | |||
87 | /* Our ref on the AFU will now hold the adapter */ | ||
88 | put_device(&adapter->dev); | ||
89 | |||
90 | return 0; | ||
91 | |||
92 | err_put_afu: | ||
93 | put_device(&afu->dev); | ||
94 | err_put_adapter: | ||
95 | put_device(&adapter->dev); | ||
96 | return rc; | ||
97 | } | ||
98 | static int afu_open(struct inode *inode, struct file *file) | ||
99 | { | ||
100 | return __afu_open(inode, file, false); | ||
101 | } | ||
102 | |||
103 | static int afu_master_open(struct inode *inode, struct file *file) | ||
104 | { | ||
105 | return __afu_open(inode, file, true); | ||
106 | } | ||
107 | |||
108 | static int afu_release(struct inode *inode, struct file *file) | ||
109 | { | ||
110 | struct cxl_context *ctx = file->private_data; | ||
111 | |||
112 | pr_devel("%s: closing cxl file descriptor. pe: %i\n", | ||
113 | __func__, ctx->pe); | ||
114 | cxl_context_detach(ctx); | ||
115 | |||
116 | put_device(&ctx->afu->dev); | ||
117 | |||
118 | /* | ||
119 | * At this point all bottom halves have finished and we should be | ||
120 | * getting no more IRQs from the hardware for this context. Once it's | ||
121 | * removed from the IDR (and RCU synchronised) it's safe to free the | ||
122 | * sstp and context. | ||
123 | */ | ||
124 | cxl_context_free(ctx); | ||
125 | |||
126 | cxl_ctx_put(); | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static long afu_ioctl_start_work(struct cxl_context *ctx, | ||
131 | struct cxl_ioctl_start_work __user *uwork) | ||
132 | { | ||
133 | struct cxl_ioctl_start_work work; | ||
134 | u64 amr = 0; | ||
135 | int rc; | ||
136 | |||
137 | pr_devel("%s: pe: %i\n", __func__, ctx->pe); | ||
138 | |||
139 | mutex_lock(&ctx->status_mutex); | ||
140 | if (ctx->status != OPENED) { | ||
141 | rc = -EIO; | ||
142 | goto out; | ||
143 | } | ||
144 | |||
145 | if (copy_from_user(&work, uwork, | ||
146 | sizeof(struct cxl_ioctl_start_work))) { | ||
147 | rc = -EFAULT; | ||
148 | goto out; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * if any of the reserved fields are set or any of the unused | ||
153 | * flags are set it's invalid | ||
154 | */ | ||
155 | if (work.reserved1 || work.reserved2 || work.reserved3 || | ||
156 | work.reserved4 || work.reserved5 || work.reserved6 || | ||
157 | (work.flags & ~CXL_START_WORK_ALL)) { | ||
158 | rc = -EINVAL; | ||
159 | goto out; | ||
160 | } | ||
161 | |||
162 | if (!(work.flags & CXL_START_WORK_NUM_IRQS)) | ||
163 | work.num_interrupts = ctx->afu->pp_irqs; | ||
164 | else if ((work.num_interrupts < ctx->afu->pp_irqs) || | ||
165 | (work.num_interrupts > ctx->afu->irqs_max)) { | ||
166 | rc = -EINVAL; | ||
167 | goto out; | ||
168 | } | ||
169 | if ((rc = afu_register_irqs(ctx, work.num_interrupts))) | ||
170 | goto out; | ||
171 | |||
172 | if (work.flags & CXL_START_WORK_AMR) | ||
173 | amr = work.amr & mfspr(SPRN_UAMOR); | ||
174 | |||
175 | /* | ||
176 | * We grab the PID here and not in the file open to allow for the case | ||
177 | * where a process (master, some daemon, etc) has opened the chardev on | ||
178 | * behalf of another process, so the AFU's mm gets bound to the process | ||
179 | * that performs this ioctl and not the process that opened the file. | ||
180 | */ | ||
181 | ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID)); | ||
182 | |||
183 | if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor, | ||
184 | amr))) | ||
185 | goto out; | ||
186 | |||
187 | ctx->status = STARTED; | ||
188 | rc = 0; | ||
189 | out: | ||
190 | mutex_unlock(&ctx->status_mutex); | ||
191 | return rc; | ||
192 | } | ||
193 | static long afu_ioctl_process_element(struct cxl_context *ctx, | ||
194 | int __user *upe) | ||
195 | { | ||
196 | pr_devel("%s: pe: %i\n", __func__, ctx->pe); | ||
197 | |||
198 | if (copy_to_user(upe, &ctx->pe, sizeof(__u32))) | ||
199 | return -EFAULT; | ||
200 | |||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
205 | { | ||
206 | struct cxl_context *ctx = file->private_data; | ||
207 | |||
208 | if (ctx->status == CLOSED) | ||
209 | return -EIO; | ||
210 | |||
211 | pr_devel("afu_ioctl\n"); | ||
212 | switch (cmd) { | ||
213 | case CXL_IOCTL_START_WORK: | ||
214 | return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg); | ||
215 | case CXL_IOCTL_GET_PROCESS_ELEMENT: | ||
216 | return afu_ioctl_process_element(ctx, (__u32 __user *)arg); | ||
217 | } | ||
218 | return -EINVAL; | ||
219 | } | ||
220 | |||
221 | static long afu_compat_ioctl(struct file *file, unsigned int cmd, | ||
222 | unsigned long arg) | ||
223 | { | ||
224 | return afu_ioctl(file, cmd, arg); | ||
225 | } | ||
226 | |||
227 | static int afu_mmap(struct file *file, struct vm_area_struct *vm) | ||
228 | { | ||
229 | struct cxl_context *ctx = file->private_data; | ||
230 | |||
231 | /* AFU must be started before we can MMIO */ | ||
232 | if (ctx->status != STARTED) | ||
233 | return -EIO; | ||
234 | |||
235 | return cxl_context_iomap(ctx, vm); | ||
236 | } | ||
237 | |||
238 | static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll) | ||
239 | { | ||
240 | struct cxl_context *ctx = file->private_data; | ||
241 | int mask = 0; | ||
242 | unsigned long flags; | ||
243 | |||
244 | |||
245 | poll_wait(file, &ctx->wq, poll); | ||
246 | |||
247 | pr_devel("afu_poll wait done pe: %i\n", ctx->pe); | ||
248 | |||
249 | spin_lock_irqsave(&ctx->lock, flags); | ||
250 | if (ctx->pending_irq || ctx->pending_fault || | ||
251 | ctx->pending_afu_err) | ||
252 | mask |= POLLIN | POLLRDNORM; | ||
253 | else if (ctx->status == CLOSED) | ||
254 | /* Only error on closed when there are no further events pending | ||
255 | */ | ||
256 | mask |= POLLERR; | ||
257 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
258 | |||
259 | pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask); | ||
260 | |||
261 | return mask; | ||
262 | } | ||
263 | |||
264 | static inline int ctx_event_pending(struct cxl_context *ctx) | ||
265 | { | ||
266 | return (ctx->pending_irq || ctx->pending_fault || | ||
267 | ctx->pending_afu_err || (ctx->status == CLOSED)); | ||
268 | } | ||
269 | |||
270 | static ssize_t afu_read(struct file *file, char __user *buf, size_t count, | ||
271 | loff_t *off) | ||
272 | { | ||
273 | struct cxl_context *ctx = file->private_data; | ||
274 | struct cxl_event event; | ||
275 | unsigned long flags; | ||
276 | int rc; | ||
277 | DEFINE_WAIT(wait); | ||
278 | |||
279 | if (count < CXL_READ_MIN_SIZE) | ||
280 | return -EINVAL; | ||
281 | |||
282 | spin_lock_irqsave(&ctx->lock, flags); | ||
283 | |||
284 | for (;;) { | ||
285 | prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); | ||
286 | if (ctx_event_pending(ctx)) | ||
287 | break; | ||
288 | |||
289 | if (file->f_flags & O_NONBLOCK) { | ||
290 | rc = -EAGAIN; | ||
291 | goto out; | ||
292 | } | ||
293 | |||
294 | if (signal_pending(current)) { | ||
295 | rc = -ERESTARTSYS; | ||
296 | goto out; | ||
297 | } | ||
298 | |||
299 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
300 | pr_devel("afu_read going to sleep...\n"); | ||
301 | schedule(); | ||
302 | pr_devel("afu_read woken up\n"); | ||
303 | spin_lock_irqsave(&ctx->lock, flags); | ||
304 | } | ||
305 | |||
306 | finish_wait(&ctx->wq, &wait); | ||
307 | |||
308 | memset(&event, 0, sizeof(event)); | ||
309 | event.header.process_element = ctx->pe; | ||
310 | event.header.size = sizeof(struct cxl_event_header); | ||
311 | if (ctx->pending_irq) { | ||
312 | pr_devel("afu_read delivering AFU interrupt\n"); | ||
313 | event.header.size += sizeof(struct cxl_event_afu_interrupt); | ||
314 | event.header.type = CXL_EVENT_AFU_INTERRUPT; | ||
315 | event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1; | ||
316 | clear_bit(event.irq.irq - 1, ctx->irq_bitmap); | ||
317 | if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count)) | ||
318 | ctx->pending_irq = false; | ||
319 | } else if (ctx->pending_fault) { | ||
320 | pr_devel("afu_read delivering data storage fault\n"); | ||
321 | event.header.size += sizeof(struct cxl_event_data_storage); | ||
322 | event.header.type = CXL_EVENT_DATA_STORAGE; | ||
323 | event.fault.addr = ctx->fault_addr; | ||
324 | event.fault.dsisr = ctx->fault_dsisr; | ||
325 | ctx->pending_fault = false; | ||
326 | } else if (ctx->pending_afu_err) { | ||
327 | pr_devel("afu_read delivering afu error\n"); | ||
328 | event.header.size += sizeof(struct cxl_event_afu_error); | ||
329 | event.header.type = CXL_EVENT_AFU_ERROR; | ||
330 | event.afu_error.error = ctx->afu_err; | ||
331 | ctx->pending_afu_err = false; | ||
332 | } else if (ctx->status == CLOSED) { | ||
333 | pr_devel("afu_read fatal error\n"); | ||
334 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
335 | return -EIO; | ||
336 | } else | ||
337 | WARN(1, "afu_read must be buggy\n"); | ||
338 | |||
339 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
340 | |||
341 | if (copy_to_user(buf, &event, event.header.size)) | ||
342 | return -EFAULT; | ||
343 | return event.header.size; | ||
344 | |||
345 | out: | ||
346 | finish_wait(&ctx->wq, &wait); | ||
347 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
348 | return rc; | ||
349 | } | ||
350 | |||
351 | static const struct file_operations afu_fops = { | ||
352 | .owner = THIS_MODULE, | ||
353 | .open = afu_open, | ||
354 | .poll = afu_poll, | ||
355 | .read = afu_read, | ||
356 | .release = afu_release, | ||
357 | .unlocked_ioctl = afu_ioctl, | ||
358 | .compat_ioctl = afu_compat_ioctl, | ||
359 | .mmap = afu_mmap, | ||
360 | }; | ||
361 | |||
362 | static const struct file_operations afu_master_fops = { | ||
363 | .owner = THIS_MODULE, | ||
364 | .open = afu_master_open, | ||
365 | .poll = afu_poll, | ||
366 | .read = afu_read, | ||
367 | .release = afu_release, | ||
368 | .unlocked_ioctl = afu_ioctl, | ||
369 | .compat_ioctl = afu_compat_ioctl, | ||
370 | .mmap = afu_mmap, | ||
371 | }; | ||
372 | |||
373 | |||
374 | static char *cxl_devnode(struct device *dev, umode_t *mode) | ||
375 | { | ||
376 | if (CXL_DEVT_IS_CARD(dev->devt)) { | ||
377 | /* | ||
378 | * These minor numbers will eventually be used to program the | ||
379 | * PSL and AFUs once we have dynamic reprogramming support | ||
380 | */ | ||
381 | return NULL; | ||
382 | } | ||
383 | return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); | ||
384 | } | ||
385 | |||
386 | extern struct class *cxl_class; | ||
387 | |||
388 | static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev, | ||
389 | struct device **chardev, char *postfix, char *desc, | ||
390 | const struct file_operations *fops) | ||
391 | { | ||
392 | struct device *dev; | ||
393 | int rc; | ||
394 | |||
395 | cdev_init(cdev, fops); | ||
396 | if ((rc = cdev_add(cdev, devt, 1))) { | ||
397 | dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc); | ||
398 | return rc; | ||
399 | } | ||
400 | |||
401 | dev = device_create(cxl_class, &afu->dev, devt, afu, | ||
402 | "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix); | ||
403 | if (IS_ERR(dev)) { | ||
404 | rc = PTR_ERR(dev); | ||
405 | dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc); | ||
406 | goto err; | ||
407 | } | ||
408 | |||
409 | *chardev = dev; | ||
410 | |||
411 | return 0; | ||
412 | err: | ||
413 | cdev_del(cdev); | ||
414 | return rc; | ||
415 | } | ||
416 | |||
417 | int cxl_chardev_d_afu_add(struct cxl_afu *afu) | ||
418 | { | ||
419 | return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d, | ||
420 | &afu->chardev_d, "d", "dedicated", | ||
421 | &afu_master_fops); /* Uses master fops */ | ||
422 | } | ||
423 | |||
424 | int cxl_chardev_m_afu_add(struct cxl_afu *afu) | ||
425 | { | ||
426 | return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m, | ||
427 | &afu->chardev_m, "m", "master", | ||
428 | &afu_master_fops); | ||
429 | } | ||
430 | |||
431 | int cxl_chardev_s_afu_add(struct cxl_afu *afu) | ||
432 | { | ||
433 | return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s, | ||
434 | &afu->chardev_s, "s", "shared", | ||
435 | &afu_fops); | ||
436 | } | ||
437 | |||
438 | void cxl_chardev_afu_remove(struct cxl_afu *afu) | ||
439 | { | ||
440 | if (afu->chardev_d) { | ||
441 | cdev_del(&afu->afu_cdev_d); | ||
442 | device_unregister(afu->chardev_d); | ||
443 | afu->chardev_d = NULL; | ||
444 | } | ||
445 | if (afu->chardev_m) { | ||
446 | cdev_del(&afu->afu_cdev_m); | ||
447 | device_unregister(afu->chardev_m); | ||
448 | afu->chardev_m = NULL; | ||
449 | } | ||
450 | if (afu->chardev_s) { | ||
451 | cdev_del(&afu->afu_cdev_s); | ||
452 | device_unregister(afu->chardev_s); | ||
453 | afu->chardev_s = NULL; | ||
454 | } | ||
455 | } | ||
456 | |||
457 | int cxl_register_afu(struct cxl_afu *afu) | ||
458 | { | ||
459 | afu->dev.class = cxl_class; | ||
460 | |||
461 | return device_register(&afu->dev); | ||
462 | } | ||
463 | |||
464 | int cxl_register_adapter(struct cxl *adapter) | ||
465 | { | ||
466 | adapter->dev.class = cxl_class; | ||
467 | |||
468 | /* | ||
469 | * Future: When we support dynamically reprogramming the PSL & AFU we | ||
470 | * will expose the interface to do that via a chardev: | ||
471 | * adapter->dev.devt = CXL_CARD_MKDEV(adapter); | ||
472 | */ | ||
473 | |||
474 | return device_register(&adapter->dev); | ||
475 | } | ||
476 | |||
477 | int __init cxl_file_init(void) | ||
478 | { | ||
479 | int rc; | ||
480 | |||
481 | /* | ||
482 | * If these change we really need to update API. Either change some | ||
483 | * flags or update API version number CXL_API_VERSION. | ||
484 | */ | ||
485 | BUILD_BUG_ON(CXL_API_VERSION != 1); | ||
486 | BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64); | ||
487 | BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8); | ||
488 | BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8); | ||
489 | BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32); | ||
490 | BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16); | ||
491 | |||
492 | if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) { | ||
493 | pr_err("Unable to allocate CXL major number: %i\n", rc); | ||
494 | return rc; | ||
495 | } | ||
496 | |||
497 | pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev)); | ||
498 | |||
499 | cxl_class = class_create(THIS_MODULE, "cxl"); | ||
500 | if (IS_ERR(cxl_class)) { | ||
501 | pr_err("Unable to create CXL class\n"); | ||
502 | rc = PTR_ERR(cxl_class); | ||
503 | goto err; | ||
504 | } | ||
505 | cxl_class->devnode = cxl_devnode; | ||
506 | |||
507 | return 0; | ||
508 | |||
509 | err: | ||
510 | unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS); | ||
511 | return rc; | ||
512 | } | ||
513 | |||
514 | void cxl_file_exit(void) | ||
515 | { | ||
516 | unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS); | ||
517 | class_destroy(cxl_class); | ||
518 | } | ||
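
Taken together, file.c exposes one character device per AFU and mode, named afu%i.%i with a "d", "m" or "s" suffix and placed under /dev/cxl/ by cxl_devnode(). Userspace opens the device, starts work with CXL_IOCTL_START_WORK (which binds the calling process's mm), may mmap the problem state area once started, and then reads struct cxl_event records as AFU interrupts, data storage faults and AFU errors arrive. A rough userspace sketch; the device path, the zero work element descriptor and the <misc/cxl.h> header location are assumptions rather than something this patch guarantees:

/* Hypothetical example of driving a shared-mode AFU device from userspace. */
#include <sys/ioctl.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <misc/cxl.h>			/* assumed location of the cxl uapi header */

int main(void)
{
	struct cxl_ioctl_start_work work;
	struct cxl_event event;
	int fd = open("/dev/cxl/afu0.0s", O_RDWR);	/* "s" = shared (AFU directed) */

	if (fd < 0)
		return 1;

	memset(&work, 0, sizeof(work));		/* reserved fields must be zero */
	work.work_element_descriptor = 0;	/* WED is AFU specific; placeholder */
	if (ioctl(fd, CXL_IOCTL_START_WORK, &work))	/* attaches this process's mm */
		return 1;

	/* Blocks until the AFU raises an interrupt, faults or reports an error */
	if (read(fd, &event, sizeof(event)) >= (ssize_t)sizeof(event.header))
		printf("cxl event: type %u size %u pe %u\n",
		       event.header.type, event.header.size,
		       event.header.process_element);

	close(fd);
	return 0;
}
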
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c new file mode 100644 index 000000000000..336020c8e1af --- /dev/null +++ b/drivers/misc/cxl/irq.c | |||
@@ -0,0 +1,402 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/workqueue.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/wait.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/pid.h> | ||
16 | #include <asm/cputable.h> | ||
17 | #include <misc/cxl.h> | ||
18 | |||
19 | #include "cxl.h" | ||
20 | |||
21 | /* XXX: This is implementation specific */ | ||
22 | static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat) | ||
23 | { | ||
24 | u64 fir1, fir2, fir_slice, serr, afu_debug; | ||
25 | |||
26 | fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); | ||
27 | fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); | ||
28 | fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); | ||
29 | serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); | ||
30 | afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); | ||
31 | |||
32 | dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); | ||
33 | dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1); | ||
34 | dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2); | ||
35 | dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr); | ||
36 | dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice); | ||
37 | dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug); | ||
38 | |||
39 | dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); | ||
40 | cxl_stop_trace(ctx->afu->adapter); | ||
41 | |||
42 | return cxl_ack_irq(ctx, 0, errstat); | ||
43 | } | ||
44 | |||
45 | irqreturn_t cxl_slice_irq_err(int irq, void *data) | ||
46 | { | ||
47 | struct cxl_afu *afu = data; | ||
48 | u64 fir_slice, errstat, serr, afu_debug; | ||
49 | |||
50 | WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); | ||
51 | |||
52 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
53 | fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); | ||
54 | errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); | ||
55 | afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); | ||
56 | dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr); | ||
57 | dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice); | ||
58 | dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat); | ||
59 | dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug); | ||
60 | |||
61 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | ||
62 | |||
63 | return IRQ_HANDLED; | ||
64 | } | ||
65 | |||
66 | static irqreturn_t cxl_irq_err(int irq, void *data) | ||
67 | { | ||
68 | struct cxl *adapter = data; | ||
69 | u64 fir1, fir2, err_ivte; | ||
70 | |||
71 | WARN(1, "CXL ERROR interrupt %i\n", irq); | ||
72 | |||
73 | err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); | ||
74 | dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte); | ||
75 | |||
76 | dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); | ||
77 | cxl_stop_trace(adapter); | ||
78 | |||
79 | fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); | ||
80 | fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); | ||
81 | |||
82 | dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2); | ||
83 | |||
84 | return IRQ_HANDLED; | ||
85 | } | ||
86 | |||
87 | static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar) | ||
88 | { | ||
89 | ctx->dsisr = dsisr; | ||
90 | ctx->dar = dar; | ||
91 | schedule_work(&ctx->fault_work); | ||
92 | return IRQ_HANDLED; | ||
93 | } | ||
94 | |||
95 | static irqreturn_t cxl_irq(int irq, void *data) | ||
96 | { | ||
97 | struct cxl_context *ctx = data; | ||
98 | struct cxl_irq_info irq_info; | ||
99 | u64 dsisr, dar; | ||
100 | int result; | ||
101 | |||
102 | if ((result = cxl_get_irq(ctx, &irq_info))) { | ||
103 | WARN(1, "Unable to get CXL IRQ Info: %i\n", result); | ||
104 | return IRQ_HANDLED; | ||
105 | } | ||
106 | |||
107 | dsisr = irq_info.dsisr; | ||
108 | dar = irq_info.dar; | ||
109 | |||
110 | pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar); | ||
111 | |||
112 | if (dsisr & CXL_PSL_DSISR_An_DS) { | ||
113 | /* | ||
114 | * We don't inherently need to sleep to handle this, but we do | ||
115 | * need to get a ref to the task's mm, which we can't do from | ||
116 | * irq context without the potential for a deadlock since it | ||
117 | * takes the task_lock. An alternate option would be to keep a | ||
118 | * reference to the task's mm the entire time it has cxl open, | ||
119 | * but to do that we need to solve the issue where we hold a | ||
120 | * ref to the mm, but the mm can hold a ref to the fd after an | ||
121 | * mmap preventing anything from being cleaned up. | ||
122 | */ | ||
123 | pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe); | ||
124 | return schedule_cxl_fault(ctx, dsisr, dar); | ||
125 | } | ||
126 | |||
127 | if (dsisr & CXL_PSL_DSISR_An_M) | ||
128 | pr_devel("CXL interrupt: PTE not found\n"); | ||
129 | if (dsisr & CXL_PSL_DSISR_An_P) | ||
130 | pr_devel("CXL interrupt: Storage protection violation\n"); | ||
131 | if (dsisr & CXL_PSL_DSISR_An_A) | ||
132 | pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n"); | ||
133 | if (dsisr & CXL_PSL_DSISR_An_S) | ||
134 | pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n"); | ||
135 | if (dsisr & CXL_PSL_DSISR_An_K) | ||
136 | pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n"); | ||
137 | |||
138 | if (dsisr & CXL_PSL_DSISR_An_DM) { | ||
139 | /* | ||
140 | * In some cases we might be able to handle the fault | ||
141 | * immediately if hash_page would succeed, but we still need | ||
142 | * the task's mm, which as above we can't get without a lock | ||
143 | */ | ||
144 | pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe); | ||
145 | return schedule_cxl_fault(ctx, dsisr, dar); | ||
146 | } | ||
147 | if (dsisr & CXL_PSL_DSISR_An_ST) | ||
148 | WARN(1, "CXL interrupt: Segment Table PTE not found\n"); | ||
149 | if (dsisr & CXL_PSL_DSISR_An_UR) | ||
150 | pr_devel("CXL interrupt: AURP PTE not found\n"); | ||
151 | if (dsisr & CXL_PSL_DSISR_An_PE) | ||
152 | return handle_psl_slice_error(ctx, dsisr, irq_info.errstat); | ||
153 | if (dsisr & CXL_PSL_DSISR_An_AE) { | ||
154 | pr_devel("CXL interrupt: AFU Error %llx\n", irq_info.afu_err); | ||
155 | |||
156 | if (ctx->pending_afu_err) { | ||
157 | /* | ||
158 | * This shouldn't happen - the PSL treats these errors | ||
159 | * as fatal and will have reset the AFU, so there's not | ||
160 | * much point buffering multiple AFU errors. | ||
161 | * OTOH if we DO ever see a storm of these come in it's | ||
162 | * probably best that we log them somewhere: | ||
163 | */ | ||
164 | dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error " | ||
165 | "undelivered to pe %i: %.llx\n", | ||
166 | ctx->pe, irq_info.afu_err); | ||
167 | } else { | ||
168 | spin_lock(&ctx->lock); | ||
169 | ctx->afu_err = irq_info.afu_err; | ||
170 | ctx->pending_afu_err = 1; | ||
171 | spin_unlock(&ctx->lock); | ||
172 | |||
173 | wake_up_all(&ctx->wq); | ||
174 | } | ||
175 | |||
176 | cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0); | ||
177 | } | ||
178 | if (dsisr & CXL_PSL_DSISR_An_OC) | ||
179 | pr_devel("CXL interrupt: OS Context Warning\n"); | ||
180 | |||
181 | WARN(1, "Unhandled CXL PSL IRQ\n"); | ||
182 | return IRQ_HANDLED; | ||
183 | } | ||
184 | |||
185 | static irqreturn_t cxl_irq_multiplexed(int irq, void *data) | ||
186 | { | ||
187 | struct cxl_afu *afu = data; | ||
188 | struct cxl_context *ctx; | ||
189 | int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff; | ||
190 | int ret; | ||
191 | |||
192 | rcu_read_lock(); | ||
193 | ctx = idr_find(&afu->contexts_idr, ph); | ||
194 | if (ctx) { | ||
195 | ret = cxl_irq(irq, ctx); | ||
196 | rcu_read_unlock(); | ||
197 | return ret; | ||
198 | } | ||
199 | rcu_read_unlock(); | ||
200 | |||
201 | WARN(1, "Unable to demultiplex CXL PSL IRQ\n"); | ||
202 | return IRQ_HANDLED; | ||
203 | } | ||
204 | |||
205 | static irqreturn_t cxl_irq_afu(int irq, void *data) | ||
206 | { | ||
207 | struct cxl_context *ctx = data; | ||
208 | irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq)); | ||
209 | int irq_off, afu_irq = 1; | ||
210 | __u16 range; | ||
211 | int r; | ||
212 | |||
213 | for (r = 1; r < CXL_IRQ_RANGES; r++) { | ||
214 | irq_off = hwirq - ctx->irqs.offset[r]; | ||
215 | range = ctx->irqs.range[r]; | ||
216 | if (irq_off >= 0 && irq_off < range) { | ||
217 | afu_irq += irq_off; | ||
218 | break; | ||
219 | } | ||
220 | afu_irq += range; | ||
221 | } | ||
222 | if (unlikely(r >= CXL_IRQ_RANGES)) { | ||
223 | WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n", | ||
224 | ctx->pe, irq, hwirq); | ||
225 | return IRQ_HANDLED; | ||
226 | } | ||
227 | |||
228 | pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n", | ||
229 | afu_irq, ctx->pe, irq, hwirq); | ||
230 | |||
231 | if (unlikely(!ctx->irq_bitmap)) { | ||
232 | WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n"); | ||
233 | return IRQ_HANDLED; | ||
234 | } | ||
235 | spin_lock(&ctx->lock); | ||
236 | set_bit(afu_irq - 1, ctx->irq_bitmap); | ||
237 | ctx->pending_irq = true; | ||
238 | spin_unlock(&ctx->lock); | ||
239 | |||
240 | wake_up_all(&ctx->wq); | ||
241 | |||
242 | return IRQ_HANDLED; | ||
243 | } | ||
244 | |||
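/*
 * Worked example with hypothetical ranges: if irqs.offset[1] = 0x100 with
 * range[1] = 4 and irqs.offset[2] = 0x200 with range[2] = 2, a hwirq of 0x201
 * misses range 1 (afu_irq becomes 1 + 4), then hits range 2 at irq_off = 1,
 * giving afu_irq = 6; bit 5 (afu_irq - 1) is set in ctx->irq_bitmap and later
 * reported by afu_read() as event.irq.irq = 6.
 */
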
245 | unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, | ||
246 | irq_handler_t handler, void *cookie) | ||
247 | { | ||
248 | unsigned int virq; | ||
249 | int result; | ||
250 | |||
251 | /* IRQ Domain? */ | ||
252 | virq = irq_create_mapping(NULL, hwirq); | ||
253 | if (!virq) { | ||
254 | dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n"); | ||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | cxl_setup_irq(adapter, hwirq, virq); | ||
259 | |||
260 | pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq); | ||
261 | |||
262 | result = request_irq(virq, handler, 0, "cxl", cookie); | ||
263 | if (result) { | ||
264 | dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result); | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | return virq; | ||
269 | } | ||
270 | |||
271 | void cxl_unmap_irq(unsigned int virq, void *cookie) | ||
272 | { | ||
273 | free_irq(virq, cookie); | ||
274 | irq_dispose_mapping(virq); | ||
275 | } | ||
276 | |||
277 | static int cxl_register_one_irq(struct cxl *adapter, | ||
278 | irq_handler_t handler, | ||
279 | void *cookie, | ||
280 | irq_hw_number_t *dest_hwirq, | ||
281 | unsigned int *dest_virq) | ||
282 | { | ||
283 | int hwirq, virq; | ||
284 | |||
285 | if ((hwirq = cxl_alloc_one_irq(adapter)) < 0) | ||
286 | return hwirq; | ||
287 | |||
288 | if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie))) | ||
289 | goto err; | ||
290 | |||
291 | *dest_hwirq = hwirq; | ||
292 | *dest_virq = virq; | ||
293 | |||
294 | return 0; | ||
295 | |||
296 | err: | ||
297 | cxl_release_one_irq(adapter, hwirq); | ||
298 | return -ENOMEM; | ||
299 | } | ||
300 | |||
301 | int cxl_register_psl_err_irq(struct cxl *adapter) | ||
302 | { | ||
303 | int rc; | ||
304 | |||
305 | if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter, | ||
306 | &adapter->err_hwirq, | ||
307 | &adapter->err_virq))) | ||
308 | return rc; | ||
309 | |||
310 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff); | ||
311 | |||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | void cxl_release_psl_err_irq(struct cxl *adapter) | ||
316 | { | ||
317 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); | ||
318 | cxl_unmap_irq(adapter->err_virq, adapter); | ||
319 | cxl_release_one_irq(adapter, adapter->err_hwirq); | ||
320 | } | ||
321 | |||
322 | int cxl_register_serr_irq(struct cxl_afu *afu) | ||
323 | { | ||
324 | u64 serr; | ||
325 | int rc; | ||
326 | |||
327 | if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu, | ||
328 | &afu->serr_hwirq, | ||
329 | &afu->serr_virq))) | ||
330 | return rc; | ||
331 | |||
332 | serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
333 | serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); | ||
334 | cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | void cxl_release_serr_irq(struct cxl_afu *afu) | ||
340 | { | ||
341 | cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); | ||
342 | cxl_unmap_irq(afu->serr_virq, afu); | ||
343 | cxl_release_one_irq(afu->adapter, afu->serr_hwirq); | ||
344 | } | ||
345 | |||
346 | int cxl_register_psl_irq(struct cxl_afu *afu) | ||
347 | { | ||
348 | return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu, | ||
349 | &afu->psl_hwirq, &afu->psl_virq); | ||
350 | } | ||
351 | |||
352 | void cxl_release_psl_irq(struct cxl_afu *afu) | ||
353 | { | ||
354 | cxl_unmap_irq(afu->psl_virq, afu); | ||
355 | cxl_release_one_irq(afu->adapter, afu->psl_hwirq); | ||
356 | } | ||
357 | |||
358 | int afu_register_irqs(struct cxl_context *ctx, u32 count) | ||
359 | { | ||
360 | irq_hw_number_t hwirq; | ||
361 | int rc, r, i; | ||
362 | |||
363 | if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count))) | ||
364 | return rc; | ||
365 | |||
366 | /* Multiplexed PSL Interrupt */ | ||
367 | ctx->irqs.offset[0] = ctx->afu->psl_hwirq; | ||
368 | ctx->irqs.range[0] = 1; | ||
369 | |||
370 | ctx->irq_count = count; | ||
371 | ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count), | ||
372 | sizeof(*ctx->irq_bitmap), GFP_KERNEL); | ||
373 | if (!ctx->irq_bitmap) | ||
374 | return -ENOMEM; | ||
375 | for (r = 1; r < CXL_IRQ_RANGES; r++) { | ||
376 | hwirq = ctx->irqs.offset[r]; | ||
377 | for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { | ||
378 | cxl_map_irq(ctx->afu->adapter, hwirq, | ||
379 | cxl_irq_afu, ctx); | ||
380 | } | ||
381 | } | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | void afu_release_irqs(struct cxl_context *ctx) | ||
387 | { | ||
388 | irq_hw_number_t hwirq; | ||
389 | unsigned int virq; | ||
390 | int r, i; | ||
391 | |||
392 | for (r = 1; r < CXL_IRQ_RANGES; r++) { | ||
393 | hwirq = ctx->irqs.offset[r]; | ||
394 | for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { | ||
395 | virq = irq_find_mapping(NULL, hwirq); | ||
396 | if (virq) | ||
397 | cxl_unmap_irq(virq, ctx); | ||
398 | } | ||
399 | } | ||
400 | |||
401 | cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); | ||
402 | } | ||
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c new file mode 100644 index 000000000000..4cde9b661642 --- /dev/null +++ b/drivers/misc/cxl/main.c | |||
@@ -0,0 +1,230 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/mutex.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/idr.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <asm/cputable.h> | ||
23 | #include <misc/cxl.h> | ||
24 | |||
25 | #include "cxl.h" | ||
26 | |||
27 | static DEFINE_SPINLOCK(adapter_idr_lock); | ||
28 | static DEFINE_IDR(cxl_adapter_idr); | ||
29 | |||
30 | uint cxl_verbose; | ||
31 | module_param_named(verbose, cxl_verbose, uint, 0600); | ||
32 | MODULE_PARM_DESC(verbose, "Enable verbose dmesg output"); | ||
33 | |||
34 | static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm) | ||
35 | { | ||
36 | struct task_struct *task; | ||
37 | unsigned long flags; | ||
38 | if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { | ||
39 | pr_devel("%s unable to get task %i\n", | ||
40 | __func__, pid_nr(ctx->pid)); | ||
41 | return; | ||
42 | } | ||
43 | |||
44 | if (task->mm != mm) | ||
45 | goto out_put; | ||
46 | |||
47 | pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__, | ||
48 | ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe); | ||
49 | |||
50 | spin_lock_irqsave(&ctx->sste_lock, flags); | ||
51 | memset(ctx->sstp, 0, ctx->sst_size); | ||
52 | spin_unlock_irqrestore(&ctx->sste_lock, flags); | ||
53 | mb(); | ||
54 | cxl_afu_slbia(ctx->afu); | ||
55 | out_put: | ||
56 | put_task_struct(task); | ||
57 | } | ||
58 | |||
59 | static inline void cxl_slbia_core(struct mm_struct *mm) | ||
60 | { | ||
61 | struct cxl *adapter; | ||
62 | struct cxl_afu *afu; | ||
63 | struct cxl_context *ctx; | ||
64 | int card, slice, id; | ||
65 | |||
66 | pr_devel("%s called\n", __func__); | ||
67 | |||
68 | spin_lock(&adapter_idr_lock); | ||
69 | idr_for_each_entry(&cxl_adapter_idr, adapter, card) { | ||
70 | /* XXX: Make this lookup faster with link from mm to ctx */ | ||
71 | spin_lock(&adapter->afu_list_lock); | ||
72 | for (slice = 0; slice < adapter->slices; slice++) { | ||
73 | afu = adapter->afu[slice]; | ||
74 | if (!afu->enabled) | ||
75 | continue; | ||
76 | rcu_read_lock(); | ||
77 | idr_for_each_entry(&afu->contexts_idr, ctx, id) | ||
78 | _cxl_slbia(ctx, mm); | ||
79 | rcu_read_unlock(); | ||
80 | } | ||
81 | spin_unlock(&adapter->afu_list_lock); | ||
82 | } | ||
83 | spin_unlock(&adapter_idr_lock); | ||
84 | } | ||
85 | |||
86 | static struct cxl_calls cxl_calls = { | ||
87 | .cxl_slbia = cxl_slbia_core, | ||
88 | .owner = THIS_MODULE, | ||
89 | }; | ||
90 | |||
91 | int cxl_alloc_sst(struct cxl_context *ctx) | ||
92 | { | ||
93 | unsigned long vsid; | ||
94 | u64 ea_mask, size, sstp0, sstp1; | ||
95 | |||
96 | sstp0 = 0; | ||
97 | sstp1 = 0; | ||
98 | |||
99 | ctx->sst_size = PAGE_SIZE; | ||
100 | ctx->sst_lru = 0; | ||
101 | ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL); | ||
102 | if (!ctx->sstp) { | ||
103 | pr_err("cxl_alloc_sst: Unable to allocate segment table\n"); | ||
104 | return -ENOMEM; | ||
105 | } | ||
106 | pr_devel("SSTP allocated at 0x%p\n", ctx->sstp); | ||
107 | |||
108 | vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12; | ||
109 | |||
110 | sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT; | ||
111 | sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50; | ||
112 | |||
113 | size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT; | ||
114 | if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) { | ||
115 | WARN(1, "Impossible segment table size\n"); | ||
116 | return -EINVAL; | ||
117 | } | ||
118 | sstp0 |= size; | ||
119 | |||
120 | if (mmu_kernel_ssize == MMU_SEGSIZE_256M) | ||
121 | ea_mask = 0xfffff00ULL; | ||
122 | else | ||
123 | ea_mask = 0xffffffff00ULL; | ||
124 | |||
125 | sstp0 |= vsid >> (50-14); /* Top 14 bits of VSID */ | ||
126 | sstp1 |= (vsid << (64-(50-14))) & ~ea_mask; | ||
127 | sstp1 |= (u64)ctx->sstp & ea_mask; | ||
128 | sstp1 |= CXL_SSTP1_An_V; | ||
129 | |||
130 | pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n", | ||
131 | (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1); | ||
132 | |||
133 | /* Store the calculated sstp register values for use later */ | ||
134 | ctx->sstp0 = sstp0; | ||
135 | ctx->sstp1 = sstp1; | ||
136 | |||
137 | return 0; | ||
138 | } | ||
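Since the segment table is a single zeroed page, the SegTableSize field assembled above is simply (PAGE_SIZE >> 8) - 1, i.e. the number of 256-byte blocks in the table minus one. A purely illustrative check of that encoding for the two common powerpc page sizes (kernel u64 type assumed):

/*
 * Illustrative only: SegTableSize encoding as computed above.
 *   4K  page:  (4096  >> 8) - 1 = 15
 *   64K page:  (65536 >> 8) - 1 = 255
 */
static inline u64 example_seg_table_size(u64 sst_size)
{
	return (sst_size >> 8) - 1;
}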
139 | |||
140 | /* Find a CXL adapter by its number and increase its refcount */ | ||
141 | struct cxl *get_cxl_adapter(int num) | ||
142 | { | ||
143 | struct cxl *adapter; | ||
144 | |||
145 | spin_lock(&adapter_idr_lock); | ||
146 | if ((adapter = idr_find(&cxl_adapter_idr, num))) | ||
147 | get_device(&adapter->dev); | ||
148 | spin_unlock(&adapter_idr_lock); | ||
149 | |||
150 | return adapter; | ||
151 | } | ||
152 | |||
153 | int cxl_alloc_adapter_nr(struct cxl *adapter) | ||
154 | { | ||
155 | int i; | ||
156 | |||
157 | idr_preload(GFP_KERNEL); | ||
158 | spin_lock(&adapter_idr_lock); | ||
159 | i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT); | ||
160 | spin_unlock(&adapter_idr_lock); | ||
161 | idr_preload_end(); | ||
162 | if (i < 0) | ||
163 | return i; | ||
164 | |||
165 | adapter->adapter_num = i; | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | void cxl_remove_adapter_nr(struct cxl *adapter) | ||
171 | { | ||
172 | idr_remove(&cxl_adapter_idr, adapter->adapter_num); | ||
173 | } | ||
174 | |||
175 | int cxl_afu_select_best_mode(struct cxl_afu *afu) | ||
176 | { | ||
177 | if (afu->modes_supported & CXL_MODE_DIRECTED) | ||
178 | return cxl_afu_activate_mode(afu, CXL_MODE_DIRECTED); | ||
179 | |||
180 | if (afu->modes_supported & CXL_MODE_DEDICATED) | ||
181 | return cxl_afu_activate_mode(afu, CXL_MODE_DEDICATED); | ||
182 | |||
183 | dev_warn(&afu->dev, "No supported programming modes available\n"); | ||
184 | /* We don't fail this so the user can inspect sysfs */ | ||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static int __init init_cxl(void) | ||
189 | { | ||
190 | int rc = 0; | ||
191 | |||
192 | if (!cpu_has_feature(CPU_FTR_HVMODE)) | ||
193 | return -EPERM; | ||
194 | |||
195 | if ((rc = cxl_file_init())) | ||
196 | return rc; | ||
197 | |||
198 | cxl_debugfs_init(); | ||
199 | |||
200 | if ((rc = register_cxl_calls(&cxl_calls))) | ||
201 | goto err; | ||
202 | |||
203 | if ((rc = pci_register_driver(&cxl_pci_driver))) | ||
204 | goto err1; | ||
205 | |||
206 | return 0; | ||
207 | err1: | ||
208 | unregister_cxl_calls(&cxl_calls); | ||
209 | err: | ||
210 | cxl_debugfs_exit(); | ||
211 | cxl_file_exit(); | ||
212 | |||
213 | return rc; | ||
214 | } | ||
215 | |||
216 | static void exit_cxl(void) | ||
217 | { | ||
218 | pci_unregister_driver(&cxl_pci_driver); | ||
219 | |||
220 | cxl_debugfs_exit(); | ||
221 | cxl_file_exit(); | ||
222 | unregister_cxl_calls(&cxl_calls); | ||
223 | } | ||
224 | |||
225 | module_init(init_cxl); | ||
226 | module_exit(exit_cxl); | ||
227 | |||
228 | MODULE_DESCRIPTION("IBM Coherent Accelerator"); | ||
229 | MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>"); | ||
230 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c new file mode 100644 index 000000000000..623286a77114 --- /dev/null +++ b/drivers/misc/cxl/native.c | |||
@@ -0,0 +1,683 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/mutex.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/uaccess.h> | ||
17 | #include <asm/synch.h> | ||
18 | #include <misc/cxl.h> | ||
19 | |||
20 | #include "cxl.h" | ||
21 | |||
22 | static int afu_control(struct cxl_afu *afu, u64 command, | ||
23 | u64 result, u64 mask, bool enabled) | ||
24 | { | ||
25 | u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); | ||
26 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | ||
27 | |||
28 | spin_lock(&afu->afu_cntl_lock); | ||
29 | pr_devel("AFU command starting: %llx\n", command); | ||
30 | |||
31 | cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command); | ||
32 | |||
33 | AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); | ||
34 | while ((AFU_Cntl & mask) != result) { | ||
35 | if (time_after_eq(jiffies, timeout)) { | ||
36 | dev_warn(&afu->dev, "WARNING: AFU control timed out!\n"); | ||
37 | spin_unlock(&afu->afu_cntl_lock); | ||
38 | return -EBUSY; | ||
39 | } | ||
40 | pr_devel_ratelimited("AFU control... (0x%.16llx)\n", | ||
41 | AFU_Cntl | command); | ||
42 | cpu_relax(); | ||
43 | AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); | ||
44 | } | ||
45 | pr_devel("AFU command complete: %llx\n", command); | ||
46 | afu->enabled = enabled; | ||
47 | spin_unlock(&afu->afu_cntl_lock); | ||
48 | |||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | static int afu_enable(struct cxl_afu *afu) | ||
53 | { | ||
54 | pr_devel("AFU enable request\n"); | ||
55 | |||
56 | return afu_control(afu, CXL_AFU_Cntl_An_E, | ||
57 | CXL_AFU_Cntl_An_ES_Enabled, | ||
58 | CXL_AFU_Cntl_An_ES_MASK, true); | ||
59 | } | ||
60 | |||
61 | int cxl_afu_disable(struct cxl_afu *afu) | ||
62 | { | ||
63 | pr_devel("AFU disable request\n"); | ||
64 | |||
65 | return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled, | ||
66 | CXL_AFU_Cntl_An_ES_MASK, false); | ||
67 | } | ||
68 | |||
69 | /* This will disable as well as reset */ | ||
70 | int cxl_afu_reset(struct cxl_afu *afu) | ||
71 | { | ||
72 | pr_devel("AFU reset request\n"); | ||
73 | |||
74 | return afu_control(afu, CXL_AFU_Cntl_An_RA, | ||
75 | CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, | ||
76 | CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, | ||
77 | false); | ||
78 | } | ||
79 | |||
80 | static int afu_check_and_enable(struct cxl_afu *afu) | ||
81 | { | ||
82 | if (afu->enabled) | ||
83 | return 0; | ||
84 | return afu_enable(afu); | ||
85 | } | ||
86 | |||
87 | int cxl_psl_purge(struct cxl_afu *afu) | ||
88 | { | ||
89 | u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); | ||
90 | u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); | ||
91 | u64 dsisr, dar; | ||
92 | u64 start, end; | ||
93 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | ||
94 | |||
95 | pr_devel("PSL purge request\n"); | ||
96 | |||
97 | if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { | ||
98 | WARN(1, "psl_purge request while AFU not disabled!\n"); | ||
99 | cxl_afu_disable(afu); | ||
100 | } | ||
101 | |||
102 | cxl_p1n_write(afu, CXL_PSL_SCNTL_An, | ||
103 | PSL_CNTL | CXL_PSL_SCNTL_An_Pc); | ||
104 | start = local_clock(); | ||
105 | PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); | ||
106 | while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK) | ||
107 | == CXL_PSL_SCNTL_An_Ps_Pending) { | ||
108 | if (time_after_eq(jiffies, timeout)) { | ||
109 | dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n"); | ||
110 | return -EBUSY; | ||
111 | } | ||
112 | dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); | ||
113 | pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr); | ||
114 | if (dsisr & CXL_PSL_DSISR_TRANS) { | ||
115 | dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); | ||
116 | dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar); | ||
117 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); | ||
118 | } else if (dsisr) { | ||
119 | dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr); | ||
120 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); | ||
121 | } else { | ||
122 | cpu_relax(); | ||
123 | } | ||
124 | PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); | ||
125 | } | ||
126 | end = local_clock(); | ||
127 | pr_devel("PSL purged in %lld ns\n", end - start); | ||
128 | |||
129 | cxl_p1n_write(afu, CXL_PSL_SCNTL_An, | ||
130 | PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static int spa_max_procs(int spa_size) | ||
135 | { | ||
136 | /* | ||
137 | * From the CAIA: | ||
138 | * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255 | ||
139 | * Most of that junk is really just an overly-complicated way of saying | ||
140 | * the last 256 bytes are __aligned(128), so it's really: | ||
141 | * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255 | ||
142 | * and | ||
143 | * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1 | ||
144 | * so | ||
145 | * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256 | ||
146 | * Ignore the alignment (which is safe in this case as long as we are | ||
147 | * careful with our rounding) and solve for n: | ||
148 | */ | ||
149 | return ((spa_size / 8) - 96) / 17; | ||
150 | } | ||
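A quick way to convince yourself of the constant folding above is to plug the result back into the CAIA sizing expression: for a hypothetical 4K SPA, spa_max_procs(4096) = (512 - 96) / 17 = 24, and 24 processes need ((24 + 4) * 128) + (24 * 8) + 256 = 4032 bytes, while 25 would need 4168 and no longer fit. A minimal sketch of that inverse check:

/* Illustrative only: the inverse of spa_max_procs(), from the comment above. */
static int example_spa_bytes_needed(int n)
{
	return ((n + 4) * 128) + (n * 8) + 256;	/* 4032 for n = 24, 4168 for n = 25 */
}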
151 | |||
152 | static int alloc_spa(struct cxl_afu *afu) | ||
153 | { | ||
154 | u64 spap; | ||
155 | |||
156 | /* Work out how many pages to allocate */ | ||
157 | afu->spa_order = 0; | ||
158 | do { | ||
159 | afu->spa_order++; | ||
160 | afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE; | ||
161 | afu->spa_max_procs = spa_max_procs(afu->spa_size); | ||
162 | } while (afu->spa_max_procs < afu->num_procs); | ||
163 | |||
164 | WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */ | ||
165 | |||
166 | if (!(afu->spa = (struct cxl_process_element *) | ||
167 | __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) { | ||
168 | pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n"); | ||
169 | return -ENOMEM; | ||
170 | } | ||
171 | pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n", | ||
172 | 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs); | ||
173 | |||
174 | afu->sw_command_status = (__be64 *)((char *)afu->spa + | ||
175 | ((afu->spa_max_procs + 3) * 128)); | ||
176 | |||
177 | spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr; | ||
178 | spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; | ||
179 | spap |= CXL_PSL_SPAP_V; | ||
180 | pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap); | ||
181 | cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static void release_spa(struct cxl_afu *afu) | ||
187 | { | ||
188 | free_pages((unsigned long) afu->spa, afu->spa_order); | ||
189 | } | ||
190 | |||
191 | int cxl_tlb_slb_invalidate(struct cxl *adapter) | ||
192 | { | ||
193 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | ||
194 | |||
195 | pr_devel("CXL adapter wide TLBIA & SLBIA\n"); | ||
196 | |||
197 | cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A); | ||
198 | |||
199 | cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL); | ||
200 | while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) { | ||
201 | if (time_after_eq(jiffies, timeout)) { | ||
202 | dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n"); | ||
203 | return -EBUSY; | ||
204 | } | ||
205 | cpu_relax(); | ||
206 | } | ||
207 | |||
208 | cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL); | ||
209 | while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) { | ||
210 | if (time_after_eq(jiffies, timeout)) { | ||
211 | dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n"); | ||
212 | return -EBUSY; | ||
213 | } | ||
214 | cpu_relax(); | ||
215 | } | ||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | int cxl_afu_slbia(struct cxl_afu *afu) | ||
220 | { | ||
221 | unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); | ||
222 | |||
223 | pr_devel("cxl_afu_slbia issuing SLBIA command\n"); | ||
224 | cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL); | ||
225 | while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) { | ||
226 | if (time_after_eq(jiffies, timeout)) { | ||
227 | dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n"); | ||
228 | return -EBUSY; | ||
229 | } | ||
230 | cpu_relax(); | ||
231 | } | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1) | ||
236 | { | ||
237 | int rc; | ||
238 | |||
239 | /* 1. Disable SSTP by writing 0 to SSTP1[V] */ | ||
240 | cxl_p2n_write(afu, CXL_SSTP1_An, 0); | ||
241 | |||
242 | /* 2. Invalidate all SLB entries */ | ||
243 | if ((rc = cxl_afu_slbia(afu))) | ||
244 | return rc; | ||
245 | |||
246 | /* 3. Set SSTP0_An */ | ||
247 | cxl_p2n_write(afu, CXL_SSTP0_An, sstp0); | ||
248 | |||
249 | /* 4. Set SSTP1_An */ | ||
250 | cxl_p2n_write(afu, CXL_SSTP1_An, sstp1); | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | /* Using the per-slice version (ie. SLBIA_An) may improve performance here. */ | ||
256 | static void slb_invalid(struct cxl_context *ctx) | ||
257 | { | ||
258 | struct cxl *adapter = ctx->afu->adapter; | ||
259 | u64 slbia; | ||
260 | |||
261 | WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex)); | ||
262 | |||
263 | cxl_p1_write(adapter, CXL_PSL_LBISEL, | ||
264 | ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | | ||
265 | be32_to_cpu(ctx->elem->lpid)); | ||
266 | cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID); | ||
267 | |||
268 | while (1) { | ||
269 | slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA); | ||
270 | if (!(slbia & CXL_TLB_SLB_P)) | ||
271 | break; | ||
272 | cpu_relax(); | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static int do_process_element_cmd(struct cxl_context *ctx, | ||
277 | u64 cmd, u64 pe_state) | ||
278 | { | ||
279 | u64 state; | ||
280 | |||
281 | WARN_ON(!ctx->afu->enabled); | ||
282 | |||
283 | ctx->elem->software_state = cpu_to_be32(pe_state); | ||
284 | smp_wmb(); | ||
285 | *(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); | ||
286 | smp_mb(); | ||
287 | cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); | ||
288 | while (1) { | ||
289 | state = be64_to_cpup(ctx->afu->sw_command_status); | ||
290 | if (state == ~0ULL) { | ||
291 | pr_err("cxl: Error adding process element to AFU\n"); | ||
292 | return -1; | ||
293 | } | ||
294 | if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) == | ||
295 | (cmd | (cmd >> 16) | ctx->pe)) | ||
296 | break; | ||
297 | /* | ||
298 | * The command won't finish in the PSL if there are | ||
299 | * outstanding DSIs. Hence we need to yield here in | ||
300 | * case there are outstanding DSIs that we need to | ||
301 | * service. Tuning possibility: we could wait for a | ||
302 | * while before calling schedule(). | ||
303 | */ | ||
304 | schedule(); | ||
305 | |||
306 | } | ||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | static int add_process_element(struct cxl_context *ctx) | ||
311 | { | ||
312 | int rc = 0; | ||
313 | |||
314 | mutex_lock(&ctx->afu->spa_mutex); | ||
315 | pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); | ||
316 | if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V))) | ||
317 | ctx->pe_inserted = true; | ||
318 | pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); | ||
319 | mutex_unlock(&ctx->afu->spa_mutex); | ||
320 | return rc; | ||
321 | } | ||
322 | |||
323 | static int terminate_process_element(struct cxl_context *ctx) | ||
324 | { | ||
325 | int rc = 0; | ||
326 | |||
327 | /* fast path terminate if it's already invalid */ | ||
328 | if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) | ||
329 | return rc; | ||
330 | |||
331 | mutex_lock(&ctx->afu->spa_mutex); | ||
332 | pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); | ||
333 | rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE, | ||
334 | CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T); | ||
335 | ctx->elem->software_state = 0; /* Remove Valid bit */ | ||
336 | pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); | ||
337 | mutex_unlock(&ctx->afu->spa_mutex); | ||
338 | return rc; | ||
339 | } | ||
340 | |||
341 | static int remove_process_element(struct cxl_context *ctx) | ||
342 | { | ||
343 | int rc = 0; | ||
344 | |||
345 | mutex_lock(&ctx->afu->spa_mutex); | ||
346 | pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); | ||
347 | if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0))) | ||
348 | ctx->pe_inserted = false; | ||
349 | slb_invalid(ctx); | ||
350 | pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); | ||
351 | mutex_unlock(&ctx->afu->spa_mutex); | ||
352 | |||
353 | return rc; | ||
354 | } | ||
355 | |||
356 | |||
357 | static void assign_psn_space(struct cxl_context *ctx) | ||
358 | { | ||
359 | if (!ctx->afu->pp_size || ctx->master) { | ||
360 | ctx->psn_phys = ctx->afu->psn_phys; | ||
361 | ctx->psn_size = ctx->afu->adapter->ps_size; | ||
362 | } else { | ||
363 | ctx->psn_phys = ctx->afu->psn_phys + | ||
364 | (ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe); | ||
365 | ctx->psn_size = ctx->afu->pp_size; | ||
366 | } | ||
367 | } | ||
368 | |||
369 | static int activate_afu_directed(struct cxl_afu *afu) | ||
370 | { | ||
371 | int rc; | ||
372 | |||
373 | dev_info(&afu->dev, "Activating AFU directed mode\n"); | ||
374 | |||
375 | if (alloc_spa(afu)) | ||
376 | return -ENOMEM; | ||
377 | |||
378 | cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU); | ||
379 | cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL); | ||
380 | cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); | ||
381 | |||
382 | afu->current_mode = CXL_MODE_DIRECTED; | ||
383 | afu->num_procs = afu->max_procs_virtualised; | ||
384 | |||
385 | if ((rc = cxl_chardev_m_afu_add(afu))) | ||
386 | return rc; | ||
387 | |||
388 | if ((rc = cxl_sysfs_afu_m_add(afu))) | ||
389 | goto err; | ||
390 | |||
391 | if ((rc = cxl_chardev_s_afu_add(afu))) | ||
392 | goto err1; | ||
393 | |||
394 | return 0; | ||
395 | err1: | ||
396 | cxl_sysfs_afu_m_remove(afu); | ||
397 | err: | ||
398 | cxl_chardev_afu_remove(afu); | ||
399 | return rc; | ||
400 | } | ||
401 | |||
402 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
403 | #define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE) | ||
404 | #else | ||
405 | #define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE)) | ||
406 | #endif | ||
407 | |||
408 | static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) | ||
409 | { | ||
410 | u64 sr; | ||
411 | int r, result; | ||
412 | |||
413 | assign_psn_space(ctx); | ||
414 | |||
415 | ctx->elem->ctxtime = 0; /* disable */ | ||
416 | ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); | ||
417 | ctx->elem->haurp = 0; /* disable */ | ||
418 | ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1)); | ||
419 | |||
420 | sr = CXL_PSL_SR_An_SC; | ||
421 | if (ctx->master) | ||
422 | sr |= CXL_PSL_SR_An_MP; | ||
423 | if (mfspr(SPRN_LPCR) & LPCR_TC) | ||
424 | sr |= CXL_PSL_SR_An_TC; | ||
425 | /* HV=0, PR=1, R=1 for userspace | ||
426 | * For kernel contexts: this would need to change | ||
427 | */ | ||
428 | sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; | ||
429 | set_endian(sr); | ||
430 | sr &= ~(CXL_PSL_SR_An_HV); | ||
431 | if (!test_tsk_thread_flag(current, TIF_32BIT)) | ||
432 | sr |= CXL_PSL_SR_An_SF; | ||
433 | ctx->elem->common.pid = cpu_to_be32(current->pid); | ||
434 | ctx->elem->common.tid = 0; | ||
435 | ctx->elem->sr = cpu_to_be64(sr); | ||
436 | |||
437 | ctx->elem->common.csrp = 0; /* disable */ | ||
438 | ctx->elem->common.aurp0 = 0; /* disable */ | ||
439 | ctx->elem->common.aurp1 = 0; /* disable */ | ||
440 | |||
441 | cxl_prefault(ctx, wed); | ||
442 | |||
443 | ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0); | ||
444 | ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1); | ||
445 | |||
446 | for (r = 0; r < CXL_IRQ_RANGES; r++) { | ||
447 | ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); | ||
448 | ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); | ||
449 | } | ||
450 | |||
451 | ctx->elem->common.amr = cpu_to_be64(amr); | ||
452 | ctx->elem->common.wed = cpu_to_be64(wed); | ||
453 | |||
454 | /* first guy needs to enable */ | ||
455 | if ((result = afu_check_and_enable(ctx->afu))) | ||
456 | return result; | ||
457 | |||
458 | add_process_element(ctx); | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | static int deactivate_afu_directed(struct cxl_afu *afu) | ||
464 | { | ||
465 | dev_info(&afu->dev, "Deactivating AFU directed mode\n"); | ||
466 | |||
467 | afu->current_mode = 0; | ||
468 | afu->num_procs = 0; | ||
469 | |||
470 | cxl_sysfs_afu_m_remove(afu); | ||
471 | cxl_chardev_afu_remove(afu); | ||
472 | |||
473 | cxl_afu_reset(afu); | ||
474 | cxl_afu_disable(afu); | ||
475 | cxl_psl_purge(afu); | ||
476 | |||
477 | release_spa(afu); | ||
478 | |||
479 | return 0; | ||
480 | } | ||
481 | |||
482 | static int activate_dedicated_process(struct cxl_afu *afu) | ||
483 | { | ||
484 | dev_info(&afu->dev, "Activating dedicated process mode\n"); | ||
485 | |||
486 | cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process); | ||
487 | |||
488 | cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */ | ||
489 | cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */ | ||
490 | cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL); | ||
491 | cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID)); | ||
492 | cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */ | ||
493 | cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1)); | ||
494 | |||
495 | cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */ | ||
496 | cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */ | ||
497 | cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */ | ||
498 | |||
499 | afu->current_mode = CXL_MODE_DEDICATED; | ||
500 | afu->num_procs = 1; | ||
501 | |||
502 | return cxl_chardev_d_afu_add(afu); | ||
503 | } | ||
504 | |||
505 | static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) | ||
506 | { | ||
507 | struct cxl_afu *afu = ctx->afu; | ||
508 | u64 sr; | ||
509 | int rc; | ||
510 | |||
511 | sr = CXL_PSL_SR_An_SC; | ||
512 | set_endian(sr); | ||
513 | if (ctx->master) | ||
514 | sr |= CXL_PSL_SR_An_MP; | ||
515 | if (mfspr(SPRN_LPCR) & LPCR_TC) | ||
516 | sr |= CXL_PSL_SR_An_TC; | ||
517 | sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; | ||
518 | if (!test_tsk_thread_flag(current, TIF_32BIT)) | ||
519 | sr |= CXL_PSL_SR_An_SF; | ||
520 | cxl_p2n_write(afu, CXL_PSL_PID_TID_An, (u64)current->pid << 32); | ||
521 | cxl_p1n_write(afu, CXL_PSL_SR_An, sr); | ||
522 | |||
523 | if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1))) | ||
524 | return rc; | ||
525 | |||
526 | cxl_prefault(ctx, wed); | ||
527 | |||
528 | cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, | ||
529 | (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | | ||
530 | (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | | ||
531 | (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | | ||
532 | ((u64)ctx->irqs.offset[3] & 0xffff)); | ||
533 | cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64) | ||
534 | (((u64)ctx->irqs.range[0] & 0xffff) << 48) | | ||
535 | (((u64)ctx->irqs.range[1] & 0xffff) << 32) | | ||
536 | (((u64)ctx->irqs.range[2] & 0xffff) << 16) | | ||
537 | ((u64)ctx->irqs.range[3] & 0xffff)); | ||
538 | |||
539 | cxl_p2n_write(afu, CXL_PSL_AMR_An, amr); | ||
540 | |||
541 | /* master only context for dedicated */ | ||
542 | assign_psn_space(ctx); | ||
543 | |||
544 | if ((rc = cxl_afu_reset(afu))) | ||
545 | return rc; | ||
546 | |||
547 | cxl_p2n_write(afu, CXL_PSL_WED_An, wed); | ||
548 | |||
549 | return afu_enable(afu); | ||
550 | } | ||
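The two IVTE writes above pack the four per-range values as 16-bit fields of a single 64-bit register, with range 0 in the most significant halfword. A small sketch of the same packing with hypothetical offsets (kernel u64/u16 types assumed):

/*
 * Illustrative only: example_pack_ivte((u16 []){ 0x100, 0x200, 0, 0 })
 * yields 0x0100020000000000ULL, matching the layout written above.
 */
static u64 example_pack_ivte(const u16 vals[4])
{
	return ((u64)(vals[0] & 0xffff) << 48) |
	       ((u64)(vals[1] & 0xffff) << 32) |
	       ((u64)(vals[2] & 0xffff) << 16) |
	        (u64)(vals[3] & 0xffff);
}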
551 | |||
552 | static int deactivate_dedicated_process(struct cxl_afu *afu) | ||
553 | { | ||
554 | dev_info(&afu->dev, "Deactivating dedicated process mode\n"); | ||
555 | |||
556 | afu->current_mode = 0; | ||
557 | afu->num_procs = 0; | ||
558 | |||
559 | cxl_chardev_afu_remove(afu); | ||
560 | |||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode) | ||
565 | { | ||
566 | if (mode == CXL_MODE_DIRECTED) | ||
567 | return deactivate_afu_directed(afu); | ||
568 | if (mode == CXL_MODE_DEDICATED) | ||
569 | return deactivate_dedicated_process(afu); | ||
570 | return 0; | ||
571 | } | ||
572 | |||
573 | int cxl_afu_deactivate_mode(struct cxl_afu *afu) | ||
574 | { | ||
575 | return _cxl_afu_deactivate_mode(afu, afu->current_mode); | ||
576 | } | ||
577 | |||
578 | int cxl_afu_activate_mode(struct cxl_afu *afu, int mode) | ||
579 | { | ||
580 | if (!mode) | ||
581 | return 0; | ||
582 | if (!(mode & afu->modes_supported)) | ||
583 | return -EINVAL; | ||
584 | |||
585 | if (mode == CXL_MODE_DIRECTED) | ||
586 | return activate_afu_directed(afu); | ||
587 | if (mode == CXL_MODE_DEDICATED) | ||
588 | return activate_dedicated_process(afu); | ||
589 | |||
590 | return -EINVAL; | ||
591 | } | ||
592 | |||
593 | int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) | ||
594 | { | ||
595 | ctx->kernel = kernel; | ||
596 | if (ctx->afu->current_mode == CXL_MODE_DIRECTED) | ||
597 | return attach_afu_directed(ctx, wed, amr); | ||
598 | |||
599 | if (ctx->afu->current_mode == CXL_MODE_DEDICATED) | ||
600 | return attach_dedicated(ctx, wed, amr); | ||
601 | |||
602 | return -EINVAL; | ||
603 | } | ||
604 | |||
605 | static inline int detach_process_native_dedicated(struct cxl_context *ctx) | ||
606 | { | ||
607 | cxl_afu_reset(ctx->afu); | ||
608 | cxl_afu_disable(ctx->afu); | ||
609 | cxl_psl_purge(ctx->afu); | ||
610 | return 0; | ||
611 | } | ||
612 | |||
613 | /* | ||
614 | * TODO: handle the case when this is called inside an rcu_read_lock(), which may | ||
615 | * happen when we unbind the driver (ie. cxl_context_detach_all()). Terminate | ||
616 | * & remove use a mutex lock and schedule, which is not good with a lock held. | ||
617 | * May need a version of do_process_element_cmd() that handles outstanding page | ||
618 | * faults synchronously. | ||
619 | */ | ||
620 | static inline int detach_process_native_afu_directed(struct cxl_context *ctx) | ||
621 | { | ||
622 | if (!ctx->pe_inserted) | ||
623 | return 0; | ||
624 | if (terminate_process_element(ctx)) | ||
625 | return -1; | ||
626 | if (remove_process_element(ctx)) | ||
627 | return -1; | ||
628 | |||
629 | return 0; | ||
630 | } | ||
631 | |||
632 | int cxl_detach_process(struct cxl_context *ctx) | ||
633 | { | ||
634 | if (ctx->afu->current_mode == CXL_MODE_DEDICATED) | ||
635 | return detach_process_native_dedicated(ctx); | ||
636 | |||
637 | return detach_process_native_afu_directed(ctx); | ||
638 | } | ||
639 | |||
640 | int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info) | ||
641 | { | ||
642 | u64 pidtid; | ||
643 | |||
644 | info->dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); | ||
645 | info->dar = cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An); | ||
646 | info->dsr = cxl_p2n_read(ctx->afu, CXL_PSL_DSR_An); | ||
647 | pidtid = cxl_p2n_read(ctx->afu, CXL_PSL_PID_TID_An); | ||
648 | info->pid = pidtid >> 32; | ||
649 | info->tid = pidtid & 0xffffffff; | ||
650 | info->afu_err = cxl_p2n_read(ctx->afu, CXL_AFU_ERR_An); | ||
651 | info->errstat = cxl_p2n_read(ctx->afu, CXL_PSL_ErrStat_An); | ||
652 | |||
653 | return 0; | ||
654 | } | ||
655 | |||
656 | static void recover_psl_err(struct cxl_afu *afu, u64 errstat) | ||
657 | { | ||
658 | u64 dsisr; | ||
659 | |||
660 | pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat); | ||
661 | |||
662 | /* Clear PSL_DSISR[PE] */ | ||
663 | dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); | ||
664 | cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE); | ||
665 | |||
666 | /* Write 1s to clear error status bits */ | ||
667 | cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat); | ||
668 | } | ||
669 | |||
670 | int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) | ||
671 | { | ||
672 | if (tfc) | ||
673 | cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc); | ||
674 | if (psl_reset_mask) | ||
675 | recover_psl_err(ctx->afu, psl_reset_mask); | ||
676 | |||
677 | return 0; | ||
678 | } | ||
679 | |||
680 | int cxl_check_error(struct cxl_afu *afu) | ||
681 | { | ||
682 | return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL); | ||
683 | } | ||
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c new file mode 100644 index 000000000000..10c98ab7f46e --- /dev/null +++ b/drivers/misc/cxl/pci.c | |||
@@ -0,0 +1,1000 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/pci_regs.h> | ||
11 | #include <linux/pci_ids.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/sort.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <asm/opal.h> | ||
21 | #include <asm/msi_bitmap.h> | ||
22 | #include <asm/pci-bridge.h> /* for struct pci_controller */ | ||
23 | #include <asm/pnv-pci.h> | ||
24 | |||
25 | #include "cxl.h" | ||
26 | |||
27 | |||
28 | #define CXL_PCI_VSEC_ID 0x1280 | ||
29 | #define CXL_VSEC_MIN_SIZE 0x80 | ||
30 | |||
31 | #define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \ | ||
32 | { \ | ||
33 | pci_read_config_word(dev, vsec + 0x6, dest); \ | ||
34 | *dest >>= 4; \ | ||
35 | } | ||
36 | #define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \ | ||
37 | pci_read_config_byte(dev, vsec + 0x8, dest) | ||
38 | |||
39 | #define CXL_READ_VSEC_STATUS(dev, vsec, dest) \ | ||
40 | pci_read_config_byte(dev, vsec + 0x9, dest) | ||
41 | #define CXL_STATUS_SECOND_PORT 0x80 | ||
42 | #define CXL_STATUS_MSI_X_FULL 0x40 | ||
43 | #define CXL_STATUS_MSI_X_SINGLE 0x20 | ||
44 | #define CXL_STATUS_FLASH_RW 0x08 | ||
45 | #define CXL_STATUS_FLASH_RO 0x04 | ||
46 | #define CXL_STATUS_LOADABLE_AFU 0x02 | ||
47 | #define CXL_STATUS_LOADABLE_PSL 0x01 | ||
48 | /* If we see these features we won't try to use the card */ | ||
49 | #define CXL_UNSUPPORTED_FEATURES \ | ||
50 | (CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE) | ||
51 | |||
52 | #define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \ | ||
53 | pci_read_config_byte(dev, vsec + 0xa, dest) | ||
54 | #define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \ | ||
55 | pci_write_config_byte(dev, vsec + 0xa, val) | ||
56 | #define CXL_VSEC_PROTOCOL_MASK 0xe0 | ||
57 | #define CXL_VSEC_PROTOCOL_1024TB 0x80 | ||
58 | #define CXL_VSEC_PROTOCOL_512TB 0x40 | ||
59 | #define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8 uses this */ | ||
60 | #define CXL_VSEC_PROTOCOL_ENABLE 0x01 | ||
61 | |||
62 | #define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \ | ||
63 | pci_read_config_word(dev, vsec + 0xc, dest) | ||
64 | #define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \ | ||
65 | pci_read_config_byte(dev, vsec + 0xe, dest) | ||
66 | #define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \ | ||
67 | pci_read_config_byte(dev, vsec + 0xf, dest) | ||
68 | #define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \ | ||
69 | pci_read_config_word(dev, vsec + 0x10, dest) | ||
70 | |||
71 | #define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \ | ||
72 | pci_read_config_byte(dev, vsec + 0x13, dest) | ||
73 | #define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \ | ||
74 | pci_write_config_byte(dev, vsec + 0x13, val) | ||
75 | #define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */ | ||
76 | #define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */ | ||
77 | #define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */ | ||
78 | |||
79 | #define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \ | ||
80 | pci_read_config_dword(dev, vsec + 0x20, dest) | ||
81 | #define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \ | ||
82 | pci_read_config_dword(dev, vsec + 0x24, dest) | ||
83 | #define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \ | ||
84 | pci_read_config_dword(dev, vsec + 0x28, dest) | ||
85 | #define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \ | ||
86 | pci_read_config_dword(dev, vsec + 0x2c, dest) | ||
87 | |||
88 | |||
89 | /* This works a little differently from the p1/p2 register accesses to make it | ||
90 | * easier to pull out individual fields */ | ||
91 | #define AFUD_READ(afu, off) in_be64(afu->afu_desc_mmio + off) | ||
92 | #define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit))) | ||
93 | #define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be)) | ||
94 | |||
95 | #define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0) | ||
96 | #define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15) | ||
97 | #define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31) | ||
98 | #define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47) | ||
99 | #define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48) | ||
100 | #define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55) | ||
101 | #define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59) | ||
102 | #define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61) | ||
103 | #define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63) | ||
104 | #define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20) | ||
105 | #define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) | ||
106 | #define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28) | ||
107 | #define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30) | ||
108 | #define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6) | ||
109 | #define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7) | ||
110 | #define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) | ||
111 | #define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38) | ||
112 | #define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40) | ||
113 | #define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63) | ||
114 | #define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48) | ||
115 | |||
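The extract helpers above use IBM bit numbering, where bit 0 is the most significant bit of the 64-bit descriptor word, so a field documented as bits 16:31 ends up as an ordinary shift-and-mask. A self-contained sketch of the same convention, using a local mask computation rather than the kernel's PPC_BITMASK macros (kernel u64 type assumed):

/*
 * Illustrative only: IBM (MSB = bit 0) field extraction.
 * example_extract_ibm_bits(0x0123456789abcdefULL, 16, 31) == 0x4567,
 * i.e. the same as (val >> 32) & 0xffff.
 */
static u64 example_extract_ibm_bits(u64 val, int bs, int be)
{
	u64 mask = (~0ULL >> bs) & (~0ULL << (63 - be));

	return (val & mask) >> (63 - be);
}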
116 | static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = { | ||
117 | { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), }, | ||
118 | { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), }, | ||
119 | { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), }, | ||
120 | { PCI_DEVICE_CLASS(0x120000, ~0), }, | ||
121 | |||
122 | { } | ||
123 | }; | ||
124 | MODULE_DEVICE_TABLE(pci, cxl_pci_tbl); | ||
125 | |||
126 | |||
127 | /* | ||
128 | * Mostly using these wrappers to avoid confusion: | ||
129 | * priv 1 is BAR2, while priv 2 is BAR0 | ||
130 | */ | ||
131 | static inline resource_size_t p1_base(struct pci_dev *dev) | ||
132 | { | ||
133 | return pci_resource_start(dev, 2); | ||
134 | } | ||
135 | |||
136 | static inline resource_size_t p1_size(struct pci_dev *dev) | ||
137 | { | ||
138 | return pci_resource_len(dev, 2); | ||
139 | } | ||
140 | |||
141 | static inline resource_size_t p2_base(struct pci_dev *dev) | ||
142 | { | ||
143 | return pci_resource_start(dev, 0); | ||
144 | } | ||
145 | |||
146 | static inline resource_size_t p2_size(struct pci_dev *dev) | ||
147 | { | ||
148 | return pci_resource_len(dev, 0); | ||
149 | } | ||
150 | |||
151 | static int find_cxl_vsec(struct pci_dev *dev) | ||
152 | { | ||
153 | int vsec = 0; | ||
154 | u16 val; | ||
155 | |||
156 | while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) { | ||
157 | pci_read_config_word(dev, vsec + 0x4, &val); | ||
158 | if (val == CXL_PCI_VSEC_ID) | ||
159 | return vsec; | ||
160 | } | ||
161 | return 0; | ||
162 | |||
163 | } | ||
164 | |||
165 | static void dump_cxl_config_space(struct pci_dev *dev) | ||
166 | { | ||
167 | int vsec; | ||
168 | u32 val; | ||
169 | |||
170 | dev_info(&dev->dev, "dump_cxl_config_space\n"); | ||
171 | |||
172 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val); | ||
173 | dev_info(&dev->dev, "BAR0: %#.8x\n", val); | ||
174 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val); | ||
175 | dev_info(&dev->dev, "BAR1: %#.8x\n", val); | ||
176 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val); | ||
177 | dev_info(&dev->dev, "BAR2: %#.8x\n", val); | ||
178 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val); | ||
179 | dev_info(&dev->dev, "BAR3: %#.8x\n", val); | ||
180 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val); | ||
181 | dev_info(&dev->dev, "BAR4: %#.8x\n", val); | ||
182 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val); | ||
183 | dev_info(&dev->dev, "BAR5: %#.8x\n", val); | ||
184 | |||
185 | dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n", | ||
186 | p1_base(dev), p1_size(dev)); | ||
187 | dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n", | ||
188 | p2_base(dev), p2_size(dev)); | ||
189 | dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n", | ||
190 | pci_resource_start(dev, 4), pci_resource_len(dev, 4)); | ||
191 | |||
192 | if (!(vsec = find_cxl_vsec(dev))) | ||
193 | return; | ||
194 | |||
195 | #define show_reg(name, what) \ | ||
196 | dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what) | ||
197 | |||
198 | pci_read_config_dword(dev, vsec + 0x0, &val); | ||
199 | show_reg("Cap ID", (val >> 0) & 0xffff); | ||
200 | show_reg("Cap Ver", (val >> 16) & 0xf); | ||
201 | show_reg("Next Cap Ptr", (val >> 20) & 0xfff); | ||
202 | pci_read_config_dword(dev, vsec + 0x4, &val); | ||
203 | show_reg("VSEC ID", (val >> 0) & 0xffff); | ||
204 | show_reg("VSEC Rev", (val >> 16) & 0xf); | ||
205 | show_reg("VSEC Length", (val >> 20) & 0xfff); | ||
206 | pci_read_config_dword(dev, vsec + 0x8, &val); | ||
207 | show_reg("Num AFUs", (val >> 0) & 0xff); | ||
208 | show_reg("Status", (val >> 8) & 0xff); | ||
209 | show_reg("Mode Control", (val >> 16) & 0xff); | ||
210 | show_reg("Reserved", (val >> 24) & 0xff); | ||
211 | pci_read_config_dword(dev, vsec + 0xc, &val); | ||
212 | show_reg("PSL Rev", (val >> 0) & 0xffff); | ||
213 | show_reg("CAIA Ver", (val >> 16) & 0xffff); | ||
214 | pci_read_config_dword(dev, vsec + 0x10, &val); | ||
215 | show_reg("Base Image Rev", (val >> 0) & 0xffff); | ||
216 | show_reg("Reserved", (val >> 16) & 0x0fff); | ||
217 | show_reg("Image Control", (val >> 28) & 0x3); | ||
218 | show_reg("Reserved", (val >> 30) & 0x1); | ||
219 | show_reg("Image Loaded", (val >> 31) & 0x1); | ||
220 | |||
221 | pci_read_config_dword(dev, vsec + 0x14, &val); | ||
222 | show_reg("Reserved", val); | ||
223 | pci_read_config_dword(dev, vsec + 0x18, &val); | ||
224 | show_reg("Reserved", val); | ||
225 | pci_read_config_dword(dev, vsec + 0x1c, &val); | ||
226 | show_reg("Reserved", val); | ||
227 | |||
228 | pci_read_config_dword(dev, vsec + 0x20, &val); | ||
229 | show_reg("AFU Descriptor Offset", val); | ||
230 | pci_read_config_dword(dev, vsec + 0x24, &val); | ||
231 | show_reg("AFU Descriptor Size", val); | ||
232 | pci_read_config_dword(dev, vsec + 0x28, &val); | ||
233 | show_reg("Problem State Offset", val); | ||
234 | pci_read_config_dword(dev, vsec + 0x2c, &val); | ||
235 | show_reg("Problem State Size", val); | ||
236 | |||
237 | pci_read_config_dword(dev, vsec + 0x30, &val); | ||
238 | show_reg("Reserved", val); | ||
239 | pci_read_config_dword(dev, vsec + 0x34, &val); | ||
240 | show_reg("Reserved", val); | ||
241 | pci_read_config_dword(dev, vsec + 0x38, &val); | ||
242 | show_reg("Reserved", val); | ||
243 | pci_read_config_dword(dev, vsec + 0x3c, &val); | ||
244 | show_reg("Reserved", val); | ||
245 | |||
246 | pci_read_config_dword(dev, vsec + 0x40, &val); | ||
247 | show_reg("PSL Programming Port", val); | ||
248 | pci_read_config_dword(dev, vsec + 0x44, &val); | ||
249 | show_reg("PSL Programming Control", val); | ||
250 | |||
251 | pci_read_config_dword(dev, vsec + 0x48, &val); | ||
252 | show_reg("Reserved", val); | ||
253 | pci_read_config_dword(dev, vsec + 0x4c, &val); | ||
254 | show_reg("Reserved", val); | ||
255 | |||
256 | pci_read_config_dword(dev, vsec + 0x50, &val); | ||
257 | show_reg("Flash Address Register", val); | ||
258 | pci_read_config_dword(dev, vsec + 0x54, &val); | ||
259 | show_reg("Flash Size Register", val); | ||
260 | pci_read_config_dword(dev, vsec + 0x58, &val); | ||
261 | show_reg("Flash Status/Control Register", val); | ||
262 | pci_read_config_dword(dev, vsec + 0x58, &val); | ||
263 | show_reg("Flash Data Port", val); | ||
264 | |||
265 | #undef show_reg | ||
266 | } | ||
267 | |||
268 | static void dump_afu_descriptor(struct cxl_afu *afu) | ||
269 | { | ||
270 | u64 val; | ||
271 | |||
272 | #define show_reg(name, what) \ | ||
273 | dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what) | ||
274 | |||
275 | val = AFUD_READ_INFO(afu); | ||
276 | show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val)); | ||
277 | show_reg("num_of_processes", AFUD_NUM_PROCS(val)); | ||
278 | show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val)); | ||
279 | show_reg("req_prog_mode", val & 0xffffULL); | ||
280 | |||
281 | val = AFUD_READ(afu, 0x8); | ||
282 | show_reg("Reserved", val); | ||
283 | val = AFUD_READ(afu, 0x10); | ||
284 | show_reg("Reserved", val); | ||
285 | val = AFUD_READ(afu, 0x18); | ||
286 | show_reg("Reserved", val); | ||
287 | |||
288 | val = AFUD_READ_CR(afu); | ||
289 | show_reg("Reserved", (val >> (63-7)) & 0xff); | ||
290 | show_reg("AFU_CR_len", AFUD_CR_LEN(val)); | ||
291 | |||
292 | val = AFUD_READ_CR_OFF(afu); | ||
293 | show_reg("AFU_CR_offset", val); | ||
294 | |||
295 | val = AFUD_READ_PPPSA(afu); | ||
296 | show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff); | ||
297 | show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val)); | ||
298 | |||
299 | val = AFUD_READ_PPPSA_OFF(afu); | ||
300 | show_reg("PerProcessPSA_offset", val); | ||
301 | |||
302 | val = AFUD_READ_EB(afu); | ||
303 | show_reg("Reserved", (val >> (63-7)) & 0xff); | ||
304 | show_reg("AFU_EB_len", AFUD_EB_LEN(val)); | ||
305 | |||
306 | val = AFUD_READ_EB_OFF(afu); | ||
307 | show_reg("AFU_EB_offset", val); | ||
308 | |||
309 | #undef show_reg | ||
310 | } | ||
311 | |||
312 | static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev) | ||
313 | { | ||
314 | struct device_node *np; | ||
315 | const __be32 *prop; | ||
316 | u64 psl_dsnctl; | ||
317 | u64 chipid; | ||
318 | |||
319 | if (!(np = pnv_pci_to_phb_node(dev))) | ||
320 | return -ENODEV; | ||
321 | |||
322 | while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL))) | ||
323 | np = of_get_next_parent(np); | ||
324 | if (!np) | ||
325 | return -ENODEV; | ||
326 | chipid = be32_to_cpup(prop); | ||
327 | of_node_put(np); | ||
328 | |||
329 | /* Tell PSL where to route data to */ | ||
330 | psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5)); | ||
331 | cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl); | ||
332 | cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL); | ||
333 | /* snoop write mask */ | ||
334 | cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL); | ||
335 | /* set fir_accum */ | ||
336 | cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL); | ||
337 | /* for debugging with trace arrays */ | ||
338 | cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL); | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | |||
343 | static int init_implementation_afu_regs(struct cxl_afu *afu) | ||
344 | { | ||
345 | /* read/write masks for this slice */ | ||
346 | cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL); | ||
347 | /* APC read/write masks for this slice */ | ||
348 | cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL); | ||
349 | /* for debugging with trace arrays */ | ||
350 | cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL); | ||
351 | cxl_p1n_write(afu, CXL_PSL_RXCTL_A, 0xF000000000000000ULL); | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, | ||
357 | unsigned int virq) | ||
358 | { | ||
359 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | ||
360 | |||
361 | return pnv_cxl_ioda_msi_setup(dev, hwirq, virq); | ||
362 | } | ||
363 | |||
364 | int cxl_alloc_one_irq(struct cxl *adapter) | ||
365 | { | ||
366 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | ||
367 | |||
368 | return pnv_cxl_alloc_hwirqs(dev, 1); | ||
369 | } | ||
370 | |||
371 | void cxl_release_one_irq(struct cxl *adapter, int hwirq) | ||
372 | { | ||
373 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | ||
374 | |||
375 | return pnv_cxl_release_hwirqs(dev, hwirq, 1); | ||
376 | } | ||
377 | |||
378 | int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num) | ||
379 | { | ||
380 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | ||
381 | |||
382 | return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num); | ||
383 | } | ||
384 | |||
385 | void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter) | ||
386 | { | ||
387 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); | ||
388 | |||
389 | pnv_cxl_release_hwirq_ranges(irqs, dev); | ||
390 | } | ||
391 | |||
392 | static int setup_cxl_bars(struct pci_dev *dev) | ||
393 | { | ||
394 | /* Safety check in case we get backported to < 3.17 without M64 */ | ||
395 | if ((p1_base(dev) < 0x100000000ULL) || | ||
396 | (p2_base(dev) < 0x100000000ULL)) { | ||
397 | dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n"); | ||
398 | return -ENODEV; | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * BAR 4/5 has a special meaning for CXL and must be programmed with a | ||
403 | * special value corresponding to the CXL protocol address range. | ||
404 | * For POWER 8 that means bits 48:49 must be set to 10 | ||
405 | */ | ||
406 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000); | ||
407 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000); | ||
408 | |||
409 | return 0; | ||
410 | } | ||
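Writing 0x00020000 to the upper half of the BAR 4/5 pair is what produces the special protocol value: the combined 64-bit BAR becomes 0x00020000ULL << 32 = 0x0002000000000000, i.e. 1ULL << 49. A one-line, purely illustrative restatement of that arithmetic:

/* Illustrative only: the special CXL value programmed into BAR 4/5 above. */
static u64 example_cxl_bar45_value(void)
{
	return (u64)0x00020000 << 32;	/* == 0x0002000000000000 == 1ULL << 49 */
}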
411 | |||
412 | /* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */ | ||
413 | static int switch_card_to_cxl(struct pci_dev *dev) | ||
414 | { | ||
415 | int vsec; | ||
416 | u8 val; | ||
417 | int rc; | ||
418 | |||
419 | dev_info(&dev->dev, "switch card to CXL\n"); | ||
420 | |||
421 | if (!(vsec = find_cxl_vsec(dev))) { | ||
422 | dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); | ||
423 | return -ENODEV; | ||
424 | } | ||
425 | |||
426 | if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) { | ||
427 | dev_err(&dev->dev, "failed to read current mode control: %i\n", rc); | ||
428 | return rc; | ||
429 | } | ||
430 | val &= ~CXL_VSEC_PROTOCOL_MASK; | ||
431 | val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE; | ||
432 | if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) { | ||
433 | dev_err(&dev->dev, "failed to enable CXL protocol: %i\n", rc); | ||
434 | return rc; | ||
435 | } | ||
436 | /* | ||
437 | * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states | ||
438 | * we must wait 100ms after this mode switch before touching | ||
439 | * PCIe config space. | ||
440 | */ | ||
441 | msleep(100); | ||
442 | |||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev) | ||
447 | { | ||
448 | u64 p1n_base, p2n_base, afu_desc; | ||
449 | const u64 p1n_size = 0x100; | ||
450 | const u64 p2n_size = 0x1000; | ||
451 | |||
452 | p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); | ||
453 | p2n_base = p2_base(dev) + (afu->slice * p2n_size); | ||
454 | afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size)); | ||
455 | afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size); | ||
456 | |||
457 | if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size))) | ||
458 | goto err; | ||
459 | if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size))) | ||
460 | goto err1; | ||
461 | if (afu_desc) { | ||
462 | if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size))) | ||
463 | goto err2; | ||
464 | } | ||
465 | |||
466 | return 0; | ||
467 | err2: | ||
468 | iounmap(afu->p2n_mmio); | ||
469 | err1: | ||
470 | iounmap(afu->p1n_mmio); | ||
471 | err: | ||
472 | dev_err(&afu->dev, "Error mapping AFU MMIO regions\n"); | ||
473 | return -ENOMEM; | ||
474 | } | ||
475 | |||
476 | static void cxl_unmap_slice_regs(struct cxl_afu *afu) | ||
477 | { | ||
478 | 	if (afu->p2n_mmio) | ||
479 | iounmap(afu->p2n_mmio); | ||
480 | if (afu->p1n_mmio) | ||
481 | iounmap(afu->p1n_mmio); | ||
482 | } | ||
483 | |||
484 | static void cxl_release_afu(struct device *dev) | ||
485 | { | ||
486 | struct cxl_afu *afu = to_cxl_afu(dev); | ||
487 | |||
488 | pr_devel("cxl_release_afu\n"); | ||
489 | |||
490 | kfree(afu); | ||
491 | } | ||
492 | |||
493 | static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) | ||
494 | { | ||
495 | struct cxl_afu *afu; | ||
496 | |||
497 | if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL))) | ||
498 | return NULL; | ||
499 | |||
500 | afu->adapter = adapter; | ||
501 | afu->dev.parent = &adapter->dev; | ||
502 | afu->dev.release = cxl_release_afu; | ||
503 | afu->slice = slice; | ||
504 | idr_init(&afu->contexts_idr); | ||
505 | spin_lock_init(&afu->contexts_lock); | ||
506 | spin_lock_init(&afu->afu_cntl_lock); | ||
507 | mutex_init(&afu->spa_mutex); | ||
508 | |||
509 | afu->prefault_mode = CXL_PREFAULT_NONE; | ||
510 | afu->irqs_max = afu->adapter->user_irqs; | ||
511 | |||
512 | return afu; | ||
513 | } | ||
514 | |||
515 | /* Expects AFU struct to have recently been zeroed out */ | ||
516 | static int cxl_read_afu_descriptor(struct cxl_afu *afu) | ||
517 | { | ||
518 | u64 val; | ||
519 | |||
520 | val = AFUD_READ_INFO(afu); | ||
521 | afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val); | ||
522 | afu->max_procs_virtualised = AFUD_NUM_PROCS(val); | ||
523 | |||
524 | if (AFUD_AFU_DIRECTED(val)) | ||
525 | afu->modes_supported |= CXL_MODE_DIRECTED; | ||
526 | if (AFUD_DEDICATED_PROCESS(val)) | ||
527 | afu->modes_supported |= CXL_MODE_DEDICATED; | ||
528 | if (AFUD_TIME_SLICED(val)) | ||
529 | afu->modes_supported |= CXL_MODE_TIME_SLICED; | ||
530 | |||
531 | val = AFUD_READ_PPPSA(afu); | ||
532 | afu->pp_size = AFUD_PPPSA_LEN(val) * 4096; | ||
533 | afu->psa = AFUD_PPPSA_PSA(val); | ||
534 | if ((afu->pp_psa = AFUD_PPPSA_PP(val))) | ||
535 | afu->pp_offset = AFUD_READ_PPPSA_OFF(afu); | ||
536 | |||
537 | return 0; | ||
538 | } | ||
539 | |||
540 | static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu) | ||
541 | { | ||
542 | if (afu->psa && afu->adapter->ps_size < | ||
543 | (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { | ||
544 | dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n"); | ||
545 | return -ENODEV; | ||
546 | } | ||
547 | |||
548 | if (afu->pp_psa && (afu->pp_size < PAGE_SIZE)) | ||
549 | dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!"); | ||
550 | |||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | static int sanitise_afu_regs(struct cxl_afu *afu) | ||
555 | { | ||
556 | u64 reg; | ||
557 | |||
558 | /* | ||
559 | * Clear out any regs that contain either an IVTE or address or may be | ||
560 | * waiting on an acknowledgement to try to be a bit safer as we bring | ||
561 | * it online | ||
562 | */ | ||
563 | reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An); | ||
564 | if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { | ||
565 | dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg); | ||
566 | if (cxl_afu_reset(afu)) | ||
567 | return -EIO; | ||
568 | if (cxl_afu_disable(afu)) | ||
569 | return -EIO; | ||
570 | if (cxl_psl_purge(afu)) | ||
571 | return -EIO; | ||
572 | } | ||
573 | cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000); | ||
574 | cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000); | ||
575 | cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000); | ||
576 | cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000); | ||
577 | cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000); | ||
578 | cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000); | ||
579 | cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000); | ||
580 | cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000); | ||
581 | cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000); | ||
582 | cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000); | ||
583 | cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000); | ||
584 | reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An); | ||
585 | if (reg) { | ||
586 | dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg); | ||
587 | if (reg & CXL_PSL_DSISR_TRANS) | ||
588 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); | ||
589 | else | ||
590 | cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); | ||
591 | } | ||
592 | reg = cxl_p1n_read(afu, CXL_PSL_SERR_An); | ||
593 | if (reg) { | ||
594 | if (reg & ~0xffff) | ||
595 | dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg); | ||
596 | cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff); | ||
597 | } | ||
598 | reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); | ||
599 | if (reg) { | ||
600 | dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg); | ||
601 | cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg); | ||
602 | } | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev) | ||
608 | { | ||
609 | struct cxl_afu *afu; | ||
610 | bool free = true; | ||
611 | int rc; | ||
612 | |||
613 | if (!(afu = cxl_alloc_afu(adapter, slice))) | ||
614 | return -ENOMEM; | ||
615 | |||
616 | if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice))) | ||
617 | goto err1; | ||
618 | |||
619 | if ((rc = cxl_map_slice_regs(afu, adapter, dev))) | ||
620 | goto err1; | ||
621 | |||
622 | if ((rc = sanitise_afu_regs(afu))) | ||
623 | goto err2; | ||
624 | |||
625 | /* We need to reset the AFU before we can read the AFU descriptor */ | ||
626 | if ((rc = cxl_afu_reset(afu))) | ||
627 | goto err2; | ||
628 | |||
629 | if (cxl_verbose) | ||
630 | dump_afu_descriptor(afu); | ||
631 | |||
632 | if ((rc = cxl_read_afu_descriptor(afu))) | ||
633 | goto err2; | ||
634 | |||
635 | if ((rc = cxl_afu_descriptor_looks_ok(afu))) | ||
636 | goto err2; | ||
637 | |||
638 | if ((rc = init_implementation_afu_regs(afu))) | ||
639 | goto err2; | ||
640 | |||
641 | if ((rc = cxl_register_serr_irq(afu))) | ||
642 | goto err2; | ||
643 | |||
644 | if ((rc = cxl_register_psl_irq(afu))) | ||
645 | goto err3; | ||
646 | |||
647 | /* Don't care if this fails */ | ||
648 | cxl_debugfs_afu_add(afu); | ||
649 | |||
650 | /* | ||
651 | * After we call this function we must not free the afu directly, even | ||
652 | * if it returns an error! | ||
653 | */ | ||
654 | if ((rc = cxl_register_afu(afu))) | ||
655 | goto err_put1; | ||
656 | |||
657 | if ((rc = cxl_sysfs_afu_add(afu))) | ||
658 | goto err_put1; | ||
659 | |||
660 | |||
661 | if ((rc = cxl_afu_select_best_mode(afu))) | ||
662 | goto err_put2; | ||
663 | |||
664 | adapter->afu[afu->slice] = afu; | ||
665 | |||
666 | return 0; | ||
667 | |||
668 | err_put2: | ||
669 | cxl_sysfs_afu_remove(afu); | ||
670 | err_put1: | ||
671 | device_unregister(&afu->dev); | ||
672 | free = false; | ||
673 | cxl_debugfs_afu_remove(afu); | ||
674 | cxl_release_psl_irq(afu); | ||
675 | err3: | ||
676 | cxl_release_serr_irq(afu); | ||
677 | err2: | ||
678 | cxl_unmap_slice_regs(afu); | ||
679 | err1: | ||
680 | if (free) | ||
681 | kfree(afu); | ||
682 | return rc; | ||
683 | } | ||
684 | |||
685 | static void cxl_remove_afu(struct cxl_afu *afu) | ||
686 | { | ||
687 | pr_devel("cxl_remove_afu\n"); | ||
688 | |||
689 | if (!afu) | ||
690 | return; | ||
691 | |||
692 | cxl_sysfs_afu_remove(afu); | ||
693 | cxl_debugfs_afu_remove(afu); | ||
694 | |||
695 | spin_lock(&afu->adapter->afu_list_lock); | ||
696 | afu->adapter->afu[afu->slice] = NULL; | ||
697 | spin_unlock(&afu->adapter->afu_list_lock); | ||
698 | |||
699 | cxl_context_detach_all(afu); | ||
700 | cxl_afu_deactivate_mode(afu); | ||
701 | |||
702 | cxl_release_psl_irq(afu); | ||
703 | cxl_release_serr_irq(afu); | ||
704 | cxl_unmap_slice_regs(afu); | ||
705 | |||
706 | device_unregister(&afu->dev); | ||
707 | } | ||
708 | |||
709 | |||
710 | static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev) | ||
711 | { | ||
712 | if (pci_request_region(dev, 2, "priv 2 regs")) | ||
713 | goto err1; | ||
714 | if (pci_request_region(dev, 0, "priv 1 regs")) | ||
715 | goto err2; | ||
716 | |||
717 | pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx", | ||
718 | p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev)); | ||
719 | |||
720 | if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev)))) | ||
721 | goto err3; | ||
722 | |||
723 | if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev)))) | ||
724 | goto err4; | ||
725 | |||
726 | return 0; | ||
727 | |||
728 | err4: | ||
729 | iounmap(adapter->p1_mmio); | ||
730 | adapter->p1_mmio = NULL; | ||
731 | err3: | ||
732 | pci_release_region(dev, 0); | ||
733 | err2: | ||
734 | pci_release_region(dev, 2); | ||
735 | err1: | ||
736 | return -ENOMEM; | ||
737 | } | ||
738 | |||
739 | static void cxl_unmap_adapter_regs(struct cxl *adapter) | ||
740 | { | ||
741 | if (adapter->p1_mmio) | ||
742 | iounmap(adapter->p1_mmio); | ||
743 | if (adapter->p2_mmio) | ||
744 | iounmap(adapter->p2_mmio); | ||
745 | } | ||
746 | |||
747 | static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev) | ||
748 | { | ||
749 | int vsec; | ||
750 | u32 afu_desc_off, afu_desc_size; | ||
751 | u32 ps_off, ps_size; | ||
752 | u16 vseclen; | ||
753 | u8 image_state; | ||
754 | |||
755 | if (!(vsec = find_cxl_vsec(dev))) { | ||
756 | dev_err(&adapter->dev, "ABORTING: CXL VSEC not found!\n"); | ||
757 | return -ENODEV; | ||
758 | } | ||
759 | |||
760 | CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen); | ||
761 | if (vseclen < CXL_VSEC_MIN_SIZE) { | ||
762 | pr_err("ABORTING: CXL VSEC too short\n"); | ||
763 | return -EINVAL; | ||
764 | } | ||
765 | |||
766 | CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status); | ||
767 | CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev); | ||
768 | CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major); | ||
769 | CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor); | ||
770 | CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image); | ||
771 | CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state); | ||
772 | adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); | ||
773 | adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE); | ||
774 | adapter->perst_select_user = !!(image_state & CXL_VSEC_PERST_SELECT_USER); | ||
775 | |||
776 | CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices); | ||
777 | CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off); | ||
778 | CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size); | ||
779 | CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off); | ||
780 | CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size); | ||
781 | |||
782 | /* Convert everything to bytes, because there is NO WAY I'd look at the | ||
783 | * code a month later and forget what units these are in ;-) */ | ||
784 | adapter->ps_off = ps_off * 64 * 1024; | ||
785 | adapter->ps_size = ps_size * 64 * 1024; | ||
786 | adapter->afu_desc_off = afu_desc_off * 64 * 1024; | ||
787 | 	adapter->afu_desc_size = afu_desc_size * 64 * 1024; | ||
788 | |||
789 | /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */ | ||
790 | adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices; | ||
791 | |||
792 | return 0; | ||
793 | } | ||
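The unit conversion and the IRQ budget above are easy to misread later, so here is a small standalone illustration with made-up numbers (the 2048-IRQ and 2-slice values are assumptions, not what any particular card reports):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical values, for illustration only. */
		unsigned int total_irqs = 2048;	/* pnv_cxl_get_irq_count() result */
		unsigned int slices = 2;	/* NAFUs field from the VSEC */

		/* 1 IRQ reserved for the PSL error, plus a slice error and a DSI per AFU. */
		unsigned int user_irqs = total_irqs - 1 - 2 * slices;	/* 2043 */

		/* VSEC offsets and sizes are in 64K units; the driver stores bytes. */
		uint32_t ps_size_64k = 16;
		uint64_t ps_size_bytes = (uint64_t)ps_size_64k * 64 * 1024;	/* 1 MiB */

		printf("user_irqs=%u ps_size=%llu bytes\n",
		       user_irqs, (unsigned long long)ps_size_bytes);
		return 0;
	}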
794 | |||
795 | static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev) | ||
796 | { | ||
797 | if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) | ||
798 | return -EBUSY; | ||
799 | |||
800 | if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) { | ||
801 | dev_err(&adapter->dev, "ABORTING: CXL requires unsupported features\n"); | ||
802 | return -EINVAL; | ||
803 | } | ||
804 | |||
805 | if (!adapter->slices) { | ||
806 | /* Once we support dynamic reprogramming we can use the card if | ||
807 | * it supports loadable AFUs */ | ||
808 | dev_err(&adapter->dev, "ABORTING: Device has no AFUs\n"); | ||
809 | return -EINVAL; | ||
810 | } | ||
811 | |||
812 | if (!adapter->afu_desc_off || !adapter->afu_desc_size) { | ||
813 | dev_err(&adapter->dev, "ABORTING: VSEC shows no AFU descriptors\n"); | ||
814 | return -EINVAL; | ||
815 | } | ||
816 | |||
817 | if (adapter->ps_size > p2_size(dev) - adapter->ps_off) { | ||
818 | dev_err(&adapter->dev, "ABORTING: Problem state size larger than " | ||
819 | "available in BAR2: 0x%llx > 0x%llx\n", | ||
820 | adapter->ps_size, p2_size(dev) - adapter->ps_off); | ||
821 | return -EINVAL; | ||
822 | } | ||
823 | |||
824 | return 0; | ||
825 | } | ||
826 | |||
827 | static void cxl_release_adapter(struct device *dev) | ||
828 | { | ||
829 | struct cxl *adapter = to_cxl_adapter(dev); | ||
830 | |||
831 | pr_devel("cxl_release_adapter\n"); | ||
832 | |||
833 | kfree(adapter); | ||
834 | } | ||
835 | |||
836 | static struct cxl *cxl_alloc_adapter(struct pci_dev *dev) | ||
837 | { | ||
838 | struct cxl *adapter; | ||
839 | |||
840 | if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL))) | ||
841 | return NULL; | ||
842 | |||
843 | adapter->dev.parent = &dev->dev; | ||
844 | adapter->dev.release = cxl_release_adapter; | ||
845 | pci_set_drvdata(dev, adapter); | ||
846 | spin_lock_init(&adapter->afu_list_lock); | ||
847 | |||
848 | return adapter; | ||
849 | } | ||
850 | |||
851 | static int sanitise_adapter_regs(struct cxl *adapter) | ||
852 | { | ||
853 | cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); | ||
854 | return cxl_tlb_slb_invalidate(adapter); | ||
855 | } | ||
856 | |||
857 | static struct cxl *cxl_init_adapter(struct pci_dev *dev) | ||
858 | { | ||
859 | struct cxl *adapter; | ||
860 | bool free = true; | ||
861 | int rc; | ||
862 | |||
863 | |||
864 | if (!(adapter = cxl_alloc_adapter(dev))) | ||
865 | return ERR_PTR(-ENOMEM); | ||
866 | |||
867 | if ((rc = switch_card_to_cxl(dev))) | ||
868 | goto err1; | ||
869 | |||
870 | if ((rc = cxl_alloc_adapter_nr(adapter))) | ||
871 | goto err1; | ||
872 | |||
873 | if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))) | ||
874 | goto err2; | ||
875 | |||
876 | if ((rc = cxl_read_vsec(adapter, dev))) | ||
877 | goto err2; | ||
878 | |||
879 | if ((rc = cxl_vsec_looks_ok(adapter, dev))) | ||
880 | goto err2; | ||
881 | |||
882 | if ((rc = cxl_map_adapter_regs(adapter, dev))) | ||
883 | goto err2; | ||
884 | |||
885 | if ((rc = sanitise_adapter_regs(adapter))) | ||
886 | goto err2; | ||
887 | |||
888 | if ((rc = init_implementation_adapter_regs(adapter, dev))) | ||
889 | goto err3; | ||
890 | |||
891 | if ((rc = pnv_phb_to_cxl(dev))) | ||
892 | goto err3; | ||
893 | |||
894 | if ((rc = cxl_register_psl_err_irq(adapter))) | ||
895 | goto err3; | ||
896 | |||
897 | /* Don't care if this one fails: */ | ||
898 | cxl_debugfs_adapter_add(adapter); | ||
899 | |||
900 | /* | ||
901 | * After we call this function we must not free the adapter directly, | ||
902 | * even if it returns an error! | ||
903 | */ | ||
904 | if ((rc = cxl_register_adapter(adapter))) | ||
905 | goto err_put1; | ||
906 | |||
907 | if ((rc = cxl_sysfs_adapter_add(adapter))) | ||
908 | goto err_put1; | ||
909 | |||
910 | return adapter; | ||
911 | |||
912 | err_put1: | ||
913 | device_unregister(&adapter->dev); | ||
914 | free = false; | ||
915 | cxl_debugfs_adapter_remove(adapter); | ||
916 | cxl_release_psl_err_irq(adapter); | ||
917 | err3: | ||
918 | cxl_unmap_adapter_regs(adapter); | ||
919 | err2: | ||
920 | cxl_remove_adapter_nr(adapter); | ||
921 | err1: | ||
922 | if (free) | ||
923 | kfree(adapter); | ||
924 | return ERR_PTR(rc); | ||
925 | } | ||
926 | |||
927 | static void cxl_remove_adapter(struct cxl *adapter) | ||
928 | { | ||
929 | struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); | ||
930 | |||
931 | 	pr_devel("cxl_remove_adapter\n"); | ||
932 | |||
933 | cxl_sysfs_adapter_remove(adapter); | ||
934 | cxl_debugfs_adapter_remove(adapter); | ||
935 | cxl_release_psl_err_irq(adapter); | ||
936 | cxl_unmap_adapter_regs(adapter); | ||
937 | cxl_remove_adapter_nr(adapter); | ||
938 | |||
939 | device_unregister(&adapter->dev); | ||
940 | |||
941 | pci_release_region(pdev, 0); | ||
942 | pci_release_region(pdev, 2); | ||
943 | pci_disable_device(pdev); | ||
944 | } | ||
945 | |||
946 | static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) | ||
947 | { | ||
948 | struct cxl *adapter; | ||
949 | int slice; | ||
950 | int rc; | ||
951 | |||
952 | pci_dev_get(dev); | ||
953 | |||
954 | if (cxl_verbose) | ||
955 | dump_cxl_config_space(dev); | ||
956 | |||
957 | if ((rc = setup_cxl_bars(dev))) | ||
958 | return rc; | ||
959 | |||
960 | if ((rc = pci_enable_device(dev))) { | ||
961 | dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc); | ||
962 | return rc; | ||
963 | } | ||
964 | |||
965 | adapter = cxl_init_adapter(dev); | ||
966 | if (IS_ERR(adapter)) { | ||
967 | dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter)); | ||
968 | return PTR_ERR(adapter); | ||
969 | } | ||
970 | |||
971 | for (slice = 0; slice < adapter->slices; slice++) { | ||
972 | if ((rc = cxl_init_afu(adapter, slice, dev))) | ||
973 | dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc); | ||
974 | } | ||
975 | |||
976 | return 0; | ||
977 | } | ||
978 | |||
979 | static void cxl_remove(struct pci_dev *dev) | ||
980 | { | ||
981 | struct cxl *adapter = pci_get_drvdata(dev); | ||
982 | int afu; | ||
983 | |||
984 | dev_warn(&dev->dev, "pci remove\n"); | ||
985 | |||
986 | /* | ||
987 | * Lock to prevent someone grabbing a ref through the adapter list as | ||
988 | * we are removing it | ||
989 | */ | ||
990 | for (afu = 0; afu < adapter->slices; afu++) | ||
991 | cxl_remove_afu(adapter->afu[afu]); | ||
992 | cxl_remove_adapter(adapter); | ||
993 | } | ||
994 | |||
995 | struct pci_driver cxl_pci_driver = { | ||
996 | .name = "cxl-pci", | ||
997 | .id_table = cxl_pci_tbl, | ||
998 | .probe = cxl_probe, | ||
999 | .remove = cxl_remove, | ||
1000 | }; | ||
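cxl_pci_driver is only defined here; the registration happens elsewhere in the driver. A hedged sketch of the usual module-init pattern for hooking up a pci_driver like this (hypothetical function names, not code from this merge):

	#include <linux/module.h>
	#include <linux/pci.h>

	extern struct pci_driver cxl_pci_driver;	/* defined above */

	static int __init cxl_pci_init(void)
	{
		return pci_register_driver(&cxl_pci_driver);
	}
	module_init(cxl_pci_init);

	static void __exit cxl_pci_exit(void)
	{
		pci_unregister_driver(&cxl_pci_driver);
	}
	module_exit(cxl_pci_exit);

	MODULE_LICENSE("GPL");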
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c new file mode 100644 index 000000000000..ce7ec06d87d1 --- /dev/null +++ b/drivers/misc/cxl/sysfs.c | |||
@@ -0,0 +1,385 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/device.h> | ||
12 | #include <linux/sysfs.h> | ||
13 | |||
14 | #include "cxl.h" | ||
15 | |||
16 | #define to_afu_chardev_m(d) dev_get_drvdata(d) | ||
17 | |||
18 | /********* Adapter attributes **********************************************/ | ||
19 | |||
20 | static ssize_t caia_version_show(struct device *device, | ||
21 | struct device_attribute *attr, | ||
22 | char *buf) | ||
23 | { | ||
24 | struct cxl *adapter = to_cxl_adapter(device); | ||
25 | |||
26 | return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major, | ||
27 | adapter->caia_minor); | ||
28 | } | ||
29 | |||
30 | static ssize_t psl_revision_show(struct device *device, | ||
31 | struct device_attribute *attr, | ||
32 | char *buf) | ||
33 | { | ||
34 | struct cxl *adapter = to_cxl_adapter(device); | ||
35 | |||
36 | return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev); | ||
37 | } | ||
38 | |||
39 | static ssize_t base_image_show(struct device *device, | ||
40 | struct device_attribute *attr, | ||
41 | char *buf) | ||
42 | { | ||
43 | struct cxl *adapter = to_cxl_adapter(device); | ||
44 | |||
45 | return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image); | ||
46 | } | ||
47 | |||
48 | static ssize_t image_loaded_show(struct device *device, | ||
49 | struct device_attribute *attr, | ||
50 | char *buf) | ||
51 | { | ||
52 | struct cxl *adapter = to_cxl_adapter(device); | ||
53 | |||
54 | if (adapter->user_image_loaded) | ||
55 | return scnprintf(buf, PAGE_SIZE, "user\n"); | ||
56 | return scnprintf(buf, PAGE_SIZE, "factory\n"); | ||
57 | } | ||
58 | |||
59 | static struct device_attribute adapter_attrs[] = { | ||
60 | __ATTR_RO(caia_version), | ||
61 | __ATTR_RO(psl_revision), | ||
62 | __ATTR_RO(base_image), | ||
63 | __ATTR_RO(image_loaded), | ||
64 | }; | ||
65 | |||
66 | |||
67 | /********* AFU master specific attributes **********************************/ | ||
68 | |||
69 | static ssize_t mmio_size_show_master(struct device *device, | ||
70 | struct device_attribute *attr, | ||
71 | char *buf) | ||
72 | { | ||
73 | struct cxl_afu *afu = to_afu_chardev_m(device); | ||
74 | |||
75 | return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size); | ||
76 | } | ||
77 | |||
78 | static ssize_t pp_mmio_off_show(struct device *device, | ||
79 | struct device_attribute *attr, | ||
80 | char *buf) | ||
81 | { | ||
82 | struct cxl_afu *afu = to_afu_chardev_m(device); | ||
83 | |||
84 | return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_offset); | ||
85 | } | ||
86 | |||
87 | static ssize_t pp_mmio_len_show(struct device *device, | ||
88 | struct device_attribute *attr, | ||
89 | char *buf) | ||
90 | { | ||
91 | struct cxl_afu *afu = to_afu_chardev_m(device); | ||
92 | |||
93 | return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size); | ||
94 | } | ||
95 | |||
96 | static struct device_attribute afu_master_attrs[] = { | ||
97 | __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL), | ||
98 | __ATTR_RO(pp_mmio_off), | ||
99 | __ATTR_RO(pp_mmio_len), | ||
100 | }; | ||
101 | |||
102 | |||
103 | /********* AFU attributes **************************************************/ | ||
104 | |||
105 | static ssize_t mmio_size_show(struct device *device, | ||
106 | struct device_attribute *attr, | ||
107 | char *buf) | ||
108 | { | ||
109 | struct cxl_afu *afu = to_cxl_afu(device); | ||
110 | |||
111 | if (afu->pp_size) | ||
112 | return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size); | ||
113 | return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size); | ||
114 | } | ||
115 | |||
116 | static ssize_t reset_store_afu(struct device *device, | ||
117 | struct device_attribute *attr, | ||
118 | const char *buf, size_t count) | ||
119 | { | ||
120 | struct cxl_afu *afu = to_cxl_afu(device); | ||
121 | int rc; | ||
122 | |||
123 | /* Not safe to reset if it is currently in use */ | ||
124 | spin_lock(&afu->contexts_lock); | ||
125 | if (!idr_is_empty(&afu->contexts_idr)) { | ||
126 | rc = -EBUSY; | ||
127 | goto err; | ||
128 | } | ||
129 | |||
130 | if ((rc = cxl_afu_reset(afu))) | ||
131 | goto err; | ||
132 | |||
133 | rc = count; | ||
134 | err: | ||
135 | spin_unlock(&afu->contexts_lock); | ||
136 | return rc; | ||
137 | } | ||
138 | |||
139 | static ssize_t irqs_min_show(struct device *device, | ||
140 | struct device_attribute *attr, | ||
141 | char *buf) | ||
142 | { | ||
143 | struct cxl_afu *afu = to_cxl_afu(device); | ||
144 | |||
145 | return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs); | ||
146 | } | ||
147 | |||
148 | static ssize_t irqs_max_show(struct device *device, | ||
149 | struct device_attribute *attr, | ||
150 | char *buf) | ||
151 | { | ||
152 | struct cxl_afu *afu = to_cxl_afu(device); | ||
153 | |||
154 | return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max); | ||
155 | } | ||
156 | |||
157 | static ssize_t irqs_max_store(struct device *device, | ||
158 | struct device_attribute *attr, | ||
159 | const char *buf, size_t count) | ||
160 | { | ||
161 | struct cxl_afu *afu = to_cxl_afu(device); | ||
162 | ssize_t ret; | ||
163 | int irqs_max; | ||
164 | |||
165 | ret = sscanf(buf, "%i", &irqs_max); | ||
166 | if (ret != 1) | ||
167 | return -EINVAL; | ||
168 | |||
169 | if (irqs_max < afu->pp_irqs) | ||
170 | return -EINVAL; | ||
171 | |||
172 | if (irqs_max > afu->adapter->user_irqs) | ||
173 | return -EINVAL; | ||
174 | |||
175 | afu->irqs_max = irqs_max; | ||
176 | return count; | ||
177 | } | ||
178 | |||
179 | static ssize_t modes_supported_show(struct device *device, | ||
180 | struct device_attribute *attr, char *buf) | ||
181 | { | ||
182 | struct cxl_afu *afu = to_cxl_afu(device); | ||
183 | char *p = buf, *end = buf + PAGE_SIZE; | ||
184 | |||
185 | if (afu->modes_supported & CXL_MODE_DEDICATED) | ||
186 | p += scnprintf(p, end - p, "dedicated_process\n"); | ||
187 | if (afu->modes_supported & CXL_MODE_DIRECTED) | ||
188 | p += scnprintf(p, end - p, "afu_directed\n"); | ||
189 | return (p - buf); | ||
190 | } | ||
191 | |||
192 | static ssize_t prefault_mode_show(struct device *device, | ||
193 | struct device_attribute *attr, | ||
194 | char *buf) | ||
195 | { | ||
196 | struct cxl_afu *afu = to_cxl_afu(device); | ||
197 | |||
198 | switch (afu->prefault_mode) { | ||
199 | case CXL_PREFAULT_WED: | ||
200 | return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n"); | ||
201 | case CXL_PREFAULT_ALL: | ||
202 | return scnprintf(buf, PAGE_SIZE, "all\n"); | ||
203 | default: | ||
204 | return scnprintf(buf, PAGE_SIZE, "none\n"); | ||
205 | } | ||
206 | } | ||
207 | |||
208 | static ssize_t prefault_mode_store(struct device *device, | ||
209 | struct device_attribute *attr, | ||
210 | const char *buf, size_t count) | ||
211 | { | ||
212 | struct cxl_afu *afu = to_cxl_afu(device); | ||
213 | enum prefault_modes mode = -1; | ||
214 | |||
215 | if (!strncmp(buf, "work_element_descriptor", 23)) | ||
216 | mode = CXL_PREFAULT_WED; | ||
217 | if (!strncmp(buf, "all", 3)) | ||
218 | mode = CXL_PREFAULT_ALL; | ||
219 | if (!strncmp(buf, "none", 4)) | ||
220 | mode = CXL_PREFAULT_NONE; | ||
221 | |||
222 | if (mode == -1) | ||
223 | return -EINVAL; | ||
224 | |||
225 | afu->prefault_mode = mode; | ||
226 | return count; | ||
227 | } | ||
228 | |||
229 | static ssize_t mode_show(struct device *device, | ||
230 | struct device_attribute *attr, | ||
231 | char *buf) | ||
232 | { | ||
233 | struct cxl_afu *afu = to_cxl_afu(device); | ||
234 | |||
235 | if (afu->current_mode == CXL_MODE_DEDICATED) | ||
236 | return scnprintf(buf, PAGE_SIZE, "dedicated_process\n"); | ||
237 | if (afu->current_mode == CXL_MODE_DIRECTED) | ||
238 | return scnprintf(buf, PAGE_SIZE, "afu_directed\n"); | ||
239 | return scnprintf(buf, PAGE_SIZE, "none\n"); | ||
240 | } | ||
241 | |||
242 | static ssize_t mode_store(struct device *device, struct device_attribute *attr, | ||
243 | const char *buf, size_t count) | ||
244 | { | ||
245 | struct cxl_afu *afu = to_cxl_afu(device); | ||
246 | int old_mode, mode = -1; | ||
247 | int rc = -EBUSY; | ||
248 | |||
249 | /* can't change this if we have a user */ | ||
250 | spin_lock(&afu->contexts_lock); | ||
251 | if (!idr_is_empty(&afu->contexts_idr)) | ||
252 | goto err; | ||
253 | |||
254 | if (!strncmp(buf, "dedicated_process", 17)) | ||
255 | mode = CXL_MODE_DEDICATED; | ||
256 | if (!strncmp(buf, "afu_directed", 12)) | ||
257 | mode = CXL_MODE_DIRECTED; | ||
258 | if (!strncmp(buf, "none", 4)) | ||
259 | mode = 0; | ||
260 | |||
261 | if (mode == -1) { | ||
262 | rc = -EINVAL; | ||
263 | goto err; | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * cxl_afu_deactivate_mode needs to be done outside the lock, prevent | ||
268 | * other contexts coming in before we are ready: | ||
269 | */ | ||
270 | old_mode = afu->current_mode; | ||
271 | afu->current_mode = 0; | ||
272 | afu->num_procs = 0; | ||
273 | |||
274 | spin_unlock(&afu->contexts_lock); | ||
275 | |||
276 | if ((rc = _cxl_afu_deactivate_mode(afu, old_mode))) | ||
277 | return rc; | ||
278 | if ((rc = cxl_afu_activate_mode(afu, mode))) | ||
279 | return rc; | ||
280 | |||
281 | return count; | ||
282 | err: | ||
283 | spin_unlock(&afu->contexts_lock); | ||
284 | return rc; | ||
285 | } | ||
286 | |||
287 | static ssize_t api_version_show(struct device *device, | ||
288 | struct device_attribute *attr, | ||
289 | char *buf) | ||
290 | { | ||
291 | return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION); | ||
292 | } | ||
293 | |||
294 | static ssize_t api_version_compatible_show(struct device *device, | ||
295 | struct device_attribute *attr, | ||
296 | char *buf) | ||
297 | { | ||
298 | return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE); | ||
299 | } | ||
300 | |||
301 | static struct device_attribute afu_attrs[] = { | ||
302 | __ATTR_RO(mmio_size), | ||
303 | __ATTR_RO(irqs_min), | ||
304 | __ATTR_RW(irqs_max), | ||
305 | __ATTR_RO(modes_supported), | ||
306 | __ATTR_RW(mode), | ||
307 | __ATTR_RW(prefault_mode), | ||
308 | __ATTR_RO(api_version), | ||
309 | __ATTR_RO(api_version_compatible), | ||
310 | __ATTR(reset, S_IWUSR, NULL, reset_store_afu), | ||
311 | }; | ||
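Once registered, each of these attributes appears as a small text file under the device's sysfs directory. A userspace sketch that reads one of them (the /sys path below is an assumption; the real location depends on how the cxl devices are named on a given system):

	#include <stdio.h>

	int main(void)
	{
		char line[64];
		/* Hypothetical path; adjust to where afu0.0 appears on your system. */
		FILE *f = fopen("/sys/class/cxl/afu0.0/mode", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			printf("current AFU mode: %s", line);
		fclose(f);
		return 0;
	}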
312 | |||
313 | |||
314 | |||
315 | int cxl_sysfs_adapter_add(struct cxl *adapter) | ||
316 | { | ||
317 | int i, rc; | ||
318 | |||
319 | for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) { | ||
320 | if ((rc = device_create_file(&adapter->dev, &adapter_attrs[i]))) | ||
321 | goto err; | ||
322 | } | ||
323 | return 0; | ||
324 | err: | ||
325 | for (i--; i >= 0; i--) | ||
326 | device_remove_file(&adapter->dev, &adapter_attrs[i]); | ||
327 | return rc; | ||
328 | } | ||
329 | void cxl_sysfs_adapter_remove(struct cxl *adapter) | ||
330 | { | ||
331 | int i; | ||
332 | |||
333 | for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) | ||
334 | device_remove_file(&adapter->dev, &adapter_attrs[i]); | ||
335 | } | ||
336 | |||
337 | int cxl_sysfs_afu_add(struct cxl_afu *afu) | ||
338 | { | ||
339 | int i, rc; | ||
340 | |||
341 | for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { | ||
342 | if ((rc = device_create_file(&afu->dev, &afu_attrs[i]))) | ||
343 | goto err; | ||
344 | } | ||
345 | |||
346 | return 0; | ||
347 | |||
348 | err: | ||
349 | for (i--; i >= 0; i--) | ||
350 | device_remove_file(&afu->dev, &afu_attrs[i]); | ||
351 | return rc; | ||
352 | } | ||
353 | |||
354 | void cxl_sysfs_afu_remove(struct cxl_afu *afu) | ||
355 | { | ||
356 | int i; | ||
357 | |||
358 | for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) | ||
359 | device_remove_file(&afu->dev, &afu_attrs[i]); | ||
360 | } | ||
361 | |||
362 | int cxl_sysfs_afu_m_add(struct cxl_afu *afu) | ||
363 | { | ||
364 | int i, rc; | ||
365 | |||
366 | for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) { | ||
367 | if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i]))) | ||
368 | goto err; | ||
369 | } | ||
370 | |||
371 | return 0; | ||
372 | |||
373 | err: | ||
374 | for (i--; i >= 0; i--) | ||
375 | device_remove_file(afu->chardev_m, &afu_master_attrs[i]); | ||
376 | return rc; | ||
377 | } | ||
378 | |||
379 | void cxl_sysfs_afu_m_remove(struct cxl_afu *afu) | ||
380 | { | ||
381 | int i; | ||
382 | |||
383 | for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) | ||
384 | device_remove_file(afu->chardev_m, &afu_master_attrs[i]); | ||
385 | } | ||
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c index 5618b5fc7500..f575a9b5ede7 100644 --- a/drivers/tty/hvc/hvc_vio.c +++ b/drivers/tty/hvc/hvc_vio.c | |||
@@ -452,7 +452,7 @@ void __init hvc_vio_init_early(void) | |||
452 | return; | 452 | return; |
453 | #endif | 453 | #endif |
454 | /* Check whether the user has requested a different console. */ | 454 | /* Check whether the user has requested a different console. */ |
455 | if (!strstr(cmd_line, "console=")) | 455 | if (!strstr(boot_command_line, "console=")) |
456 | add_preferred_console("hvc", 0, NULL); | 456 | add_preferred_console("hvc", 0, NULL); |
457 | hvc_instantiate(0, 0, ops); | 457 | hvc_instantiate(0, 0, ops); |
458 | } | 458 | } |
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index f49ddb1b2273..84d60cb841b1 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h | |||
@@ -781,13 +781,13 @@ struct fsl_ifc_regs { | |||
781 | __be32 amask; | 781 | __be32 amask; |
782 | u32 res4[0x2]; | 782 | u32 res4[0x2]; |
783 | } amask_cs[FSL_IFC_BANK_COUNT]; | 783 | } amask_cs[FSL_IFC_BANK_COUNT]; |
784 | u32 res5[0x17]; | 784 | u32 res5[0x18]; |
785 | struct { | 785 | struct { |
786 | __be32 csor_ext; | ||
787 | __be32 csor; | 786 | __be32 csor; |
787 | __be32 csor_ext; | ||
788 | u32 res6; | 788 | u32 res6; |
789 | } csor_cs[FSL_IFC_BANK_COUNT]; | 789 | } csor_cs[FSL_IFC_BANK_COUNT]; |
790 | u32 res7[0x19]; | 790 | u32 res7[0x18]; |
791 | struct { | 791 | struct { |
792 | __be32 ftim[4]; | 792 | __be32 ftim[4]; |
793 | u32 res8[0x8]; | 793 | u32 res8[0x8]; |
diff --git a/include/misc/cxl.h b/include/misc/cxl.h new file mode 100644 index 000000000000..975cc7861f18 --- /dev/null +++ b/include/misc/cxl.h | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _MISC_CXL_H | ||
11 | #define _MISC_CXL_H | ||
12 | |||
13 | #ifdef CONFIG_CXL_BASE | ||
14 | |||
15 | #define CXL_IRQ_RANGES 4 | ||
16 | |||
17 | struct cxl_irq_ranges { | ||
18 | irq_hw_number_t offset[CXL_IRQ_RANGES]; | ||
19 | irq_hw_number_t range[CXL_IRQ_RANGES]; | ||
20 | }; | ||
21 | |||
22 | extern atomic_t cxl_use_count; | ||
23 | |||
24 | static inline bool cxl_ctx_in_use(void) | ||
25 | { | ||
26 | return (atomic_read(&cxl_use_count) != 0); | ||
27 | } | ||
28 | |||
29 | static inline void cxl_ctx_get(void) | ||
30 | { | ||
31 | atomic_inc(&cxl_use_count); | ||
32 | } | ||
33 | |||
34 | static inline void cxl_ctx_put(void) | ||
35 | { | ||
36 | atomic_dec(&cxl_use_count); | ||
37 | } | ||
38 | |||
39 | void cxl_slbia(struct mm_struct *mm); | ||
40 | |||
41 | #else /* CONFIG_CXL_BASE */ | ||
42 | |||
43 | static inline bool cxl_ctx_in_use(void) { return false; } | ||
44 | static inline void cxl_slbia(struct mm_struct *mm) {} | ||
45 | |||
46 | #endif /* CONFIG_CXL_BASE */ | ||
47 | |||
48 | #endif | ||
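The three inline helpers form a simple global use count: code that brings a cxl context to life bumps it, and hot paths elsewhere can cheaply skip cxl-only work while it is zero. A hypothetical caller, purely to show the intended pairing of the helpers (not code from this merge):

	/* Hypothetical users of the helpers declared above. */
	static void example_context_start(void)
	{
		cxl_ctx_get();		/* a cxl context is now live */
		/* ... set the context up ... */
	}

	static void example_context_stop(void)
	{
		/* ... tear the context down ... */
		cxl_ctx_put();		/* balance the earlier get */
	}

	static void example_mm_hook(struct mm_struct *mm)
	{
		/* Fast path: skip the notification when no contexts exist. */
		if (cxl_ctx_in_use())
			cxl_slbia(mm);
	}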
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild index 81d2106287fe..245aa6e05e6a 100644 --- a/include/uapi/Kbuild +++ b/include/uapi/Kbuild | |||
@@ -12,3 +12,4 @@ header-y += video/ | |||
12 | header-y += drm/ | 12 | header-y += drm/ |
13 | header-y += xen/ | 13 | header-y += xen/ |
14 | header-y += scsi/ | 14 | header-y += scsi/ |
15 | header-y += misc/ | ||
diff --git a/include/uapi/misc/Kbuild b/include/uapi/misc/Kbuild new file mode 100644 index 000000000000..e96cae7d58c9 --- /dev/null +++ b/include/uapi/misc/Kbuild | |||
@@ -0,0 +1,2 @@ | |||
1 | # misc Header export list | ||
2 | header-y += cxl.h | ||
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h new file mode 100644 index 000000000000..cd6d789b73ec --- /dev/null +++ b/include/uapi/misc/cxl.h | |||
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * Copyright 2014 IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _UAPI_MISC_CXL_H | ||
11 | #define _UAPI_MISC_CXL_H | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/ioctl.h> | ||
15 | |||
16 | |||
17 | struct cxl_ioctl_start_work { | ||
18 | __u64 flags; | ||
19 | __u64 work_element_descriptor; | ||
20 | __u64 amr; | ||
21 | __s16 num_interrupts; | ||
22 | __s16 reserved1; | ||
23 | __s32 reserved2; | ||
24 | __u64 reserved3; | ||
25 | __u64 reserved4; | ||
26 | __u64 reserved5; | ||
27 | __u64 reserved6; | ||
28 | }; | ||
29 | |||
30 | #define CXL_START_WORK_AMR 0x0000000000000001ULL | ||
31 | #define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL | ||
32 | #define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\ | ||
33 | CXL_START_WORK_NUM_IRQS) | ||
34 | |||
35 | /* ioctl numbers */ | ||
36 | #define CXL_MAGIC 0xCA | ||
37 | #define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work) | ||
38 | #define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32) | ||
39 | |||
40 | #define CXL_READ_MIN_SIZE 0x1000 /* 4K */ | ||
41 | |||
42 | /* Events from read() */ | ||
43 | enum cxl_event_type { | ||
44 | CXL_EVENT_RESERVED = 0, | ||
45 | CXL_EVENT_AFU_INTERRUPT = 1, | ||
46 | CXL_EVENT_DATA_STORAGE = 2, | ||
47 | CXL_EVENT_AFU_ERROR = 3, | ||
48 | }; | ||
49 | |||
50 | struct cxl_event_header { | ||
51 | __u16 type; | ||
52 | __u16 size; | ||
53 | __u16 process_element; | ||
54 | __u16 reserved1; | ||
55 | }; | ||
56 | |||
57 | struct cxl_event_afu_interrupt { | ||
58 | __u16 flags; | ||
59 | __u16 irq; /* Raised AFU interrupt number */ | ||
60 | __u32 reserved1; | ||
61 | }; | ||
62 | |||
63 | struct cxl_event_data_storage { | ||
64 | __u16 flags; | ||
65 | __u16 reserved1; | ||
66 | __u32 reserved2; | ||
67 | __u64 addr; | ||
68 | __u64 dsisr; | ||
69 | __u64 reserved3; | ||
70 | }; | ||
71 | |||
72 | struct cxl_event_afu_error { | ||
73 | __u16 flags; | ||
74 | __u16 reserved1; | ||
75 | __u32 reserved2; | ||
76 | __u64 error; | ||
77 | }; | ||
78 | |||
79 | struct cxl_event { | ||
80 | struct cxl_event_header header; | ||
81 | union { | ||
82 | struct cxl_event_afu_interrupt irq; | ||
83 | struct cxl_event_data_storage fault; | ||
84 | struct cxl_event_afu_error afu_error; | ||
85 | }; | ||
86 | }; | ||
87 | |||
88 | #endif /* _UAPI_MISC_CXL_H */ | ||
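A minimal userspace sketch of the call sequence this ABI implies: open the AFU's character device, start work with the ioctl, then read events into a buffer of at least CXL_READ_MIN_SIZE bytes. The device node path and the zero WED are assumptions; only the structures, constants and ioctl numbers come from the header above.

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <misc/cxl.h>		/* the uapi header above, after headers_install */

	int main(void)
	{
		struct cxl_ioctl_start_work work;
		char buf[CXL_READ_MIN_SIZE];
		struct cxl_event *ev = (struct cxl_event *)buf;
		__u32 pe;
		ssize_t len;
		int fd;

		/* Hypothetical device node; the real name depends on the system. */
		fd = open("/dev/cxl/afu0.0", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&work, 0, sizeof(work));
		work.work_element_descriptor = 0;	/* AFU-specific WED (assumed 0 here) */

		if (ioctl(fd, CXL_IOCTL_START_WORK, &work)) {
			perror("CXL_IOCTL_START_WORK");
			return 1;
		}
		if (ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe) == 0)
			printf("process element: %u\n", pe);

		/* Reads need a buffer of at least CXL_READ_MIN_SIZE bytes. */
		len = read(fd, buf, sizeof(buf));
		if (len >= (ssize_t)sizeof(struct cxl_event_header))
			printf("event type %u, size %u\n",
			       ev->header.type, ev->header.size);

		close(fd);
		return 0;
	}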
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 74a78cedce37..f6ff90a76bd7 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile | |||
@@ -13,7 +13,7 @@ CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CUR | |||
13 | 13 | ||
14 | export CC CFLAGS | 14 | export CC CFLAGS |
15 | 15 | ||
16 | TARGETS = pmu copyloops mm tm | 16 | TARGETS = pmu copyloops mm tm primitives |
17 | 17 | ||
18 | endif | 18 | endif |
19 | 19 | ||
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile new file mode 100644 index 000000000000..ea737ca01732 --- /dev/null +++ b/tools/testing/selftests/powerpc/primitives/Makefile | |||
@@ -0,0 +1,17 @@ | |||
1 | CFLAGS += -I$(CURDIR) | ||
2 | |||
3 | PROGS := load_unaligned_zeropad | ||
4 | |||
5 | all: $(PROGS) | ||
6 | |||
7 | $(PROGS): ../harness.c | ||
8 | |||
9 | run_tests: all | ||
10 | @-for PROG in $(PROGS); do \ | ||
11 | ./$$PROG; \ | ||
12 | done; | ||
13 | |||
14 | clean: | ||
15 | rm -f $(PROGS) *.o | ||
16 | |||
17 | .PHONY: all run_tests clean | ||
diff --git a/tools/testing/selftests/powerpc/primitives/asm/asm-compat.h b/tools/testing/selftests/powerpc/primitives/asm/asm-compat.h new file mode 120000 index 000000000000..b14255e15a25 --- /dev/null +++ b/tools/testing/selftests/powerpc/primitives/asm/asm-compat.h | |||
@@ -0,0 +1 @@ | |||
../.././../../../../arch/powerpc/include/asm/asm-compat.h \ No newline at end of file | |||
diff --git a/tools/testing/selftests/powerpc/primitives/asm/ppc-opcode.h b/tools/testing/selftests/powerpc/primitives/asm/ppc-opcode.h new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/tools/testing/selftests/powerpc/primitives/asm/ppc-opcode.h | |||
diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c new file mode 100644 index 000000000000..d1b647509596 --- /dev/null +++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * Userspace test harness for load_unaligned_zeropad. Creates two | ||
3 | * pages and uses mprotect to prevent access to the second page and | ||
4 | * a SEGV handler that walks the exception tables and runs the fixup | ||
5 | * routine. | ||
6 | * | ||
7 |  * The results are compared against a normal load that is | ||
8 | * performed while access to the second page is enabled via mprotect. | ||
9 | * | ||
10 | * Copyright (C) 2014 Anton Blanchard <anton@au.ibm.com>, IBM | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <stdlib.h> | ||
19 | #include <string.h> | ||
20 | #include <stdio.h> | ||
21 | #include <stdbool.h> | ||
22 | #include <signal.h> | ||
23 | #include <unistd.h> | ||
24 | #include <sys/mman.h> | ||
25 | |||
26 | #define FIXUP_SECTION ".ex_fixup" | ||
27 | |||
28 | #include "word-at-a-time.h" | ||
29 | |||
30 | #include "utils.h" | ||
31 | |||
32 | |||
33 | static int page_size; | ||
34 | static char *mem_region; | ||
35 | |||
36 | static int protect_region(void) | ||
37 | { | ||
38 | if (mprotect(mem_region + page_size, page_size, PROT_NONE)) { | ||
39 | perror("mprotect"); | ||
40 | return 1; | ||
41 | } | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static int unprotect_region(void) | ||
47 | { | ||
48 | if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) { | ||
49 | perror("mprotect"); | ||
50 | return 1; | ||
51 | } | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | extern char __start___ex_table[]; | ||
57 | extern char __stop___ex_table[]; | ||
58 | |||
59 | #if defined(__powerpc64__) | ||
60 | #define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP] | ||
61 | #elif defined(__powerpc__) | ||
62 | #define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP] | ||
63 | #else | ||
64 | #error implement UCONTEXT_NIA | ||
65 | #endif | ||
66 | |||
67 | static int segv_error; | ||
68 | |||
69 | static void segv_handler(int signr, siginfo_t *info, void *ptr) | ||
70 | { | ||
71 | ucontext_t *uc = (ucontext_t *)ptr; | ||
72 | unsigned long addr = (unsigned long)info->si_addr; | ||
73 | unsigned long *ip = &UCONTEXT_NIA(uc); | ||
74 | unsigned long *ex_p = (unsigned long *)__start___ex_table; | ||
75 | |||
76 | while (ex_p < (unsigned long *)__stop___ex_table) { | ||
77 | unsigned long insn, fixup; | ||
78 | |||
79 | insn = *ex_p++; | ||
80 | fixup = *ex_p++; | ||
81 | |||
82 | if (insn == *ip) { | ||
83 | *ip = fixup; | ||
84 | return; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | printf("No exception table match for NIA %lx ADDR %lx\n", *ip, addr); | ||
89 | segv_error++; | ||
90 | } | ||
91 | |||
92 | static void setup_segv_handler(void) | ||
93 | { | ||
94 | struct sigaction action; | ||
95 | |||
96 | memset(&action, 0, sizeof(action)); | ||
97 | action.sa_sigaction = segv_handler; | ||
98 | action.sa_flags = SA_SIGINFO; | ||
99 | sigaction(SIGSEGV, &action, NULL); | ||
100 | } | ||
101 | |||
102 | static int do_one_test(char *p, int page_offset) | ||
103 | { | ||
104 | unsigned long should; | ||
105 | unsigned long got; | ||
106 | |||
107 | FAIL_IF(unprotect_region()); | ||
108 | should = *(unsigned long *)p; | ||
109 | FAIL_IF(protect_region()); | ||
110 | |||
111 | got = load_unaligned_zeropad(p); | ||
112 | |||
113 | if (should != got) | ||
114 | printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, should); | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | static int test_body(void) | ||
120 | { | ||
121 | unsigned long i; | ||
122 | |||
123 | page_size = getpagesize(); | ||
124 | mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, | ||
125 | MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | ||
126 | |||
127 | FAIL_IF(mem_region == MAP_FAILED); | ||
128 | |||
129 | for (i = 0; i < page_size; i++) | ||
130 | mem_region[i] = i; | ||
131 | |||
132 | memset(mem_region+page_size, 0, page_size); | ||
133 | |||
134 | setup_segv_handler(); | ||
135 | |||
136 | for (i = 0; i < page_size; i++) | ||
137 | FAIL_IF(do_one_test(mem_region+i, i)); | ||
138 | |||
139 | FAIL_IF(segv_error); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | int main(void) | ||
145 | { | ||
146 | return test_harness(test_body, "load_unaligned_zeropad"); | ||
147 | } | ||
diff --git a/tools/testing/selftests/powerpc/primitives/word-at-a-time.h b/tools/testing/selftests/powerpc/primitives/word-at-a-time.h new file mode 120000 index 000000000000..eb74401b591f --- /dev/null +++ b/tools/testing/selftests/powerpc/primitives/word-at-a-time.h | |||
@@ -0,0 +1 @@ | |||
../../../../../arch/powerpc/include/asm/word-at-a-time.h \ No newline at end of file | |||